diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_167_mp_rank_02_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_167_mp_rank_02_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..80ff7496b0c07dd8176f638065299fdbe4919ae6 --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_167_mp_rank_02_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fc987ed520695167ddebae29a47f4077bd142603f44de3aef4b6846c2c0f090 +size 41830404 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_18_mp_rank_03_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_18_mp_rank_03_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..7c0a6c56bd58b232f7c6be5f14156cce9a18f6c1 --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_18_mp_rank_03_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb4911bc865348ac9aab9a6d571cd277d8807896df02eacc888508e6820b868f +size 41830394 diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_30_mp_rank_03_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_30_mp_rank_03_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..a01e698762317bc9c147ff64f0b0b372d198d56b --- /dev/null +++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_30_mp_rank_03_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a4dfbc7fcce9283c5da58af6007fe5fe07d9acd8554edd6f22e23bd8242f5013 +size 6291456 diff --git a/venv/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..79f3bd65ed0af616444936576a389236fd74e172 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84aeafadc72e4c66aed9dd1febc80fe7d0194997 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/__pycache__/_typing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b6280a676e28abad41e63084b107d69d99e62ac Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/__pycache__/_version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/__pycache__/_version_meson.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/__pycache__/_version_meson.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df8cce4b4a641fdea7f6c3f96a9802ba242eed62 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/__pycache__/_version_meson.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fab495eed361f74da10e4753c9fa6b618dc78998 Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/pandas/__pycache__/conftest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19dee368d4f1e4a7bd708899b691710bf97b60ae Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/__pycache__/testing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/__init__.py b/venv/lib/python3.10/site-packages/pandas/_testing/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..361998db8e38bdd5a56cb4e20fa9c80e2f8b1af7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_testing/__init__.py @@ -0,0 +1,638 @@ +from __future__ import annotations + +from decimal import Decimal +import operator +import os +from sys import byteorder +from typing import ( + TYPE_CHECKING, + Callable, + ContextManager, + cast, +) +import warnings + +import numpy as np + +from pandas._config.localization import ( + can_set_locale, + get_locales, + set_locale, +) + +from pandas.compat import pa_version_under10p1 + +from pandas.core.dtypes.common import is_string_dtype + +import pandas as pd +from pandas import ( + ArrowDtype, + DataFrame, + Index, + MultiIndex, + RangeIndex, + Series, +) +from pandas._testing._io import ( + round_trip_localpath, + round_trip_pathlib, + round_trip_pickle, + write_to_compressed, +) +from pandas._testing._warnings import ( + assert_produces_warning, + maybe_produces_warning, +) +from pandas._testing.asserters import ( + assert_almost_equal, + assert_attr_equal, + assert_categorical_equal, + assert_class_equal, + assert_contains_all, + assert_copy, + assert_datetime_array_equal, + assert_dict_equal, + assert_equal, + assert_extension_array_equal, + assert_frame_equal, + assert_index_equal, + assert_indexing_slices_equivalent, + assert_interval_array_equal, + assert_is_sorted, + assert_is_valid_plot_return_object, + assert_metadata_equivalent, + assert_numpy_array_equal, + assert_period_array_equal, + assert_series_equal, + assert_sp_array_equal, + assert_timedelta_array_equal, + raise_assert_detail, +) +from pandas._testing.compat import ( + get_dtype, + get_obj, +) +from pandas._testing.contexts import ( + assert_cow_warning, + decompress_file, + ensure_clean, + raises_chained_assignment_error, + set_timezone, + use_numexpr, + with_csv_dialect, +) +from pandas.core.arrays import ( + BaseMaskedArray, + ExtensionArray, + NumpyExtensionArray, +) +from pandas.core.arrays._mixins import NDArrayBackedExtensionArray +from pandas.core.construction import extract_array + +if TYPE_CHECKING: + from pandas._typing import ( + Dtype, + NpDtype, + ) + + from pandas.core.arrays import ArrowExtensionArray + +UNSIGNED_INT_NUMPY_DTYPES: list[NpDtype] = ["uint8", "uint16", "uint32", "uint64"] +UNSIGNED_INT_EA_DTYPES: list[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"] +SIGNED_INT_NUMPY_DTYPES: list[NpDtype] = [int, "int8", "int16", "int32", "int64"] +SIGNED_INT_EA_DTYPES: list[Dtype] = ["Int8", "Int16", "Int32", "Int64"] +ALL_INT_NUMPY_DTYPES = UNSIGNED_INT_NUMPY_DTYPES + SIGNED_INT_NUMPY_DTYPES +ALL_INT_EA_DTYPES = UNSIGNED_INT_EA_DTYPES + SIGNED_INT_EA_DTYPES +ALL_INT_DTYPES: list[Dtype] = [*ALL_INT_NUMPY_DTYPES, *ALL_INT_EA_DTYPES] + +FLOAT_NUMPY_DTYPES: list[NpDtype] = [float, "float32", "float64"] +FLOAT_EA_DTYPES: list[Dtype] = ["Float32", "Float64"] +ALL_FLOAT_DTYPES: list[Dtype] = 
[*FLOAT_NUMPY_DTYPES, *FLOAT_EA_DTYPES] + +COMPLEX_DTYPES: list[Dtype] = [complex, "complex64", "complex128"] +STRING_DTYPES: list[Dtype] = [str, "str", "U"] + +DATETIME64_DTYPES: list[Dtype] = ["datetime64[ns]", "M8[ns]"] +TIMEDELTA64_DTYPES: list[Dtype] = ["timedelta64[ns]", "m8[ns]"] + +BOOL_DTYPES: list[Dtype] = [bool, "bool"] +BYTES_DTYPES: list[Dtype] = [bytes, "bytes"] +OBJECT_DTYPES: list[Dtype] = [object, "object"] + +ALL_REAL_NUMPY_DTYPES = FLOAT_NUMPY_DTYPES + ALL_INT_NUMPY_DTYPES +ALL_REAL_EXTENSION_DTYPES = FLOAT_EA_DTYPES + ALL_INT_EA_DTYPES +ALL_REAL_DTYPES: list[Dtype] = [*ALL_REAL_NUMPY_DTYPES, *ALL_REAL_EXTENSION_DTYPES] +ALL_NUMERIC_DTYPES: list[Dtype] = [*ALL_REAL_DTYPES, *COMPLEX_DTYPES] + +ALL_NUMPY_DTYPES = ( + ALL_REAL_NUMPY_DTYPES + + COMPLEX_DTYPES + + STRING_DTYPES + + DATETIME64_DTYPES + + TIMEDELTA64_DTYPES + + BOOL_DTYPES + + OBJECT_DTYPES + + BYTES_DTYPES +) + +NARROW_NP_DTYPES = [ + np.float16, + np.float32, + np.int8, + np.int16, + np.int32, + np.uint8, + np.uint16, + np.uint32, +] + +PYTHON_DATA_TYPES = [ + str, + int, + float, + complex, + list, + tuple, + range, + dict, + set, + frozenset, + bool, + bytes, + bytearray, + memoryview, +] + +ENDIAN = {"little": "<", "big": ">"}[byteorder] + +NULL_OBJECTS = [None, np.nan, pd.NaT, float("nan"), pd.NA, Decimal("NaN")] +NP_NAT_OBJECTS = [ + cls("NaT", unit) + for cls in [np.datetime64, np.timedelta64] + for unit in [ + "Y", + "M", + "W", + "D", + "h", + "m", + "s", + "ms", + "us", + "ns", + "ps", + "fs", + "as", + ] +] + +if not pa_version_under10p1: + import pyarrow as pa + + UNSIGNED_INT_PYARROW_DTYPES = [pa.uint8(), pa.uint16(), pa.uint32(), pa.uint64()] + SIGNED_INT_PYARROW_DTYPES = [pa.int8(), pa.int16(), pa.int32(), pa.int64()] + ALL_INT_PYARROW_DTYPES = UNSIGNED_INT_PYARROW_DTYPES + SIGNED_INT_PYARROW_DTYPES + ALL_INT_PYARROW_DTYPES_STR_REPR = [ + str(ArrowDtype(typ)) for typ in ALL_INT_PYARROW_DTYPES + ] + + # pa.float16 doesn't seem supported + # https://github.com/apache/arrow/blob/master/python/pyarrow/src/arrow/python/helpers.cc#L86 + FLOAT_PYARROW_DTYPES = [pa.float32(), pa.float64()] + FLOAT_PYARROW_DTYPES_STR_REPR = [ + str(ArrowDtype(typ)) for typ in FLOAT_PYARROW_DTYPES + ] + DECIMAL_PYARROW_DTYPES = [pa.decimal128(7, 3)] + STRING_PYARROW_DTYPES = [pa.string()] + BINARY_PYARROW_DTYPES = [pa.binary()] + + TIME_PYARROW_DTYPES = [ + pa.time32("s"), + pa.time32("ms"), + pa.time64("us"), + pa.time64("ns"), + ] + DATE_PYARROW_DTYPES = [pa.date32(), pa.date64()] + DATETIME_PYARROW_DTYPES = [ + pa.timestamp(unit=unit, tz=tz) + for unit in ["s", "ms", "us", "ns"] + for tz in [None, "UTC", "US/Pacific", "US/Eastern"] + ] + TIMEDELTA_PYARROW_DTYPES = [pa.duration(unit) for unit in ["s", "ms", "us", "ns"]] + + BOOL_PYARROW_DTYPES = [pa.bool_()] + + # TODO: Add container like pyarrow types: + # https://arrow.apache.org/docs/python/api/datatypes.html#factory-functions + ALL_PYARROW_DTYPES = ( + ALL_INT_PYARROW_DTYPES + + FLOAT_PYARROW_DTYPES + + DECIMAL_PYARROW_DTYPES + + STRING_PYARROW_DTYPES + + BINARY_PYARROW_DTYPES + + TIME_PYARROW_DTYPES + + DATE_PYARROW_DTYPES + + DATETIME_PYARROW_DTYPES + + TIMEDELTA_PYARROW_DTYPES + + BOOL_PYARROW_DTYPES + ) + ALL_REAL_PYARROW_DTYPES_STR_REPR = ( + ALL_INT_PYARROW_DTYPES_STR_REPR + FLOAT_PYARROW_DTYPES_STR_REPR + ) +else: + FLOAT_PYARROW_DTYPES_STR_REPR = [] + ALL_INT_PYARROW_DTYPES_STR_REPR = [] + ALL_PYARROW_DTYPES = [] + ALL_REAL_PYARROW_DTYPES_STR_REPR = [] + +ALL_REAL_NULLABLE_DTYPES = ( + FLOAT_NUMPY_DTYPES + ALL_REAL_EXTENSION_DTYPES + 
ALL_REAL_PYARROW_DTYPES_STR_REPR +) + +arithmetic_dunder_methods = [ + "__add__", + "__radd__", + "__sub__", + "__rsub__", + "__mul__", + "__rmul__", + "__floordiv__", + "__rfloordiv__", + "__truediv__", + "__rtruediv__", + "__pow__", + "__rpow__", + "__mod__", + "__rmod__", +] + +comparison_dunder_methods = ["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"] + + +# ----------------------------------------------------------------------------- +# Comparators + + +def box_expected(expected, box_cls, transpose: bool = True): + """ + Helper function to wrap the expected output of a test in a given box_class. + + Parameters + ---------- + expected : np.ndarray, Index, Series + box_cls : {Index, Series, DataFrame} + + Returns + ------- + subclass of box_cls + """ + if box_cls is pd.array: + if isinstance(expected, RangeIndex): + # pd.array would return an IntegerArray + expected = NumpyExtensionArray(np.asarray(expected._values)) + else: + expected = pd.array(expected, copy=False) + elif box_cls is Index: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning) + expected = Index(expected) + elif box_cls is Series: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning) + expected = Series(expected) + elif box_cls is DataFrame: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", "Dtype inference", category=FutureWarning) + expected = Series(expected).to_frame() + if transpose: + # for vector operations, we need a DataFrame to be a single-row, + # not a single-column, in order to operate against non-DataFrame + # vectors of the same length. But convert to two rows to avoid + # single-row special cases in datetime arithmetic + expected = expected.T + expected = pd.concat([expected] * 2, ignore_index=True) + elif box_cls is np.ndarray or box_cls is np.array: + expected = np.array(expected) + elif box_cls is to_array: + expected = to_array(expected) + else: + raise NotImplementedError(box_cls) + return expected + + +def to_array(obj): + """ + Similar to pd.array, but does not cast numpy dtypes to nullable dtypes. + """ + # temporary implementation until we get pd.array in place + dtype = getattr(obj, "dtype", None) + + if dtype is None: + return np.asarray(obj) + + return extract_array(obj, extract_numpy=True) + + +class SubclassedSeries(Series): + _metadata = ["testattr", "name"] + + @property + def _constructor(self): + # For testing, those properties return a generic callable, and not + # the actual class. In this case that is equivalent, but it is to + # ensure we don't rely on the property returning a class + # See https://github.com/pandas-dev/pandas/pull/46018 and + # https://github.com/pandas-dev/pandas/issues/32638 and linked issues + return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs) + + @property + def _constructor_expanddim(self): + return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs) + + +class SubclassedDataFrame(DataFrame): + _metadata = ["testattr"] + + @property + def _constructor(self): + return lambda *args, **kwargs: SubclassedDataFrame(*args, **kwargs) + + @property + def _constructor_sliced(self): + return lambda *args, **kwargs: SubclassedSeries(*args, **kwargs) + + +def convert_rows_list_to_csv_str(rows_list: list[str]) -> str: + """ + Convert list of CSV rows to single CSV-formatted string for current OS. + + This method is used for creating expected value of to_csv() method. 
+ + Parameters + ---------- + rows_list : List[str] + Each element represents the row of csv. + + Returns + ------- + str + Expected output of to_csv() in current OS. + """ + sep = os.linesep + return sep.join(rows_list) + sep + + +def external_error_raised(expected_exception: type[Exception]) -> ContextManager: + """ + Helper function to mark pytest.raises that have an external error message. + + Parameters + ---------- + expected_exception : Exception + Expected error to raise. + + Returns + ------- + Callable + Regular `pytest.raises` function with `match` equal to `None`. + """ + import pytest + + return pytest.raises(expected_exception, match=None) + + +cython_table = pd.core.common._cython_table.items() + + +def get_cython_table_params(ndframe, func_names_and_expected): + """ + Combine frame, functions from com._cython_table + keys and expected result. + + Parameters + ---------- + ndframe : DataFrame or Series + func_names_and_expected : Sequence of two items + The first item is a name of a NDFrame method ('sum', 'prod') etc. + The second item is the expected return value. + + Returns + ------- + list + List of three items (DataFrame, function, expected result) + """ + results = [] + for func_name, expected in func_names_and_expected: + results.append((ndframe, func_name, expected)) + results += [ + (ndframe, func, expected) + for func, name in cython_table + if name == func_name + ] + return results + + +def get_op_from_name(op_name: str) -> Callable: + """ + The operator function for a given op name. + + Parameters + ---------- + op_name : str + The op name, in form of "add" or "__add__". + + Returns + ------- + function + A function performing the operation. + """ + short_opname = op_name.strip("_") + try: + op = getattr(operator, short_opname) + except AttributeError: + # Assume it is the reverse operator + rop = getattr(operator, short_opname[1:]) + op = lambda x, y: rop(y, x) + + return op + + +# ----------------------------------------------------------------------------- +# Indexing test helpers + + +def getitem(x): + return x + + +def setitem(x): + return x + + +def loc(x): + return x.loc + + +def iloc(x): + return x.iloc + + +def at(x): + return x.at + + +def iat(x): + return x.iat + + +# ----------------------------------------------------------------------------- + +_UNITS = ["s", "ms", "us", "ns"] + + +def get_finest_unit(left: str, right: str): + """ + Find the higher of two datetime64 units. + """ + if _UNITS.index(left) >= _UNITS.index(right): + return left + return right + + +def shares_memory(left, right) -> bool: + """ + Pandas-compat for np.shares_memory. + """ + if isinstance(left, np.ndarray) and isinstance(right, np.ndarray): + return np.shares_memory(left, right) + elif isinstance(left, np.ndarray): + # Call with reversed args to get to unpacking logic below. 
+ return shares_memory(right, left) + + if isinstance(left, RangeIndex): + return False + if isinstance(left, MultiIndex): + return shares_memory(left._codes, right) + if isinstance(left, (Index, Series)): + return shares_memory(left._values, right) + + if isinstance(left, NDArrayBackedExtensionArray): + return shares_memory(left._ndarray, right) + if isinstance(left, pd.core.arrays.SparseArray): + return shares_memory(left.sp_values, right) + if isinstance(left, pd.core.arrays.IntervalArray): + return shares_memory(left._left, right) or shares_memory(left._right, right) + + if ( + isinstance(left, ExtensionArray) + and is_string_dtype(left.dtype) + and left.dtype.storage in ("pyarrow", "pyarrow_numpy") # type: ignore[attr-defined] + ): + # https://github.com/pandas-dev/pandas/pull/43930#discussion_r736862669 + left = cast("ArrowExtensionArray", left) + if ( + isinstance(right, ExtensionArray) + and is_string_dtype(right.dtype) + and right.dtype.storage in ("pyarrow", "pyarrow_numpy") # type: ignore[attr-defined] + ): + right = cast("ArrowExtensionArray", right) + left_pa_data = left._pa_array + right_pa_data = right._pa_array + left_buf1 = left_pa_data.chunk(0).buffers()[1] + right_buf1 = right_pa_data.chunk(0).buffers()[1] + return left_buf1 == right_buf1 + + if isinstance(left, BaseMaskedArray) and isinstance(right, BaseMaskedArray): + # By convention, we'll say these share memory if they share *either* + # the _data or the _mask + return np.shares_memory(left._data, right._data) or np.shares_memory( + left._mask, right._mask + ) + + if isinstance(left, DataFrame) and len(left._mgr.arrays) == 1: + arr = left._mgr.arrays[0] + return shares_memory(arr, right) + + raise NotImplementedError(type(left), type(right)) + + +__all__ = [ + "ALL_INT_EA_DTYPES", + "ALL_INT_NUMPY_DTYPES", + "ALL_NUMPY_DTYPES", + "ALL_REAL_NUMPY_DTYPES", + "assert_almost_equal", + "assert_attr_equal", + "assert_categorical_equal", + "assert_class_equal", + "assert_contains_all", + "assert_copy", + "assert_datetime_array_equal", + "assert_dict_equal", + "assert_equal", + "assert_extension_array_equal", + "assert_frame_equal", + "assert_index_equal", + "assert_indexing_slices_equivalent", + "assert_interval_array_equal", + "assert_is_sorted", + "assert_is_valid_plot_return_object", + "assert_metadata_equivalent", + "assert_numpy_array_equal", + "assert_period_array_equal", + "assert_produces_warning", + "assert_series_equal", + "assert_sp_array_equal", + "assert_timedelta_array_equal", + "assert_cow_warning", + "at", + "BOOL_DTYPES", + "box_expected", + "BYTES_DTYPES", + "can_set_locale", + "COMPLEX_DTYPES", + "convert_rows_list_to_csv_str", + "DATETIME64_DTYPES", + "decompress_file", + "ENDIAN", + "ensure_clean", + "external_error_raised", + "FLOAT_EA_DTYPES", + "FLOAT_NUMPY_DTYPES", + "get_cython_table_params", + "get_dtype", + "getitem", + "get_locales", + "get_finest_unit", + "get_obj", + "get_op_from_name", + "iat", + "iloc", + "loc", + "maybe_produces_warning", + "NARROW_NP_DTYPES", + "NP_NAT_OBJECTS", + "NULL_OBJECTS", + "OBJECT_DTYPES", + "raise_assert_detail", + "raises_chained_assignment_error", + "round_trip_localpath", + "round_trip_pathlib", + "round_trip_pickle", + "setitem", + "set_locale", + "set_timezone", + "shares_memory", + "SIGNED_INT_EA_DTYPES", + "SIGNED_INT_NUMPY_DTYPES", + "STRING_DTYPES", + "SubclassedDataFrame", + "SubclassedSeries", + "TIMEDELTA64_DTYPES", + "to_array", + "UNSIGNED_INT_EA_DTYPES", + "UNSIGNED_INT_NUMPY_DTYPES", + "use_numexpr", + "with_csv_dialect", + "write_to_compressed", 
+] diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..483e571dde1f2bcfff96019fa29a66ec61043c7c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..181e001098a00de5c48a54ac74222dab513e018c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/_hypothesis.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..141ee79308afa28056028b2abd781711a1fc2ed3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/_io.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d2fccb318bfde85d97dd32fbaec3ec2c50df273 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/_warnings.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df20cc077dfe6dade1454f7a99d597075099c161 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/asserters.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d30c37e7b169459e46935396d3af269ded728feb Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/compat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b7c653d03d50778bcdfabe7b761a1071aa24c97 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/_testing/__pycache__/contexts.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py b/venv/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py new file mode 100644 index 0000000000000000000000000000000000000000..084ca9c306d192a2543108249dbc345d1259be01 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_testing/_hypothesis.py @@ -0,0 +1,93 @@ +""" +Hypothesis data generator helpers. 
+""" +from datetime import datetime + +from hypothesis import strategies as st +from hypothesis.extra.dateutil import timezones as dateutil_timezones +from hypothesis.extra.pytz import timezones as pytz_timezones + +from pandas.compat import is_platform_windows + +import pandas as pd + +from pandas.tseries.offsets import ( + BMonthBegin, + BMonthEnd, + BQuarterBegin, + BQuarterEnd, + BYearBegin, + BYearEnd, + MonthBegin, + MonthEnd, + QuarterBegin, + QuarterEnd, + YearBegin, + YearEnd, +) + +OPTIONAL_INTS = st.lists(st.one_of(st.integers(), st.none()), max_size=10, min_size=3) + +OPTIONAL_FLOATS = st.lists(st.one_of(st.floats(), st.none()), max_size=10, min_size=3) + +OPTIONAL_TEXT = st.lists(st.one_of(st.none(), st.text()), max_size=10, min_size=3) + +OPTIONAL_DICTS = st.lists( + st.one_of(st.none(), st.dictionaries(st.text(), st.integers())), + max_size=10, + min_size=3, +) + +OPTIONAL_LISTS = st.lists( + st.one_of(st.none(), st.lists(st.text(), max_size=10, min_size=3)), + max_size=10, + min_size=3, +) + +OPTIONAL_ONE_OF_ALL = st.one_of( + OPTIONAL_DICTS, OPTIONAL_FLOATS, OPTIONAL_INTS, OPTIONAL_LISTS, OPTIONAL_TEXT +) + +if is_platform_windows(): + DATETIME_NO_TZ = st.datetimes(min_value=datetime(1900, 1, 1)) +else: + DATETIME_NO_TZ = st.datetimes() + +DATETIME_JAN_1_1900_OPTIONAL_TZ = st.datetimes( + min_value=pd.Timestamp( + 1900, 1, 1 + ).to_pydatetime(), # pyright: ignore[reportGeneralTypeIssues] + max_value=pd.Timestamp( + 1900, 1, 1 + ).to_pydatetime(), # pyright: ignore[reportGeneralTypeIssues] + timezones=st.one_of(st.none(), dateutil_timezones(), pytz_timezones()), +) + +DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ = st.datetimes( + min_value=pd.Timestamp.min.to_pydatetime(warn=False), + max_value=pd.Timestamp.max.to_pydatetime(warn=False), +) + +INT_NEG_999_TO_POS_999 = st.integers(-999, 999) + +# The strategy for each type is registered in conftest.py, as they don't carry +# enough runtime information (e.g. type hints) to infer how to build them. +YQM_OFFSET = st.one_of( + *map( + st.from_type, + [ + MonthBegin, + MonthEnd, + BMonthBegin, + BMonthEnd, + QuarterBegin, + QuarterEnd, + BQuarterBegin, + BQuarterEnd, + YearBegin, + YearEnd, + BYearBegin, + BYearEnd, + ], + ) +) diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/_io.py b/venv/lib/python3.10/site-packages/pandas/_testing/_io.py new file mode 100644 index 0000000000000000000000000000000000000000..95977edb600ade42a8f8a1fada2b5085cee1da56 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_testing/_io.py @@ -0,0 +1,170 @@ +from __future__ import annotations + +import gzip +import io +import pathlib +import tarfile +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) +import uuid +import zipfile + +from pandas.compat import ( + get_bz2_file, + get_lzma_file, +) +from pandas.compat._optional import import_optional_dependency + +import pandas as pd +from pandas._testing.contexts import ensure_clean + +if TYPE_CHECKING: + from pandas._typing import ( + FilePath, + ReadPickleBuffer, + ) + + from pandas import ( + DataFrame, + Series, + ) + +# ------------------------------------------------------------------ +# File-IO + + +def round_trip_pickle( + obj: Any, path: FilePath | ReadPickleBuffer | None = None +) -> DataFrame | Series: + """ + Pickle an object and then read it again. + + Parameters + ---------- + obj : any object + The object to pickle and then re-read. + path : str, path object or file-like object, default None + The path where the pickled object is written and then read. 
+ + Returns + ------- + pandas object + The original object that was pickled and then re-read. + """ + _path = path + if _path is None: + _path = f"__{uuid.uuid4()}__.pickle" + with ensure_clean(_path) as temp_path: + pd.to_pickle(obj, temp_path) + return pd.read_pickle(temp_path) + + +def round_trip_pathlib(writer, reader, path: str | None = None): + """ + Write an object to file specified by a pathlib.Path and read it back + + Parameters + ---------- + writer : callable bound to pandas object + IO writing function (e.g. DataFrame.to_csv ) + reader : callable + IO reading function (e.g. pd.read_csv ) + path : str, default None + The path where the object is written and then read. + + Returns + ------- + pandas object + The original object that was serialized and then re-read. + """ + Path = pathlib.Path + if path is None: + path = "___pathlib___" + with ensure_clean(path) as path: + writer(Path(path)) # type: ignore[arg-type] + obj = reader(Path(path)) # type: ignore[arg-type] + return obj + + +def round_trip_localpath(writer, reader, path: str | None = None): + """ + Write an object to file specified by a py.path LocalPath and read it back. + + Parameters + ---------- + writer : callable bound to pandas object + IO writing function (e.g. DataFrame.to_csv ) + reader : callable + IO reading function (e.g. pd.read_csv ) + path : str, default None + The path where the object is written and then read. + + Returns + ------- + pandas object + The original object that was serialized and then re-read. + """ + import pytest + + LocalPath = pytest.importorskip("py.path").local + if path is None: + path = "___localpath___" + with ensure_clean(path) as path: + writer(LocalPath(path)) + obj = reader(LocalPath(path)) + return obj + + +def write_to_compressed(compression, path, data, dest: str = "test") -> None: + """ + Write data to a compressed file. + + Parameters + ---------- + compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd'} + The compression type to use. + path : str + The file path to write the data. + data : str + The data to write. + dest : str, default "test" + The destination file (for ZIP only) + + Raises + ------ + ValueError : An invalid compression value was passed in. + """ + args: tuple[Any, ...] 
= (data,) + mode = "wb" + method = "write" + compress_method: Callable + + if compression == "zip": + compress_method = zipfile.ZipFile + mode = "w" + args = (dest, data) + method = "writestr" + elif compression == "tar": + compress_method = tarfile.TarFile + mode = "w" + file = tarfile.TarInfo(name=dest) + bytes = io.BytesIO(data) + file.size = len(data) + args = (file, bytes) + method = "addfile" + elif compression == "gzip": + compress_method = gzip.GzipFile + elif compression == "bz2": + compress_method = get_bz2_file() + elif compression == "zstd": + compress_method = import_optional_dependency("zstandard").open + elif compression == "xz": + compress_method = get_lzma_file() + else: + raise ValueError(f"Unrecognized compression type: {compression}") + + with compress_method(path, mode=mode) as f: + getattr(f, method)(*args) diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/_warnings.py b/venv/lib/python3.10/site-packages/pandas/_testing/_warnings.py new file mode 100644 index 0000000000000000000000000000000000000000..c9a287942f2dac5ddbaf49168db280ec2ba3f2c4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_testing/_warnings.py @@ -0,0 +1,232 @@ +from __future__ import annotations + +from contextlib import ( + contextmanager, + nullcontext, +) +import inspect +import re +import sys +from typing import ( + TYPE_CHECKING, + Literal, + cast, +) +import warnings + +from pandas.compat import PY311 + +if TYPE_CHECKING: + from collections.abc import ( + Generator, + Sequence, + ) + + +@contextmanager +def assert_produces_warning( + expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None = Warning, + filter_level: Literal[ + "error", "ignore", "always", "default", "module", "once" + ] = "always", + check_stacklevel: bool = True, + raise_on_extra_warnings: bool = True, + match: str | None = None, +) -> Generator[list[warnings.WarningMessage], None, None]: + """ + Context manager for running code expected to either raise a specific warning, + multiple specific warnings, or not raise any warnings. Verifies that the code + raises the expected warning(s), and that it does not raise any other unexpected + warnings. It is basically a wrapper around ``warnings.catch_warnings``. + + Parameters + ---------- + expected_warning : {Warning, False, tuple[Warning, ...], None}, default Warning + The type of Exception raised. ``exception.Warning`` is the base + class for all warnings. To raise multiple types of exceptions, + pass them as a tuple. To check that no warning is returned, + specify ``False`` or ``None``. + filter_level : str or None, default "always" + Specifies whether warnings are ignored, displayed, or turned + into errors. + Valid values are: + + * "error" - turns matching warnings into exceptions + * "ignore" - discard the warning + * "always" - always emit a warning + * "default" - print the warning the first time it is generated + from each location + * "module" - print the warning the first time it is generated + from each module + * "once" - print the warning the first time it is generated + + check_stacklevel : bool, default True + If True, displays the line that called the function containing + the warning to show where the function is called. Otherwise, the + line that implements the function is displayed. + raise_on_extra_warnings : bool, default True + Whether extra warnings not of the type `expected_warning` should + cause the test to fail. + match : str, optional + Match warning message. 
+ + Examples + -------- + >>> import warnings + >>> with assert_produces_warning(): + ... warnings.warn(UserWarning()) + ... + >>> with assert_produces_warning(False): + ... warnings.warn(RuntimeWarning()) + ... + Traceback (most recent call last): + ... + AssertionError: Caused unexpected warning(s): ['RuntimeWarning']. + >>> with assert_produces_warning(UserWarning): + ... warnings.warn(RuntimeWarning()) + Traceback (most recent call last): + ... + AssertionError: Did not see expected warning of class 'UserWarning'. + + .. warning:: This is *not* thread-safe. + """ + __tracebackhide__ = True + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter(filter_level) + try: + yield w + finally: + if expected_warning: + expected_warning = cast(type[Warning], expected_warning) + _assert_caught_expected_warning( + caught_warnings=w, + expected_warning=expected_warning, + match=match, + check_stacklevel=check_stacklevel, + ) + if raise_on_extra_warnings: + _assert_caught_no_extra_warnings( + caught_warnings=w, + expected_warning=expected_warning, + ) + + +def maybe_produces_warning(warning: type[Warning], condition: bool, **kwargs): + """ + Return a context manager that possibly checks a warning based on the condition + """ + if condition: + return assert_produces_warning(warning, **kwargs) + else: + return nullcontext() + + +def _assert_caught_expected_warning( + *, + caught_warnings: Sequence[warnings.WarningMessage], + expected_warning: type[Warning], + match: str | None, + check_stacklevel: bool, +) -> None: + """Assert that there was the expected warning among the caught warnings.""" + saw_warning = False + matched_message = False + unmatched_messages = [] + + for actual_warning in caught_warnings: + if issubclass(actual_warning.category, expected_warning): + saw_warning = True + + if check_stacklevel: + _assert_raised_with_correct_stacklevel(actual_warning) + + if match is not None: + if re.search(match, str(actual_warning.message)): + matched_message = True + else: + unmatched_messages.append(actual_warning.message) + + if not saw_warning: + raise AssertionError( + f"Did not see expected warning of class " + f"{repr(expected_warning.__name__)}" + ) + + if match and not matched_message: + raise AssertionError( + f"Did not see warning {repr(expected_warning.__name__)} " + f"matching '{match}'. The emitted warning messages are " + f"{unmatched_messages}" + ) + + +def _assert_caught_no_extra_warnings( + *, + caught_warnings: Sequence[warnings.WarningMessage], + expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None, +) -> None: + """Assert that no extra warnings apart from the expected ones are caught.""" + extra_warnings = [] + + for actual_warning in caught_warnings: + if _is_unexpected_warning(actual_warning, expected_warning): + # GH#38630 pytest.filterwarnings does not suppress these. 
if actual_warning.category == ResourceWarning: + # GH 44732: Don't make the CI flaky by filtering SSL-related + # ResourceWarning from dependencies + if "unclosed <ssl.SSLSocket" in str(actual_warning.message): + continue + # GH 44844: Matplotlib leaves font files open during the entire process + # upon import. Don't make CI flaky if ResourceWarning raised + # due to these open files. + if any("matplotlib" in mod for mod in sys.modules): + continue + if PY311 and actual_warning.category == EncodingWarning: + # EncodingWarnings are checked in the CI + # pyproject.toml errors on EncodingWarnings in pandas + # Ignore EncodingWarnings from other libraries + continue + extra_warnings.append( + ( + actual_warning.category.__name__, + actual_warning.message, + actual_warning.filename, + actual_warning.lineno, + ) + ) + + if extra_warnings: + raise AssertionError(f"Caused unexpected warning(s): {repr(extra_warnings)}") + + +def _is_unexpected_warning( + actual_warning: warnings.WarningMessage, + expected_warning: type[Warning] | bool | tuple[type[Warning], ...] | None, +) -> bool: + """Check if the actual warning issued is unexpected.""" + if actual_warning and not expected_warning: + return True + expected_warning = cast(type[Warning], expected_warning) + return bool(not issubclass(actual_warning.category, expected_warning)) + + +def _assert_raised_with_correct_stacklevel( + actual_warning: warnings.WarningMessage, +) -> None: + # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow + frame = inspect.currentframe() + for _ in range(4): + frame = frame.f_back # type: ignore[union-attr] + try: + caller_filename = inspect.getfile(frame) # type: ignore[arg-type] + finally: + # See note in + # https://docs.python.org/3/library/inspect.html#inspect.Traceback + del frame + msg = ( + "Warning not set with correct stacklevel. " + f"File where warning is raised: {actual_warning.filename} != " + f"{caller_filename}. Warning message: {actual_warning.message}" + ) + assert actual_warning.filename == caller_filename, msg diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/asserters.py b/venv/lib/python3.10/site-packages/pandas/_testing/asserters.py new file mode 100644 index 0000000000000000000000000000000000000000..41d2a7344a4edf2e05664eb599b0049d2c696e4c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_testing/asserters.py @@ -0,0 +1,1435 @@ +from __future__ import annotations + +import operator +from typing import ( + TYPE_CHECKING, + Literal, + NoReturn, + cast, +) + +import numpy as np + +from pandas._libs import lib +from pandas._libs.missing import is_matching_na +from pandas._libs.sparse import SparseIndex +import pandas._libs.testing as _testing +from pandas._libs.tslibs.np_datetime import compare_mismatched_resolutions + +from pandas.core.dtypes.common import ( + is_bool, + is_float_dtype, + is_integer_dtype, + is_number, + is_numeric_dtype, + needs_i8_conversion, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + NumpyEADtype, +) +from pandas.core.dtypes.missing import array_equivalent + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + DatetimeIndex, + Index, + IntervalDtype, + IntervalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, +) +from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + IntervalArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin +from pandas.core.arrays.string_ import StringDtype +from pandas.core.indexes.api import safe_sort_index + +from pandas.io.formats.printing import pprint_thing + +if TYPE_CHECKING: + from pandas._typing import DtypeObj + + +def assert_almost_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = "equiv", + rtol: float = 1.0e-5, + atol: float = 1.0e-8, + **kwargs, +) -> None: + """ + Check that the left and right objects are approximately equal. + + By approximately equal, we refer to objects that are numbers or that + contain numbers which may be equivalent to specific levels of precision. + + Parameters + ---------- + left : object + right : object + check_dtype : bool or {'equiv'}, default 'equiv' + Check dtype if both a and b are the same type. If 'equiv' is passed in, + then `RangeIndex` and `Index` with int64 dtype are also considered + equivalent when doing type checking. 
+ rtol : float, default 1e-5 + Relative tolerance. + atol : float, default 1e-8 + Absolute tolerance. + """ + if isinstance(left, Index): + assert_index_equal( + left, + right, + check_exact=False, + exact=check_dtype, + rtol=rtol, + atol=atol, + **kwargs, + ) + + elif isinstance(left, Series): + assert_series_equal( + left, + right, + check_exact=False, + check_dtype=check_dtype, + rtol=rtol, + atol=atol, + **kwargs, + ) + + elif isinstance(left, DataFrame): + assert_frame_equal( + left, + right, + check_exact=False, + check_dtype=check_dtype, + rtol=rtol, + atol=atol, + **kwargs, + ) + + else: + # Other sequences. + if check_dtype: + if is_number(left) and is_number(right): + # Do not compare numeric classes, like np.float64 and float. + pass + elif is_bool(left) and is_bool(right): + # Do not compare bool classes, like np.bool_ and bool. + pass + else: + if isinstance(left, np.ndarray) or isinstance(right, np.ndarray): + obj = "numpy array" + else: + obj = "Input" + assert_class_equal(left, right, obj=obj) + + # if we have "equiv", this becomes True + _testing.assert_almost_equal( + left, right, check_dtype=bool(check_dtype), rtol=rtol, atol=atol, **kwargs + ) + + +def _check_isinstance(left, right, cls) -> None: + """ + Helper method for our assert_* methods that ensures that + the two objects being compared have the right type before + proceeding with the comparison. + + Parameters + ---------- + left : The first object being compared. + right : The second object being compared. + cls : The class type to check against. + + Raises + ------ + AssertionError : Either `left` or `right` is not an instance of `cls`. + """ + cls_name = cls.__name__ + + if not isinstance(left, cls): + raise AssertionError( + f"{cls_name} Expected type {cls}, found {type(left)} instead" + ) + if not isinstance(right, cls): + raise AssertionError( + f"{cls_name} Expected type {cls}, found {type(right)} instead" + ) + + +def assert_dict_equal(left, right, compare_keys: bool = True) -> None: + _check_isinstance(left, right, dict) + _testing.assert_dict_equal(left, right, compare_keys=compare_keys) + + +def assert_index_equal( + left: Index, + right: Index, + exact: bool | str = "equiv", + check_names: bool = True, + check_exact: bool = True, + check_categorical: bool = True, + check_order: bool = True, + rtol: float = 1.0e-5, + atol: float = 1.0e-8, + obj: str = "Index", +) -> None: + """ + Check that left and right Index are equal. + + Parameters + ---------- + left : Index + right : Index + exact : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. If 'equiv', then RangeIndex can be substituted for + Index with an int64 dtype as well. + check_names : bool, default True + Whether to check the names attribute. + check_exact : bool, default True + Whether to compare number exactly. + check_categorical : bool, default True + Whether to compare internal Categorical exactly. + check_order : bool, default True + Whether to compare the order of index entries as well as their values. + If True, both indexes must contain the same elements, in the same order. + If False, both indexes must contain the same elements, but in any order. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'Index' + Specify object name being compared, internally used to show appropriate + assertion message. 
+ + Examples + -------- + >>> from pandas import testing as tm + >>> a = pd.Index([1, 2, 3]) + >>> b = pd.Index([1, 2, 3]) + >>> tm.assert_index_equal(a, b) + """ + __tracebackhide__ = True + + def _check_types(left, right, obj: str = "Index") -> None: + if not exact: + return + + assert_class_equal(left, right, exact=exact, obj=obj) + assert_attr_equal("inferred_type", left, right, obj=obj) + + # Skip exact dtype checking when `check_categorical` is False + if isinstance(left.dtype, CategoricalDtype) and isinstance( + right.dtype, CategoricalDtype + ): + if check_categorical: + assert_attr_equal("dtype", left, right, obj=obj) + assert_index_equal(left.categories, right.categories, exact=exact) + return + + assert_attr_equal("dtype", left, right, obj=obj) + + # instance validation + _check_isinstance(left, right, Index) + + # class / dtype comparison + _check_types(left, right, obj=obj) + + # level comparison + if left.nlevels != right.nlevels: + msg1 = f"{obj} levels are different" + msg2 = f"{left.nlevels}, {left}" + msg3 = f"{right.nlevels}, {right}" + raise_assert_detail(obj, msg1, msg2, msg3) + + # length comparison + if len(left) != len(right): + msg1 = f"{obj} length are different" + msg2 = f"{len(left)}, {left}" + msg3 = f"{len(right)}, {right}" + raise_assert_detail(obj, msg1, msg2, msg3) + + # If order doesn't matter then sort the index entries + if not check_order: + left = safe_sort_index(left) + right = safe_sort_index(right) + + # MultiIndex special comparison for little-friendly error messages + if isinstance(left, MultiIndex): + right = cast(MultiIndex, right) + + for level in range(left.nlevels): + lobj = f"MultiIndex level [{level}]" + try: + # try comparison on levels/codes to avoid densifying MultiIndex + assert_index_equal( + left.levels[level], + right.levels[level], + exact=exact, + check_names=check_names, + check_exact=check_exact, + check_categorical=check_categorical, + rtol=rtol, + atol=atol, + obj=lobj, + ) + assert_numpy_array_equal(left.codes[level], right.codes[level]) + except AssertionError: + llevel = left.get_level_values(level) + rlevel = right.get_level_values(level) + + assert_index_equal( + llevel, + rlevel, + exact=exact, + check_names=check_names, + check_exact=check_exact, + check_categorical=check_categorical, + rtol=rtol, + atol=atol, + obj=lobj, + ) + # get_level_values may change dtype + _check_types(left.levels[level], right.levels[level], obj=obj) + + # skip exact index checking when `check_categorical` is False + elif check_exact and check_categorical: + if not left.equals(right): + mismatch = left._values != right._values + + if not isinstance(mismatch, np.ndarray): + mismatch = cast("ExtensionArray", mismatch).fillna(True) + + diff = np.sum(mismatch.astype(int)) * 100.0 / len(left) + msg = f"{obj} values are different ({np.round(diff, 5)} %)" + raise_assert_detail(obj, msg, left, right) + else: + # if we have "equiv", this becomes True + exact_bool = bool(exact) + _testing.assert_almost_equal( + left.values, + right.values, + rtol=rtol, + atol=atol, + check_dtype=exact_bool, + obj=obj, + lobj=left, + robj=right, + ) + + # metadata comparison + if check_names: + assert_attr_equal("names", left, right, obj=obj) + if isinstance(left, PeriodIndex) or isinstance(right, PeriodIndex): + assert_attr_equal("dtype", left, right, obj=obj) + if isinstance(left, IntervalIndex) or isinstance(right, IntervalIndex): + assert_interval_array_equal(left._values, right._values) + + if check_categorical: + if isinstance(left.dtype, CategoricalDtype) or 
isinstance( + right.dtype, CategoricalDtype + ): + assert_categorical_equal(left._values, right._values, obj=f"{obj} category") + + +def assert_class_equal( + left, right, exact: bool | str = True, obj: str = "Input" +) -> None: + """ + Checks classes are equal. + """ + __tracebackhide__ = True + + def repr_class(x): + if isinstance(x, Index): + # return Index as it is to include values in the error message + return x + + return type(x).__name__ + + def is_class_equiv(idx: Index) -> bool: + """Classes that are a RangeIndex (sub-)instance or exactly an `Index` . + + This only checks class equivalence. There is a separate check that the + dtype is int64. + """ + return type(idx) is Index or isinstance(idx, RangeIndex) + + if type(left) == type(right): + return + + if exact == "equiv": + if is_class_equiv(left) and is_class_equiv(right): + return + + msg = f"{obj} classes are different" + raise_assert_detail(obj, msg, repr_class(left), repr_class(right)) + + +def assert_attr_equal(attr: str, left, right, obj: str = "Attributes") -> None: + """ + Check attributes are equal. Both objects must have attribute. + + Parameters + ---------- + attr : str + Attribute name being compared. + left : object + right : object + obj : str, default 'Attributes' + Specify object name being compared, internally used to show appropriate + assertion message + """ + __tracebackhide__ = True + + left_attr = getattr(left, attr) + right_attr = getattr(right, attr) + + if left_attr is right_attr or is_matching_na(left_attr, right_attr): + # e.g. both np.nan, both NaT, both pd.NA, ... + return None + + try: + result = left_attr == right_attr + except TypeError: + # datetimetz on rhs may raise TypeError + result = False + if (left_attr is pd.NA) ^ (right_attr is pd.NA): + result = False + elif not isinstance(result, bool): + result = result.all() + + if not result: + msg = f'Attribute "{attr}" are different' + raise_assert_detail(obj, msg, left_attr, right_attr) + return None + + +def assert_is_valid_plot_return_object(objs) -> None: + from matplotlib.artist import Artist + from matplotlib.axes import Axes + + if isinstance(objs, (Series, np.ndarray)): + if isinstance(objs, Series): + objs = objs._values + for el in objs.ravel(): + msg = ( + "one of 'objs' is not a matplotlib Axes instance, " + f"type encountered {repr(type(el).__name__)}" + ) + assert isinstance(el, (Axes, dict)), msg + else: + msg = ( + "objs is neither an ndarray of Artist instances nor a single " + "ArtistArtist instance, tuple, or dict, 'objs' is a " + f"{repr(type(objs).__name__)}" + ) + assert isinstance(objs, (Artist, tuple, dict)), msg + + +def assert_is_sorted(seq) -> None: + """Assert that the sequence is sorted.""" + if isinstance(seq, (Index, Series)): + seq = seq.values + # sorting does not change precisions + if isinstance(seq, np.ndarray): + assert_numpy_array_equal(seq, np.sort(np.array(seq))) + else: + assert_extension_array_equal(seq, seq[seq.argsort()]) + + +def assert_categorical_equal( + left, + right, + check_dtype: bool = True, + check_category_order: bool = True, + obj: str = "Categorical", +) -> None: + """ + Test that Categoricals are equivalent. + + Parameters + ---------- + left : Categorical + right : Categorical + check_dtype : bool, default True + Check that integer dtype of the codes are the same. + check_category_order : bool, default True + Whether the order of the categories should be compared, which + implies identical integer codes. If False, only the resulting + values are compared. 
The ordered attribute is + checked regardless. + obj : str, default 'Categorical' + Specify object name being compared, internally used to show appropriate + assertion message. + """ + _check_isinstance(left, right, Categorical) + + exact: bool | str + if isinstance(left.categories, RangeIndex) or isinstance( + right.categories, RangeIndex + ): + exact = "equiv" + else: + # We still want to require exact matches for Index + exact = True + + if check_category_order: + assert_index_equal( + left.categories, right.categories, obj=f"{obj}.categories", exact=exact + ) + assert_numpy_array_equal( + left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes" + ) + else: + try: + lc = left.categories.sort_values() + rc = right.categories.sort_values() + except TypeError: + # e.g. '<' not supported between instances of 'int' and 'str' + lc, rc = left.categories, right.categories + assert_index_equal(lc, rc, obj=f"{obj}.categories", exact=exact) + assert_index_equal( + left.categories.take(left.codes), + right.categories.take(right.codes), + obj=f"{obj}.values", + exact=exact, + ) + + assert_attr_equal("ordered", left, right, obj=obj) + + +def assert_interval_array_equal( + left, right, exact: bool | Literal["equiv"] = "equiv", obj: str = "IntervalArray" +) -> None: + """ + Test that two IntervalArrays are equivalent. + + Parameters + ---------- + left, right : IntervalArray + The IntervalArrays to compare. + exact : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. If 'equiv', then RangeIndex can be substituted for + Index with an int64 dtype as well. + obj : str, default 'IntervalArray' + Specify object name being compared, internally used to show appropriate + assertion message + """ + _check_isinstance(left, right, IntervalArray) + + kwargs = {} + if left._left.dtype.kind in "mM": + # We have a DatetimeArray or TimedeltaArray + kwargs["check_freq"] = False + + assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs) + assert_equal(left._right, right._right, obj=f"{obj}.left", **kwargs) + + assert_attr_equal("closed", left, right, obj=obj) + + +def assert_period_array_equal(left, right, obj: str = "PeriodArray") -> None: + _check_isinstance(left, right, PeriodArray) + + assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray") + assert_attr_equal("dtype", left, right, obj=obj) + + +def assert_datetime_array_equal( + left, right, obj: str = "DatetimeArray", check_freq: bool = True +) -> None: + __tracebackhide__ = True + _check_isinstance(left, right, DatetimeArray) + + assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray") + if check_freq: + assert_attr_equal("freq", left, right, obj=obj) + assert_attr_equal("tz", left, right, obj=obj) + + +def assert_timedelta_array_equal( + left, right, obj: str = "TimedeltaArray", check_freq: bool = True +) -> None: + __tracebackhide__ = True + _check_isinstance(left, right, TimedeltaArray) + assert_numpy_array_equal(left._ndarray, right._ndarray, obj=f"{obj}._ndarray") + if check_freq: + assert_attr_equal("freq", left, right, obj=obj) + + +def raise_assert_detail( + obj, message, left, right, diff=None, first_diff=None, index_values=None +) -> NoReturn: + __tracebackhide__ = True + + msg = f"""{obj} are different + +{message}""" + + if isinstance(index_values, Index): + index_values = np.asarray(index_values) + + if isinstance(index_values, np.ndarray): + msg += f"\n[index]: {pprint_thing(index_values)}" + + if isinstance(left, 
np.ndarray): + left = pprint_thing(left) + elif isinstance(left, (CategoricalDtype, NumpyEADtype, StringDtype)): + left = repr(left) + + if isinstance(right, np.ndarray): + right = pprint_thing(right) + elif isinstance(right, (CategoricalDtype, NumpyEADtype, StringDtype)): + right = repr(right) + + msg += f""" +[left]: {left} +[right]: {right}""" + + if diff is not None: + msg += f"\n[diff]: {diff}" + + if first_diff is not None: + msg += f"\n{first_diff}" + + raise AssertionError(msg) + + +def assert_numpy_array_equal( + left, + right, + strict_nan: bool = False, + check_dtype: bool | Literal["equiv"] = True, + err_msg=None, + check_same=None, + obj: str = "numpy array", + index_values=None, +) -> None: + """ + Check that 'np.ndarray' is equivalent. + + Parameters + ---------- + left, right : numpy.ndarray or iterable + The two arrays to be compared. + strict_nan : bool, default False + If True, consider NaN and None to be different. + check_dtype : bool, default True + Check dtype if both a and b are np.ndarray. + err_msg : str, default None + If provided, used as assertion message. + check_same : None|'copy'|'same', default None + Ensure left and right refer/do not refer to the same memory area. + obj : str, default 'numpy array' + Specify object name being compared, internally used to show appropriate + assertion message. + index_values : Index | numpy.ndarray, default None + optional index (shared by both left and right), used in output. + """ + __tracebackhide__ = True + + # instance validation + # Show a detailed error message when classes are different + assert_class_equal(left, right, obj=obj) + # both classes must be an np.ndarray + _check_isinstance(left, right, np.ndarray) + + def _get_base(obj): + return obj.base if getattr(obj, "base", None) is not None else obj + + left_base = _get_base(left) + right_base = _get_base(right) + + if check_same == "same": + if left_base is not right_base: + raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}") + elif check_same == "copy": + if left_base is right_base: + raise AssertionError(f"{repr(left_base)} is {repr(right_base)}") + + def _raise(left, right, err_msg) -> NoReturn: + if err_msg is None: + if left.shape != right.shape: + raise_assert_detail( + obj, f"{obj} shapes are different", left.shape, right.shape + ) + + diff = 0 + for left_arr, right_arr in zip(left, right): + # count up differences + if not array_equivalent(left_arr, right_arr, strict_nan=strict_nan): + diff += 1 + + diff = diff * 100.0 / left.size + msg = f"{obj} values are different ({np.round(diff, 5)} %)" + raise_assert_detail(obj, msg, left, right, index_values=index_values) + + raise AssertionError(err_msg) + + # compare shape and values + if not array_equivalent(left, right, strict_nan=strict_nan): + _raise(left, right, err_msg) + + if check_dtype: + if isinstance(left, np.ndarray) and isinstance(right, np.ndarray): + assert_attr_equal("dtype", left, right, obj=obj) + + +def assert_extension_array_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = True, + index_values=None, + check_exact: bool | lib.NoDefault = lib.no_default, + rtol: float | lib.NoDefault = lib.no_default, + atol: float | lib.NoDefault = lib.no_default, + obj: str = "ExtensionArray", +) -> None: + """ + Check that left and right ExtensionArrays are equal. + + Parameters + ---------- + left, right : ExtensionArray + The two arrays to compare. + check_dtype : bool, default True + Whether to check if the ExtensionArray dtypes are identical. 
+ index_values : Index | numpy.ndarray, default None + Optional index (shared by both left and right), used in output. + check_exact : bool, default False + Whether to compare number exactly. + + .. versionchanged:: 2.2.0 + + Defaults to True for integer dtypes if none of + ``check_exact``, ``rtol`` and ``atol`` are specified. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'ExtensionArray' + Specify object name being compared, internally used to show appropriate + assertion message. + + .. versionadded:: 2.0.0 + + Notes + ----- + Missing values are checked separately from valid values. + A mask of missing values is computed for each and checked to match. + The remaining all-valid values are cast to object dtype and checked. + + Examples + -------- + >>> from pandas import testing as tm + >>> a = pd.Series([1, 2, 3, 4]) + >>> b, c = a.array, a.array + >>> tm.assert_extension_array_equal(b, c) + """ + if ( + check_exact is lib.no_default + and rtol is lib.no_default + and atol is lib.no_default + ): + check_exact = ( + is_numeric_dtype(left.dtype) + and not is_float_dtype(left.dtype) + or is_numeric_dtype(right.dtype) + and not is_float_dtype(right.dtype) + ) + elif check_exact is lib.no_default: + check_exact = False + + rtol = rtol if rtol is not lib.no_default else 1.0e-5 + atol = atol if atol is not lib.no_default else 1.0e-8 + + assert isinstance(left, ExtensionArray), "left is not an ExtensionArray" + assert isinstance(right, ExtensionArray), "right is not an ExtensionArray" + if check_dtype: + assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}") + + if ( + isinstance(left, DatetimeLikeArrayMixin) + and isinstance(right, DatetimeLikeArrayMixin) + and type(right) == type(left) + ): + # GH 52449 + if not check_dtype and left.dtype.kind in "mM": + if not isinstance(left.dtype, np.dtype): + l_unit = cast(DatetimeTZDtype, left.dtype).unit + else: + l_unit = np.datetime_data(left.dtype)[0] + if not isinstance(right.dtype, np.dtype): + r_unit = cast(DatetimeTZDtype, right.dtype).unit + else: + r_unit = np.datetime_data(right.dtype)[0] + if ( + l_unit != r_unit + and compare_mismatched_resolutions( + left._ndarray, right._ndarray, operator.eq + ).all() + ): + return + # Avoid slow object-dtype comparisons + # np.asarray for case where we have a np.MaskedArray + assert_numpy_array_equal( + np.asarray(left.asi8), + np.asarray(right.asi8), + index_values=index_values, + obj=obj, + ) + return + + left_na = np.asarray(left.isna()) + right_na = np.asarray(right.isna()) + assert_numpy_array_equal( + left_na, right_na, obj=f"{obj} NA mask", index_values=index_values + ) + + left_valid = left[~left_na].to_numpy(dtype=object) + right_valid = right[~right_na].to_numpy(dtype=object) + if check_exact: + assert_numpy_array_equal( + left_valid, right_valid, obj=obj, index_values=index_values + ) + else: + _testing.assert_almost_equal( + left_valid, + right_valid, + check_dtype=bool(check_dtype), + rtol=rtol, + atol=atol, + obj=obj, + index_values=index_values, + ) + + +# This could be refactored to use the NDFrame.equals method +def assert_series_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = True, + check_index_type: bool | Literal["equiv"] = "equiv", + check_series_type: bool = True, + check_names: bool = True, + check_exact: bool | lib.NoDefault = lib.no_default, + check_datetimelike_compat: bool = False, + check_categorical: 
bool = True, + check_category_order: bool = True, + check_freq: bool = True, + check_flags: bool = True, + rtol: float | lib.NoDefault = lib.no_default, + atol: float | lib.NoDefault = lib.no_default, + obj: str = "Series", + *, + check_index: bool = True, + check_like: bool = False, +) -> None: + """ + Check that left and right Series are equal. + + Parameters + ---------- + left : Series + right : Series + check_dtype : bool, default True + Whether to check the Series dtype is identical. + check_index_type : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. + check_series_type : bool, default True + Whether to check the Series class is identical. + check_names : bool, default True + Whether to check the Series and Index names attribute. + check_exact : bool, default False + Whether to compare number exactly. + + .. versionchanged:: 2.2.0 + + Defaults to True for integer dtypes if none of + ``check_exact``, ``rtol`` and ``atol`` are specified. + check_datetimelike_compat : bool, default False + Compare datetime-like which is comparable ignoring dtype. + check_categorical : bool, default True + Whether to compare internal Categorical exactly. + check_category_order : bool, default True + Whether to compare category order of internal Categoricals. + check_freq : bool, default True + Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex. + check_flags : bool, default True + Whether to check the `flags` attribute. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'Series' + Specify object name being compared, internally used to show appropriate + assertion message. + check_index : bool, default True + Whether to check index equivalence. If False, then compare only values. + + .. versionadded:: 1.3.0 + check_like : bool, default False + If True, ignore the order of the index. Must be False if check_index is False. + Note: same labels must be with the same data. + + .. 
versionadded:: 1.5.0 + + Examples + -------- + >>> from pandas import testing as tm + >>> a = pd.Series([1, 2, 3, 4]) + >>> b = pd.Series([1, 2, 3, 4]) + >>> tm.assert_series_equal(a, b) + """ + __tracebackhide__ = True + check_exact_index = False if check_exact is lib.no_default else check_exact + if ( + check_exact is lib.no_default + and rtol is lib.no_default + and atol is lib.no_default + ): + check_exact = ( + is_numeric_dtype(left.dtype) + and not is_float_dtype(left.dtype) + or is_numeric_dtype(right.dtype) + and not is_float_dtype(right.dtype) + ) + elif check_exact is lib.no_default: + check_exact = False + + rtol = rtol if rtol is not lib.no_default else 1.0e-5 + atol = atol if atol is not lib.no_default else 1.0e-8 + + if not check_index and check_like: + raise ValueError("check_like must be False if check_index is False") + + # instance validation + _check_isinstance(left, right, Series) + + if check_series_type: + assert_class_equal(left, right, obj=obj) + + # length comparison + if len(left) != len(right): + msg1 = f"{len(left)}, {left.index}" + msg2 = f"{len(right)}, {right.index}" + raise_assert_detail(obj, "Series length are different", msg1, msg2) + + if check_flags: + assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + + if check_index: + # GH #38183 + assert_index_equal( + left.index, + right.index, + exact=check_index_type, + check_names=check_names, + check_exact=check_exact_index, + check_categorical=check_categorical, + check_order=not check_like, + rtol=rtol, + atol=atol, + obj=f"{obj}.index", + ) + + if check_like: + left = left.reindex_like(right) + + if check_freq and isinstance(left.index, (DatetimeIndex, TimedeltaIndex)): + lidx = left.index + ridx = right.index + assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq) + + if check_dtype: + # We want to skip exact dtype checking when `check_categorical` + # is False. We'll still raise if only one is a `Categorical`, + # regardless of `check_categorical` + if ( + isinstance(left.dtype, CategoricalDtype) + and isinstance(right.dtype, CategoricalDtype) + and not check_categorical + ): + pass + else: + assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}") + if check_exact: + left_values = left._values + right_values = right._values + # Only check exact if dtype is numeric + if isinstance(left_values, ExtensionArray) and isinstance( + right_values, ExtensionArray + ): + assert_extension_array_equal( + left_values, + right_values, + check_dtype=check_dtype, + index_values=left.index, + obj=str(obj), + ) + else: + # convert both to NumPy if not, check_dtype would raise earlier + lv, rv = left_values, right_values + if isinstance(left_values, ExtensionArray): + lv = left_values.to_numpy() + if isinstance(right_values, ExtensionArray): + rv = right_values.to_numpy() + assert_numpy_array_equal( + lv, + rv, + check_dtype=check_dtype, + obj=str(obj), + index_values=left.index, + ) + elif check_datetimelike_compat and ( + needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype) + ): + # we want to check only if we have compat dtypes + # e.g. integer and M|m are NOT compat, but we can simply check + # the values in that case + + # datetimelike may have different objects (e.g. datetime.datetime + # vs Timestamp) but will compare equal + if not Index(left._values).equals(Index(right._values)): + msg = ( + f"[datetimelike_compat=True] {left._values} " + f"is not equal to {right._values}." 
+ ) + raise AssertionError(msg) + elif isinstance(left.dtype, IntervalDtype) and isinstance( + right.dtype, IntervalDtype + ): + assert_interval_array_equal(left.array, right.array) + elif isinstance(left.dtype, CategoricalDtype) or isinstance( + right.dtype, CategoricalDtype + ): + _testing.assert_almost_equal( + left._values, + right._values, + rtol=rtol, + atol=atol, + check_dtype=bool(check_dtype), + obj=str(obj), + index_values=left.index, + ) + elif isinstance(left.dtype, ExtensionDtype) and isinstance( + right.dtype, ExtensionDtype + ): + assert_extension_array_equal( + left._values, + right._values, + rtol=rtol, + atol=atol, + check_dtype=check_dtype, + index_values=left.index, + obj=str(obj), + ) + elif is_extension_array_dtype_and_needs_i8_conversion( + left.dtype, right.dtype + ) or is_extension_array_dtype_and_needs_i8_conversion(right.dtype, left.dtype): + assert_extension_array_equal( + left._values, + right._values, + check_dtype=check_dtype, + index_values=left.index, + obj=str(obj), + ) + elif needs_i8_conversion(left.dtype) and needs_i8_conversion(right.dtype): + # DatetimeArray or TimedeltaArray + assert_extension_array_equal( + left._values, + right._values, + check_dtype=check_dtype, + index_values=left.index, + obj=str(obj), + ) + else: + _testing.assert_almost_equal( + left._values, + right._values, + rtol=rtol, + atol=atol, + check_dtype=bool(check_dtype), + obj=str(obj), + index_values=left.index, + ) + + # metadata comparison + if check_names: + assert_attr_equal("name", left, right, obj=obj) + + if check_categorical: + if isinstance(left.dtype, CategoricalDtype) or isinstance( + right.dtype, CategoricalDtype + ): + assert_categorical_equal( + left._values, + right._values, + obj=f"{obj} category", + check_category_order=check_category_order, + ) + + +# This could be refactored to use the NDFrame.equals method +def assert_frame_equal( + left, + right, + check_dtype: bool | Literal["equiv"] = True, + check_index_type: bool | Literal["equiv"] = "equiv", + check_column_type: bool | Literal["equiv"] = "equiv", + check_frame_type: bool = True, + check_names: bool = True, + by_blocks: bool = False, + check_exact: bool | lib.NoDefault = lib.no_default, + check_datetimelike_compat: bool = False, + check_categorical: bool = True, + check_like: bool = False, + check_freq: bool = True, + check_flags: bool = True, + rtol: float | lib.NoDefault = lib.no_default, + atol: float | lib.NoDefault = lib.no_default, + obj: str = "DataFrame", +) -> None: + """ + Check that left and right DataFrame are equal. + + This function is intended to compare two DataFrames and output any + differences. It is mostly intended for use in unit tests. + Additional parameters allow varying the strictness of the + equality checks performed. + + Parameters + ---------- + left : DataFrame + First DataFrame to compare. + right : DataFrame + Second DataFrame to compare. + check_dtype : bool, default True + Whether to check the DataFrame dtype is identical. + check_index_type : bool or {'equiv'}, default 'equiv' + Whether to check the Index class, dtype and inferred_type + are identical. + check_column_type : bool or {'equiv'}, default 'equiv' + Whether to check the columns class, dtype and inferred_type + are identical. Is passed as the ``exact`` argument of + :func:`assert_index_equal`. + check_frame_type : bool, default True + Whether to check the DataFrame class is identical. 
+ check_names : bool, default True + Whether to check that the `names` attribute for both the `index` + and `column` attributes of the DataFrame is identical. + by_blocks : bool, default False + Specify how to compare internal data. If False, compare by columns. + If True, compare by blocks. + check_exact : bool, default False + Whether to compare number exactly. + + .. versionchanged:: 2.2.0 + + Defaults to True for integer dtypes if none of + ``check_exact``, ``rtol`` and ``atol`` are specified. + check_datetimelike_compat : bool, default False + Compare datetime-like which is comparable ignoring dtype. + check_categorical : bool, default True + Whether to compare internal Categorical exactly. + check_like : bool, default False + If True, ignore the order of index & columns. + Note: index labels must match their respective rows + (same as in columns) - same labels must be with the same data. + check_freq : bool, default True + Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex. + check_flags : bool, default True + Whether to check the `flags` attribute. + rtol : float, default 1e-5 + Relative tolerance. Only used when check_exact is False. + atol : float, default 1e-8 + Absolute tolerance. Only used when check_exact is False. + obj : str, default 'DataFrame' + Specify object name being compared, internally used to show appropriate + assertion message. + + See Also + -------- + assert_series_equal : Equivalent method for asserting Series equality. + DataFrame.equals : Check DataFrame equality. + + Examples + -------- + This example shows comparing two DataFrames that are equal + but with columns of differing dtypes. + + >>> from pandas.testing import assert_frame_equal + >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]}) + >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]}) + + df1 equals itself. + + >>> assert_frame_equal(df1, df1) + + df1 differs from df2 as column 'b' is of a different type. + + >>> assert_frame_equal(df1, df2) + Traceback (most recent call last): + ... + AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different + + Attribute "dtype" are different + [left]: int64 + [right]: float64 + + Ignore differing dtypes in columns with check_dtype. 
+ + >>> assert_frame_equal(df1, df2, check_dtype=False) + """ + __tracebackhide__ = True + _rtol = rtol if rtol is not lib.no_default else 1.0e-5 + _atol = atol if atol is not lib.no_default else 1.0e-8 + _check_exact = check_exact if check_exact is not lib.no_default else False + + # instance validation + _check_isinstance(left, right, DataFrame) + + if check_frame_type: + assert isinstance(left, type(right)) + # assert_class_equal(left, right, obj=obj) + + # shape comparison + if left.shape != right.shape: + raise_assert_detail( + obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}" + ) + + if check_flags: + assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}" + + # index comparison + assert_index_equal( + left.index, + right.index, + exact=check_index_type, + check_names=check_names, + check_exact=_check_exact, + check_categorical=check_categorical, + check_order=not check_like, + rtol=_rtol, + atol=_atol, + obj=f"{obj}.index", + ) + + # column comparison + assert_index_equal( + left.columns, + right.columns, + exact=check_column_type, + check_names=check_names, + check_exact=_check_exact, + check_categorical=check_categorical, + check_order=not check_like, + rtol=_rtol, + atol=_atol, + obj=f"{obj}.columns", + ) + + if check_like: + left = left.reindex_like(right) + + # compare by blocks + if by_blocks: + rblocks = right._to_dict_of_blocks() + lblocks = left._to_dict_of_blocks() + for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))): + assert dtype in lblocks + assert dtype in rblocks + assert_frame_equal( + lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj + ) + + # compare by columns + else: + for i, col in enumerate(left.columns): + # We have already checked that columns match, so we can do + # fast location-based lookups + lcol = left._ixs(i, axis=1) + rcol = right._ixs(i, axis=1) + + # GH #38183 + # use check_index=False, because we do not want to run + # assert_index_equal for each column, + # as we already checked it for the whole dataframe before. + assert_series_equal( + lcol, + rcol, + check_dtype=check_dtype, + check_index_type=check_index_type, + check_exact=check_exact, + check_names=check_names, + check_datetimelike_compat=check_datetimelike_compat, + check_categorical=check_categorical, + check_freq=check_freq, + obj=f'{obj}.iloc[:, {i}] (column name="{col}")', + rtol=rtol, + atol=atol, + check_index=False, + check_flags=False, + ) + + +def assert_equal(left, right, **kwargs) -> None: + """ + Wrapper for tm.assert_*_equal to dispatch to the appropriate test function. + + Parameters + ---------- + left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray + The two items to be compared. + **kwargs + All keyword arguments are passed through to the underlying assert method. 
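[Editor's sketch, not part of the vendored source: how the type dispatch implemented just below behaves. Assumes only numpy and pandas are importable.]

import numpy as np
import pandas as pd
import pandas._testing as tm

tm.assert_equal(pd.Index([1, 2, 3]), pd.Index([1, 2, 3]))      # dispatches to assert_index_equal
tm.assert_equal(pd.Series([1.0, 2.0]), pd.Series([1.0, 2.0]))  # dispatches to assert_series_equal
tm.assert_equal(np.array([1, 2]), np.array([1, 2]))            # dispatches to assert_numpy_array_equal

# A mismatch raises an AssertionError whose text is assembled by raise_assert_detail:
try:
    tm.assert_equal(pd.Series([1, 2]), pd.Series([1, 3]))
except AssertionError as err:
    print(err)  # "Series are different ..."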
+ """ + __tracebackhide__ = True + + if isinstance(left, Index): + assert_index_equal(left, right, **kwargs) + if isinstance(left, (DatetimeIndex, TimedeltaIndex)): + assert left.freq == right.freq, (left.freq, right.freq) + elif isinstance(left, Series): + assert_series_equal(left, right, **kwargs) + elif isinstance(left, DataFrame): + assert_frame_equal(left, right, **kwargs) + elif isinstance(left, IntervalArray): + assert_interval_array_equal(left, right, **kwargs) + elif isinstance(left, PeriodArray): + assert_period_array_equal(left, right, **kwargs) + elif isinstance(left, DatetimeArray): + assert_datetime_array_equal(left, right, **kwargs) + elif isinstance(left, TimedeltaArray): + assert_timedelta_array_equal(left, right, **kwargs) + elif isinstance(left, ExtensionArray): + assert_extension_array_equal(left, right, **kwargs) + elif isinstance(left, np.ndarray): + assert_numpy_array_equal(left, right, **kwargs) + elif isinstance(left, str): + assert kwargs == {} + assert left == right + else: + assert kwargs == {} + assert_almost_equal(left, right) + + +def assert_sp_array_equal(left, right) -> None: + """ + Check that the left and right SparseArray are equal. + + Parameters + ---------- + left : SparseArray + right : SparseArray + """ + _check_isinstance(left, right, pd.arrays.SparseArray) + + assert_numpy_array_equal(left.sp_values, right.sp_values) + + # SparseIndex comparison + assert isinstance(left.sp_index, SparseIndex) + assert isinstance(right.sp_index, SparseIndex) + + left_index = left.sp_index + right_index = right.sp_index + + if not left_index.equals(right_index): + raise_assert_detail( + "SparseArray.index", "index are not equal", left_index, right_index + ) + else: + # Just ensure a + pass + + assert_attr_equal("fill_value", left, right) + assert_attr_equal("dtype", left, right) + assert_numpy_array_equal(left.to_dense(), right.to_dense()) + + +def assert_contains_all(iterable, dic) -> None: + for k in iterable: + assert k in dic, f"Did not contain item: {repr(k)}" + + +def assert_copy(iter1, iter2, **eql_kwargs) -> None: + """ + iter1, iter2: iterables that produce elements + comparable with assert_almost_equal + + Checks that the elements are equal, but not + the same object. (Does not check that items + in sequences are also not the same object) + """ + for elem1, elem2 in zip(iter1, iter2): + assert_almost_equal(elem1, elem2, **eql_kwargs) + msg = ( + f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be " + "different objects, but they were the same object." + ) + assert elem1 is not elem2, msg + + +def is_extension_array_dtype_and_needs_i8_conversion( + left_dtype: DtypeObj, right_dtype: DtypeObj +) -> bool: + """ + Checks that we have the combination of an ExtensionArraydtype and + a dtype that should be converted to int64 + + Returns + ------- + bool + + Related to issue #37609 + """ + return isinstance(left_dtype, ExtensionDtype) and needs_i8_conversion(right_dtype) + + +def assert_indexing_slices_equivalent(ser: Series, l_slc: slice, i_slc: slice) -> None: + """ + Check that ser.iloc[i_slc] matches ser.loc[l_slc] and, if applicable, + ser[l_slc]. + """ + expected = ser.iloc[i_slc] + + assert_series_equal(ser.loc[l_slc], expected) + + if not is_integer_dtype(ser.index): + # For integer indices, .loc and plain getitem are position-based. 
+ assert_series_equal(ser[l_slc], expected) + + +def assert_metadata_equivalent( + left: DataFrame | Series, right: DataFrame | Series | None = None +) -> None: + """ + Check that ._metadata attributes are equivalent. + """ + for attr in left._metadata: + val = getattr(left, attr, None) + if right is None: + assert val is None + else: + assert val == getattr(right, attr, None) diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/compat.py b/venv/lib/python3.10/site-packages/pandas/_testing/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..cc352ba7b8f2f5a5548d4d5749d3b48ac838aced --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_testing/compat.py @@ -0,0 +1,29 @@ +""" +Helpers for sharing tests between DataFrame/Series +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +from pandas import DataFrame + +if TYPE_CHECKING: + from pandas._typing import DtypeObj + + +def get_dtype(obj) -> DtypeObj: + if isinstance(obj, DataFrame): + # Note: we are assuming only one column + return obj.dtypes.iat[0] + else: + return obj.dtype + + +def get_obj(df: DataFrame, klass): + """ + For sharing tests using frame_or_series, either return the DataFrame + unchanged or return it's first column as a Series. + """ + if klass is DataFrame: + return df + return df._ixs(0, axis=1) diff --git a/venv/lib/python3.10/site-packages/pandas/_testing/contexts.py b/venv/lib/python3.10/site-packages/pandas/_testing/contexts.py new file mode 100644 index 0000000000000000000000000000000000000000..eb6e4a917889aef221b2fc08eb2723c4fe568e04 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/_testing/contexts.py @@ -0,0 +1,257 @@ +from __future__ import annotations + +from contextlib import contextmanager +import os +from pathlib import Path +import tempfile +from typing import ( + IO, + TYPE_CHECKING, + Any, +) +import uuid + +from pandas._config import using_copy_on_write + +from pandas.compat import PYPY +from pandas.errors import ChainedAssignmentError + +from pandas import set_option + +from pandas.io.common import get_handle + +if TYPE_CHECKING: + from collections.abc import Generator + + from pandas._typing import ( + BaseBuffer, + CompressionOptions, + FilePath, + ) + + +@contextmanager +def decompress_file( + path: FilePath | BaseBuffer, compression: CompressionOptions +) -> Generator[IO[bytes], None, None]: + """ + Open a compressed file and return a file object. + + Parameters + ---------- + path : str + The path where the file is read from. + + compression : {'gzip', 'bz2', 'zip', 'xz', 'zstd', None} + Name of the decompression to use + + Returns + ------- + file object + """ + with get_handle(path, "rb", compression=compression, is_text=False) as handle: + yield handle.handle + + +@contextmanager +def set_timezone(tz: str) -> Generator[None, None, None]: + """ + Context manager for temporarily setting a timezone. + + Parameters + ---------- + tz : str + A string representing a valid timezone. + + Examples + -------- + >>> from datetime import datetime + >>> from dateutil.tz import tzlocal + >>> tzlocal().tzname(datetime(2021, 1, 1)) # doctest: +SKIP + 'IST' + + >>> with set_timezone('US/Eastern'): + ... tzlocal().tzname(datetime(2021, 1, 1)) + ... 
+ 'EST' + """ + import time + + def setTZ(tz) -> None: + if tz is None: + try: + del os.environ["TZ"] + except KeyError: + pass + else: + os.environ["TZ"] = tz + time.tzset() + + orig_tz = os.environ.get("TZ") + setTZ(tz) + try: + yield + finally: + setTZ(orig_tz) + + +@contextmanager +def ensure_clean( + filename=None, return_filelike: bool = False, **kwargs: Any +) -> Generator[Any, None, None]: + """ + Gets a temporary path and agrees to remove on close. + + This implementation does not use tempfile.mkstemp to avoid having a file handle. + If the code using the returned path wants to delete the file itself, windows + requires that no program has a file handle to it. + + Parameters + ---------- + filename : str (optional) + suffix of the created file. + return_filelike : bool (default False) + if True, returns a file-like which is *always* cleaned. Necessary for + savefig and other functions which want to append extensions. + **kwargs + Additional keywords are passed to open(). + + """ + folder = Path(tempfile.gettempdir()) + + if filename is None: + filename = "" + filename = str(uuid.uuid4()) + filename + path = folder / filename + + path.touch() + + handle_or_str: str | IO = str(path) + encoding = kwargs.pop("encoding", None) + if return_filelike: + kwargs.setdefault("mode", "w+b") + if encoding is None and "b" not in kwargs["mode"]: + encoding = "utf-8" + handle_or_str = open(path, encoding=encoding, **kwargs) + + try: + yield handle_or_str + finally: + if not isinstance(handle_or_str, str): + handle_or_str.close() + if path.is_file(): + path.unlink() + + +@contextmanager +def with_csv_dialect(name: str, **kwargs) -> Generator[None, None, None]: + """ + Context manager to temporarily register a CSV dialect for parsing CSV. + + Parameters + ---------- + name : str + The name of the dialect. + kwargs : mapping + The parameters for the dialect. + + Raises + ------ + ValueError : the name of the dialect conflicts with a builtin one. + + See Also + -------- + csv : Python's CSV library. 
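[Editor's usage sketch for the context manager documented above; "piped" is a hypothetical dialect name, and only pandas plus the standard library are assumed.]

from io import StringIO
import pandas as pd
import pandas._testing as tm

with tm.with_csv_dialect("piped", delimiter="|"):
    # the temporary dialect is visible to read_csv by name
    df = pd.read_csv(StringIO("a|b\n1|2"), dialect="piped")
# leaving the block unregisters "piped" again, even on error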
+ """ + import csv + + _BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"} + + if name in _BUILTIN_DIALECTS: + raise ValueError("Cannot override builtin dialect.") + + csv.register_dialect(name, **kwargs) + try: + yield + finally: + csv.unregister_dialect(name) + + +@contextmanager +def use_numexpr(use, min_elements=None) -> Generator[None, None, None]: + from pandas.core.computation import expressions as expr + + if min_elements is None: + min_elements = expr._MIN_ELEMENTS + + olduse = expr.USE_NUMEXPR + oldmin = expr._MIN_ELEMENTS + set_option("compute.use_numexpr", use) + expr._MIN_ELEMENTS = min_elements + try: + yield + finally: + expr._MIN_ELEMENTS = oldmin + set_option("compute.use_numexpr", olduse) + + +def raises_chained_assignment_error(warn=True, extra_warnings=(), extra_match=()): + from pandas._testing import assert_produces_warning + + if not warn: + from contextlib import nullcontext + + return nullcontext() + + if PYPY and not extra_warnings: + from contextlib import nullcontext + + return nullcontext() + elif PYPY and extra_warnings: + return assert_produces_warning( + extra_warnings, + match="|".join(extra_match), + ) + else: + if using_copy_on_write(): + warning = ChainedAssignmentError + match = ( + "A value is trying to be set on a copy of a DataFrame or Series " + "through chained assignment" + ) + else: + warning = FutureWarning # type: ignore[assignment] + # TODO update match + match = "ChainedAssignmentError" + if extra_warnings: + warning = (warning, *extra_warnings) # type: ignore[assignment] + return assert_produces_warning( + warning, + match="|".join((match, *extra_match)), + ) + + +def assert_cow_warning(warn=True, match=None, **kwargs): + """ + Assert that a warning is raised in the CoW warning mode. + + Parameters + ---------- + warn : bool, default True + By default, check that a warning is raised. Can be turned off by passing False. + match : str + The warning message to match against, if different from the default. + kwargs + Passed through to assert_produces_warning + """ + from pandas._testing import assert_produces_warning + + if not warn: + from contextlib import nullcontext + + return nullcontext() + + if not match: + match = "Setting a value on a view" + + return assert_produces_warning(FutureWarning, match=match, **kwargs) diff --git a/venv/lib/python3.10/site-packages/pandas/errors/__init__.py b/venv/lib/python3.10/site-packages/pandas/errors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..01094ba36b9dd5f3414c32a9a4f832b85902e021 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/errors/__init__.py @@ -0,0 +1,850 @@ +""" +Expose public exceptions & warnings +""" +from __future__ import annotations + +import ctypes + +from pandas._config.config import OptionError + +from pandas._libs.tslibs import ( + OutOfBoundsDatetime, + OutOfBoundsTimedelta, +) + +from pandas.util.version import InvalidVersion + + +class IntCastingNaNError(ValueError): + """ + Exception raised when converting (``astype``) an array with NaN to an integer type. + + Examples + -------- + >>> pd.DataFrame(np.array([[1, np.nan], [2, 3]]), dtype="i8") + Traceback (most recent call last): + IntCastingNaNError: Cannot convert non-finite values (NA or inf) to integer + """ + + +class NullFrequencyError(ValueError): + """ + Exception raised when a ``freq`` cannot be null. + + Particularly ``DatetimeIndex.shift``, ``TimedeltaIndex.shift``, + ``PeriodIndex.shift``. 
+ + Examples + -------- + >>> idx = pd.DatetimeIndex(["2011-01-01 10:00", "2011-01-01"], freq=None) + >>> idx.shift(2) + Traceback (most recent call last): + NullFrequencyError: Cannot shift with no freq + """ + + +class PerformanceWarning(Warning): + """ + Warning raised when there is a possible performance impact. + + Examples + -------- + >>> df = pd.DataFrame({"jim": [0, 0, 1, 1], + ... "joe": ["x", "x", "z", "y"], + ... "jolie": [1, 2, 3, 4]}) + >>> df = df.set_index(["jim", "joe"]) + >>> df + jolie + jim joe + 0 x 1 + x 2 + 1 z 3 + y 4 + >>> df.loc[(1, 'z')] # doctest: +SKIP + # PerformanceWarning: indexing past lexsort depth may impact performance. + df.loc[(1, 'z')] + jolie + jim joe + 1 z 3 + """ + + +class UnsupportedFunctionCall(ValueError): + """ + Exception raised when attempting to call an unsupported numpy function. + + For example, ``np.cumsum(groupby_object)``. + + Examples + -------- + >>> df = pd.DataFrame({"A": [0, 0, 1, 1], + ... "B": ["x", "x", "z", "y"], + ... "C": [1, 2, 3, 4]} + ... ) + >>> np.cumsum(df.groupby(["A"])) + Traceback (most recent call last): + UnsupportedFunctionCall: numpy operations are not valid with groupby. + Use .groupby(...).cumsum() instead + """ + + +class UnsortedIndexError(KeyError): + """ + Error raised when slicing a MultiIndex which has not been lexsorted. + + Subclass of `KeyError`. + + Examples + -------- + >>> df = pd.DataFrame({"cat": [0, 0, 1, 1], + ... "color": ["white", "white", "brown", "black"], + ... "lives": [4, 4, 3, 7]}, + ... ) + >>> df = df.set_index(["cat", "color"]) + >>> df + lives + cat color + 0 white 4 + white 4 + 1 brown 3 + black 7 + >>> df.loc[(0, "black"):(1, "white")] + Traceback (most recent call last): + UnsortedIndexError: 'Key length (2) was greater + than MultiIndex lexsort depth (1)' + """ + + +class ParserError(ValueError): + """ + Exception that is raised by an error encountered in parsing file contents. + + This is a generic error raised for errors encountered when functions like + `read_csv` or `read_html` are parsing contents of a file. + + See Also + -------- + read_csv : Read CSV (comma-separated) file into a DataFrame. + read_html : Read HTML table into a DataFrame. + + Examples + -------- + >>> data = '''a,b,c + ... cat,foo,bar + ... dog,foo,"baz''' + >>> from io import StringIO + >>> pd.read_csv(StringIO(data), skipfooter=1, engine='python') + Traceback (most recent call last): + ParserError: ',' expected after '"'. Error could possibly be due + to parsing errors in the skipped footer rows + """ + + +class DtypeWarning(Warning): + """ + Warning raised when reading different dtypes in a column from a file. + + Raised for a dtype incompatibility. This can happen whenever `read_csv` + or `read_table` encounter non-uniform dtypes in one or more columns of a given + CSV file. + + See Also + -------- + read_csv : Read CSV (comma-separated) file into a DataFrame. + read_table : Read general delimited file into a DataFrame. + + Notes + ----- + This warning is issued when dealing with larger files because the dtype + checking happens per chunk read. + + Despite the warning, the CSV file is read with mixed types in a single + column which will be an object type. See the examples below to better + understand this issue. + + Examples + -------- + This example creates and reads a large CSV file with a column that contains + `int` and `str`. + + >>> df = pd.DataFrame({'a': (['1'] * 100000 + ['X'] * 100000 + + ... ['1'] * 100000), + ... 
'b': ['b'] * 300000}) # doctest: +SKIP + >>> df.to_csv('test.csv', index=False) # doctest: +SKIP + >>> df2 = pd.read_csv('test.csv') # doctest: +SKIP + ... # DtypeWarning: Columns (0) have mixed types + + Important to notice that ``df2`` will contain both `str` and `int` for the + same input, '1'. + + >>> df2.iloc[262140, 0] # doctest: +SKIP + '1' + >>> type(df2.iloc[262140, 0]) # doctest: +SKIP + <class 'str'> + >>> df2.iloc[262150, 0] # doctest: +SKIP + 1 + >>> type(df2.iloc[262150, 0]) # doctest: +SKIP + <class 'int'> + + One way to solve this issue is using the `dtype` parameter in the + `read_csv` and `read_table` functions to make the conversion explicit: + + >>> df2 = pd.read_csv('test.csv', sep=',', dtype={'a': str}) # doctest: +SKIP + + No warning was issued. + """ + + +class EmptyDataError(ValueError): + """ + Exception raised in ``pd.read_csv`` when empty data or header is encountered. + + Examples + -------- + >>> from io import StringIO + >>> empty = StringIO() + >>> pd.read_csv(empty) + Traceback (most recent call last): + EmptyDataError: No columns to parse from file + """ + + +class ParserWarning(Warning): + """ + Warning raised when reading a file that doesn't use the default 'c' parser. + + Raised by `pd.read_csv` and `pd.read_table` when it is necessary to change + parsers, generally from the default 'c' parser to 'python'. + + It happens due to a lack of support or functionality for parsing a + particular attribute of a CSV file with the requested engine. + + Currently, 'c' unsupported options include the following parameters: + + 1. `sep` other than a single character (e.g. regex separators) + 2. `skipfooter` higher than 0 + 3. `sep=None` with `delim_whitespace=False` + + The warning can be avoided by adding `engine='python'` as a parameter in + `pd.read_csv` and `pd.read_table` methods. + + See Also + -------- + pd.read_csv : Read CSV (comma-separated) file into DataFrame. + pd.read_table : Read general delimited file into DataFrame. + + Examples + -------- + Using a `sep` in `pd.read_csv` other than a single character: + + >>> import io + >>> csv = '''a;b;c + ... 1;1,8 + ... 1;2,1''' + >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]') # doctest: +SKIP + ... # ParserWarning: Falling back to the 'python' engine... + + Adding `engine='python'` to `pd.read_csv` removes the warning: + + >>> df = pd.read_csv(io.StringIO(csv), sep='[;,]', engine='python') + """ + + +class MergeError(ValueError): + """ + Exception raised when merging data. + + Subclass of ``ValueError``. + + Examples + -------- + >>> left = pd.DataFrame({"a": ["a", "b", "b", "d"], + ... "b": ["cat", "dog", "weasel", "horse"]}, + ... index=range(4)) + >>> right = pd.DataFrame({"a": ["a", "b", "c", "d"], + ... "c": ["meow", "bark", "chirp", "nay"]}, + ... index=range(4)).set_index("a") + >>> left.join(right, on="a", validate="one_to_one",) + Traceback (most recent call last): + MergeError: Merge keys are not unique in left dataset; not a one-to-one merge + """ + + +class AbstractMethodError(NotImplementedError): + """ + Raise this error instead of NotImplementedError for abstract methods. + + Examples + -------- + >>> class Foo: + ... @classmethod + ... def classmethod(cls): + ... raise pd.errors.AbstractMethodError(cls, methodtype="classmethod") + ... def method(self): + ... 
raise pd.errors.AbstractMethodError(self) + >>> test = Foo.classmethod() + Traceback (most recent call last): + AbstractMethodError: This classmethod must be defined in the concrete class Foo + + >>> test2 = Foo().method() + Traceback (most recent call last): + AbstractMethodError: This method must be defined in the concrete class Foo + """ + + def __init__(self, class_instance, methodtype: str = "method") -> None: + types = {"method", "classmethod", "staticmethod", "property"} + if methodtype not in types: + raise ValueError( + f"methodtype must be one of {types}, got {methodtype} instead." + ) + self.methodtype = methodtype + self.class_instance = class_instance + + def __str__(self) -> str: + if self.methodtype == "classmethod": + name = self.class_instance.__name__ + else: + name = type(self.class_instance).__name__ + return f"This {self.methodtype} must be defined in the concrete class {name}" + + +class NumbaUtilError(Exception): + """ + Error raised for unsupported Numba engine routines. + + Examples + -------- + >>> df = pd.DataFrame({"key": ["a", "a", "b", "b"], "data": [1, 2, 3, 4]}, + ... columns=["key", "data"]) + >>> def incorrect_function(x): + ... return sum(x) * 2.7 + >>> df.groupby("key").agg(incorrect_function, engine="numba") + Traceback (most recent call last): + NumbaUtilError: The first 2 arguments to incorrect_function + must be ['values', 'index'] + """ + + +class DuplicateLabelError(ValueError): + """ + Error raised when an operation would introduce duplicate labels. + + Examples + -------- + >>> s = pd.Series([0, 1, 2], index=['a', 'b', 'c']).set_flags( + ... allows_duplicate_labels=False + ... ) + >>> s.reindex(['a', 'a', 'b']) + Traceback (most recent call last): + ... + DuplicateLabelError: Index has duplicates. + positions + label + a [0, 1] + """ + + +class InvalidIndexError(Exception): + """ + Exception raised when attempting to use an invalid index key. + + Examples + -------- + >>> idx = pd.MultiIndex.from_product([["x", "y"], [0, 1]]) + >>> df = pd.DataFrame([[1, 1, 2, 2], + ... [3, 3, 4, 4]], columns=idx) + >>> df + x y + 0 1 0 1 + 0 1 1 2 2 + 1 3 3 4 4 + >>> df[:, 0] + Traceback (most recent call last): + InvalidIndexError: (slice(None, None, None), 0) + """ + + +class DataError(Exception): + """ + Exception raised when performing an operation on non-numerical data. + + For example, calling ``ohlc`` on a non-numerical column or a function + on a rolling window. + + Examples + -------- + >>> ser = pd.Series(['a', 'b', 'c']) + >>> ser.rolling(2).sum() + Traceback (most recent call last): + DataError: No numeric types to aggregate + """ + + +class SpecificationError(Exception): + """ + Exception raised by ``agg`` when the functions are ill-specified. + + The exception is raised in two scenarios. + + The first is calling ``agg`` on a + DataFrame or Series using a nested renamer (dict-of-dict). + + The second is calling ``agg`` on a DataFrame with duplicated function + names without assigning column names. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2], + ... 'B': range(5), + ... 'C': range(5)}) + >>> df.groupby('A').B.agg({'foo': 'count'}) # doctest: +SKIP + ... # SpecificationError: nested renamer is not supported + + >>> df.groupby('A').agg({'B': {'foo': ['sum', 'max']}}) # doctest: +SKIP + ... # SpecificationError: nested renamer is not supported + + >>> df.groupby('A').agg(['min', 'min']) # doctest: +SKIP + ... 
# SpecificationError: nested renamer is not supported + """ + + +class SettingWithCopyError(ValueError): + """ + Exception raised when trying to set on a copied slice from a ``DataFrame``. + + The ``mode.chained_assignment`` needs to be set to 'raise'. This can + happen unintentionally when using chained indexing. + + For more information on evaluation order, + see :ref:`the user guide <indexing.evaluation_order>`. + + For more information on view vs. copy, + see :ref:`the user guide <indexing.view_versus_copy>`. + + Examples + -------- + >>> pd.options.mode.chained_assignment = 'raise' + >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A']) + >>> df.loc[0:3]['A'] = 'a' # doctest: +SKIP + ... # SettingWithCopyError: A value is trying to be set on a copy of a... + """ + + +class SettingWithCopyWarning(Warning): + """ + Warning raised when trying to set on a copied slice from a ``DataFrame``. + + The ``mode.chained_assignment`` needs to be set to 'warn', + which is the default option. This can happen unintentionally when using + chained indexing. + + For more information on evaluation order, + see :ref:`the user guide <indexing.evaluation_order>`. + + For more information on view vs. copy, + see :ref:`the user guide <indexing.view_versus_copy>`. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A']) + >>> df.loc[0:3]['A'] = 'a' # doctest: +SKIP + ... # SettingWithCopyWarning: A value is trying to be set on a copy of a... + """ + + +class ChainedAssignmentError(Warning): + """ + Warning raised when trying to set using chained assignment. + + When the ``mode.copy_on_write`` option is enabled, chained assignment can + never work. In such a situation, we are always setting into a temporary + object that is the result of an indexing operation (getitem), which under + Copy-on-Write always behaves as a copy. Thus, assigning through a chain + can never update the original Series or DataFrame. + + For more information on view vs. copy, + see :ref:`the user guide <indexing.view_versus_copy>`. + + Examples + -------- + >>> pd.options.mode.copy_on_write = True + >>> df = pd.DataFrame({'A': [1, 1, 1, 2, 2]}, columns=['A']) + >>> df["A"][0:3] = 10 # doctest: +SKIP + ... # ChainedAssignmentError: ... + >>> pd.options.mode.copy_on_write = False + """ + + +_chained_assignment_msg = ( + "A value is trying to be set on a copy of a DataFrame or Series " + "through chained assignment.\n" + "When using the Copy-on-Write mode, such chained assignment never works " + "to update the original DataFrame or Series, because the intermediate " + "object on which we are setting values always behaves as a copy.\n\n" + "Try using '.loc[row_indexer, col_indexer] = value' instead, to perform " + "the assignment in a single step.\n\n" + "See the caveats in the documentation: " + "https://pandas.pydata.org/pandas-docs/stable/user_guide/" + "indexing.html#returning-a-view-versus-a-copy" +) + + +_chained_assignment_method_msg = ( + "A value is trying to be set on a copy of a DataFrame or Series " + "through chained assignment using an inplace method.\n" + "When using the Copy-on-Write mode, such inplace method never works " + "to update the original DataFrame or Series, because the intermediate " + "object on which we are setting values always behaves as a copy.\n\n" + "For example, when doing 'df[col].method(value, inplace=True)', try " + "using 'df.method({col: value}, inplace=True)' instead, to perform " + "the operation inplace on the original object.\n\n" +) + + +_chained_assignment_warning_msg = ( + "ChainedAssignmentError: behaviour will change in pandas 3.0!\n" + "You are setting values through chained assignment. 
Currently this works " + "in certain cases, but when using Copy-on-Write (which will become the " + "default behaviour in pandas 3.0) this will never work to update the " + "original DataFrame or Series, because the intermediate object on which " + "we are setting values will behave as a copy.\n" + "A typical example is when you are setting values in a column of a " + "DataFrame, like:\n\n" + 'df["col"][row_indexer] = value\n\n' + 'Use `df.loc[row_indexer, "col"] = values` instead, to perform the ' + "assignment in a single step and ensure this keeps updating the original `df`.\n\n" + "See the caveats in the documentation: " + "https://pandas.pydata.org/pandas-docs/stable/user_guide/" + "indexing.html#returning-a-view-versus-a-copy\n" +) + + +_chained_assignment_warning_method_msg = ( + "A value is trying to be set on a copy of a DataFrame or Series " + "through chained assignment using an inplace method.\n" + "The behavior will change in pandas 3.0. This inplace method will " + "never work because the intermediate object on which we are setting " + "values always behaves as a copy.\n\n" + "For example, when doing 'df[col].method(value, inplace=True)', try " + "using 'df.method({col: value}, inplace=True)' or " + "df[col] = df[col].method(value) instead, to perform " + "the operation inplace on the original object.\n\n" +) + + +def _check_cacher(obj): + # This is a mess, selection paths that return a view set the _cacher attribute + # on the Series; most of them also set _item_cache which adds 1 to our relevant + # reference count, but iloc does not, so we have to check if we are actually + # in the item cache + if hasattr(obj, "_cacher"): + parent = obj._cacher[1]() + # parent could be dead + if parent is None: + return False + if hasattr(parent, "_item_cache"): + if obj._cacher[0] in parent._item_cache: + # Check if we are actually the item from item_cache, iloc creates a + # new object + return obj is parent._item_cache[obj._cacher[0]] + return False + + +class NumExprClobberingError(NameError): + """ + Exception raised when trying to use a built-in numexpr name as a variable name. + + ``eval`` or ``query`` will throw the error if the engine is set + to 'numexpr'. 'numexpr' is the default engine value for these methods if the + numexpr package is installed. + + Examples + -------- + >>> df = pd.DataFrame({'abs': [1, 1, 1]}) + >>> df.query("abs > 2") # doctest: +SKIP + ... # NumExprClobberingError: Variables in expression "(abs) > (2)" overlap... + >>> sin, a = 1, 2 + >>> pd.eval("sin + a", engine='numexpr') # doctest: +SKIP + ... # NumExprClobberingError: Variables in expression "(sin) + (a)" overlap... + """ + + +class UndefinedVariableError(NameError): + """ + Exception raised by ``query`` or ``eval`` when using an undefined variable name. + + It will also specify whether the undefined variable is local or not. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 1, 1]}) + >>> df.query("A > x") # doctest: +SKIP + ... # UndefinedVariableError: name 'x' is not defined + >>> df.query("A > @y") # doctest: +SKIP + ... # UndefinedVariableError: local variable 'y' is not defined + >>> pd.eval('x + 1') # doctest: +SKIP + ... 
# UndefinedVariableError: name 'x' is not defined + """ + + def __init__(self, name: str, is_local: bool | None = None) -> None: + base_msg = f"{repr(name)} is not defined" + if is_local: + msg = f"local variable {base_msg}" + else: + msg = f"name {base_msg}" + super().__init__(msg) + + +class IndexingError(Exception): + """ + Exception is raised when trying to index and there is a mismatch in dimensions. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 1, 1]}) + >>> df.loc[..., ..., 'A'] # doctest: +SKIP + ... # IndexingError: indexer may only contain one '...' entry + >>> df = pd.DataFrame({'A': [1, 1, 1]}) + >>> df.loc[1, ..., ...] # doctest: +SKIP + ... # IndexingError: Too many indexers + >>> df[pd.Series([True], dtype=bool)] # doctest: +SKIP + ... # IndexingError: Unalignable boolean Series provided as indexer... + >>> s = pd.Series(range(2), + ... index = pd.MultiIndex.from_product([["a", "b"], ["c"]])) + >>> s.loc["a", "c", "d"] # doctest: +SKIP + ... # IndexingError: Too many indexers + """ + + +class PyperclipException(RuntimeError): + """ + Exception raised when clipboard functionality is unsupported. + + Raised by ``to_clipboard()`` and ``read_clipboard()``. + """ + + +class PyperclipWindowsException(PyperclipException): + """ + Exception raised when clipboard functionality is unsupported by Windows. + + Access to the clipboard handle is denied because some other + window process is accessing it. + """ + + def __init__(self, message: str) -> None: + # attr only exists on Windows, so typing fails on other platforms + message += f" ({ctypes.WinError()})" # type: ignore[attr-defined] + super().__init__(message) + + +class CSSWarning(UserWarning): + """ + Warning raised when converting CSS styling fails. + + This can be due to the styling not having an equivalent value or because the + styling isn't properly formatted. + + Examples + -------- + >>> df = pd.DataFrame({'A': [1, 1, 1]}) + >>> df.style.applymap( + ... lambda x: 'background-color: blueGreenRed;' + ... ).to_excel('styled.xlsx') # doctest: +SKIP + CSSWarning: Unhandled color format: 'blueGreenRed' + >>> df.style.applymap( + ... lambda x: 'border: 1px solid red red;' + ... ).to_excel('styled.xlsx') # doctest: +SKIP + CSSWarning: Too many tokens provided to "border" (expected 1-3) + """ + + +class PossibleDataLossError(Exception): + """ + Exception raised when trying to open an HDFStore file that is already open. + + Examples + -------- + >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP + >>> store.open("w") # doctest: +SKIP + ... # PossibleDataLossError: Re-opening the file [my-store] with mode [a]... + """ + + +class ClosedFileError(Exception): + """ + Exception is raised when trying to perform an operation on a closed HDFStore file. + + Examples + -------- + >>> store = pd.HDFStore('my-store', 'a') # doctest: +SKIP + >>> store.close() # doctest: +SKIP + >>> store.keys() # doctest: +SKIP + ... # ClosedFileError: my-store file is not open! + """ + + +class IncompatibilityWarning(Warning): + """ + Warning raised when trying to use where criteria on an incompatible HDF5 file. + """ + + +class AttributeConflictWarning(Warning): + """ + Warning raised when index attributes conflict when using HDFStore. + + Occurs when attempting to append an index with a different + name than the existing index on an HDFStore or attempting to append an index with a + different frequency than the existing index on an HDFStore. + + Examples + -------- + >>> idx1 = pd.Index(['a', 'b'], name='name1') + >>> df1 = pd.DataFrame([[1, 2], [3, 4]], index=idx1) + >>> df1.to_hdf('file', 'data', 'w', append=True) # doctest: +SKIP + >>> idx2 = pd.Index(['c', 'd'], name='name2') + >>> df2 = pd.DataFrame([[5, 6], [7, 8]], index=idx2) + >>> df2.to_hdf('file', 'data', 'a', append=True) # doctest: +SKIP + AttributeConflictWarning: the [index_name] attribute of the existing index is + [name1] which conflicts with the new [name2]... + """ + + +class DatabaseError(OSError): + """ + Error raised when executing SQL with bad syntax or SQL that throws an error. + + Examples + -------- + >>> from sqlite3 import connect + >>> conn = connect(':memory:') + >>> pd.read_sql('select * test', conn) # doctest: +SKIP + ... # DatabaseError: Execution failed on sql 'test': near "test": syntax error + """ + + +class PossiblePrecisionLoss(Warning): + """ + Warning raised by to_stata on a column with a value outside or at the limit + of the int64 range. + + When a column's value is outside or at the limit of the int64 range, the + column is converted to a float64 dtype. + + Examples + -------- + >>> df = pd.DataFrame({"s": pd.Series([1, 2**53], dtype=np.int64)}) + >>> df.to_stata('test') # doctest: +SKIP + ... # PossiblePrecisionLoss: Column converted from int64 to float64... + """ + + +class ValueLabelTypeMismatch(Warning): + """ + Warning raised by to_stata on a category column that contains non-string values. + + Examples + -------- + >>> df = pd.DataFrame({"categories": pd.Series(["a", 2], dtype="category")}) + >>> df.to_stata('test') # doctest: +SKIP + ... # ValueLabelTypeMismatch: Stata value labels (pandas categories) must be str... + """ + + +class InvalidColumnName(Warning): + """ + Warning raised by to_stata when a column contains an invalid Stata name. + + Because the column name is an invalid Stata variable, the name needs to be + converted. + + Examples + -------- + >>> df = pd.DataFrame({"0categories": pd.Series([2, 2])}) + >>> df.to_stata('test') # doctest: +SKIP + ... # InvalidColumnName: Not all pandas column names were valid Stata variable... + """ + + +class CategoricalConversionWarning(Warning): + """ + Warning raised when reading a partially labeled Stata file using an iterator. + + Examples + -------- + >>> from pandas.io.stata import StataReader + >>> with StataReader('dta_file', chunksize=2) as reader: # doctest: +SKIP + ... for i, block in enumerate(reader): + ... print(i, block) + ... # CategoricalConversionWarning: One or more series with value labels... + """ + + +class LossySetitemError(Exception): + """ + Raised when trying to do a __setitem__ on an np.ndarray that is not lossless. + + Notes + ----- + This is an internal error. + """ + + +class NoBufferPresent(Exception): + """ + Exception is raised in _get_data_buffer to signal that there is no requested buffer. + """ + + +class InvalidComparison(Exception): + """ + Exception is raised by _validate_comparison_value to indicate an invalid comparison. + + Notes + ----- + This is an internal error. 
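[Editor's illustration of how these public exceptions are typically consumed; it assumes nothing beyond pandas itself.]

from io import StringIO
import pandas as pd

try:
    pd.read_csv(StringIO(""))  # empty input
except pd.errors.EmptyDataError as err:
    print(f"caught: {err}")  # "No columns to parse from file"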
+ """ + + +__all__ = [ + "AbstractMethodError", + "AttributeConflictWarning", + "CategoricalConversionWarning", + "ClosedFileError", + "CSSWarning", + "DatabaseError", + "DataError", + "DtypeWarning", + "DuplicateLabelError", + "EmptyDataError", + "IncompatibilityWarning", + "IntCastingNaNError", + "InvalidColumnName", + "InvalidComparison", + "InvalidIndexError", + "InvalidVersion", + "IndexingError", + "LossySetitemError", + "MergeError", + "NoBufferPresent", + "NullFrequencyError", + "NumbaUtilError", + "NumExprClobberingError", + "OptionError", + "OutOfBoundsDatetime", + "OutOfBoundsTimedelta", + "ParserError", + "ParserWarning", + "PerformanceWarning", + "PossibleDataLossError", + "PossiblePrecisionLoss", + "PyperclipException", + "PyperclipWindowsException", + "SettingWithCopyError", + "SettingWithCopyWarning", + "SpecificationError", + "UndefinedVariableError", + "UnsortedIndexError", + "UnsupportedFunctionCall", + "ValueLabelTypeMismatch", +] diff --git a/venv/lib/python3.10/site-packages/pandas/errors/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/errors/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9bb09cc6d67901a606ee0065239e8921b0b6ebc Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/errors/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/__init__.py b/venv/lib/python3.10/site-packages/pandas/plotting/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..55c861e384d679654b8615d4cb5808f536fd8f2e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/plotting/__init__.py @@ -0,0 +1,98 @@ +""" +Plotting public API. + +Authors of third-party plotting backends should implement a module with a +public ``plot(data, kind, **kwargs)``. The parameter `data` will contain +the data structure and can be a `Series` or a `DataFrame`. For example, +for ``df.plot()`` the parameter `data` will contain the DataFrame `df`. +In some cases, the data structure is transformed before being sent to +the backend (see PlotAccessor.__call__ in pandas/plotting/_core.py for +the exact transformations). + +The parameter `kind` will be one of: + +- line +- bar +- barh +- box +- hist +- kde +- area +- pie +- scatter +- hexbin + +See the pandas API reference for documentation on each kind of plot. + +Any other keyword argument is currently assumed to be backend specific, +but some parameters may be unified and added to the signature in the +future (e.g. `title` which should be useful for any backend). + +Currently, all the Matplotlib functions in pandas are accessed through +the selected backend. For example, `pandas.plotting.boxplot` (equivalent +to `DataFrame.boxplot`) is also accessed in the selected backend. This +is expected to change, and the exact API is under discussion. 
But with + the current version, backends are expected to implement the following functions: + + - plot (described above, used for `Series.plot` and `DataFrame.plot`) + - hist_series and hist_frame (for `Series.hist` and `DataFrame.hist`) + - boxplot (`pandas.plotting.boxplot(df)` equivalent to `DataFrame.boxplot`) + - boxplot_frame and boxplot_frame_groupby + - register and deregister (register converters for the tick formats) + - Plots not called as `Series` and `DataFrame` methods: + - table + - andrews_curves + - autocorrelation_plot + - bootstrap_plot + - lag_plot + - parallel_coordinates + - radviz + - scatter_matrix + + Use the code in pandas/plotting/_matplotlib.py and + https://github.com/pyviz/hvplot as a reference on how to write a backend. + + For the discussion about the API see + https://github.com/pandas-dev/pandas/issues/26747. + """ +from pandas.plotting._core import ( + PlotAccessor, + boxplot, + boxplot_frame, + boxplot_frame_groupby, + hist_frame, + hist_series, +) +from pandas.plotting._misc import ( + andrews_curves, + autocorrelation_plot, + bootstrap_plot, + deregister as deregister_matplotlib_converters, + lag_plot, + parallel_coordinates, + plot_params, + radviz, + register as register_matplotlib_converters, + scatter_matrix, + table, +) + +__all__ = [ + "PlotAccessor", + "boxplot", + "boxplot_frame", + "boxplot_frame_groupby", + "hist_frame", + "hist_series", + "scatter_matrix", + "radviz", + "andrews_curves", + "bootstrap_plot", + "parallel_coordinates", + "lag_plot", + "autocorrelation_plot", + "table", + "plot_params", + "register_matplotlib_converters", + "deregister_matplotlib_converters", +] diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/__pycache__/_misc.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/plotting/__pycache__/_misc.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ff8d93bda6aca0330f404ed9f658b55133bea48b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/plotting/__pycache__/_misc.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_core.py b/venv/lib/python3.10/site-packages/pandas/plotting/_core.py new file mode 100644 index 0000000000000000000000000000000000000000..cb5598a98d5afbc93954d74e3ecc78b4e572606d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/plotting/_core.py @@ -0,0 +1,1946 @@ +from __future__ import annotations + +import importlib +from typing import ( + TYPE_CHECKING, + Callable, + Literal, +) + +from pandas._config import get_option + +from pandas.util._decorators import ( + Appender, + Substitution, +) + +from pandas.core.dtypes.common import ( + is_integer, + is_list_like, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) + +from pandas.core.base import PandasObject + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Sequence, + ) + import types + + from matplotlib.axes import Axes + import numpy as np + + from pandas._typing import IndexLabel + + from pandas import ( + DataFrame, + Series, + ) + from pandas.core.groupby.generic import DataFrameGroupBy + + +def hist_series( + self: Series, + by=None, + ax=None, + grid: bool = True, + xlabelsize: int | None = None, + xrot: float | None = None, + ylabelsize: int | None = None, + yrot: float | None = None, + figsize: tuple[int, int] | None = None, + bins: int | Sequence[int] = 10, + backend: str | None = None, + legend: bool = False, + **kwargs, +): + """ + Draw histogram of the input series using matplotlib. 
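[Editor's sketch of the backend dispatch described in the module docstring above; "matplotlib" is the default backend and the only one assumed to be installed.]

import pandas as pd

pd.set_option("plotting.backend", "matplotlib")  # the default
ser = pd.Series([1, 2, 2, 4, 6, 6])
ax = ser.plot(kind="hist")  # routed through the active backend's plot()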
+ + Parameters + ---------- + by : object, optional + If passed, then used to form histograms for separate groups. + ax : matplotlib axis object + If not passed, uses gca(). + grid : bool, default True + Whether to show axis grid lines. + xlabelsize : int, default None + If specified changes the x-axis label size. + xrot : float, default None + Rotation of x axis labels. + ylabelsize : int, default None + If specified changes the y-axis label size. + yrot : float, default None + Rotation of y axis labels. + figsize : tuple, default None + Figure size in inches by default. + bins : int or sequence, default 10 + Number of histogram bins to be used. If an integer is given, bins + 1 + bin edges are calculated and returned. If bins is a sequence, gives + bin edges, including left edge of first bin and right edge of last + bin. In this case, bins is returned unmodified. + backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to + specify the ``plotting.backend`` for the whole session, set + ``pd.options.plotting.backend``. + legend : bool, default False + Whether to show the legend. + + **kwargs + To be passed to the actual plotting function. + + Returns + ------- + matplotlib.AxesSubplot + A histogram plot. + + See Also + -------- + matplotlib.axes.Axes.hist : Plot a histogram using matplotlib. + + Examples + -------- + For Series: + + .. plot:: + :context: close-figs + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst) + >>> hist = ser.hist() + + For Groupby: + + .. plot:: + :context: close-figs + + >>> lst = ['a', 'a', 'a', 'b', 'b', 'b'] + >>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst) + >>> hist = ser.groupby(level=0).hist() + """ + plot_backend = _get_plot_backend(backend) + return plot_backend.hist_series( + self, + by=by, + ax=ax, + grid=grid, + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, + figsize=figsize, + bins=bins, + legend=legend, + **kwargs, + ) + + +def hist_frame( + data: DataFrame, + column: IndexLabel | None = None, + by=None, + grid: bool = True, + xlabelsize: int | None = None, + xrot: float | None = None, + ylabelsize: int | None = None, + yrot: float | None = None, + ax=None, + sharex: bool = False, + sharey: bool = False, + figsize: tuple[int, int] | None = None, + layout: tuple[int, int] | None = None, + bins: int | Sequence[int] = 10, + backend: str | None = None, + legend: bool = False, + **kwargs, +): + """ + Make a histogram of the DataFrame's columns. + + A `histogram`_ is a representation of the distribution of data. + This function calls :meth:`matplotlib.pyplot.hist`, on each series in + the DataFrame, resulting in one histogram per column. + + .. _histogram: https://en.wikipedia.org/wiki/Histogram + + Parameters + ---------- + data : DataFrame + The pandas object holding the data. + column : str or sequence, optional + If passed, will be used to limit data to a subset of columns. + by : object, optional + If passed, then used to form histograms for separate groups. + grid : bool, default True + Whether to show axis grid lines. + xlabelsize : int, default None + If specified changes the x-axis label size. + xrot : float, default None + Rotation of x axis labels. For example, a value of 90 displays the + x labels rotated 90 degrees clockwise. + ylabelsize : int, default None + If specified changes the y-axis label size. + yrot : float, default None + Rotation of y axis labels. 
For example, a value of 90 displays the + y labels rotated 90 degrees clockwise. + ax : Matplotlib axes object, default None + The axes to plot the histogram on. + sharex : bool, default True if ax is None else False + In case subplots=True, share x axis and set some x axis labels to + invisible; defaults to True if ax is None otherwise False if an ax + is passed in. + Note that passing in both an ax and sharex=True will alter all x axis + labels for all subplots in a figure. + sharey : bool, default False + In case subplots=True, share y axis and set some y axis labels to + invisible. + figsize : tuple, optional + The size in inches of the figure to create. Uses the value in + `matplotlib.rcParams` by default. + layout : tuple, optional + Tuple of (rows, columns) for the layout of the histograms. + bins : int or sequence, default 10 + Number of histogram bins to be used. If an integer is given, bins + 1 + bin edges are calculated and returned. If bins is a sequence, gives + bin edges, including left edge of first bin and right edge of last + bin. In this case, bins is returned unmodified. + + backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to + specify the ``plotting.backend`` for the whole session, set + ``pd.options.plotting.backend``. + + legend : bool, default False + Whether to show the legend. + + **kwargs + All other plotting keyword arguments to be passed to + :meth:`matplotlib.pyplot.hist`. + + Returns + ------- + matplotlib.AxesSubplot or numpy.ndarray of them + + See Also + -------- + matplotlib.pyplot.hist : Plot a histogram using matplotlib. + + Examples + -------- + This example draws a histogram based on the length and width of + some animals, displayed in three bins + + .. plot:: + :context: close-figs + + >>> data = {'length': [1.5, 0.5, 1.2, 0.9, 3], + ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]} + >>> index = ['pig', 'rabbit', 'duck', 'chicken', 'horse'] + >>> df = pd.DataFrame(data, index=index) + >>> hist = df.hist(bins=3) + """ + plot_backend = _get_plot_backend(backend) + return plot_backend.hist_frame( + data, + column=column, + by=by, + grid=grid, + xlabelsize=xlabelsize, + xrot=xrot, + ylabelsize=ylabelsize, + yrot=yrot, + ax=ax, + sharex=sharex, + sharey=sharey, + figsize=figsize, + layout=layout, + legend=legend, + bins=bins, + **kwargs, + ) + + +_boxplot_doc = """ +Make a box plot from DataFrame columns. + +Make a box-and-whisker plot from DataFrame columns, optionally grouped +by some other columns. A box plot is a method for graphically depicting +groups of numerical data through their quartiles. +The box extends from the Q1 to Q3 quartile values of the data, +with a line at the median (Q2). The whiskers extend from the edges +of box to show the range of the data. By default, they extend no more than +`1.5 * IQR (IQR = Q3 - Q1)` from the edges of the box, ending at the farthest +data point within that interval. Outliers are plotted as separate dots. + +For further details see +Wikipedia's entry for `boxplot `_. + +Parameters +---------- +%(data)s\ +column : str or list of str, optional + Column name or list of names, or vector. + Can be any valid input to :meth:`pandas.DataFrame.groupby`. +by : str or array-like, optional + Column in the DataFrame to :meth:`pandas.DataFrame.groupby`. + One box-plot will be done per value of columns in `by`. +ax : object of class matplotlib.axes.Axes, optional + The matplotlib axes to be used by boxplot. 
+fontsize : float or str + Tick label font size in points or as a string (e.g., `large`). +rot : float, default 0 + The rotation angle of labels (in degrees) + with respect to the screen coordinate system. +grid : bool, default True + Setting this to True will show the grid. +figsize : A tuple (width, height) in inches + The size of the figure to create in matplotlib. +layout : tuple (rows, columns), optional + For example, (3, 5) will display the subplots + using 3 rows and 5 columns, starting from the top-left. +return_type : {'axes', 'dict', 'both'} or None, default 'axes' + The kind of object to return. The default is ``axes``. + + * 'axes' returns the matplotlib axes the boxplot is drawn on. + * 'dict' returns a dictionary whose values are the matplotlib + Lines of the boxplot. + * 'both' returns a namedtuple with the axes and dict. + * when grouping with ``by``, a Series mapping columns to + ``return_type`` is returned. + + If ``return_type`` is `None`, a NumPy array + of axes with the same shape as ``layout`` is returned. +%(backend)s\ + +**kwargs + All other plotting keyword arguments to be passed to + :func:`matplotlib.pyplot.boxplot`. + +Returns +------- +result + See Notes. + +See Also +-------- +pandas.Series.plot.hist: Make a histogram. +matplotlib.pyplot.boxplot : Matplotlib equivalent plot. + +Notes +----- +The return type depends on the `return_type` parameter: + +* 'axes' : object of class matplotlib.axes.Axes +* 'dict' : dict of matplotlib.lines.Line2D objects +* 'both' : a namedtuple with structure (ax, lines) + +For data grouped with ``by``, return a Series of the above or a numpy +array: + +* :class:`~pandas.Series` +* :class:`~numpy.array` (for ``return_type = None``) + +Use ``return_type='dict'`` when you want to tweak the appearance +of the lines after plotting. In this case a dict containing the Lines +making up the boxes, caps, fliers, medians, and whiskers is returned. + +Examples +-------- + +Boxplots can be created for every column in the dataframe +by ``df.boxplot()`` or indicating the columns to be used: + +.. plot:: + :context: close-figs + + >>> np.random.seed(1234) + >>> df = pd.DataFrame(np.random.randn(10, 4), + ... columns=['Col1', 'Col2', 'Col3', 'Col4']) + >>> boxplot = df.boxplot(column=['Col1', 'Col2', 'Col3']) # doctest: +SKIP + +Boxplots of variables distributions grouped by the values of a third +variable can be created using the option ``by``. For instance: + +.. plot:: + :context: close-figs + + >>> df = pd.DataFrame(np.random.randn(10, 2), + ... columns=['Col1', 'Col2']) + >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', + ... 'B', 'B', 'B', 'B', 'B']) + >>> boxplot = df.boxplot(by='X') + +A list of strings (i.e. ``['X', 'Y']``) can be passed to boxplot +in order to group the data by combination of the variables in the x-axis: + +.. plot:: + :context: close-figs + + >>> df = pd.DataFrame(np.random.randn(10, 3), + ... columns=['Col1', 'Col2', 'Col3']) + >>> df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', + ... 'B', 'B', 'B', 'B', 'B']) + >>> df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A', + ... 'B', 'A', 'B', 'A', 'B']) + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y']) + +The layout of boxplot can be adjusted giving a tuple to ``layout``: + +.. plot:: + :context: close-figs + + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', + ... layout=(2, 1)) + +Additional formatting can be done to the boxplot, like suppressing the grid +(``grid=False``), rotating the labels in the x-axis (i.e. 
``rot=45``) +or changing the fontsize (i.e. ``fontsize=15``): + +.. plot:: + :context: close-figs + + >>> boxplot = df.boxplot(grid=False, rot=45, fontsize=15) # doctest: +SKIP + +The parameter ``return_type`` can be used to select the type of element +returned by `boxplot`. When ``return_type='axes'`` is selected, +the matplotlib axes on which the boxplot is drawn are returned: + + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], return_type='axes') + >>> type(boxplot) + <class 'matplotlib.axes._axes.Axes'> + +When grouping with ``by``, a Series mapping columns to ``return_type`` +is returned: + + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', + ... return_type='axes') + >>> type(boxplot) + <class 'pandas.core.series.Series'> + +If ``return_type`` is `None`, a NumPy array of axes with the same shape +as ``layout`` is returned: + + >>> boxplot = df.boxplot(column=['Col1', 'Col2'], by='X', + ... return_type=None) + >>> type(boxplot) + <class 'numpy.ndarray'> +""" + +_backend_doc = """\ +backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to + specify the ``plotting.backend`` for the whole session, set + ``pd.options.plotting.backend``. +""" + + +_bar_or_line_doc = """ + Parameters + ---------- + x : label or position, optional + Allows plotting of one column versus another. If not specified, + the index of the DataFrame is used. + y : label or position, optional + Allows plotting of one column versus another. If not specified, + all numerical columns are used. + color : str, array-like, or dict, optional + The color for each of the DataFrame's columns. Possible values are: + + - A single color string referred to by name, RGB or RGBA code, + for instance 'red' or '#a98d19'. + + - A sequence of color strings referred to by name, RGB or RGBA + code, which will be used for each column recursively. For + instance, with ['green','yellow'] each column's %(kind)s will be filled in + green or yellow, alternately. If there is only a single column to + be plotted, then only the first color from the color list will be + used. + + - A dict of the form {column name : color}, so that each column will be + colored accordingly. For example, if your columns are called `a` and + `b`, then passing {'a': 'green', 'b': 'red'} will color %(kind)ss for + column `a` in green and %(kind)ss for column `b` in red. + + **kwargs + Additional keyword arguments are documented in + :meth:`DataFrame.plot`. + + Returns + ------- + matplotlib.axes.Axes or np.ndarray of them + An ndarray is returned with one :class:`matplotlib.axes.Axes` + per column when ``subplots=True``.
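    (Editorial sketch, not part of the original template.) For example, the
    ``color`` dict form combined with ``subplots=True`` yields one Axes per
    column:

    >>> df = pd.DataFrame({'a': [1, 2, 3], 'b': [3, 2, 1]})
    >>> axes = df.plot(subplots=True, color={'a': 'green', 'b': 'red'})  # doctest: +SKIP
    >>> axes.shape  # one Axes per column  # doctest: +SKIP
    (2,)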
+""" + + +@Substitution(data="data : DataFrame\n The data to visualize.\n", backend="") +@Appender(_boxplot_doc) +def boxplot( + data: DataFrame, + column: str | list[str] | None = None, + by: str | list[str] | None = None, + ax: Axes | None = None, + fontsize: float | str | None = None, + rot: int = 0, + grid: bool = True, + figsize: tuple[float, float] | None = None, + layout: tuple[int, int] | None = None, + return_type: str | None = None, + **kwargs, +): + plot_backend = _get_plot_backend("matplotlib") + return plot_backend.boxplot( + data, + column=column, + by=by, + ax=ax, + fontsize=fontsize, + rot=rot, + grid=grid, + figsize=figsize, + layout=layout, + return_type=return_type, + **kwargs, + ) + + +@Substitution(data="", backend=_backend_doc) +@Appender(_boxplot_doc) +def boxplot_frame( + self: DataFrame, + column=None, + by=None, + ax=None, + fontsize: int | None = None, + rot: int = 0, + grid: bool = True, + figsize: tuple[float, float] | None = None, + layout=None, + return_type=None, + backend=None, + **kwargs, +): + plot_backend = _get_plot_backend(backend) + return plot_backend.boxplot_frame( + self, + column=column, + by=by, + ax=ax, + fontsize=fontsize, + rot=rot, + grid=grid, + figsize=figsize, + layout=layout, + return_type=return_type, + **kwargs, + ) + + +def boxplot_frame_groupby( + grouped: DataFrameGroupBy, + subplots: bool = True, + column=None, + fontsize: int | None = None, + rot: int = 0, + grid: bool = True, + ax=None, + figsize: tuple[float, float] | None = None, + layout=None, + sharex: bool = False, + sharey: bool = True, + backend=None, + **kwargs, +): + """ + Make box plots from DataFrameGroupBy data. + + Parameters + ---------- + grouped : Grouped DataFrame + subplots : bool + * ``False`` - no subplots will be used + * ``True`` - create a subplot for each group. + + column : column name or list of names, or vector + Can be any valid input to groupby. + fontsize : float or str + rot : label rotation angle + grid : Setting this to True will show the grid + ax : Matplotlib axis object, default None + figsize : A tuple (width, height) in inches + layout : tuple (optional) + The layout of the plot: (rows, columns). + sharex : bool, default False + Whether x-axes will be shared among subplots. + sharey : bool, default True + Whether y-axes will be shared among subplots. + backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to + specify the ``plotting.backend`` for the whole session, set + ``pd.options.plotting.backend``. + **kwargs + All other plotting keyword arguments to be passed to + matplotlib's boxplot function. + + Returns + ------- + dict of key/value = group key/DataFrame.boxplot return value + or DataFrame.boxplot return value in case subplots=figures=False + + Examples + -------- + You can create boxplots for grouped data and show them as separate subplots: + + .. plot:: + :context: close-figs + + >>> import itertools + >>> tuples = [t for t in itertools.product(range(1000), range(4))] + >>> index = pd.MultiIndex.from_tuples(tuples, names=['lvl0', 'lvl1']) + >>> data = np.random.randn(len(index), 4) + >>> df = pd.DataFrame(data, columns=list('ABCD'), index=index) + >>> grouped = df.groupby(level='lvl1') + >>> grouped.boxplot(rot=45, fontsize=12, figsize=(8, 10)) # doctest: +SKIP + + The ``subplots=False`` option shows the boxplots in a single figure. + + .. 
plot:: + :context: close-figs + + >>> grouped.boxplot(subplots=False, rot=45, fontsize=12) # doctest: +SKIP + """ + plot_backend = _get_plot_backend(backend) + return plot_backend.boxplot_frame_groupby( + grouped, + subplots=subplots, + column=column, + fontsize=fontsize, + rot=rot, + grid=grid, + ax=ax, + figsize=figsize, + layout=layout, + sharex=sharex, + sharey=sharey, + **kwargs, + ) + + +class PlotAccessor(PandasObject): + """ + Make plots of Series or DataFrame. + + Uses the backend specified by the + option ``plotting.backend``. By default, matplotlib is used. + + Parameters + ---------- + data : Series or DataFrame + The object for which the method is called. + x : label or position, default None + Only used if data is a DataFrame. + y : label, position or list of label, positions, default None + Allows plotting of one column versus another. Only used if data is a + DataFrame. + kind : str + The kind of plot to produce: + + - 'line' : line plot (default) + - 'bar' : vertical bar plot + - 'barh' : horizontal bar plot + - 'hist' : histogram + - 'box' : boxplot + - 'kde' : Kernel Density Estimation plot + - 'density' : same as 'kde' + - 'area' : area plot + - 'pie' : pie plot + - 'scatter' : scatter plot (DataFrame only) + - 'hexbin' : hexbin plot (DataFrame only) + ax : matplotlib axes object, default None + An axes of the current figure. + subplots : bool or sequence of iterables, default False + Whether to group columns into subplots: + + - ``False`` : No subplots will be used + - ``True`` : Make separate subplots for each column. + - sequence of iterables of column labels: Create a subplot for each + group of columns. For example `[('a', 'c'), ('b', 'd')]` will + create 2 subplots: one with columns 'a' and 'c', and one + with columns 'b' and 'd'. Remaining columns that aren't specified + will be plotted in additional subplots (one per column). + + .. versionadded:: 1.5.0 + + sharex : bool, default True if ax is None else False + In case ``subplots=True``, share x axis and set some x axis labels + to invisible; defaults to True if ax is None otherwise False if + an ax is passed in; Be aware, that passing in both an ax and + ``sharex=True`` will alter all x axis labels for all axis in a figure. + sharey : bool, default False + In case ``subplots=True``, share y axis and set some y axis labels to invisible. + layout : tuple, optional + (rows, columns) for the layout of subplots. + figsize : a tuple (width, height) in inches + Size of a figure object. + use_index : bool, default True + Use index as ticks for x axis. + title : str or list + Title to use for the plot. If a string is passed, print the string + at the top of the figure. If a list is passed and `subplots` is + True, print each item in the list above the corresponding subplot. + grid : bool, default None (matlab style default) + Axis grid lines. + legend : bool or {'reverse'} + Place legend on axis subplots. + style : list or dict + The matplotlib line style per column. + logx : bool or 'sym', default False + Use log scaling or symlog scaling on x axis. + + logy : bool or 'sym' default False + Use log scaling or symlog scaling on y axis. + + loglog : bool or 'sym', default False + Use log scaling or symlog scaling on both x and y axes. + + xticks : sequence + Values to use for the xticks. + yticks : sequence + Values to use for the yticks. + xlim : 2-tuple/list + Set the x limits of the current axes. + ylim : 2-tuple/list + Set the y limits of the current axes. 
+ xlabel : label, optional + Name to use for the xlabel on x-axis. Default uses index name as xlabel, or the + x-column name for planar plots. + + .. versionchanged:: 2.0.0 + + Now applicable to histograms. + + ylabel : label, optional + Name to use for the ylabel on y-axis. Default will show no ylabel, or the + y-column name for planar plots. + + .. versionchanged:: 2.0.0 + + Now applicable to histograms. + + rot : float, default None + Rotation for ticks (xticks for vertical, yticks for horizontal + plots). + fontsize : float, default None + Font size for xticks and yticks. + colormap : str or matplotlib colormap object, default None + Colormap to select colors from. If string, load colormap with that + name from matplotlib. + colorbar : bool, optional + If True, plot colorbar (only relevant for 'scatter' and 'hexbin' + plots). + position : float + Specify relative alignments for bar plot layout. + From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 + (center). + table : bool, Series or DataFrame, default False + If True, draw a table using the data in the DataFrame and the data + will be transposed to meet matplotlib's default layout. + If a Series or DataFrame is passed, use passed data to draw a + table. + yerr : DataFrame, Series, array-like, dict and str + See :ref:`Plotting with Error Bars ` for + detail. + xerr : DataFrame, Series, array-like, dict and str + Equivalent to yerr. + stacked : bool, default False in line and bar plots, and True in area plot + If True, create stacked plot. + secondary_y : bool or sequence, default False + Whether to plot on the secondary y-axis if a list/tuple, which + columns to plot on secondary y-axis. + mark_right : bool, default True + When using a secondary_y axis, automatically mark the column + labels with "(right)" in the legend. + include_bool : bool, default is False + If True, boolean values can be plotted. + backend : str, default None + Backend to use instead of the backend specified in the option + ``plotting.backend``. For instance, 'matplotlib'. Alternatively, to + specify the ``plotting.backend`` for the whole session, set + ``pd.options.plotting.backend``. + **kwargs + Options to pass to matplotlib plotting method. + + Returns + ------- + :class:`matplotlib.axes.Axes` or numpy.ndarray of them + If the backend is not the default matplotlib one, the return value + will be the object returned by the backend. + + Notes + ----- + - See matplotlib documentation online for more on this subject + - If `kind` = 'bar' or 'barh', you can specify relative alignments + for bar plot layout by `position` keyword. + From 0 (left/bottom-end) to 1 (right/top-end). Default is 0.5 + (center) + + Examples + -------- + For Series: + + .. plot:: + :context: close-figs + + >>> ser = pd.Series([1, 2, 3, 3]) + >>> plot = ser.plot(kind='hist', title="My plot") + + For DataFrame: + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({'length': [1.5, 0.5, 1.2, 0.9, 3], + ... 'width': [0.7, 0.2, 0.15, 0.2, 1.1]}, + ... index=['pig', 'rabbit', 'duck', 'chicken', 'horse']) + >>> plot = df.plot(title="DataFrame Plot") + + For SeriesGroupBy: + + .. plot:: + :context: close-figs + + >>> lst = [-1, -2, -3, 1, 2, 3] + >>> ser = pd.Series([1, 2, 2, 4, 6, 6], index=lst) + >>> plot = ser.groupby(lambda x: x > 0).plot(title="SeriesGroupBy Plot") + + For DataFrameGroupBy: + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({"col1" : [1, 2, 3, 4], + ... 
"col2" : ["A", "B", "A", "B"]}) + >>> plot = df.groupby("col2").plot(kind="bar", title="DataFrameGroupBy Plot") + """ + + _common_kinds = ("line", "bar", "barh", "kde", "density", "area", "hist", "box") + _series_kinds = ("pie",) + _dataframe_kinds = ("scatter", "hexbin") + _kind_aliases = {"density": "kde"} + _all_kinds = _common_kinds + _series_kinds + _dataframe_kinds + + def __init__(self, data: Series | DataFrame) -> None: + self._parent = data + + @staticmethod + def _get_call_args(backend_name: str, data: Series | DataFrame, args, kwargs): + """ + This function makes calls to this accessor `__call__` method compatible + with the previous `SeriesPlotMethods.__call__` and + `DataFramePlotMethods.__call__`. Those had slightly different + signatures, since `DataFramePlotMethods` accepted `x` and `y` + parameters. + """ + if isinstance(data, ABCSeries): + arg_def = [ + ("kind", "line"), + ("ax", None), + ("figsize", None), + ("use_index", True), + ("title", None), + ("grid", None), + ("legend", False), + ("style", None), + ("logx", False), + ("logy", False), + ("loglog", False), + ("xticks", None), + ("yticks", None), + ("xlim", None), + ("ylim", None), + ("rot", None), + ("fontsize", None), + ("colormap", None), + ("table", False), + ("yerr", None), + ("xerr", None), + ("label", None), + ("secondary_y", False), + ("xlabel", None), + ("ylabel", None), + ] + elif isinstance(data, ABCDataFrame): + arg_def = [ + ("x", None), + ("y", None), + ("kind", "line"), + ("ax", None), + ("subplots", False), + ("sharex", None), + ("sharey", False), + ("layout", None), + ("figsize", None), + ("use_index", True), + ("title", None), + ("grid", None), + ("legend", True), + ("style", None), + ("logx", False), + ("logy", False), + ("loglog", False), + ("xticks", None), + ("yticks", None), + ("xlim", None), + ("ylim", None), + ("rot", None), + ("fontsize", None), + ("colormap", None), + ("table", False), + ("yerr", None), + ("xerr", None), + ("secondary_y", False), + ("xlabel", None), + ("ylabel", None), + ] + else: + raise TypeError( + f"Called plot accessor for type {type(data).__name__}, " + "expected Series or DataFrame" + ) + + if args and isinstance(data, ABCSeries): + positional_args = str(args)[1:-1] + keyword_args = ", ".join( + [f"{name}={repr(value)}" for (name, _), value in zip(arg_def, args)] + ) + msg = ( + "`Series.plot()` should not be called with positional " + "arguments, only keyword arguments. The order of " + "positional arguments will change in the future. " + f"Use `Series.plot({keyword_args})` instead of " + f"`Series.plot({positional_args})`." 
+ ) + raise TypeError(msg) + + pos_args = {name: value for (name, _), value in zip(arg_def, args)} + if backend_name == "pandas.plotting._matplotlib": + kwargs = dict(arg_def, **pos_args, **kwargs) + else: + kwargs = dict(pos_args, **kwargs) + + x = kwargs.pop("x", None) + y = kwargs.pop("y", None) + kind = kwargs.pop("kind", "line") + return x, y, kind, kwargs + + def __call__(self, *args, **kwargs): + plot_backend = _get_plot_backend(kwargs.pop("backend", None)) + + x, y, kind, kwargs = self._get_call_args( + plot_backend.__name__, self._parent, args, kwargs + ) + + kind = self._kind_aliases.get(kind, kind) + + # when using another backend, get out of the way + if plot_backend.__name__ != "pandas.plotting._matplotlib": + return plot_backend.plot(self._parent, x=x, y=y, kind=kind, **kwargs) + + if kind not in self._all_kinds: + raise ValueError( + f"{kind} is not a valid plot kind " + f"Valid plot kinds: {self._all_kinds}" + ) + + # The original data structured can be transformed before passed to the + # backend. For example, for DataFrame is common to set the index as the + # `x` parameter, and return a Series with the parameter `y` as values. + data = self._parent.copy() + + if isinstance(data, ABCSeries): + kwargs["reuse_plot"] = True + + if kind in self._dataframe_kinds: + if isinstance(data, ABCDataFrame): + return plot_backend.plot(data, x=x, y=y, kind=kind, **kwargs) + else: + raise ValueError(f"plot kind {kind} can only be used for data frames") + elif kind in self._series_kinds: + if isinstance(data, ABCDataFrame): + if y is None and kwargs.get("subplots") is False: + raise ValueError( + f"{kind} requires either y column or 'subplots=True'" + ) + if y is not None: + if is_integer(y) and not data.columns._holds_integer(): + y = data.columns[y] + # converted to series actually. copy to not modify + data = data[y].copy() + data.index.name = y + elif isinstance(data, ABCDataFrame): + data_cols = data.columns + if x is not None: + if is_integer(x) and not data.columns._holds_integer(): + x = data_cols[x] + elif not isinstance(data[x], ABCSeries): + raise ValueError("x must be a label or position") + data = data.set_index(x) + if y is not None: + # check if we have y as int or list of ints + int_ylist = is_list_like(y) and all(is_integer(c) for c in y) + int_y_arg = is_integer(y) or int_ylist + if int_y_arg and not data.columns._holds_integer(): + y = data_cols[y] + + label_kw = kwargs["label"] if "label" in kwargs else False + for kw in ["xerr", "yerr"]: + if kw in kwargs and ( + isinstance(kwargs[kw], str) or is_integer(kwargs[kw]) + ): + try: + kwargs[kw] = data[kwargs[kw]] + except (IndexError, KeyError, TypeError): + pass + + # don't overwrite + data = data[y].copy() + + if isinstance(data, ABCSeries): + label_name = label_kw or y + data.name = label_name + else: + match = is_list_like(label_kw) and len(label_kw) == len(y) + if label_kw and not match: + raise ValueError( + "label should be list-like and same length as y" + ) + label_name = label_kw or data.columns + data.columns = label_name + + return plot_backend.plot(data, kind=kind, **kwargs) + + __call__.__doc__ = __doc__ + + @Appender( + """ + See Also + -------- + matplotlib.pyplot.plot : Plot y versus x as lines and/or markers. + + Examples + -------- + + .. plot:: + :context: close-figs + + >>> s = pd.Series([1, 3, 2]) + >>> s.plot.line() # doctest: +SKIP + + .. plot:: + :context: close-figs + + The following example shows the populations for some animals + over the years. + + >>> df = pd.DataFrame({ + ... 
'pig': [20, 18, 489, 675, 1776], + ... 'horse': [4, 25, 281, 600, 1900] + ... }, index=[1990, 1997, 2003, 2009, 2014]) + >>> lines = df.plot.line() + + .. plot:: + :context: close-figs + + An example with subplots, so an array of axes is returned. + + >>> axes = df.plot.line(subplots=True) + >>> type(axes) + + + .. plot:: + :context: close-figs + + Let's repeat the same example, but specifying colors for + each column (in this case, for each animal). + + >>> axes = df.plot.line( + ... subplots=True, color={"pig": "pink", "horse": "#742802"} + ... ) + + .. plot:: + :context: close-figs + + The following example shows the relationship between both + populations. + + >>> lines = df.plot.line(x='pig', y='horse') + """ + ) + @Substitution(kind="line") + @Appender(_bar_or_line_doc) + def line( + self, x: Hashable | None = None, y: Hashable | None = None, **kwargs + ) -> PlotAccessor: + """ + Plot Series or DataFrame as lines. + + This function is useful to plot lines using DataFrame's values + as coordinates. + """ + return self(kind="line", x=x, y=y, **kwargs) + + @Appender( + """ + See Also + -------- + DataFrame.plot.barh : Horizontal bar plot. + DataFrame.plot : Make plots of a DataFrame. + matplotlib.pyplot.bar : Make a bar plot with matplotlib. + + Examples + -------- + Basic plot. + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({'lab':['A', 'B', 'C'], 'val':[10, 30, 20]}) + >>> ax = df.plot.bar(x='lab', y='val', rot=0) + + Plot a whole dataframe to a bar plot. Each column is assigned a + distinct color, and each row is nested in a group along the + horizontal axis. + + .. plot:: + :context: close-figs + + >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] + >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] + >>> index = ['snail', 'pig', 'elephant', + ... 'rabbit', 'giraffe', 'coyote', 'horse'] + >>> df = pd.DataFrame({'speed': speed, + ... 'lifespan': lifespan}, index=index) + >>> ax = df.plot.bar(rot=0) + + Plot stacked bar charts for the DataFrame + + .. plot:: + :context: close-figs + + >>> ax = df.plot.bar(stacked=True) + + Instead of nesting, the figure can be split by column with + ``subplots=True``. In this case, a :class:`numpy.ndarray` of + :class:`matplotlib.axes.Axes` are returned. + + .. plot:: + :context: close-figs + + >>> axes = df.plot.bar(rot=0, subplots=True) + >>> axes[1].legend(loc=2) # doctest: +SKIP + + If you don't like the default colours, you can specify how you'd + like each column to be colored. + + .. plot:: + :context: close-figs + + >>> axes = df.plot.bar( + ... rot=0, subplots=True, color={"speed": "red", "lifespan": "green"} + ... ) + >>> axes[1].legend(loc=2) # doctest: +SKIP + + Plot a single column. + + .. plot:: + :context: close-figs + + >>> ax = df.plot.bar(y='speed', rot=0) + + Plot only selected categories for the DataFrame. + + .. plot:: + :context: close-figs + + >>> ax = df.plot.bar(x='lifespan', rot=0) + """ + ) + @Substitution(kind="bar") + @Appender(_bar_or_line_doc) + def bar( # pylint: disable=disallowed-name + self, x: Hashable | None = None, y: Hashable | None = None, **kwargs + ) -> PlotAccessor: + """ + Vertical bar plot. + + A bar plot is a plot that presents categorical data with + rectangular bars with lengths proportional to the values that they + represent. A bar plot shows comparisons among discrete categories. One + axis of the plot shows the specific categories being compared, and the + other axis represents a measured value. 
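    (Editorial note.) As the implementation below shows, ``df.plot.bar(...)``
    is shorthand for calling the plot accessor with ``kind='bar'``, so these
    two calls are equivalent:

    >>> df = pd.DataFrame({'lab': ['A', 'B'], 'val': [10, 30]})
    >>> ax1 = df.plot.bar(x='lab', y='val')  # doctest: +SKIP
    >>> ax2 = df.plot(kind='bar', x='lab', y='val')  # doctest: +SKIP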
+ """ + return self(kind="bar", x=x, y=y, **kwargs) + + @Appender( + """ + See Also + -------- + DataFrame.plot.bar: Vertical bar plot. + DataFrame.plot : Make plots of DataFrame using matplotlib. + matplotlib.axes.Axes.bar : Plot a vertical bar plot using matplotlib. + + Examples + -------- + Basic example + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({'lab': ['A', 'B', 'C'], 'val': [10, 30, 20]}) + >>> ax = df.plot.barh(x='lab', y='val') + + Plot a whole DataFrame to a horizontal bar plot + + .. plot:: + :context: close-figs + + >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] + >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] + >>> index = ['snail', 'pig', 'elephant', + ... 'rabbit', 'giraffe', 'coyote', 'horse'] + >>> df = pd.DataFrame({'speed': speed, + ... 'lifespan': lifespan}, index=index) + >>> ax = df.plot.barh() + + Plot stacked barh charts for the DataFrame + + .. plot:: + :context: close-figs + + >>> ax = df.plot.barh(stacked=True) + + We can specify colors for each column + + .. plot:: + :context: close-figs + + >>> ax = df.plot.barh(color={"speed": "red", "lifespan": "green"}) + + Plot a column of the DataFrame to a horizontal bar plot + + .. plot:: + :context: close-figs + + >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] + >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] + >>> index = ['snail', 'pig', 'elephant', + ... 'rabbit', 'giraffe', 'coyote', 'horse'] + >>> df = pd.DataFrame({'speed': speed, + ... 'lifespan': lifespan}, index=index) + >>> ax = df.plot.barh(y='speed') + + Plot DataFrame versus the desired column + + .. plot:: + :context: close-figs + + >>> speed = [0.1, 17.5, 40, 48, 52, 69, 88] + >>> lifespan = [2, 8, 70, 1.5, 25, 12, 28] + >>> index = ['snail', 'pig', 'elephant', + ... 'rabbit', 'giraffe', 'coyote', 'horse'] + >>> df = pd.DataFrame({'speed': speed, + ... 'lifespan': lifespan}, index=index) + >>> ax = df.plot.barh(x='lifespan') + """ + ) + @Substitution(kind="bar") + @Appender(_bar_or_line_doc) + def barh( + self, x: Hashable | None = None, y: Hashable | None = None, **kwargs + ) -> PlotAccessor: + """ + Make a horizontal bar plot. + + A horizontal bar plot is a plot that presents quantitative data with + rectangular bars with lengths proportional to the values that they + represent. A bar plot shows comparisons among discrete categories. One + axis of the plot shows the specific categories being compared, and the + other axis represents a measured value. + """ + return self(kind="barh", x=x, y=y, **kwargs) + + def box(self, by: IndexLabel | None = None, **kwargs) -> PlotAccessor: + r""" + Make a box plot of the DataFrame columns. + + A box plot is a method for graphically depicting groups of numerical + data through their quartiles. + The box extends from the Q1 to Q3 quartile values of the data, + with a line at the median (Q2). The whiskers extend from the edges + of box to show the range of the data. The position of the whiskers + is set by default to 1.5*IQR (IQR = Q3 - Q1) from the edges of the + box. Outlier points are those past the end of the whiskers. + + For further details see Wikipedia's + entry for `boxplot `__. + + A consideration when using this chart is that the box and the whiskers + can overlap, which is very common when plotting small sets of data. + + Parameters + ---------- + by : str or sequence + Column in the DataFrame to group by. + + .. versionchanged:: 1.4.0 + + Previously, `by` is silently ignore and makes no groupings + + **kwargs + Additional keywords are documented in + :meth:`DataFrame.plot`. 
+ + Returns + ------- + :class:`matplotlib.axes.Axes` or numpy.ndarray of them + + See Also + -------- + DataFrame.boxplot: Another method to draw a box plot. + Series.plot.box: Draw a box plot from a Series object. + matplotlib.pyplot.boxplot: Draw a box plot in matplotlib. + + Examples + -------- + Draw a box plot from a DataFrame with four columns of randomly + generated data. + + .. plot:: + :context: close-figs + + >>> data = np.random.randn(25, 4) + >>> df = pd.DataFrame(data, columns=list('ABCD')) + >>> ax = df.plot.box() + + You can also generate groupings if you specify the `by` parameter (which + can take a column name, or a list or tuple of column names): + + .. versionchanged:: 1.4.0 + + .. plot:: + :context: close-figs + + >>> age_list = [8, 10, 12, 14, 72, 74, 76, 78, 20, 25, 30, 35, 60, 85] + >>> df = pd.DataFrame({"gender": list("MMMMMMMMFFFFFF"), "age": age_list}) + >>> ax = df.plot.box(column="age", by="gender", figsize=(10, 8)) + """ + return self(kind="box", by=by, **kwargs) + + def hist( + self, by: IndexLabel | None = None, bins: int = 10, **kwargs + ) -> PlotAccessor: + """ + Draw one histogram of the DataFrame's columns. + + A histogram is a representation of the distribution of data. + This function groups the values of all given Series in the DataFrame + into bins and draws all bins in one :class:`matplotlib.axes.Axes`. + This is useful when the DataFrame's Series are in a similar scale. + + Parameters + ---------- + by : str or sequence, optional + Column in the DataFrame to group by. + + .. versionchanged:: 1.4.0 + + Previously, `by` is silently ignore and makes no groupings + + bins : int, default 10 + Number of histogram bins to be used. + **kwargs + Additional keyword arguments are documented in + :meth:`DataFrame.plot`. + + Returns + ------- + class:`matplotlib.AxesSubplot` + Return a histogram plot. + + See Also + -------- + DataFrame.hist : Draw histograms per DataFrame's Series. + Series.hist : Draw a histogram with Series' data. + + Examples + -------- + When we roll a die 6000 times, we expect to get each value around 1000 + times. But when we roll two dice and sum the result, the distribution + is going to be quite different. A histogram illustrates those + distributions. + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame(np.random.randint(1, 7, 6000), columns=['one']) + >>> df['two'] = df['one'] + np.random.randint(1, 7, 6000) + >>> ax = df.plot.hist(bins=12, alpha=0.5) + + A grouped histogram can be generated by providing the parameter `by` (which + can be a column name, or a list of column names): + + .. plot:: + :context: close-figs + + >>> age_list = [8, 10, 12, 14, 72, 74, 76, 78, 20, 25, 30, 35, 60, 85] + >>> df = pd.DataFrame({"gender": list("MMMMMMMMFFFFFF"), "age": age_list}) + >>> ax = df.plot.hist(column=["age"], by="gender", figsize=(10, 8)) + """ + return self(kind="hist", by=by, bins=bins, **kwargs) + + def kde( + self, + bw_method: Literal["scott", "silverman"] | float | Callable | None = None, + ind: np.ndarray | int | None = None, + **kwargs, + ) -> PlotAccessor: + """ + Generate Kernel Density Estimate plot using Gaussian kernels. + + In statistics, `kernel density estimation`_ (KDE) is a non-parametric + way to estimate the probability density function (PDF) of a random + variable. This function uses Gaussian kernels and includes automatic + bandwidth determination. + + .. 
_kernel density estimation: + https://en.wikipedia.org/wiki/Kernel_density_estimation + + Parameters + ---------- + bw_method : str, scalar or callable, optional + The method used to calculate the estimator bandwidth. This can be + 'scott', 'silverman', a scalar constant or a callable. + If None (default), 'scott' is used. + See :class:`scipy.stats.gaussian_kde` for more information. + ind : NumPy array or int, optional + Evaluation points for the estimated PDF. If None (default), + 1000 equally spaced points are used. If `ind` is a NumPy array, the + KDE is evaluated at the points passed. If `ind` is an integer, + `ind` number of equally spaced points are used. + **kwargs + Additional keyword arguments are documented in + :meth:`DataFrame.plot`. + + Returns + ------- + matplotlib.axes.Axes or numpy.ndarray of them + + See Also + -------- + scipy.stats.gaussian_kde : Representation of a kernel-density + estimate using Gaussian kernels. This is the function used + internally to estimate the PDF. + + Examples + -------- + Given a Series of points randomly sampled from an unknown + distribution, estimate its PDF using KDE with automatic + bandwidth determination and plot the results, evaluating them at + 1000 equally spaced points (default): + + .. plot:: + :context: close-figs + + >>> s = pd.Series([1, 2, 2.5, 3, 3.5, 4, 5]) + >>> ax = s.plot.kde() + + A scalar bandwidth can be specified. Using a small bandwidth value can + lead to over-fitting, while using a large bandwidth value may result + in under-fitting: + + .. plot:: + :context: close-figs + + >>> ax = s.plot.kde(bw_method=0.3) + + .. plot:: + :context: close-figs + + >>> ax = s.plot.kde(bw_method=3) + + Finally, the `ind` parameter determines the evaluation points for the + plot of the estimated PDF: + + .. plot:: + :context: close-figs + + >>> ax = s.plot.kde(ind=[1, 2, 3, 4, 5]) + + For DataFrame, it works in the same way: + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({ + ... 'x': [1, 2, 2.5, 3, 3.5, 4, 5], + ... 'y': [4, 4, 4.5, 5, 5.5, 6, 6], + ... }) + >>> ax = df.plot.kde() + + A scalar bandwidth can be specified. Using a small bandwidth value can + lead to over-fitting, while using a large bandwidth value may result + in under-fitting: + + .. plot:: + :context: close-figs + + >>> ax = df.plot.kde(bw_method=0.3) + + .. plot:: + :context: close-figs + + >>> ax = df.plot.kde(bw_method=3) + + Finally, the `ind` parameter determines the evaluation points for the + plot of the estimated PDF: + + .. plot:: + :context: close-figs + + >>> ax = df.plot.kde(ind=[1, 2, 3, 4, 5, 6]) + """ + return self(kind="kde", bw_method=bw_method, ind=ind, **kwargs) + + density = kde + + def area( + self, + x: Hashable | None = None, + y: Hashable | None = None, + stacked: bool = True, + **kwargs, + ) -> PlotAccessor: + """ + Draw a stacked area plot. + + An area plot displays quantitative data visually. + This function wraps the matplotlib area function. + + Parameters + ---------- + x : label or position, optional + Coordinates for the X axis. By default uses the index. + y : label or position, optional + Column to plot. By default uses all columns. + stacked : bool, default True + Area plots are stacked by default. Set to False to create a + unstacked plot. + **kwargs + Additional keyword arguments are documented in + :meth:`DataFrame.plot`. + + Returns + ------- + matplotlib.axes.Axes or numpy.ndarray + Area plot, or array of area plots if subplots is True. 
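    (Editorial note.) Because ``stacked=True`` is the default, each column is
    drawn on top of the cumulative total of the columns before it; with
    ``stacked=False`` every column starts at the baseline and is drawn with
    some transparency. A minimal sketch:

    >>> df = pd.DataFrame({'sales': [3, 2], 'signups': [5, 5]})
    >>> ax = df.plot.area()               # stacked totals  # doctest: +SKIP
    >>> ax = df.plot.area(stacked=False)  # overlapping areas  # doctest: +SKIP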
+ + See Also + -------- + DataFrame.plot : Make plots of DataFrame using matplotlib / pylab. + + Examples + -------- + Draw an area plot based on basic business metrics: + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({ + ... 'sales': [3, 2, 3, 9, 10, 6], + ... 'signups': [5, 5, 6, 12, 14, 13], + ... 'visits': [20, 42, 28, 62, 81, 50], + ... }, index=pd.date_range(start='2018/01/01', end='2018/07/01', + ... freq='ME')) + >>> ax = df.plot.area() + + Area plots are stacked by default. To produce an unstacked plot, + pass ``stacked=False``: + + .. plot:: + :context: close-figs + + >>> ax = df.plot.area(stacked=False) + + Draw an area plot for a single column: + + .. plot:: + :context: close-figs + + >>> ax = df.plot.area(y='sales') + + Draw with a different `x`: + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({ + ... 'sales': [3, 2, 3], + ... 'visits': [20, 42, 28], + ... 'day': [1, 2, 3], + ... }) + >>> ax = df.plot.area(x='day') + """ + return self(kind="area", x=x, y=y, stacked=stacked, **kwargs) + + def pie(self, **kwargs) -> PlotAccessor: + """ + Generate a pie plot. + + A pie plot is a proportional representation of the numerical data in a + column. This function wraps :meth:`matplotlib.pyplot.pie` for the + specified column. If no column reference is passed and + ``subplots=True`` a pie plot is drawn for each numerical column + independently. + + Parameters + ---------- + y : int or label, optional + Label or position of the column to plot. + If not provided, ``subplots=True`` argument must be passed. + **kwargs + Keyword arguments to pass on to :meth:`DataFrame.plot`. + + Returns + ------- + matplotlib.axes.Axes or np.ndarray of them + A NumPy array is returned when `subplots` is True. + + See Also + -------- + Series.plot.pie : Generate a pie plot for a Series. + DataFrame.plot : Make plots of a DataFrame. + + Examples + -------- + In the example below we have a DataFrame with the information about + planet's mass and radius. We pass the 'mass' column to the + pie function to get a pie plot. + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame({'mass': [0.330, 4.87 , 5.97], + ... 'radius': [2439.7, 6051.8, 6378.1]}, + ... index=['Mercury', 'Venus', 'Earth']) + >>> plot = df.plot.pie(y='mass', figsize=(5, 5)) + + .. plot:: + :context: close-figs + + >>> plot = df.plot.pie(subplots=True, figsize=(11, 6)) + """ + if ( + isinstance(self._parent, ABCDataFrame) + and kwargs.get("y", None) is None + and not kwargs.get("subplots", False) + ): + raise ValueError("pie requires either y column or 'subplots=True'") + return self(kind="pie", **kwargs) + + def scatter( + self, + x: Hashable, + y: Hashable, + s: Hashable | Sequence[Hashable] | None = None, + c: Hashable | Sequence[Hashable] | None = None, + **kwargs, + ) -> PlotAccessor: + """ + Create a scatter plot with varying marker point size and color. + + The coordinates of each point are defined by two dataframe columns and + filled circles are used to represent each point. This kind of plot is + useful to see complex correlations between two variables. Points could + be for instance natural 2D coordinates like longitude and latitude in + a map or, in general, any pair of metrics that can be plotted against + each other. + + Parameters + ---------- + x : int or str + The column name or column position to be used as horizontal + coordinates for each point. + y : int or str + The column name or column position to be used as vertical + coordinates for each point. 
+ s : str, scalar or array-like, optional + The size of each point. Possible values are: + + - A string with the name of the column to be used for marker's size. + + - A single scalar so all points have the same size. + + - A sequence of scalars, which will be used for each point's size + recursively. For instance, when passing [2,14] all points size + will be either 2 or 14, alternatively. + + c : str, int or array-like, optional + The color of each point. Possible values are: + + - A single color string referred to by name, RGB or RGBA code, + for instance 'red' or '#a98d19'. + + - A sequence of color strings referred to by name, RGB or RGBA + code, which will be used for each point's color recursively. For + instance ['green','yellow'] all points will be filled in green or + yellow, alternatively. + + - A column name or position whose values will be used to color the + marker points according to a colormap. + + **kwargs + Keyword arguments to pass on to :meth:`DataFrame.plot`. + + Returns + ------- + :class:`matplotlib.axes.Axes` or numpy.ndarray of them + + See Also + -------- + matplotlib.pyplot.scatter : Scatter plot using multiple input data + formats. + + Examples + -------- + Let's see how to draw a scatter plot using coordinates from the values + in a DataFrame's columns. + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame([[5.1, 3.5, 0], [4.9, 3.0, 0], [7.0, 3.2, 1], + ... [6.4, 3.2, 1], [5.9, 3.0, 2]], + ... columns=['length', 'width', 'species']) + >>> ax1 = df.plot.scatter(x='length', + ... y='width', + ... c='DarkBlue') + + And now with the color determined by a column as well. + + .. plot:: + :context: close-figs + + >>> ax2 = df.plot.scatter(x='length', + ... y='width', + ... c='species', + ... colormap='viridis') + """ + return self(kind="scatter", x=x, y=y, s=s, c=c, **kwargs) + + def hexbin( + self, + x: Hashable, + y: Hashable, + C: Hashable | None = None, + reduce_C_function: Callable | None = None, + gridsize: int | tuple[int, int] | None = None, + **kwargs, + ) -> PlotAccessor: + """ + Generate a hexagonal binning plot. + + Generate a hexagonal binning plot of `x` versus `y`. If `C` is `None` + (the default), this is a histogram of the number of occurrences + of the observations at ``(x[i], y[i])``. + + If `C` is specified, specifies values at given coordinates + ``(x[i], y[i])``. These values are accumulated for each hexagonal + bin and then reduced according to `reduce_C_function`, + having as default the NumPy's mean function (:meth:`numpy.mean`). + (If `C` is specified, it must also be a 1-D sequence + of the same length as `x` and `y`, or a column label.) + + Parameters + ---------- + x : int or str + The column label or position for x points. + y : int or str + The column label or position for y points. + C : int or str, optional + The column label or position for the value of `(x, y)` point. + reduce_C_function : callable, default `np.mean` + Function of one argument that reduces all the values in a bin to + a single number (e.g. `np.mean`, `np.max`, `np.sum`, `np.std`). + gridsize : int or tuple of (int, int), default 100 + The number of hexagons in the x-direction. + The corresponding number of hexagons in the y-direction is + chosen in a way that the hexagons are approximately regular. + Alternatively, gridsize can be a tuple with two elements + specifying the number of hexagons in the x-direction and the + y-direction. + **kwargs + Additional keyword arguments are documented in + :meth:`DataFrame.plot`. 
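    (Editorial sketch.) Passing ``gridsize`` as a tuple fixes the hexagon
    counts in the x- and y-directions independently, as described above:

    >>> df = pd.DataFrame({'x': [1.0, 2.0, 3.0], 'y': [4.0, 5.0, 6.0]})
    >>> ax = df.plot.hexbin(x='x', y='y', gridsize=(8, 12))  # doctest: +SKIP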
+ + Returns + ------- + matplotlib.AxesSubplot + The matplotlib ``Axes`` on which the hexbin is plotted. + + See Also + -------- + DataFrame.plot : Make plots of a DataFrame. + matplotlib.pyplot.hexbin : Hexagonal binning plot using matplotlib, + the matplotlib function that is used under the hood. + + Examples + -------- + The following examples are generated with random data from + a normal distribution. + + .. plot:: + :context: close-figs + + >>> n = 10000 + >>> df = pd.DataFrame({'x': np.random.randn(n), + ... 'y': np.random.randn(n)}) + >>> ax = df.plot.hexbin(x='x', y='y', gridsize=20) + + The next example uses `C` and `np.sum` as `reduce_C_function`. + Note that `'observations'` values ranges from 1 to 5 but the result + plot shows values up to more than 25. This is because of the + `reduce_C_function`. + + .. plot:: + :context: close-figs + + >>> n = 500 + >>> df = pd.DataFrame({ + ... 'coord_x': np.random.uniform(-3, 3, size=n), + ... 'coord_y': np.random.uniform(30, 50, size=n), + ... 'observations': np.random.randint(1,5, size=n) + ... }) + >>> ax = df.plot.hexbin(x='coord_x', + ... y='coord_y', + ... C='observations', + ... reduce_C_function=np.sum, + ... gridsize=10, + ... cmap="viridis") + """ + if reduce_C_function is not None: + kwargs["reduce_C_function"] = reduce_C_function + if gridsize is not None: + kwargs["gridsize"] = gridsize + + return self(kind="hexbin", x=x, y=y, C=C, **kwargs) + + +_backends: dict[str, types.ModuleType] = {} + + +def _load_backend(backend: str) -> types.ModuleType: + """ + Load a pandas plotting backend. + + Parameters + ---------- + backend : str + The identifier for the backend. Either an entrypoint item registered + with importlib.metadata, "matplotlib", or a module name. + + Returns + ------- + types.ModuleType + The imported backend. + """ + from importlib.metadata import entry_points + + if backend == "matplotlib": + # Because matplotlib is an optional dependency and first-party backend, + # we need to attempt an import here to raise an ImportError if needed. + try: + module = importlib.import_module("pandas.plotting._matplotlib") + except ImportError: + raise ImportError( + "matplotlib is required for plotting when the " + 'default backend "matplotlib" is selected.' + ) from None + return module + + found_backend = False + + eps = entry_points() + key = "pandas_plotting_backends" + # entry_points lost dict API ~ PY 3.10 + # https://github.com/python/importlib_metadata/issues/298 + if hasattr(eps, "select"): + entry = eps.select(group=key) + else: + # Argument 2 to "get" of "dict" has incompatible type "Tuple[]"; + # expected "EntryPoints" [arg-type] + entry = eps.get(key, ()) # type: ignore[arg-type] + for entry_point in entry: + found_backend = entry_point.name == backend + if found_backend: + module = entry_point.load() + break + + if not found_backend: + # Fall back to unregistered, module name approach. + try: + module = importlib.import_module(backend) + found_backend = True + except ImportError: + # We re-raise later on. + pass + + if found_backend: + if hasattr(module, "plot"): + # Validate that the interface is implemented when the option is set, + # rather than at plot time. + return module + + raise ValueError( + f"Could not find plotting backend '{backend}'. Ensure that you've " + f"installed the package providing the '{backend}' entrypoint, or that " + "the package has a top-level `.plot` method." + ) + + +def _get_plot_backend(backend: str | None = None): + """ + Return the plotting backend to use (e.g. 
`pandas.plotting._matplotlib`). + + The plotting system of pandas uses matplotlib by default, but the idea here + is that it can also work with other third-party backends. This function + returns the module which provides a top-level `.plot` method that will + actually do the plotting. The backend is specified from a string, which + either comes from the keyword argument `backend`, or, if not specified, from + the option `pandas.options.plotting.backend`. All the rest of the code in + this file uses the backend specified there for the plotting. + + The backend is imported lazily, as matplotlib is a soft dependency, and + pandas can be used without it being installed. + + Notes + ----- + Modifies `_backends` with imported backend as a side effect. + """ + backend_str: str = backend or get_option("plotting.backend") + + if backend_str in _backends: + return _backends[backend_str] + + module = _load_backend(backend_str) + _backends[backend_str] = module + return module diff --git a/venv/lib/python3.10/site-packages/pandas/plotting/_misc.py b/venv/lib/python3.10/site-packages/pandas/plotting/_misc.py new file mode 100644 index 0000000000000000000000000000000000000000..18db460d388a4b748f91282ae42875206ba36cc6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/plotting/_misc.py @@ -0,0 +1,688 @@ +from __future__ import annotations + +from contextlib import contextmanager +from typing import ( + TYPE_CHECKING, + Any, +) + +from pandas.plotting._core import _get_plot_backend + +if TYPE_CHECKING: + from collections.abc import ( + Generator, + Mapping, + ) + + from matplotlib.axes import Axes + from matplotlib.colors import Colormap + from matplotlib.figure import Figure + from matplotlib.table import Table + import numpy as np + + from pandas import ( + DataFrame, + Series, + ) + + +def table(ax: Axes, data: DataFrame | Series, **kwargs) -> Table: + """ + Helper function to convert DataFrame and Series to matplotlib.table. + + Parameters + ---------- + ax : Matplotlib axes object + data : DataFrame or Series + Data for table contents. + **kwargs + Keyword arguments to be passed to matplotlib.table.table. + If `rowLabels` or `colLabels` is not specified, data index or column + name will be used. + + Returns + ------- + matplotlib table object + + Examples + -------- + + .. plot:: + :context: close-figs + + >>> import matplotlib.pyplot as plt + >>> df = pd.DataFrame({'A': [1, 2], 'B': [3, 4]}) + >>> fix, ax = plt.subplots() + >>> ax.axis('off') + (0.0, 1.0, 0.0, 1.0) + >>> table = pd.plotting.table(ax, df, loc='center', + ... cellLoc='center', colWidths=list([.2, .2])) + """ + plot_backend = _get_plot_backend("matplotlib") + return plot_backend.table( + ax=ax, data=data, rowLabels=None, colLabels=None, **kwargs + ) + + +def register() -> None: + """ + Register pandas formatters and converters with matplotlib. + + This function modifies the global ``matplotlib.units.registry`` + dictionary. pandas adds custom converters for + + * pd.Timestamp + * pd.Period + * np.datetime64 + * datetime.datetime + * datetime.date + * datetime.time + + See Also + -------- + deregister_matplotlib_converters : Remove pandas formatters and converters. + + Examples + -------- + .. plot:: + :context: close-figs + + The following line is done automatically by pandas so + the plot can be rendered: + + >>> pd.plotting.register_matplotlib_converters() + + >>> df = pd.DataFrame({'ts': pd.period_range('2020', periods=2, freq='M'), + ... 'y': [1, 2] + ... 
+ + +def deregister() -> None: + """ + Remove pandas formatters and converters. + + Removes the custom converters added by :func:`register`. This + attempts to set the state of the registry back to the state before + pandas registered its own units. Converters for pandas' own types like + Timestamp and Period are removed completely. Converters for types + pandas overwrites, like ``datetime.datetime``, are restored to their + original value. + + See Also + -------- + register_matplotlib_converters : Register pandas formatters and converters + with matplotlib. + + Examples + -------- + .. plot:: + :context: close-figs + + The following line is done automatically by pandas so + the plot can be rendered: + + >>> pd.plotting.register_matplotlib_converters() + + >>> df = pd.DataFrame({'ts': pd.period_range('2020', periods=2, freq='M'), + ... 'y': [1, 2] + ... }) + >>> plot = df.plot.line(x='ts', y='y') + + If the converters are manually unregistered, an error will be raised: + + >>> pd.set_option("plotting.matplotlib.register_converters", + ... False) # doctest: +SKIP + >>> df.plot.line(x='ts', y='y') # doctest: +SKIP + Traceback (most recent call last): + TypeError: float() argument must be a string or a real number, not 'Period' + """ + plot_backend = _get_plot_backend("matplotlib") + plot_backend.deregister()
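# Editor's note (illustrative, not part of the original module): the functions
# below all resolve their backend through ``_get_plot_backend("matplotlib")``.
# For ``DataFrame.plot`` itself, the backend can be chosen globally or per call
# using public API:
#
#   >>> pd.set_option("plotting.backend", "matplotlib")  # global default
#   >>> df.plot(y="y", backend="matplotlib")  # per-call override  # doctest: +SKIP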
+ + +def scatter_matrix( + frame: DataFrame, + alpha: float = 0.5, + figsize: tuple[float, float] | None = None, + ax: Axes | None = None, + grid: bool = False, + diagonal: str = "hist", + marker: str = ".", + density_kwds: Mapping[str, Any] | None = None, + hist_kwds: Mapping[str, Any] | None = None, + range_padding: float = 0.05, + **kwargs, +) -> np.ndarray: + """ + Draw a matrix of scatter plots. + + Parameters + ---------- + frame : DataFrame + alpha : float, optional + Amount of transparency applied. + figsize : (float, float), optional + A tuple (width, height) in inches. + ax : Matplotlib axis object, optional + grid : bool, optional + Setting this to True will show the grid. + diagonal : {'hist', 'kde'} + Pick between 'kde' and 'hist' for either Kernel Density Estimation or + Histogram plot in the diagonal. + marker : str, optional + Matplotlib marker type, default '.'. + density_kwds : keywords + Keyword arguments to be passed to kernel density estimate plot. + hist_kwds : keywords + Keyword arguments to be passed to hist function. + range_padding : float, default 0.05 + Relative extension of axis range in x and y with respect to + (x_max - x_min) or (y_max - y_min). + **kwargs + Keyword arguments to be passed to scatter function. + + Returns + ------- + numpy.ndarray + A matrix of scatter plots. + + Examples + -------- + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame(np.random.randn(1000, 4), columns=['A', 'B', 'C', 'D']) + >>> pd.plotting.scatter_matrix(df, alpha=0.2) + array([[<Axes: xlabel='A', ylabel='A'>, <Axes: xlabel='B', ylabel='A'>, + <Axes: xlabel='C', ylabel='A'>, <Axes: xlabel='D', ylabel='A'>], + [<Axes: xlabel='A', ylabel='B'>, <Axes: xlabel='B', ylabel='B'>, + <Axes: xlabel='C', ylabel='B'>, <Axes: xlabel='D', ylabel='B'>], + [<Axes: xlabel='A', ylabel='C'>, <Axes: xlabel='B', ylabel='C'>, + <Axes: xlabel='C', ylabel='C'>, <Axes: xlabel='D', ylabel='C'>], + [<Axes: xlabel='A', ylabel='D'>, <Axes: xlabel='B', ylabel='D'>, + <Axes: xlabel='C', ylabel='D'>, <Axes: xlabel='D', ylabel='D'>]], + dtype=object) + """ + plot_backend = _get_plot_backend("matplotlib") + return plot_backend.scatter_matrix( + frame=frame, + alpha=alpha, + figsize=figsize, + ax=ax, + grid=grid, + diagonal=diagonal, + marker=marker, + density_kwds=density_kwds, + hist_kwds=hist_kwds, + range_padding=range_padding, + **kwargs, + ) + + +def radviz( + frame: DataFrame, + class_column: str, + ax: Axes | None = None, + color: list[str] | tuple[str, ...] | None = None, + colormap: Colormap | str | None = None, + **kwds, +) -> Axes: + """ + Plot a multidimensional dataset in 2D. + + Each Series in the DataFrame is represented as an evenly distributed + slice on a circle. Each data point is rendered in the circle according to + the value on each Series. Highly correlated `Series` in the `DataFrame` + are placed closer on the unit circle. + + RadViz allows projecting an N-dimensional data set into a 2D space where the + influence of each dimension can be interpreted as a balance between the + influence of all dimensions. + + More info is available in the original article describing RadViz. + + Parameters + ---------- + frame : `DataFrame` + Object holding the data. + class_column : str + Column name containing the name of the data point category. + ax : :class:`matplotlib.axes.Axes`, optional + A plot instance to which to add the information. + color : list[str] or tuple[str], optional + Assign a color to each category. Example: ['blue', 'green']. + colormap : str or :class:`matplotlib.colors.Colormap`, default None + Colormap to select colors from. If string, load colormap with that + name from matplotlib. + **kwds + Options to pass to matplotlib scatter plotting method. + + Returns + ------- + :class:`matplotlib.axes.Axes` + + See Also + -------- + pandas.plotting.andrews_curves : Plot clustering visualization. + + Examples + -------- + + .. plot:: + :context: close-figs + + >>> df = pd.DataFrame( + ... { + ... 'SepalLength': [6.5, 7.7, 5.1, 5.8, 7.6, 5.0, 5.4, 4.6, 6.7, 4.6], + ... 'SepalWidth': [3.0, 3.8, 3.8, 2.7, 3.0, 2.3, 3.0, 3.2, 3.3, 3.6], + ... 'PetalLength': [5.5, 6.7, 1.9, 5.1, 6.6, 3.3, 4.5, 1.4, 5.7, 1.0], + ... 'PetalWidth': [1.8, 2.2, 0.4, 1.9, 2.1, 1.0, 1.5, 0.2, 2.1, 0.2], + ... 'Category': [ + ... 'virginica', + ... 'virginica', + ... 'setosa', + ... 'virginica', + ... 'virginica', + ... 'versicolor', + ... 'versicolor', + ... 'setosa', + ... 'virginica', + ... 'setosa' + ... ] + ... } + ... ) + >>> pd.plotting.radviz(df, 'Category') # doctest: +SKIP + """ + plot_backend = _get_plot_backend("matplotlib") + return plot_backend.radviz( + frame=frame, + class_column=class_column, + ax=ax, + color=color, + colormap=colormap, + **kwds, + ) + + +def andrews_curves( + frame: DataFrame, + class_column: str, + ax: Axes | None = None, + samples: int = 200, + color: list[str] | tuple[str, ...] | None = None, + colormap: Colormap | str | None = None, + **kwargs, +) -> Axes: + """ + Generate a matplotlib plot for visualizing clusters of multivariate data. + + Andrews curves have the functional form: + + .. math:: + f(t) = \\frac{x_1}{\\sqrt{2}} + x_2 \\sin(t) + x_3 \\cos(t) + + x_4 \\sin(2t) + x_5 \\cos(2t) + \\cdots + + Where :math:`x` coefficients correspond to the values of each dimension + and :math:`t` is linearly spaced between :math:`-\\pi` and :math:`+\\pi`.
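As a quick illustration (an editor's sketch, not part of the original docstring; it assumes only NumPy), the curve for a single four-dimensional row can be computed directly:

>>> import numpy as np
>>> x1, x2, x3, x4 = 1.0, 0.5, -0.2, 0.3
>>> t = np.linspace(-np.pi, np.pi, 200)
>>> f = x1 / np.sqrt(2) + x2 * np.sin(t) + x3 * np.cos(t) + x4 * np.sin(2 * t)
>>> f.shape
(200,)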
+ Each row of frame then corresponds to a single curve. + + Parameters + ---------- + frame : DataFrame + Data to be plotted, preferably normalized to (0.0, 1.0). + class_column : label + Name of the column containing class names. + ax : axes object, default None + Axes to use. + samples : int + Number of points to plot in each curve. + color : str, list[str] or tuple[str], optional + Colors to use for the different classes. Colors can be strings + or 3-element floating point RGB values. + colormap : str or matplotlib colormap object, default None + Colormap to select colors from. If a string, load colormap with that + name from matplotlib. + **kwargs + Options to pass to matplotlib plotting method. + + Returns + ------- + :class:`matplotlib.axes.Axes` + + Examples + -------- + + .. plot:: + :context: close-figs + + >>> df = pd.read_csv( + ... 'https://raw.githubusercontent.com/pandas-dev/' + ... 'pandas/main/pandas/tests/io/data/csv/iris.csv' + ... ) + >>> pd.plotting.andrews_curves(df, 'Name') # doctest: +SKIP + """ + plot_backend = _get_plot_backend("matplotlib") + return plot_backend.andrews_curves( + frame=frame, + class_column=class_column, + ax=ax, + samples=samples, + color=color, + colormap=colormap, + **kwargs, + ) + + +def bootstrap_plot( + series: Series, + fig: Figure | None = None, + size: int = 50, + samples: int = 500, + **kwds, +) -> Figure: + """ + Bootstrap plot on mean, median and mid-range statistics. + + The bootstrap plot is used to estimate the uncertainty of a statistic + by relying on random sampling with replacement [1]_. This function will + generate bootstrapping plots for mean, median and mid-range statistics + for the given number of samples of the given size. + + .. [1] "Bootstrapping (statistics)" in \ + https://en.wikipedia.org/wiki/Bootstrapping_%28statistics%29 + + Parameters + ---------- + series : pandas.Series + Series from where to get the samplings for the bootstrapping. + fig : matplotlib.figure.Figure, default None + If given, it will use the `fig` reference for plotting instead of + creating a new one with default parameters. + size : int, default 50 + Number of data points to consider during each sampling. It must be + less than or equal to the length of the `series`. + samples : int, default 500 + Number of times the bootstrap procedure is performed. + **kwds + Options to pass to matplotlib plotting method. + + Returns + ------- + matplotlib.figure.Figure + Matplotlib figure. + + See Also + -------- + pandas.DataFrame.plot : Basic plotting for DataFrame objects. + pandas.Series.plot : Basic plotting for Series objects. + + Examples + -------- + This example draws a basic bootstrap plot for a Series. + + .. plot:: + :context: close-figs + + >>> s = pd.Series(np.random.uniform(size=100)) + >>> pd.plotting.bootstrap_plot(s) # doctest: +SKIP +
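The resampling behind each panel can be reproduced by hand; a minimal sketch (editor's addition, not part of the original docstring) using the default ``size=50`` and ``samples=500``:

>>> rng = np.random.default_rng(42)
>>> data = rng.uniform(size=100)
>>> resamples = rng.choice(data, size=(500, 50), replace=True)
>>> means = resamples.mean(axis=1)
>>> midranges = (resamples.min(axis=1) + resamples.max(axis=1)) / 2
>>> means.shape, midranges.shape
((500,), (500,))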
+ """ + plot_backend = _get_plot_backend("matplotlib") + return plot_backend.bootstrap_plot( + series=series, fig=fig, size=size, samples=samples, **kwds + ) + + +def parallel_coordinates( + frame: DataFrame, + class_column: str, + cols: list[str] | None = None, + ax: Axes | None = None, + color: list[str] | tuple[str, ...] | None = None, + use_columns: bool = False, + xticks: list | tuple | None = None, + colormap: Colormap | str | None = None, + axvlines: bool = True, + axvlines_kwds: Mapping[str, Any] | None = None, + sort_labels: bool = False, + **kwargs, +) -> Axes: + """ + Parallel coordinates plotting. + + Parameters + ---------- + frame : DataFrame + class_column : str + Column name containing class names. + cols : list, optional + A list of column names to use. + ax : matplotlib.axis, optional + Matplotlib axis object. + color : list or tuple, optional + Colors to use for the different classes. + use_columns : bool, optional + If true, columns will be used as xticks. + xticks : list or tuple, optional + A list of values to use for xticks. + colormap : str or matplotlib colormap, default None + Colormap to use for line colors. + axvlines : bool, optional + If true, vertical lines will be added at each xtick. + axvlines_kwds : keywords, optional + Options to be passed to axvline method for vertical lines. + sort_labels : bool, default False + Sort class_column labels, useful when assigning colors. + **kwargs + Options to pass to matplotlib plotting method. + + Returns + ------- + matplotlib.axes.Axes + + Examples + -------- + + .. plot:: + :context: close-figs + + >>> df = pd.read_csv( + ... 'https://raw.githubusercontent.com/pandas-dev/' + ... 'pandas/main/pandas/tests/io/data/csv/iris.csv' + ... ) + >>> pd.plotting.parallel_coordinates( + ... df, 'Name', color=('#556270', '#4ECDC4', '#C7F464') + ... ) # doctest: +SKIP + """ + plot_backend = _get_plot_backend("matplotlib") + return plot_backend.parallel_coordinates( + frame=frame, + class_column=class_column, + cols=cols, + ax=ax, + color=color, + use_columns=use_columns, + xticks=xticks, + colormap=colormap, + axvlines=axvlines, + axvlines_kwds=axvlines_kwds, + sort_labels=sort_labels, + **kwargs, + ) + + +def lag_plot(series: Series, lag: int = 1, ax: Axes | None = None, **kwds) -> Axes: + """ + Lag plot for time series. + + Parameters + ---------- + series : Series + The time series to visualize. + lag : int, default 1 + Lag length of the scatter plot. + ax : Matplotlib axis object, optional + The matplotlib axis object to use. + **kwds + Matplotlib scatter method keyword arguments. + + Returns + ------- + matplotlib.axes.Axes + + Examples + -------- + Lag plots are most commonly used to look for patterns in time series data. + + Given the following time series + + .. plot:: + :context: close-figs + + >>> np.random.seed(5) + >>> x = np.cumsum(np.random.normal(loc=1, scale=5, size=50)) + >>> s = pd.Series(x) + >>> s.plot() # doctest: +SKIP + + A lag plot with ``lag=1`` returns + + .. plot:: + :context: close-figs + + >>> pd.plotting.lag_plot(s, lag=1) + + """ + plot_backend = _get_plot_backend("matplotlib") + return plot_backend.lag_plot(series=series, lag=lag, ax=ax, **kwds) + + +def autocorrelation_plot(series: Series, ax: Axes | None = None, **kwargs) -> Axes: + """ + Autocorrelation plot for time series. + + Parameters + ---------- + series : Series + The time series to visualize. + ax : Matplotlib axis object, optional + The matplotlib axis object to use. + **kwargs + Options to pass to matplotlib plotting method. 
+ + Returns + ------- + matplotlib.axes.Axes + + Examples + -------- + The horizontal lines in the plot correspond to the 95% and 99% confidence bands. + + The dashed lines correspond to the 99% confidence band. + + .. plot:: + :context: close-figs + + >>> spacing = np.linspace(-9 * np.pi, 9 * np.pi, num=1000) + >>> s = pd.Series(0.7 * np.random.rand(1000) + 0.3 * np.sin(spacing)) + >>> pd.plotting.autocorrelation_plot(s) # doctest: +SKIP + """ + plot_backend = _get_plot_backend("matplotlib") + return plot_backend.autocorrelation_plot(series=series, ax=ax, **kwargs) + + +class _Options(dict): + """ + Stores pandas plotting options. + + Allows for parameter aliasing, so you can just use parameter names that are + the same as the plot function parameters; options are stored in a canonical + format that makes it easy to break them down into groups later. + + Examples + -------- + + .. plot:: + :context: close-figs + + >>> np.random.seed(42) + >>> df = pd.DataFrame({'A': np.random.randn(10), + ... 'B': np.random.randn(10)}, + ... index=pd.date_range("1/1/2000", + ... freq='4MS', periods=10)) + >>> with pd.plotting.plot_params.use("x_compat", True): + ... _ = df["A"].plot(color="r") + ... _ = df["B"].plot(color="g") + """ + + # aliases so the names are the same as the plotting method parameter names + _ALIASES = {"x_compat": "xaxis.compat"} + _DEFAULT_KEYS = ["xaxis.compat"] + + def __init__(self, deprecated: bool = False) -> None: + self._deprecated = deprecated + super().__setitem__("xaxis.compat", False) + + def __getitem__(self, key): + key = self._get_canonical_key(key) + if key not in self: + raise ValueError(f"{key} is not a valid pandas plotting option") + return super().__getitem__(key) + + def __setitem__(self, key, value) -> None: + key = self._get_canonical_key(key) + super().__setitem__(key, value) + + def __delitem__(self, key) -> None: + key = self._get_canonical_key(key) + if key in self._DEFAULT_KEYS: + raise ValueError(f"Cannot remove default parameter {key}") + super().__delitem__(key) + + def __contains__(self, key) -> bool: + key = self._get_canonical_key(key) + return super().__contains__(key) + + def reset(self) -> None: + """ + Reset the option store to its initial state. + + Returns + ------- + None + """ + # error: Cannot access "__init__" directly + self.__init__() # type: ignore[misc] + + def _get_canonical_key(self, key): + return self._ALIASES.get(key, key) + + @contextmanager + def use(self, key, value) -> Generator[_Options, None, None]: + """ + Temporarily set a parameter value using the with statement. + Aliasing allowed. + """ + old_value = self[key] + try: + self[key] = value + yield self + finally: + self[key] = old_value + + +plot_params = _Options()
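# Illustrative usage of ``plot_params`` (editor's addition; uses only the public
# API defined above). Dict-style access goes through the alias table, and
# ``use`` restores the previous value on exit:
#
#   >>> pd.plotting.plot_params["x_compat"]        # alias for "xaxis.compat"
#   False
#   >>> with pd.plotting.plot_params.use("x_compat", True):
#   ...     pd.plotting.plot_params["xaxis.compat"]
#   True
#   >>> pd.plotting.plot_params["xaxis.compat"]    # restored afterwards
#   False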
+ """ + old_value = self[key] + try: + self[key] = value + yield self + finally: + self[key] = old_value + + +plot_params = _Options() diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1070f077b1d1c37a07329f3f8c1eefa35be5151 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_array.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2167bd738dc784152542268adae70fe11e9de06 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_array.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_astype.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_astype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..307a2ed519aa253e9a0767e6c91f11aa4ac754cf Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_astype.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_chained_assignment_deprecation.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_chained_assignment_deprecation.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..138c84f67e7d20b917b82e571f65a433d100bec1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_chained_assignment_deprecation.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_clip.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_clip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7630aef12c81f31b3b963776354ed618623d5757 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_clip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_constructors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_constructors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..481b9aa9902989d29fd861e5cca0bb795af3e4c5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_constructors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_core_functionalities.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_core_functionalities.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f0a98f27730156d1c41de950a174aa749b51cf19 Binary files /dev/null 
and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_core_functionalities.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_functions.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c29934860be9dcbe04ddbe14beded457d80bf8c1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_functions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_indexing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0cf8e0c92260da234588d040b0905ef4ac4684a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_indexing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_internals.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_internals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65866b473a0187b4cb89e2f649831c8487b35ef5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_internals.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_interp_fillna.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_interp_fillna.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43c5b096dc98500b8967cc823749221d67edf8c4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_interp_fillna.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_methods.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_methods.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ae2f6b8423703ff55abe298fc632c73ac8d523e Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_methods.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_replace.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_replace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c644332c800d4b97769793d20ed8d7108b4efa97 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_replace.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_setitem.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_setitem.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e70a2d61ea5117efe193e089ead74da34cd5455 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_setitem.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_util.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_util.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..580bf2402c534894ef5fc3cef853a183eba7b470 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/util.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c516f3696652a7b799f89008776784a2cf8c02f9 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..683dc6c128b0bcf9a754177849b8611b4633c7e3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/test_datetimeindex.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/test_datetimeindex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc6be1f517186ec255d62b0bb4173c8b7ef9efdc Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/test_datetimeindex.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/test_index.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/test_index.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd9bb7c319d2df13b1910c4f45529c174845bde6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/test_index.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/test_periodindex.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/test_periodindex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fd7aee5f4096f00d4069b295c622b40f5d056678 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/test_periodindex.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/test_timedeltaindex.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/test_timedeltaindex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d5290d8c605d2980d462d3953537e0aea27d482 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/__pycache__/test_timedeltaindex.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/test_datetimeindex.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/test_datetimeindex.py new file mode 100644 index 
0000000000000000000000000000000000000000..b023297c9549d88f6e1c493e50f148a74f26cea6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/test_datetimeindex.py @@ -0,0 +1,69 @@ +import pytest + +from pandas import ( + DatetimeIndex, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Setting a value on a view:FutureWarning" +) + + +@pytest.mark.parametrize( + "cons", + [ + lambda x: DatetimeIndex(x), + lambda x: DatetimeIndex(DatetimeIndex(x)), + ], +) +def test_datetimeindex(using_copy_on_write, cons): + dt = date_range("2019-12-31", periods=3, freq="D") + ser = Series(dt) + idx = cons(ser) + expected = idx.copy(deep=True) + ser.iloc[0] = Timestamp("2020-12-31") + if using_copy_on_write: + tm.assert_index_equal(idx, expected) + + +def test_datetimeindex_tz_convert(using_copy_on_write): + dt = date_range("2019-12-31", periods=3, freq="D", tz="Europe/Berlin") + ser = Series(dt) + idx = DatetimeIndex(ser).tz_convert("US/Eastern") + expected = idx.copy(deep=True) + ser.iloc[0] = Timestamp("2020-12-31", tz="Europe/Berlin") + if using_copy_on_write: + tm.assert_index_equal(idx, expected) + + +def test_datetimeindex_tz_localize(using_copy_on_write): + dt = date_range("2019-12-31", periods=3, freq="D") + ser = Series(dt) + idx = DatetimeIndex(ser).tz_localize("Europe/Berlin") + expected = idx.copy(deep=True) + ser.iloc[0] = Timestamp("2020-12-31") + if using_copy_on_write: + tm.assert_index_equal(idx, expected) + + +def test_datetimeindex_isocalendar(using_copy_on_write): + dt = date_range("2019-12-31", periods=3, freq="D") + ser = Series(dt) + df = DatetimeIndex(ser).isocalendar() + expected = df.index.copy(deep=True) + ser.iloc[0] = Timestamp("2020-12-31") + if using_copy_on_write: + tm.assert_index_equal(df.index, expected) + + +def test_index_values(using_copy_on_write): + idx = date_range("2019-12-31", periods=3, freq="D") + result = idx.values + if using_copy_on_write: + assert result.flags.writeable is False + else: + assert result.flags.writeable is True diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/test_index.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/test_index.py new file mode 100644 index 0000000000000000000000000000000000000000..49d756cf32d34306fbb4eb3525f1c5b70d5f155c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/test_index.py @@ -0,0 +1,184 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +def index_view(index_data=[1, 2]): + df = DataFrame({"a": index_data, "b": 1.5}) + view = df[:] + df = df.set_index("a", drop=True) + idx = df.index + # df = None + return idx, view + + +def test_set_index_update_column(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2], "b": 1}) + df = df.set_index("a", drop=False) + expected = df.index.copy(deep=True) + with tm.assert_cow_warning(warn_copy_on_write): + df.iloc[0, 0] = 100 + if using_copy_on_write: + tm.assert_index_equal(df.index, expected) + else: + tm.assert_index_equal(df.index, Index([100, 2], name="a")) + + +def test_set_index_drop_update_column(using_copy_on_write): + df = DataFrame({"a": [1, 2], "b": 1.5}) + view = df[:] + df = df.set_index("a", drop=True) + expected = df.index.copy(deep=True) + view.iloc[0, 0] = 100 + tm.assert_index_equal(df.index, expected) + + +def 
test_set_index_series(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2], "b": 1.5}) + ser = Series([10, 11]) + df = df.set_index(ser) + expected = df.index.copy(deep=True) + with tm.assert_cow_warning(warn_copy_on_write): + ser.iloc[0] = 100 + if using_copy_on_write: + tm.assert_index_equal(df.index, expected) + else: + tm.assert_index_equal(df.index, Index([100, 11])) + + +def test_assign_index_as_series(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2], "b": 1.5}) + ser = Series([10, 11]) + df.index = ser + expected = df.index.copy(deep=True) + with tm.assert_cow_warning(warn_copy_on_write): + ser.iloc[0] = 100 + if using_copy_on_write: + tm.assert_index_equal(df.index, expected) + else: + tm.assert_index_equal(df.index, Index([100, 11])) + + +def test_assign_index_as_index(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2], "b": 1.5}) + ser = Series([10, 11]) + rhs_index = Index(ser) + df.index = rhs_index + rhs_index = None # overwrite to clear reference + expected = df.index.copy(deep=True) + with tm.assert_cow_warning(warn_copy_on_write): + ser.iloc[0] = 100 + if using_copy_on_write: + tm.assert_index_equal(df.index, expected) + else: + tm.assert_index_equal(df.index, Index([100, 11])) + + +def test_index_from_series(using_copy_on_write, warn_copy_on_write): + ser = Series([1, 2]) + idx = Index(ser) + expected = idx.copy(deep=True) + with tm.assert_cow_warning(warn_copy_on_write): + ser.iloc[0] = 100 + if using_copy_on_write: + tm.assert_index_equal(idx, expected) + else: + tm.assert_index_equal(idx, Index([100, 2])) + + +def test_index_from_series_copy(using_copy_on_write): + ser = Series([1, 2]) + idx = Index(ser, copy=True) # noqa: F841 + arr = get_array(ser) + ser.iloc[0] = 100 + assert np.shares_memory(get_array(ser), arr) + + +def test_index_from_index(using_copy_on_write, warn_copy_on_write): + ser = Series([1, 2]) + idx = Index(ser) + idx = Index(idx) + expected = idx.copy(deep=True) + with tm.assert_cow_warning(warn_copy_on_write): + ser.iloc[0] = 100 + if using_copy_on_write: + tm.assert_index_equal(idx, expected) + else: + tm.assert_index_equal(idx, Index([100, 2])) + + +@pytest.mark.parametrize( + "func", + [ + lambda x: x._shallow_copy(x._values), + lambda x: x.view(), + lambda x: x.take([0, 1]), + lambda x: x.repeat([1, 1]), + lambda x: x[slice(0, 2)], + lambda x: x[[0, 1]], + lambda x: x._getitem_slice(slice(0, 2)), + lambda x: x.delete([]), + lambda x: x.rename("b"), + lambda x: x.astype("Int64", copy=False), + ], + ids=[ + "_shallow_copy", + "view", + "take", + "repeat", + "getitem_slice", + "getitem_list", + "_getitem_slice", + "delete", + "rename", + "astype", + ], +) +def test_index_ops(using_copy_on_write, func, request): + idx, view_ = index_view() + expected = idx.copy(deep=True) + if "astype" in request.node.callspec.id: + expected = expected.astype("Int64") + idx = func(idx) + view_.iloc[0, 0] = 100 + if using_copy_on_write: + tm.assert_index_equal(idx, expected, check_names=False) + + +def test_infer_objects(using_copy_on_write): + idx, view_ = index_view(["a", "b"]) + expected = idx.copy(deep=True) + idx = idx.infer_objects(copy=False) + view_.iloc[0, 0] = "aaaa" + if using_copy_on_write: + tm.assert_index_equal(idx, expected, check_names=False) + + +def test_index_to_frame(using_copy_on_write): + idx = Index([1, 2, 3], name="a") + expected = idx.copy(deep=True) + df = idx.to_frame() + if using_copy_on_write: + assert np.shares_memory(get_array(df, "a"), idx._values) + assert not 
df._mgr._has_no_reference(0) + else: + assert not np.shares_memory(get_array(df, "a"), idx._values) + + df.iloc[0, 0] = 100 + tm.assert_index_equal(idx, expected) + + +def test_index_values(using_copy_on_write): + idx = Index([1, 2, 3]) + result = idx.values + if using_copy_on_write: + assert result.flags.writeable is False + else: + assert result.flags.writeable is True diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/test_periodindex.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/test_periodindex.py new file mode 100644 index 0000000000000000000000000000000000000000..b80ce1d3d838fc0f517089d452221ac19363a9b8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/test_periodindex.py @@ -0,0 +1,30 @@ +import pytest + +from pandas import ( + Period, + PeriodIndex, + Series, + period_range, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Setting a value on a view:FutureWarning" +) + + +@pytest.mark.parametrize( + "cons", + [ + lambda x: PeriodIndex(x), + lambda x: PeriodIndex(PeriodIndex(x)), + ], +) +def test_periodindex(using_copy_on_write, cons): + dt = period_range("2019-12-31", periods=3, freq="D") + ser = Series(dt) + idx = cons(ser) + expected = idx.copy(deep=True) + ser.iloc[0] = Period("2020-12-31") + if using_copy_on_write: + tm.assert_index_equal(idx, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/test_timedeltaindex.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/test_timedeltaindex.py new file mode 100644 index 0000000000000000000000000000000000000000..5b9832093fded0f48c523bdbc363d043a871eb60 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/index/test_timedeltaindex.py @@ -0,0 +1,30 @@ +import pytest + +from pandas import ( + Series, + Timedelta, + TimedeltaIndex, + timedelta_range, +) +import pandas._testing as tm + +pytestmark = pytest.mark.filterwarnings( + "ignore:Setting a value on a view:FutureWarning" +) + + +@pytest.mark.parametrize( + "cons", + [ + lambda x: TimedeltaIndex(x), + lambda x: TimedeltaIndex(TimedeltaIndex(x)), + ], +) +def test_timedeltaindex(using_copy_on_write, cons): + dt = timedelta_range("1 day", periods=3) + ser = Series(dt) + idx = cons(ser) + expected = idx.copy(deep=True) + ser.iloc[0] = Timedelta("5 days") + if using_copy_on_write: + tm.assert_index_equal(idx, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_array.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_array.py new file mode 100644 index 0000000000000000000000000000000000000000..9a3f83e0293f539cd2a68e9eb515cd817d7eb48b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_array.py @@ -0,0 +1,190 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, + date_range, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + +# ----------------------------------------------------------------------------- +# Copy/view behaviour for accessing underlying array of Series/DataFrame + + +@pytest.mark.parametrize( + "method", + [lambda ser: ser.values, lambda ser: np.asarray(ser)], + ids=["values", "asarray"], +) +def test_series_values(using_copy_on_write, method): + ser = Series([1, 2, 3], name="name") + ser_orig = ser.copy() + + arr = method(ser) + + if using_copy_on_write: + # .values still gives a view but is read-only + assert np.shares_memory(arr, 
get_array(ser, "name")) + assert arr.flags.writeable is False + + # mutating series through arr therefore doesn't work + with pytest.raises(ValueError, match="read-only"): + arr[0] = 0 + tm.assert_series_equal(ser, ser_orig) + + # mutating the series itself still works + ser.iloc[0] = 0 + assert ser.values[0] == 0 + else: + assert arr.flags.writeable is True + arr[0] = 0 + assert ser.iloc[0] == 0 + + +@pytest.mark.parametrize( + "method", + [lambda df: df.values, lambda df: np.asarray(df)], + ids=["values", "asarray"], +) +def test_dataframe_values(using_copy_on_write, using_array_manager, method): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df_orig = df.copy() + + arr = method(df) + + if using_copy_on_write: + # .values still gives a view but is read-only + assert np.shares_memory(arr, get_array(df, "a")) + assert arr.flags.writeable is False + + # mutating the DataFrame through arr therefore doesn't work + with pytest.raises(ValueError, match="read-only"): + arr[0, 0] = 0 + tm.assert_frame_equal(df, df_orig) + + # mutating the DataFrame itself still works + df.iloc[0, 0] = 0 + assert df.values[0, 0] == 0 + else: + assert arr.flags.writeable is True + arr[0, 0] = 0 + if not using_array_manager: + assert df.iloc[0, 0] == 0 + else: + tm.assert_frame_equal(df, df_orig) + + +def test_series_to_numpy(using_copy_on_write): + ser = Series([1, 2, 3], name="name") + ser_orig = ser.copy() + + # default: copy=False, no dtype or NAs + arr = ser.to_numpy() + if using_copy_on_write: + # to_numpy still gives a view but is read-only + assert np.shares_memory(arr, get_array(ser, "name")) + assert arr.flags.writeable is False + + # mutating series through arr therefore doesn't work + with pytest.raises(ValueError, match="read-only"): + arr[0] = 0 + tm.assert_series_equal(ser, ser_orig) + + # mutating the series itself still works + ser.iloc[0] = 0 + assert ser.values[0] == 0 + else: + assert arr.flags.writeable is True + arr[0] = 0 + assert ser.iloc[0] == 0 + + # specifying copy=True gives a writeable array + ser = Series([1, 2, 3], name="name") + arr = ser.to_numpy(copy=True) + assert not np.shares_memory(arr, get_array(ser, "name")) + assert arr.flags.writeable is True + + # specifying a dtype that already causes a copy also gives a writeable array + ser = Series([1, 2, 3], name="name") + arr = ser.to_numpy(dtype="float64") + assert not np.shares_memory(arr, get_array(ser, "name")) + assert arr.flags.writeable is True + + +@pytest.mark.parametrize("order", ["F", "C"]) +def test_ravel_read_only(using_copy_on_write, order): + ser = Series([1, 2, 3]) + with tm.assert_produces_warning(FutureWarning, match="is deprecated"): + arr = ser.ravel(order=order) + if using_copy_on_write: + assert arr.flags.writeable is False + assert np.shares_memory(get_array(ser), arr) + + +def test_series_array_ea_dtypes(using_copy_on_write): + ser = Series([1, 2, 3], dtype="Int64") + arr = np.asarray(ser, dtype="int64") + assert np.shares_memory(arr, get_array(ser)) + if using_copy_on_write: + assert arr.flags.writeable is False + else: + assert arr.flags.writeable is True + + arr = np.asarray(ser) + assert np.shares_memory(arr, get_array(ser)) + if using_copy_on_write: + assert arr.flags.writeable is False + else: + assert arr.flags.writeable is True + + +def test_dataframe_array_ea_dtypes(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}, dtype="Int64") + arr = np.asarray(df, dtype="int64") + assert np.shares_memory(arr, get_array(df, "a")) + if using_copy_on_write: + assert arr.flags.writeable is False + else: + assert
arr.flags.writeable is True + + arr = np.asarray(df) + assert np.shares_memory(arr, get_array(df, "a")) + if using_copy_on_write: + assert arr.flags.writeable is False + else: + assert arr.flags.writeable is True + + +def test_dataframe_array_string_dtype(using_copy_on_write, using_array_manager): + df = DataFrame({"a": ["a", "b"]}, dtype="string") + arr = np.asarray(df) + if not using_array_manager: + assert np.shares_memory(arr, get_array(df, "a")) + if using_copy_on_write: + assert arr.flags.writeable is False + else: + assert arr.flags.writeable is True + + +def test_dataframe_multiple_numpy_dtypes(): + df = DataFrame({"a": [1, 2, 3], "b": 1.5}) + arr = np.asarray(df) + assert not np.shares_memory(arr, get_array(df, "a")) + assert arr.flags.writeable is True + + +def test_values_is_ea(using_copy_on_write): + df = DataFrame({"a": date_range("2012-01-01", periods=3)}) + arr = np.asarray(df) + if using_copy_on_write: + assert arr.flags.writeable is False + else: + assert arr.flags.writeable is True + + +def test_empty_dataframe(): + df = DataFrame() + arr = np.asarray(df) + assert arr.flags.writeable is True diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_astype.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..d462ce3d3187daf1b414d45ffe8193500ac8487c --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_astype.py @@ -0,0 +1,260 @@ +import pickle + +import numpy as np +import pytest + +from pandas.compat.pyarrow import pa_version_under12p0 +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +def test_astype_single_dtype(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": 1.5}) + df_orig = df.copy() + df2 = df.astype("float64") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column/block + df2.iloc[0, 2] = 5.5 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + tm.assert_frame_equal(df, df_orig) + + # mutating parent also doesn't update result + df2 = df.astype("float64") + df.iloc[0, 2] = 5.5 + tm.assert_frame_equal(df2, df_orig.astype("float64")) + + +@pytest.mark.parametrize("dtype", ["int64", "Int64"]) +@pytest.mark.parametrize("new_dtype", ["int64", "Int64", "int64[pyarrow]"]) +def test_astype_avoids_copy(using_copy_on_write, dtype, new_dtype): + if new_dtype == "int64[pyarrow]": + pytest.importorskip("pyarrow") + df = DataFrame({"a": [1, 2, 3]}, dtype=dtype) + df_orig = df.copy() + df2 = df.astype(new_dtype) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column/block + df2.iloc[0, 0] = 10 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + # mutating parent also doesn't update result + df2 = df.astype(new_dtype) + 
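# Editor's note (descriptive comment, not in the original test): at this point
# ``df2`` may still share its buffer with ``df`` (the zero-copy astype above);
# the write to the parent below must detach the shared block lazily, leaving
# ``df2`` with the original values.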
df.iloc[0, 0] = 100 + tm.assert_frame_equal(df2, df_orig.astype(new_dtype)) + + +@pytest.mark.parametrize("dtype", ["float64", "int32", "Int32", "int32[pyarrow]"]) +def test_astype_different_target_dtype(using_copy_on_write, dtype): + if dtype == "int32[pyarrow]": + pytest.importorskip("pyarrow") + df = DataFrame({"a": [1, 2, 3]}) + df_orig = df.copy() + df2 = df.astype(dtype) + + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + if using_copy_on_write: + assert df2._mgr._has_no_reference(0) + + df2.iloc[0, 0] = 5 + tm.assert_frame_equal(df, df_orig) + + # mutating parent also doesn't update result + df2 = df.astype(dtype) + df.iloc[0, 0] = 100 + tm.assert_frame_equal(df2, df_orig.astype(dtype)) + + +@td.skip_array_manager_invalid_test +def test_astype_numpy_to_ea(): + ser = Series([1, 2, 3]) + with pd.option_context("mode.copy_on_write", True): + result = ser.astype("Int64") + assert np.shares_memory(get_array(ser), get_array(result)) + + +@pytest.mark.parametrize( + "dtype, new_dtype", [("object", "string"), ("string", "object")] +) +def test_astype_string_and_object(using_copy_on_write, dtype, new_dtype): + df = DataFrame({"a": ["a", "b", "c"]}, dtype=dtype) + df_orig = df.copy() + df2 = df.astype(new_dtype) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = "x" + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "dtype, new_dtype", [("object", "string"), ("string", "object")] +) +def test_astype_string_and_object_update_original( + using_copy_on_write, dtype, new_dtype +): + df = DataFrame({"a": ["a", "b", "c"]}, dtype=dtype) + df2 = df.astype(new_dtype) + df_orig = df2.copy() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df.iloc[0, 0] = "x" + tm.assert_frame_equal(df2, df_orig) + + +def test_astype_string_copy_on_pickle_roundrip(): + # https://github.com/pandas-dev/pandas/issues/54654 + # ensure_string_array may alter array inplace + base = Series(np.array([(1, 2), None, 1], dtype="object")) + base_copy = pickle.loads(pickle.dumps(base)) + base_copy.astype(str) + tm.assert_series_equal(base, base_copy) + + +def test_astype_dict_dtypes(using_copy_on_write): + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": Series([1.5, 1.5, 1.5], dtype="float64")} + ) + df_orig = df.copy() + df2 = df.astype({"a": "float64", "c": "float64"}) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column/block + df2.iloc[0, 2] = 5.5 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + + df2.iloc[0, 1] = 10 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + tm.assert_frame_equal(df, df_orig) + + +def test_astype_different_datetime_resos(using_copy_on_write): + df = DataFrame({"a": date_range("2019-12-31", periods=2, freq="D")}) + result = 
df.astype("datetime64[ms]") + + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + if using_copy_on_write: + assert result._mgr._has_no_reference(0) + + +def test_astype_different_timezones(using_copy_on_write): + df = DataFrame( + {"a": date_range("2019-12-31", periods=5, freq="D", tz="US/Pacific")} + ) + result = df.astype("datetime64[ns, Europe/Berlin]") + if using_copy_on_write: + assert not result._mgr._has_no_reference(0) + assert np.shares_memory(get_array(df, "a"), get_array(result, "a")) + + +def test_astype_different_timezones_different_reso(using_copy_on_write): + df = DataFrame( + {"a": date_range("2019-12-31", periods=5, freq="D", tz="US/Pacific")} + ) + result = df.astype("datetime64[ms, Europe/Berlin]") + if using_copy_on_write: + assert result._mgr._has_no_reference(0) + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + + +def test_astype_arrow_timestamp(using_copy_on_write): + pytest.importorskip("pyarrow") + df = DataFrame( + { + "a": [ + Timestamp("2020-01-01 01:01:01.000001"), + Timestamp("2020-01-01 01:01:01.000001"), + ] + }, + dtype="M8[ns]", + ) + result = df.astype("timestamp[ns][pyarrow]") + if using_copy_on_write: + assert not result._mgr._has_no_reference(0) + if pa_version_under12p0: + assert not np.shares_memory( + get_array(df, "a"), get_array(result, "a")._pa_array + ) + else: + assert np.shares_memory( + get_array(df, "a"), get_array(result, "a")._pa_array + ) + + +def test_convert_dtypes_infer_objects(using_copy_on_write): + ser = Series(["a", "b", "c"]) + ser_orig = ser.copy() + result = ser.convert_dtypes( + convert_integer=False, + convert_boolean=False, + convert_floating=False, + convert_string=False, + ) + + if using_copy_on_write: + assert np.shares_memory(get_array(ser), get_array(result)) + else: + assert not np.shares_memory(get_array(ser), get_array(result)) + + result.iloc[0] = "x" + tm.assert_series_equal(ser, ser_orig) + + +def test_convert_dtypes(using_copy_on_write): + df = DataFrame({"a": ["a", "b"], "b": [1, 2], "c": [1.5, 2.5], "d": [True, False]}) + df_orig = df.copy() + df2 = df.convert_dtypes() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert np.shares_memory(get_array(df2, "d"), get_array(df, "d")) + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert not np.shares_memory(get_array(df2, "d"), get_array(df, "d")) + + df2.iloc[0, 0] = "x" + tm.assert_frame_equal(df, df_orig) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_chained_assignment_deprecation.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_chained_assignment_deprecation.py new file mode 100644 index 0000000000000000000000000000000000000000..0a37f6b813e55d6072506a5c8168b050aa79ecda --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_chained_assignment_deprecation.py @@ -0,0 +1,174 @@ +import numpy as np +import pytest + +from pandas.compat import PY311 +from pandas.errors import ( + ChainedAssignmentError, + SettingWithCopyWarning, +) + +from pandas import ( + DataFrame, + option_context, +) +import pandas._testing as tm + + +def test_methods_iloc_warn(using_copy_on_write): + if not 
using_copy_on_write: + df = DataFrame({"a": [1, 2, 3], "b": 1}) + with tm.assert_cow_warning(match="A value"): + df.iloc[:, 0].replace(1, 5, inplace=True) + + with tm.assert_cow_warning(match="A value"): + df.iloc[:, 0].fillna(1, inplace=True) + + with tm.assert_cow_warning(match="A value"): + df.iloc[:, 0].interpolate(inplace=True) + + with tm.assert_cow_warning(match="A value"): + df.iloc[:, 0].ffill(inplace=True) + + with tm.assert_cow_warning(match="A value"): + df.iloc[:, 0].bfill(inplace=True) + + +@pytest.mark.parametrize( + "func, args", + [ + ("replace", (4, 5)), + ("fillna", (1,)), + ("interpolate", ()), + ("bfill", ()), + ("ffill", ()), + ], +) +def test_methods_iloc_getitem_item_cache( + func, args, using_copy_on_write, warn_copy_on_write +): + # ensure we don't incorrectly raise chained assignment warning because + # of the item cache / iloc not setting the item cache + df_orig = DataFrame({"a": [1, 2, 3], "b": 1}) + + df = df_orig.copy() + ser = df.iloc[:, 0] + getattr(ser, func)(*args, inplace=True) + + # parent that holds item_cache is dead, so don't increase ref count + df = df_orig.copy() + ser = df.copy()["a"] + getattr(ser, func)(*args, inplace=True) + + df = df_orig.copy() + df["a"] # populate the item_cache + ser = df.iloc[:, 0] # iloc creates a new object + getattr(ser, func)(*args, inplace=True) + + df = df_orig.copy() + df["a"] # populate the item_cache + ser = df["a"] + getattr(ser, func)(*args, inplace=True) + + df = df_orig.copy() + df["a"] # populate the item_cache + # TODO(CoW-warn) because of the usage of *args, this doesn't warn on Py3.11+ + if using_copy_on_write: + with tm.raises_chained_assignment_error(not PY311): + getattr(df["a"], func)(*args, inplace=True) + else: + with tm.assert_cow_warning(not PY311, match="A value"): + getattr(df["a"], func)(*args, inplace=True) + + df = df_orig.copy() + ser = df["a"] # populate the item_cache and keep ref + if using_copy_on_write: + with tm.raises_chained_assignment_error(not PY311): + getattr(df["a"], func)(*args, inplace=True) + else: + # ideally also warns on the default mode, but the ser' _cacher + # messes up the refcount + even in warning mode this doesn't trigger + # the warning of Py3.1+ (see above) + with tm.assert_cow_warning(warn_copy_on_write and not PY311, match="A value"): + getattr(df["a"], func)(*args, inplace=True) + + +def test_methods_iloc_getitem_item_cache_fillna( + using_copy_on_write, warn_copy_on_write +): + # ensure we don't incorrectly raise chained assignment warning because + # of the item cache / iloc not setting the item cache + df_orig = DataFrame({"a": [1, 2, 3], "b": 1}) + + df = df_orig.copy() + ser = df.iloc[:, 0] + ser.fillna(1, inplace=True) + + # parent that holds item_cache is dead, so don't increase ref count + df = df_orig.copy() + ser = df.copy()["a"] + ser.fillna(1, inplace=True) + + df = df_orig.copy() + df["a"] # populate the item_cache + ser = df.iloc[:, 0] # iloc creates a new object + ser.fillna(1, inplace=True) + + df = df_orig.copy() + df["a"] # populate the item_cache + ser = df["a"] + ser.fillna(1, inplace=True) + + df = df_orig.copy() + df["a"] # populate the item_cache + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].fillna(1, inplace=True) + else: + with tm.assert_cow_warning(match="A value"): + df["a"].fillna(1, inplace=True) + + df = df_orig.copy() + ser = df["a"] # populate the item_cache and keep ref + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].fillna(1, inplace=True) + else: + # 
TODO(CoW-warn) ideally also warns on the default mode, but the ser' _cacher + # messes up the refcount + with tm.assert_cow_warning(warn_copy_on_write, match="A value"): + df["a"].fillna(1, inplace=True) + + +# TODO(CoW-warn) expand the cases +@pytest.mark.parametrize( + "indexer", [0, [0, 1], slice(0, 2), np.array([True, False, True])] +) +def test_series_setitem(indexer, using_copy_on_write, warn_copy_on_write): + # ensure we only get a single warning for those typical cases of chained + # assignment + df = DataFrame({"a": [1, 2, 3], "b": 1}) + + # using custom check instead of tm.assert_produces_warning because that doesn't + # fail if multiple warnings are raised + with pytest.warns() as record: + df["a"][indexer] = 0 + assert len(record) == 1 + if using_copy_on_write: + assert record[0].category == ChainedAssignmentError + else: + assert record[0].category == FutureWarning + assert "ChainedAssignmentError" in record[0].message.args[0] + + +@pytest.mark.filterwarnings("ignore::pandas.errors.SettingWithCopyWarning") +@pytest.mark.parametrize( + "indexer", ["a", ["a", "b"], slice(0, 2), np.array([True, False, True])] +) +def test_frame_setitem(indexer, using_copy_on_write): + df = DataFrame({"a": [1, 2, 3, 4, 5], "b": 1}) + + extra_warnings = () if using_copy_on_write else (SettingWithCopyWarning,) + + with option_context("chained_assignment", "warn"): + with tm.raises_chained_assignment_error(extra_warnings=extra_warnings): + df[0:3][indexer] = 10 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_clip.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_clip.py new file mode 100644 index 0000000000000000000000000000000000000000..7c87646424e2faf46b740692b007013fef1cfc75 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_clip.py @@ -0,0 +1,101 @@ +import numpy as np + +from pandas import ( + DataFrame, + option_context, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +def test_clip_inplace_reference(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3]}) + df_copy = df.copy() + arr_a = get_array(df, "a") + view = df[:] + if warn_copy_on_write: + with tm.assert_cow_warning(): + df.clip(lower=2, inplace=True) + else: + df.clip(lower=2, inplace=True) + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), arr_a) + assert df._mgr._has_no_reference(0) + assert view._mgr._has_no_reference(0) + tm.assert_frame_equal(df_copy, view) + else: + assert np.shares_memory(get_array(df, "a"), arr_a) + + +def test_clip_inplace_reference_no_op(using_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3]}) + df_copy = df.copy() + arr_a = get_array(df, "a") + view = df[:] + df.clip(lower=0, inplace=True) + + assert np.shares_memory(get_array(df, "a"), arr_a) + + if using_copy_on_write: + assert not df._mgr._has_no_reference(0) + assert not view._mgr._has_no_reference(0) + tm.assert_frame_equal(df_copy, view) + + +def test_clip_inplace(using_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3]}) + arr_a = get_array(df, "a") + df.clip(lower=2, inplace=True) + + assert np.shares_memory(get_array(df, "a"), arr_a) + + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + +def test_clip(using_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3]}) + df_orig = df.copy() + df2 = df.clip(lower=2) + + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + 
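# Editor's note (descriptive comment, not in the original test):
# ``_has_no_reference`` is internal block-manager API; asserting it here means
# the clip result did not keep a reference to ``df``'s block (clipping produced
# new values), so ``df`` is left untouched, as the equality check below confirms.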
tm.assert_frame_equal(df_orig, df) + + +def test_clip_no_op(using_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3]}) + df2 = df.clip(lower=0) + + if using_copy_on_write: + assert not df._mgr._has_no_reference(0) + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + +def test_clip_chained_inplace(using_copy_on_write): + df = DataFrame({"a": [1, 4, 2], "b": 1}) + df_orig = df.copy() + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].clip(1, 2, inplace=True) + tm.assert_frame_equal(df, df_orig) + + with tm.raises_chained_assignment_error(): + df[["a"]].clip(1, 2, inplace=True) + tm.assert_frame_equal(df, df_orig) + else: + with tm.assert_produces_warning(FutureWarning, match="inplace method"): + df["a"].clip(1, 2, inplace=True) + + with tm.assert_produces_warning(None): + with option_context("mode.chained_assignment", None): + df[["a"]].clip(1, 2, inplace=True) + + with tm.assert_produces_warning(None): + with option_context("mode.chained_assignment", None): + df[df["a"] > 1].clip(1, 2, inplace=True) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_core_functionalities.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_core_functionalities.py new file mode 100644 index 0000000000000000000000000000000000000000..8dc80c5cc0e0eadbe792e114d48593d95df17907 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_core_functionalities.py @@ -0,0 +1,106 @@ +import numpy as np +import pytest + +from pandas import DataFrame +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +def test_assigning_to_same_variable_removes_references(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + df = df.reset_index() + if using_copy_on_write: + assert df._mgr._has_no_reference(1) + arr = get_array(df, "a") + df.iloc[0, 1] = 100 # Write into a + + assert np.shares_memory(arr, get_array(df, "a")) + + +def test_setitem_dont_track_unnecessary_references(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1}) + + df["b"] = 100 + arr = get_array(df, "a") + # We split the block in setitem, if we are not careful the new blocks will + # reference each other triggering a copy + df.iloc[0, 0] = 100 + assert np.shares_memory(arr, get_array(df, "a")) + + +def test_setitem_with_view_copies(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1}) + view = df[:] + expected = df.copy() + + df["b"] = 100 + arr = get_array(df, "a") + with tm.assert_cow_warning(warn_copy_on_write): + df.iloc[0, 0] = 100 # Check that we correctly track reference + if using_copy_on_write: + assert not np.shares_memory(arr, get_array(df, "a")) + tm.assert_frame_equal(view, expected) + + +def test_setitem_with_view_invalidated_does_not_copy( + using_copy_on_write, warn_copy_on_write, request +): + df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1}) + view = df[:] + + df["b"] = 100 + arr = get_array(df, "a") + view = None # noqa: F841 + # TODO(CoW-warn) false positive? -> block gets split because of `df["b"] = 100` + # which introduces additional refs, even when those of `view` go out of scopes + with tm.assert_cow_warning(warn_copy_on_write): + df.iloc[0, 0] = 100 + if using_copy_on_write: + # Setitem split the block. Since the old block shared data with view + # all the new blocks are referencing view and each other. 
When view + # goes out of scope, they don't share data with any other block, + # so we should not trigger a copy + mark = pytest.mark.xfail( + reason="blk.delete does not track references correctly" + ) + request.applymarker(mark) + assert np.shares_memory(arr, get_array(df, "a")) + + +def test_out_of_scope(using_copy_on_write): + def func(): + df = DataFrame({"a": [1, 2], "b": 1.5, "c": 1}) + # create some subset + result = df[["a", "b"]] + return result + + result = func() + if using_copy_on_write: + assert not result._mgr.blocks[0].refs.has_reference() + assert not result._mgr.blocks[1].refs.has_reference() + + +def test_delete(using_copy_on_write): + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 3)), columns=["a", "b", "c"] + ) + del df["b"] + if using_copy_on_write: + assert not df._mgr.blocks[0].refs.has_reference() + assert not df._mgr.blocks[1].refs.has_reference() + + df = df[["a"]] + if using_copy_on_write: + assert not df._mgr.blocks[0].refs.has_reference() + + +def test_delete_reference(using_copy_on_write): + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 3)), columns=["a", "b", "c"] + ) + x = df[:] + del df["b"] + if using_copy_on_write: + assert df._mgr.blocks[0].refs.has_reference() + assert df._mgr.blocks[1].refs.has_reference() + assert x._mgr.blocks[0].refs.has_reference() diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_functions.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_functions.py new file mode 100644 index 0000000000000000000000000000000000000000..56e4b186350f2719978d6ca3803154033c8e08af --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_functions.py @@ -0,0 +1,396 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Index, + Series, + concat, + merge, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +def test_concat_frames(using_copy_on_write): + df = DataFrame({"b": ["a"] * 3}) + df2 = DataFrame({"a": ["a"] * 3}) + df_orig = df.copy() + result = concat([df, df2], axis=1) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + else: + assert not np.shares_memory(get_array(result, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + + result.iloc[0, 0] = "d" + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + + result.iloc[0, 1] = "d" + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_concat_frames_updating_input(using_copy_on_write): + df = DataFrame({"b": ["a"] * 3}) + df2 = DataFrame({"a": ["a"] * 3}) + result = concat([df, df2], axis=1) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + else: + assert not np.shares_memory(get_array(result, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + + expected = result.copy() + df.iloc[0, 0] = "d" + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + + df2.iloc[0, 0] = "d" + if 
using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a")) + tm.assert_frame_equal(result, expected) + + +def test_concat_series(using_copy_on_write): + ser = Series([1, 2], name="a") + ser2 = Series([3, 4], name="b") + ser_orig = ser.copy() + ser2_orig = ser2.copy() + result = concat([ser, ser2], axis=1) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), ser.values) + assert np.shares_memory(get_array(result, "b"), ser2.values) + else: + assert not np.shares_memory(get_array(result, "a"), ser.values) + assert not np.shares_memory(get_array(result, "b"), ser2.values) + + result.iloc[0, 0] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), ser.values) + assert np.shares_memory(get_array(result, "b"), ser2.values) + + result.iloc[0, 1] = 1000 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), ser2.values) + tm.assert_series_equal(ser, ser_orig) + tm.assert_series_equal(ser2, ser2_orig) + + +def test_concat_frames_chained(using_copy_on_write): + df1 = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + df2 = DataFrame({"c": [4, 5, 6]}) + df3 = DataFrame({"d": [4, 5, 6]}) + result = concat([concat([df1, df2], axis=1), df3], axis=1) + expected = result.copy() + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "c"), get_array(df2, "c")) + assert np.shares_memory(get_array(result, "d"), get_array(df3, "d")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "c"), get_array(df2, "c")) + assert not np.shares_memory(get_array(result, "d"), get_array(df3, "d")) + + df1.iloc[0, 0] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + + tm.assert_frame_equal(result, expected) + + +def test_concat_series_chained(using_copy_on_write): + ser1 = Series([1, 2, 3], name="a") + ser2 = Series([4, 5, 6], name="c") + ser3 = Series([4, 5, 6], name="d") + result = concat([concat([ser1, ser2], axis=1), ser3], axis=1) + expected = result.copy() + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(ser1, "a")) + assert np.shares_memory(get_array(result, "c"), get_array(ser2, "c")) + assert np.shares_memory(get_array(result, "d"), get_array(ser3, "d")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(ser1, "a")) + assert not np.shares_memory(get_array(result, "c"), get_array(ser2, "c")) + assert not np.shares_memory(get_array(result, "d"), get_array(ser3, "d")) + + ser1.iloc[0] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(ser1, "a")) + + tm.assert_frame_equal(result, expected) + + +def test_concat_series_updating_input(using_copy_on_write): + ser = Series([1, 2], name="a") + ser2 = Series([3, 4], name="b") + expected = DataFrame({"a": [1, 2], "b": [3, 4]}) + result = concat([ser, ser2], axis=1) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(ser, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(ser2, "b")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(ser, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(ser2, "b")) + + ser.iloc[0] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(ser, "a")) + assert 
np.shares_memory(get_array(result, "b"), get_array(ser2, "b")) + tm.assert_frame_equal(result, expected) + + ser2.iloc[0] = 1000 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), get_array(ser2, "b")) + tm.assert_frame_equal(result, expected) + + +def test_concat_mixed_series_frame(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "c": 1}) + ser = Series([4, 5, 6], name="d") + result = concat([df, ser], axis=1) + expected = result.copy() + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df, "a")) + assert np.shares_memory(get_array(result, "c"), get_array(df, "c")) + assert np.shares_memory(get_array(result, "d"), get_array(ser, "d")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + assert not np.shares_memory(get_array(result, "c"), get_array(df, "c")) + assert not np.shares_memory(get_array(result, "d"), get_array(ser, "d")) + + ser.iloc[0] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "d"), get_array(ser, "d")) + + df.iloc[0, 0] = 100 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("copy", [True, None, False]) +def test_concat_copy_keyword(using_copy_on_write, copy): + df = DataFrame({"a": [1, 2]}) + df2 = DataFrame({"b": [1.5, 2.5]}) + + result = concat([df, df2], axis=1, copy=copy) + + if using_copy_on_write or copy is False: + assert np.shares_memory(get_array(df, "a"), get_array(result, "a")) + assert np.shares_memory(get_array(df2, "b"), get_array(result, "b")) + else: + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + assert not np.shares_memory(get_array(df2, "b"), get_array(result, "b")) + + +@pytest.mark.parametrize( + "func", + [ + lambda df1, df2, **kwargs: df1.merge(df2, **kwargs), + lambda df1, df2, **kwargs: merge(df1, df2, **kwargs), + ], +) +def test_merge_on_key(using_copy_on_write, func): + df1 = DataFrame({"key": ["a", "b", "c"], "a": [1, 2, 3]}) + df2 = DataFrame({"key": ["a", "b", "c"], "b": [4, 5, 6]}) + df1_orig = df1.copy() + df2_orig = df2.copy() + + result = func(df1, df2, on="key") + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + assert np.shares_memory(get_array(result, "key"), get_array(df1, "key")) + assert not np.shares_memory(get_array(result, "key"), get_array(df2, "key")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + result.iloc[0, 1] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + result.iloc[0, 2] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + tm.assert_frame_equal(df1, df1_orig) + tm.assert_frame_equal(df2, df2_orig) + + +def test_merge_on_index(using_copy_on_write): + df1 = DataFrame({"a": [1, 2, 3]}) + df2 = DataFrame({"b": [4, 5, 6]}) + df1_orig = df1.copy() + df2_orig = df2.copy() + + result = merge(df1, df2, left_index=True, right_index=True) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + else: + assert not 
np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + result.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + result.iloc[0, 1] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + tm.assert_frame_equal(df1, df1_orig) + tm.assert_frame_equal(df2, df2_orig) + + +@pytest.mark.parametrize( + "func, how", + [ + (lambda df1, df2, **kwargs: merge(df2, df1, on="key", **kwargs), "right"), + (lambda df1, df2, **kwargs: merge(df1, df2, on="key", **kwargs), "left"), + ], +) +def test_merge_on_key_enlarging_one(using_copy_on_write, func, how): + df1 = DataFrame({"key": ["a", "b", "c"], "a": [1, 2, 3]}) + df2 = DataFrame({"key": ["a", "b"], "b": [4, 5]}) + df1_orig = df1.copy() + df2_orig = df2.copy() + + result = func(df1, df2, how=how) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + assert df2._mgr._has_no_reference(1) + assert df2._mgr._has_no_reference(0) + assert np.shares_memory(get_array(result, "key"), get_array(df1, "key")) is ( + how == "left" + ) + assert not np.shares_memory(get_array(result, "key"), get_array(df2, "key")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + if how == "left": + result.iloc[0, 1] = 0 + else: + result.iloc[0, 2] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + tm.assert_frame_equal(df1, df1_orig) + tm.assert_frame_equal(df2, df2_orig) + + +@pytest.mark.parametrize("copy", [True, None, False]) +def test_merge_copy_keyword(using_copy_on_write, copy): + df = DataFrame({"a": [1, 2]}) + df2 = DataFrame({"b": [3, 4.5]}) + + result = df.merge(df2, copy=copy, left_index=True, right_index=True) + + if using_copy_on_write or copy is False: + assert np.shares_memory(get_array(df, "a"), get_array(result, "a")) + assert np.shares_memory(get_array(df2, "b"), get_array(result, "b")) + else: + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + assert not np.shares_memory(get_array(df2, "b"), get_array(result, "b")) + + +def test_join_on_key(using_copy_on_write): + df_index = Index(["a", "b", "c"], name="key") + + df1 = DataFrame({"a": [1, 2, 3]}, index=df_index.copy(deep=True)) + df2 = DataFrame({"b": [4, 5, 6]}, index=df_index.copy(deep=True)) + + df1_orig = df1.copy() + df2_orig = df2.copy() + + result = df1.join(df2, on="key") + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + assert np.shares_memory(get_array(result.index), get_array(df1.index)) + assert not np.shares_memory(get_array(result.index), get_array(df2.index)) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + result.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + result.iloc[0, 1] = 0 + if using_copy_on_write: + assert not 
np.shares_memory(get_array(result, "b"), get_array(df2, "b")) + + tm.assert_frame_equal(df1, df1_orig) + tm.assert_frame_equal(df2, df2_orig) + + +def test_join_multiple_dataframes_on_key(using_copy_on_write): + df_index = Index(["a", "b", "c"], name="key") + + df1 = DataFrame({"a": [1, 2, 3]}, index=df_index.copy(deep=True)) + dfs_list = [ + DataFrame({"b": [4, 5, 6]}, index=df_index.copy(deep=True)), + DataFrame({"c": [7, 8, 9]}, index=df_index.copy(deep=True)), + ] + + df1_orig = df1.copy() + dfs_list_orig = [df.copy() for df in dfs_list] + + result = df1.join(dfs_list) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b")) + assert np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c")) + assert np.shares_memory(get_array(result.index), get_array(df1.index)) + assert not np.shares_memory( + get_array(result.index), get_array(dfs_list[0].index) + ) + assert not np.shares_memory( + get_array(result.index), get_array(dfs_list[1].index) + ) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert not np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b")) + assert not np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c")) + + result.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a")) + assert np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b")) + assert np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c")) + + result.iloc[0, 1] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b")) + assert np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c")) + + result.iloc[0, 2] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c")) + + tm.assert_frame_equal(df1, df1_orig) + for df, df_orig in zip(dfs_list, dfs_list_orig): + tm.assert_frame_equal(df, df_orig) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_indexing.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..479fa148f994a74eb205e3fa19ba957504744a54 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_indexing.py @@ -0,0 +1,1266 @@ +import numpy as np +import pytest + +from pandas.errors import SettingWithCopyWarning + +from pandas.core.dtypes.common import is_float_dtype + +import pandas as pd +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +@pytest.fixture(params=["numpy", "nullable"]) +def backend(request): + if request.param == "numpy": + + def make_dataframe(*args, **kwargs): + return DataFrame(*args, **kwargs) + + def make_series(*args, **kwargs): + return Series(*args, **kwargs) + + elif request.param == "nullable": + + def make_dataframe(*args, **kwargs): + df = DataFrame(*args, **kwargs) + df_nullable = df.convert_dtypes() + # convert_dtypes will try to cast float to int if there is no loss in + # precision -> undo that change + for col in df.columns: + if is_float_dtype(df[col].dtype) and not is_float_dtype( + df_nullable[col].dtype + ): + df_nullable[col] = df_nullable[col].astype("Float64") + # copy final result to ensure we start with a fully self-owning DataFrame + return 
df_nullable.copy() + + def make_series(*args, **kwargs): + ser = Series(*args, **kwargs) + return ser.convert_dtypes().copy() + + return request.param, make_dataframe, make_series + + +# ----------------------------------------------------------------------------- +# Indexing operations taking subset + modifying the subset/parent + + +def test_subset_column_selection(backend, using_copy_on_write): + # Case: taking a subset of the columns of a DataFrame + # + afterwards modifying the subset + _, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + + subset = df[["a", "c"]] + + if using_copy_on_write: + # the subset shares memory ... + assert np.shares_memory(get_array(subset, "a"), get_array(df, "a")) + # ... but uses CoW when being modified + subset.iloc[0, 0] = 0 + else: + assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a")) + # INFO this no longer raise warning since pandas 1.4 + # with pd.option_context("chained_assignment", "warn"): + # with tm.assert_produces_warning(SettingWithCopyWarning): + subset.iloc[0, 0] = 0 + + assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a")) + + expected = DataFrame({"a": [0, 2, 3], "c": [0.1, 0.2, 0.3]}) + tm.assert_frame_equal(subset, expected) + tm.assert_frame_equal(df, df_orig) + + +def test_subset_column_selection_modify_parent(backend, using_copy_on_write): + # Case: taking a subset of the columns of a DataFrame + # + afterwards modifying the parent + _, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + + subset = df[["a", "c"]] + + if using_copy_on_write: + # the subset shares memory ... + assert np.shares_memory(get_array(subset, "a"), get_array(df, "a")) + # ... 
but parent uses CoW parent when it is modified + df.iloc[0, 0] = 0 + + assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a")) + if using_copy_on_write: + # different column/block still shares memory + assert np.shares_memory(get_array(subset, "c"), get_array(df, "c")) + + expected = DataFrame({"a": [1, 2, 3], "c": [0.1, 0.2, 0.3]}) + tm.assert_frame_equal(subset, expected) + + +def test_subset_row_slice(backend, using_copy_on_write, warn_copy_on_write): + # Case: taking a subset of the rows of a DataFrame using a slice + # + afterwards modifying the subset + _, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + + subset = df[1:3] + subset._mgr._verify_integrity() + + assert np.shares_memory(get_array(subset, "a"), get_array(df, "a")) + + if using_copy_on_write: + subset.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a")) + + else: + # INFO this no longer raise warning since pandas 1.4 + # with pd.option_context("chained_assignment", "warn"): + # with tm.assert_produces_warning(SettingWithCopyWarning): + with tm.assert_cow_warning(warn_copy_on_write): + subset.iloc[0, 0] = 0 + + subset._mgr._verify_integrity() + + expected = DataFrame({"a": [0, 3], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3)) + tm.assert_frame_equal(subset, expected) + if using_copy_on_write: + # original parent dataframe is not modified (CoW) + tm.assert_frame_equal(df, df_orig) + else: + # original parent dataframe is actually updated + df_orig.iloc[1, 0] = 0 + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_subset_column_slice( + backend, using_copy_on_write, warn_copy_on_write, using_array_manager, dtype +): + # Case: taking a subset of the columns of a DataFrame using a slice + # + afterwards modifying the subset + dtype_backend, DataFrame, _ = backend + single_block = ( + dtype == "int64" and dtype_backend == "numpy" + ) and not using_array_manager + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + subset = df.iloc[:, 1:] + subset._mgr._verify_integrity() + + if using_copy_on_write: + assert np.shares_memory(get_array(subset, "b"), get_array(df, "b")) + + subset.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(subset, "b"), get_array(df, "b")) + elif warn_copy_on_write: + with tm.assert_cow_warning(single_block): + subset.iloc[0, 0] = 0 + else: + # we only get a warning in case of a single block + warn = SettingWithCopyWarning if single_block else None + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(warn): + subset.iloc[0, 0] = 0 + + expected = DataFrame({"b": [0, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}) + tm.assert_frame_equal(subset, expected) + # original parent dataframe is not modified (also not for BlockManager case, + # except for single block) + if not using_copy_on_write and (using_array_manager or single_block): + df_orig.iloc[0, 1] = 0 + tm.assert_frame_equal(df, df_orig) + else: + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +@pytest.mark.parametrize( + "row_indexer", + [slice(1, 2), np.array([False, True, True]), np.array([1, 2])], + ids=["slice", "mask", "array"], +) +@pytest.mark.parametrize( + "column_indexer", + [slice("b", "c"), np.array([False, True, True]), 
["b", "c"]], + ids=["slice", "mask", "array"], +) +def test_subset_loc_rows_columns( + backend, + dtype, + row_indexer, + column_indexer, + using_array_manager, + using_copy_on_write, + warn_copy_on_write, +): + # Case: taking a subset of the rows+columns of a DataFrame using .loc + # + afterwards modifying the subset + # Generic test for several combinations of row/column indexers, not all + # of those could actually return a view / need CoW (so this test is not + # checking memory sharing, only ensuring subsequent mutation doesn't + # affect the parent dataframe) + dtype_backend, DataFrame, _ = backend + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + subset = df.loc[row_indexer, column_indexer] + + # a few corner cases _do_ actually modify the parent (with both row and column + # slice, and in case of ArrayManager or BlockManager with single block) + mutate_parent = ( + isinstance(row_indexer, slice) + and isinstance(column_indexer, slice) + and ( + using_array_manager + or ( + dtype == "int64" + and dtype_backend == "numpy" + and not using_copy_on_write + ) + ) + ) + + # modifying the subset never modifies the parent + with tm.assert_cow_warning(warn_copy_on_write and mutate_parent): + subset.iloc[0, 0] = 0 + + expected = DataFrame( + {"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3) + ) + tm.assert_frame_equal(subset, expected) + if mutate_parent: + df_orig.iloc[1, 1] = 0 + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +@pytest.mark.parametrize( + "row_indexer", + [slice(1, 3), np.array([False, True, True]), np.array([1, 2])], + ids=["slice", "mask", "array"], +) +@pytest.mark.parametrize( + "column_indexer", + [slice(1, 3), np.array([False, True, True]), [1, 2]], + ids=["slice", "mask", "array"], +) +def test_subset_iloc_rows_columns( + backend, + dtype, + row_indexer, + column_indexer, + using_array_manager, + using_copy_on_write, + warn_copy_on_write, +): + # Case: taking a subset of the rows+columns of a DataFrame using .iloc + # + afterwards modifying the subset + # Generic test for several combinations of row/column indexers, not all + # of those could actually return a view / need CoW (so this test is not + # checking memory sharing, only ensuring subsequent mutation doesn't + # affect the parent dataframe) + dtype_backend, DataFrame, _ = backend + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + subset = df.iloc[row_indexer, column_indexer] + + # a few corner cases _do_ actually modify the parent (with both row and column + # slice, and in case of ArrayManager or BlockManager with single block) + mutate_parent = ( + isinstance(row_indexer, slice) + and isinstance(column_indexer, slice) + and ( + using_array_manager + or ( + dtype == "int64" + and dtype_backend == "numpy" + and not using_copy_on_write + ) + ) + ) + + # modifying the subset never modifies the parent + with tm.assert_cow_warning(warn_copy_on_write and mutate_parent): + subset.iloc[0, 0] = 0 + + expected = DataFrame( + {"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3) + ) + tm.assert_frame_equal(subset, expected) + if mutate_parent: + df_orig.iloc[1, 1] = 0 + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "indexer", + [slice(0, 2), np.array([True, True, False]), np.array([0, 1])], + ids=["slice", "mask", "array"], +) +def 
test_subset_set_with_row_indexer( + backend, indexer_si, indexer, using_copy_on_write, warn_copy_on_write +): + # Case: setting values with a row indexer on a viewing subset + # subset[indexer] = value and subset.iloc[indexer] = value + _, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]}) + df_orig = df.copy() + subset = df[1:4] + + if ( + indexer_si is tm.setitem + and isinstance(indexer, np.ndarray) + and indexer.dtype == "int" + ): + pytest.skip("setitem with labels selects on columns") + + if using_copy_on_write: + indexer_si(subset)[indexer] = 0 + elif warn_copy_on_write: + with tm.assert_cow_warning(): + indexer_si(subset)[indexer] = 0 + else: + # INFO iloc no longer raises warning since pandas 1.4 + warn = SettingWithCopyWarning if indexer_si is tm.setitem else None + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(warn): + indexer_si(subset)[indexer] = 0 + + expected = DataFrame( + {"a": [0, 0, 4], "b": [0, 0, 7], "c": [0.0, 0.0, 0.4]}, index=range(1, 4) + ) + tm.assert_frame_equal(subset, expected) + if using_copy_on_write: + # original parent dataframe is not modified (CoW) + tm.assert_frame_equal(df, df_orig) + else: + # original parent dataframe is actually updated + df_orig[1:3] = 0 + tm.assert_frame_equal(df, df_orig) + + +def test_subset_set_with_mask(backend, using_copy_on_write, warn_copy_on_write): + # Case: setting values with a mask on a viewing subset: subset[mask] = value + _, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]}) + df_orig = df.copy() + subset = df[1:4] + + mask = subset > 3 + + if using_copy_on_write: + subset[mask] = 0 + elif warn_copy_on_write: + with tm.assert_cow_warning(): + subset[mask] = 0 + else: + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(SettingWithCopyWarning): + subset[mask] = 0 + + expected = DataFrame( + {"a": [2, 3, 0], "b": [0, 0, 0], "c": [0.20, 0.3, 0.4]}, index=range(1, 4) + ) + tm.assert_frame_equal(subset, expected) + if using_copy_on_write: + # original parent dataframe is not modified (CoW) + tm.assert_frame_equal(df, df_orig) + else: + # original parent dataframe is actually updated + df_orig.loc[3, "a"] = 0 + df_orig.loc[1:3, "b"] = 0 + tm.assert_frame_equal(df, df_orig) + + +def test_subset_set_column(backend, using_copy_on_write, warn_copy_on_write): + # Case: setting a single column on a viewing subset -> subset[col] = value + dtype_backend, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + subset = df[1:3] + + if dtype_backend == "numpy": + arr = np.array([10, 11], dtype="int64") + else: + arr = pd.array([10, 11], dtype="Int64") + + if using_copy_on_write or warn_copy_on_write: + subset["a"] = arr + else: + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(SettingWithCopyWarning): + subset["a"] = arr + + subset._mgr._verify_integrity() + expected = DataFrame( + {"a": [10, 11], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3) + ) + tm.assert_frame_equal(subset, expected) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_subset_set_column_with_loc( + backend, using_copy_on_write, warn_copy_on_write, using_array_manager, dtype +): + # Case: setting a single column with loc on a viewing subset + # -> subset.loc[:, col] = value + 
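+ # A rough sketch of the pattern under test (assuming the numpy backend): + # subset = df[1:3]; subset.loc[:, "a"] = np.array([10, 11], dtype="int64") + # -> with CoW the parent `df` keeps its original column "a"; without CoW + # the assignment writes straight through to rows 1 and 2 of the parent.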
_, DataFrame, _ = backend + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + subset = df[1:3] + + if using_copy_on_write: + subset.loc[:, "a"] = np.array([10, 11], dtype="int64") + elif warn_copy_on_write: + with tm.assert_cow_warning(): + subset.loc[:, "a"] = np.array([10, 11], dtype="int64") + else: + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning( + None, + raise_on_extra_warnings=not using_array_manager, + ): + subset.loc[:, "a"] = np.array([10, 11], dtype="int64") + + subset._mgr._verify_integrity() + expected = DataFrame( + {"a": [10, 11], "b": [5, 6], "c": np.array([8, 9], dtype=dtype)}, + index=range(1, 3), + ) + tm.assert_frame_equal(subset, expected) + if using_copy_on_write: + # original parent dataframe is not modified (CoW) + tm.assert_frame_equal(df, df_orig) + else: + # original parent dataframe is actually updated + df_orig.loc[1:3, "a"] = np.array([10, 11], dtype="int64") + tm.assert_frame_equal(df, df_orig) + + +def test_subset_set_column_with_loc2( + backend, using_copy_on_write, warn_copy_on_write, using_array_manager +): + # Case: setting a single column with loc on a viewing subset + # -> subset.loc[:, col] = value + # separate test for case of DataFrame of a single column -> takes a separate + # code path + _, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3]}) + df_orig = df.copy() + subset = df[1:3] + + if using_copy_on_write: + subset.loc[:, "a"] = 0 + elif warn_copy_on_write: + with tm.assert_cow_warning(): + subset.loc[:, "a"] = 0 + else: + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning( + None, + raise_on_extra_warnings=not using_array_manager, + ): + subset.loc[:, "a"] = 0 + + subset._mgr._verify_integrity() + expected = DataFrame({"a": [0, 0]}, index=range(1, 3)) + tm.assert_frame_equal(subset, expected) + if using_copy_on_write: + # original parent dataframe is not modified (CoW) + tm.assert_frame_equal(df, df_orig) + else: + # original parent dataframe is actually updated + df_orig.loc[1:3, "a"] = 0 + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_subset_set_columns(backend, using_copy_on_write, warn_copy_on_write, dtype): + # Case: setting multiple columns on a viewing subset + # -> subset[[col1, col2]] = value + dtype_backend, DataFrame, _ = backend + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + subset = df[1:3] + + if using_copy_on_write or warn_copy_on_write: + subset[["a", "c"]] = 0 + else: + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(SettingWithCopyWarning): + subset[["a", "c"]] = 0 + + subset._mgr._verify_integrity() + if using_copy_on_write: + # first and third column should certainly have no references anymore + assert all(subset._mgr._has_no_reference(i) for i in [0, 2]) + expected = DataFrame({"a": [0, 0], "b": [5, 6], "c": [0, 0]}, index=range(1, 3)) + if dtype_backend == "nullable": + # there is not yet a global option, so overriding a column by setting a scalar + # defaults to numpy dtype even if original column was nullable + expected["a"] = expected["a"].astype("int64") + expected["c"] = expected["c"].astype("int64") + + tm.assert_frame_equal(subset, expected) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "indexer", + [slice("a", "b"), 
np.array([True, True, False]), ["a", "b"]], + ids=["slice", "mask", "array"], +) +def test_subset_set_with_column_indexer( + backend, indexer, using_copy_on_write, warn_copy_on_write +): + # Case: setting multiple columns with a column indexer on a viewing subset + # -> subset.loc[:, [col1, col2]] = value + _, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]}) + df_orig = df.copy() + subset = df[1:3] + + if using_copy_on_write: + subset.loc[:, indexer] = 0 + elif warn_copy_on_write: + with tm.assert_cow_warning(): + subset.loc[:, indexer] = 0 + else: + with pd.option_context("chained_assignment", "warn"): + # As of 2.0, this setitem attempts (successfully) to set values + # inplace, so the assignment is not chained. + subset.loc[:, indexer] = 0 + + subset._mgr._verify_integrity() + expected = DataFrame({"a": [0, 0], "b": [0.0, 0.0], "c": [5, 6]}, index=range(1, 3)) + tm.assert_frame_equal(subset, expected) + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + else: + # pre-2.0, in the mixed case with BlockManager, only column "a" + # would be mutated in the parent frame. this changed with the + # enforcement of GH#45333 + df_orig.loc[1:2, ["a", "b"]] = 0 + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "method", + [ + lambda df: df[["a", "b"]][0:2], + lambda df: df[0:2][["a", "b"]], + lambda df: df[["a", "b"]].iloc[0:2], + lambda df: df[["a", "b"]].loc[0:1], + lambda df: df[0:2].iloc[:, 0:2], + lambda df: df[0:2].loc[:, "a":"b"], # type: ignore[misc] + ], + ids=[ + "row-getitem-slice", + "column-getitem", + "row-iloc-slice", + "row-loc-slice", + "column-iloc-slice", + "column-loc-slice", + ], +) +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_subset_chained_getitem( + request, + backend, + method, + dtype, + using_copy_on_write, + using_array_manager, + warn_copy_on_write, +): + # Case: creating a subset using multiple, chained getitem calls using views + # still needs to guarantee proper CoW behaviour + _, DataFrame, _ = backend + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + # when not using CoW, it depends on whether we have a single block or not + # and whether we are slicing the columns -> in that case we have a view + test_callspec = request.node.callspec.id + if not using_array_manager: + subset_is_view = test_callspec in ( + "numpy-single-block-column-iloc-slice", + "numpy-single-block-column-loc-slice", + ) + else: + # with ArrayManager, it doesn't matter whether we have + # single vs mixed block or numpy vs nullable dtypes + subset_is_view = test_callspec.endswith( + ("column-iloc-slice", "column-loc-slice") + ) + + # modify subset -> don't modify parent + subset = method(df) + + with tm.assert_cow_warning(warn_copy_on_write and subset_is_view): + subset.iloc[0, 0] = 0 + if using_copy_on_write or (not subset_is_view): + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + # modify parent -> don't modify subset + subset = method(df) + with tm.assert_cow_warning(warn_copy_on_write and subset_is_view): + df.iloc[0, 0] = 0 + expected = DataFrame({"a": [1, 2], "b": [4, 5]}) + if using_copy_on_write or not subset_is_view: + tm.assert_frame_equal(subset, expected) + else: + assert subset.iloc[0, 0] == 0 + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_subset_chained_getitem_column( + backend, 
dtype, using_copy_on_write, warn_copy_on_write +): + # Case: creating a subset using multiple, chained getitem calls using views + # still needs to guarantee proper CoW behaviour + dtype_backend, DataFrame, Series = backend + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + # modify subset -> don't modify parent + subset = df[:]["a"][0:2] + df._clear_item_cache() + with tm.assert_cow_warning(warn_copy_on_write): + subset.iloc[0] = 0 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + # modify parent -> don't modify subset + subset = df[:]["a"][0:2] + df._clear_item_cache() + with tm.assert_cow_warning(warn_copy_on_write): + df.iloc[0, 0] = 0 + expected = Series([1, 2], name="a") + if using_copy_on_write: + tm.assert_series_equal(subset, expected) + else: + assert subset.iloc[0] == 0 + + +@pytest.mark.parametrize( + "method", + [ + lambda s: s["a":"c"]["a":"b"], # type: ignore[misc] + lambda s: s.iloc[0:3].iloc[0:2], + lambda s: s.loc["a":"c"].loc["a":"b"], # type: ignore[misc] + lambda s: s.loc["a":"c"] # type: ignore[misc] + .iloc[0:3] + .iloc[0:2] + .loc["a":"b"] # type: ignore[misc] + .iloc[0:1], + ], + ids=["getitem", "iloc", "loc", "long-chain"], +) +def test_subset_chained_getitem_series( + backend, method, using_copy_on_write, warn_copy_on_write +): + # Case: creating a subset using multiple, chained getitem calls using views + # still needs to guarantee proper CoW behaviour + _, _, Series = backend + s = Series([1, 2, 3], index=["a", "b", "c"]) + s_orig = s.copy() + + # modify subset -> don't modify parent + subset = method(s) + with tm.assert_cow_warning(warn_copy_on_write): + subset.iloc[0] = 0 + if using_copy_on_write: + tm.assert_series_equal(s, s_orig) + else: + assert s.iloc[0] == 0 + + # modify parent -> don't modify subset + subset = s.iloc[0:3].iloc[0:2] + with tm.assert_cow_warning(warn_copy_on_write): + s.iloc[0] = 0 + expected = Series([1, 2], index=["a", "b"]) + if using_copy_on_write: + tm.assert_series_equal(subset, expected) + else: + assert subset.iloc[0] == 0 + + +def test_subset_chained_single_block_row( + using_copy_on_write, using_array_manager, warn_copy_on_write +): + # not parametrizing this for dtype backend, since this explicitly tests single block + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) + df_orig = df.copy() + + # modify subset -> don't modify parent + subset = df[:].iloc[0].iloc[0:2] + with tm.assert_cow_warning(warn_copy_on_write): + subset.iloc[0] = 0 + if using_copy_on_write or using_array_manager: + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + # modify parent -> don't modify subset + subset = df[:].iloc[0].iloc[0:2] + with tm.assert_cow_warning(warn_copy_on_write): + df.iloc[0, 0] = 0 + expected = Series([1, 4], index=["a", "b"], name=0) + if using_copy_on_write or using_array_manager: + tm.assert_series_equal(subset, expected) + else: + assert subset.iloc[0] == 0 + + +@pytest.mark.parametrize( + "method", + [ + lambda df: df[:], + lambda df: df.loc[:, :], + lambda df: df.loc[:], + lambda df: df.iloc[:, :], + lambda df: df.iloc[:], + ], + ids=["getitem", "loc", "loc-rows", "iloc", "iloc-rows"], +) +def test_null_slice(backend, method, using_copy_on_write, warn_copy_on_write): + # Case: also all variants of indexing with a null slice (:) should return + # new objects to ensure we correctly use CoW for the results + dtype_backend, DataFrame, _ = backend + df = DataFrame({"a": 
[1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) + df_orig = df.copy() + + df2 = method(df) + + # we always return new objects (shallow copy), regardless of CoW or not + assert df2 is not df + + # and those trigger CoW when mutated + with tm.assert_cow_warning(warn_copy_on_write): + df2.iloc[0, 0] = 0 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + +@pytest.mark.parametrize( + "method", + [ + lambda s: s[:], + lambda s: s.loc[:], + lambda s: s.iloc[:], + ], + ids=["getitem", "loc", "iloc"], +) +def test_null_slice_series(backend, method, using_copy_on_write, warn_copy_on_write): + _, _, Series = backend + s = Series([1, 2, 3], index=["a", "b", "c"]) + s_orig = s.copy() + + s2 = method(s) + + # we always return new objects, regardless of CoW or not + assert s2 is not s + + # and those trigger CoW when mutated + with tm.assert_cow_warning(warn_copy_on_write): + s2.iloc[0] = 0 + if using_copy_on_write: + tm.assert_series_equal(s, s_orig) + else: + assert s.iloc[0] == 0 + + +# TODO add more tests modifying the parent + + +# ----------------------------------------------------------------------------- +# Series -- Indexing operations taking subset + modifying the subset/parent + + +def test_series_getitem_slice(backend, using_copy_on_write, warn_copy_on_write): + # Case: taking a slice of a Series + afterwards modifying the subset + _, _, Series = backend + s = Series([1, 2, 3], index=["a", "b", "c"]) + s_orig = s.copy() + + subset = s[:] + assert np.shares_memory(get_array(subset), get_array(s)) + + with tm.assert_cow_warning(warn_copy_on_write): + subset.iloc[0] = 0 + + if using_copy_on_write: + assert not np.shares_memory(get_array(subset), get_array(s)) + + expected = Series([0, 2, 3], index=["a", "b", "c"]) + tm.assert_series_equal(subset, expected) + + if using_copy_on_write: + # original parent series is not modified (CoW) + tm.assert_series_equal(s, s_orig) + else: + # original parent series is actually updated + assert s.iloc[0] == 0 + + +def test_series_getitem_ellipsis(using_copy_on_write, warn_copy_on_write): + # Case: taking a view of a Series using Ellipsis + afterwards modifying the subset + s = Series([1, 2, 3]) + s_orig = s.copy() + + subset = s[...] 
+ assert np.shares_memory(get_array(subset), get_array(s)) + + with tm.assert_cow_warning(warn_copy_on_write): + subset.iloc[0] = 0 + + if using_copy_on_write: + assert not np.shares_memory(get_array(subset), get_array(s)) + + expected = Series([0, 2, 3]) + tm.assert_series_equal(subset, expected) + + if using_copy_on_write: + # original parent series is not modified (CoW) + tm.assert_series_equal(s, s_orig) + else: + # original parent series is actually updated + assert s.iloc[0] == 0 + + +@pytest.mark.parametrize( + "indexer", + [slice(0, 2), np.array([True, True, False]), np.array([0, 1])], + ids=["slice", "mask", "array"], +) +def test_series_subset_set_with_indexer( + backend, indexer_si, indexer, using_copy_on_write, warn_copy_on_write +): + # Case: setting values in a viewing Series with an indexer + _, _, Series = backend + s = Series([1, 2, 3], index=["a", "b", "c"]) + s_orig = s.copy() + subset = s[:] + + warn = None + msg = "Series.__setitem__ treating keys as positions is deprecated" + if ( + indexer_si is tm.setitem + and isinstance(indexer, np.ndarray) + and indexer.dtype.kind == "i" + ): + warn = FutureWarning + if warn_copy_on_write: + with tm.assert_cow_warning(raise_on_extra_warnings=warn is not None): + indexer_si(subset)[indexer] = 0 + else: + with tm.assert_produces_warning(warn, match=msg): + indexer_si(subset)[indexer] = 0 + expected = Series([0, 0, 3], index=["a", "b", "c"]) + tm.assert_series_equal(subset, expected) + + if using_copy_on_write: + tm.assert_series_equal(s, s_orig) + else: + tm.assert_series_equal(s, expected) + + +# ----------------------------------------------------------------------------- +# del operator + + +def test_del_frame(backend, using_copy_on_write, warn_copy_on_write): + # Case: deleting a column with `del` on a viewing child dataframe should + # not modify parent + update the references + dtype_backend, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df[:] + + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + del df2["b"] + + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + tm.assert_frame_equal(df, df_orig) + tm.assert_frame_equal(df2, df_orig[["a", "c"]]) + df2._mgr._verify_integrity() + + with tm.assert_cow_warning(warn_copy_on_write and dtype_backend == "numpy"): + df.loc[0, "b"] = 200 + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + df_orig = df.copy() + + with tm.assert_cow_warning(warn_copy_on_write): + df2.loc[0, "a"] = 100 + if using_copy_on_write: + # modifying child after deleting a column still doesn't update parent + tm.assert_frame_equal(df, df_orig) + else: + assert df.loc[0, "a"] == 100 + + +def test_del_series(backend): + _, _, Series = backend + s = Series([1, 2, 3], index=["a", "b", "c"]) + s_orig = s.copy() + s2 = s[:] + + assert np.shares_memory(get_array(s), get_array(s2)) + + del s2["a"] + + assert not np.shares_memory(get_array(s), get_array(s2)) + tm.assert_series_equal(s, s_orig) + tm.assert_series_equal(s2, s_orig[["b", "c"]]) + + # modifying s2 doesn't need copy on write (due to `del`, s2 is backed by new array) + values = s2.values + s2.loc["b"] = 100 + assert values[0] == 100 + + +# ----------------------------------------------------------------------------- +# Accessing column as Series + + +def test_column_as_series( + backend, using_copy_on_write, warn_copy_on_write, using_array_manager +): + # Case: selecting a single column now also uses Copy-on-Write + dtype_backend, 
DataFrame, Series = backend + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + + s = df["a"] + + assert np.shares_memory(get_array(s, "a"), get_array(df, "a")) + + if using_copy_on_write or using_array_manager: + s[0] = 0 + else: + if warn_copy_on_write: + with tm.assert_cow_warning(): + s[0] = 0 + else: + warn = SettingWithCopyWarning if dtype_backend == "numpy" else None + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(warn): + s[0] = 0 + + expected = Series([0, 2, 3], name="a") + tm.assert_series_equal(s, expected) + if using_copy_on_write: + # assert not np.shares_memory(s.values, get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + # ensure cached series on getitem is not the changed series + tm.assert_series_equal(df["a"], df_orig["a"]) + else: + df_orig.iloc[0, 0] = 0 + tm.assert_frame_equal(df, df_orig) + + +def test_column_as_series_set_with_upcast( + backend, using_copy_on_write, using_array_manager, warn_copy_on_write +): + # Case: selecting a single column now also uses Copy-on-Write -> when + # setting a value causes an upcast, we don't need to update the parent + # DataFrame through the cache mechanism + dtype_backend, DataFrame, Series = backend + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + + s = df["a"] + if dtype_backend == "nullable": + with tm.assert_cow_warning(warn_copy_on_write): + with pytest.raises(TypeError, match="Invalid value"): + s[0] = "foo" + expected = Series([1, 2, 3], name="a") + elif using_copy_on_write or warn_copy_on_write or using_array_manager: + # TODO(CoW-warn) assert the FutureWarning for CoW is also raised + with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"): + s[0] = "foo" + expected = Series(["foo", 2, 3], dtype=object, name="a") + else: + with pd.option_context("chained_assignment", "warn"): + msg = "|".join( + [ + "A value is trying to be set on a copy of a slice from a DataFrame", + "Setting an item of incompatible dtype is deprecated", + ] + ) + with tm.assert_produces_warning( + (SettingWithCopyWarning, FutureWarning), match=msg + ): + s[0] = "foo" + expected = Series(["foo", 2, 3], dtype=object, name="a") + + tm.assert_series_equal(s, expected) + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + # ensure cached series on getitem is not the changed series + tm.assert_series_equal(df["a"], df_orig["a"]) + else: + df_orig["a"] = expected + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "method", + [ + lambda df: df["a"], + lambda df: df.loc[:, "a"], + lambda df: df.iloc[:, 0], + ], + ids=["getitem", "loc", "iloc"], +) +def test_column_as_series_no_item_cache( + request, + backend, + method, + using_copy_on_write, + warn_copy_on_write, + using_array_manager, +): + # Case: selecting a single column (which now also uses Copy-on-Write to protect + # the view) should always give a new object (i.e. 
not make use of a cache) + dtype_backend, DataFrame, _ = backend + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + + s1 = method(df) + s2 = method(df) + + is_iloc = "iloc" in request.node.name + if using_copy_on_write or warn_copy_on_write or is_iloc: + assert s1 is not s2 + else: + assert s1 is s2 + + if using_copy_on_write or using_array_manager: + s1.iloc[0] = 0 + elif warn_copy_on_write: + with tm.assert_cow_warning(): + s1.iloc[0] = 0 + else: + warn = SettingWithCopyWarning if dtype_backend == "numpy" else None + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(warn): + s1.iloc[0] = 0 + + if using_copy_on_write: + tm.assert_series_equal(s2, df_orig["a"]) + tm.assert_frame_equal(df, df_orig) + else: + assert s2.iloc[0] == 0 + + +# TODO add tests for other indexing methods on the Series + + +def test_dataframe_add_column_from_series(backend, using_copy_on_write): + # Case: adding a new column to a DataFrame from an existing column/series + # -> delays copy under CoW + _, DataFrame, Series = backend + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + + s = Series([10, 11, 12]) + df["new"] = s + if using_copy_on_write: + assert np.shares_memory(get_array(df, "new"), get_array(s)) + else: + assert not np.shares_memory(get_array(df, "new"), get_array(s)) + + # editing series -> doesn't modify column in frame + s[0] = 0 + expected = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "new": [10, 11, 12]}) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("val", [100, "a"]) +@pytest.mark.parametrize( + "indexer_func, indexer", + [ + (tm.loc, (0, "a")), + (tm.iloc, (0, 0)), + (tm.loc, ([0], "a")), + (tm.iloc, ([0], 0)), + (tm.loc, (slice(None), "a")), + (tm.iloc, (slice(None), 0)), + ], +) +@pytest.mark.parametrize( + "col", [[0.1, 0.2, 0.3], [7, 8, 9]], ids=["mixed-block", "single-block"] +) +def test_set_value_copy_only_necessary_column( + using_copy_on_write, warn_copy_on_write, indexer_func, indexer, val, col +): + # When setting inplace, only copy column that is modified instead of the whole + # block (by splitting the block) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": col}) + df_orig = df.copy() + view = df[:] + + if val == "a" and not warn_copy_on_write: + with tm.assert_produces_warning( + FutureWarning, match="Setting an item of incompatible dtype is deprecated" + ): + indexer_func(df)[indexer] = val + if val == "a" and warn_copy_on_write: + with tm.assert_produces_warning( + FutureWarning, match="incompatible dtype|Setting a value on a view" + ): + indexer_func(df)[indexer] = val + else: + with tm.assert_cow_warning(warn_copy_on_write and val == 100): + indexer_func(df)[indexer] = val + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(view, "b")) + assert not np.shares_memory(get_array(df, "a"), get_array(view, "a")) + tm.assert_frame_equal(view, df_orig) + else: + assert np.shares_memory(get_array(df, "c"), get_array(view, "c")) + if val == "a": + assert not np.shares_memory(get_array(df, "a"), get_array(view, "a")) + else: + assert np.shares_memory(get_array(df, "a"), get_array(view, "a")) + + +def test_series_midx_slice(using_copy_on_write, warn_copy_on_write): + ser = Series([1, 2, 3], index=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 5]])) + ser_orig = ser.copy() + result = ser[1] + assert np.shares_memory(get_array(ser), get_array(result)) + with tm.assert_cow_warning(warn_copy_on_write): + result.iloc[0] = 100 + if 
using_copy_on_write: + tm.assert_series_equal(ser, ser_orig) + else: + expected = Series( + [100, 2, 3], index=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 5]]) + ) + tm.assert_series_equal(ser, expected) + + +def test_getitem_midx_slice( + using_copy_on_write, warn_copy_on_write, using_array_manager +): + df = DataFrame({("a", "x"): [1, 2], ("a", "y"): 1, ("b", "x"): 2}) + df_orig = df.copy() + new_df = df[("a",)] + + if using_copy_on_write: + assert not new_df._mgr._has_no_reference(0) + + if not using_array_manager: + assert np.shares_memory(get_array(df, ("a", "x")), get_array(new_df, "x")) + if using_copy_on_write: + new_df.iloc[0, 0] = 100 + tm.assert_frame_equal(df_orig, df) + else: + if warn_copy_on_write: + with tm.assert_cow_warning(): + new_df.iloc[0, 0] = 100 + else: + with pd.option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(SettingWithCopyWarning): + new_df.iloc[0, 0] = 100 + assert df.iloc[0, 0] == 100 + + +def test_series_midx_tuples_slice(using_copy_on_write, warn_copy_on_write): + ser = Series( + [1, 2, 3], + index=pd.MultiIndex.from_tuples([((1, 2), 3), ((1, 2), 4), ((2, 3), 4)]), + ) + result = ser[(1, 2)] + assert np.shares_memory(get_array(ser), get_array(result)) + with tm.assert_cow_warning(warn_copy_on_write): + result.iloc[0] = 100 + if using_copy_on_write: + expected = Series( + [1, 2, 3], + index=pd.MultiIndex.from_tuples([((1, 2), 3), ((1, 2), 4), ((2, 3), 4)]), + ) + tm.assert_series_equal(ser, expected) + + +def test_midx_read_only_bool_indexer(): + # GH#56635 + def mklbl(prefix, n): + return [f"{prefix}{i}" for i in range(n)] + + idx = pd.MultiIndex.from_product( + [mklbl("A", 4), mklbl("B", 2), mklbl("C", 4), mklbl("D", 2)] + ) + cols = pd.MultiIndex.from_tuples( + [("a", "foo"), ("a", "bar"), ("b", "foo"), ("b", "bah")], names=["lvl0", "lvl1"] + ) + df = DataFrame(1, index=idx, columns=cols).sort_index().sort_index(axis=1) + + mask = df[("a", "foo")] == 1 + expected_mask = mask.copy() + result = df.loc[pd.IndexSlice[mask, :, ["C1", "C3"]], :] + expected = df.loc[pd.IndexSlice[:, :, ["C1", "C3"]], :] + tm.assert_frame_equal(result, expected) + tm.assert_series_equal(mask, expected_mask) + + +def test_loc_enlarging_with_dataframe(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + rhs = DataFrame({"b": [1, 2, 3], "c": [4, 5, 6]}) + rhs_orig = rhs.copy() + df.loc[:, ["b", "c"]] = rhs + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(rhs, "b")) + assert np.shares_memory(get_array(df, "c"), get_array(rhs, "c")) + assert not df._mgr._has_no_reference(1) + else: + assert not np.shares_memory(get_array(df, "b"), get_array(rhs, "b")) + + df.iloc[0, 1] = 100 + tm.assert_frame_equal(rhs, rhs_orig) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_methods.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_methods.py new file mode 100644 index 0000000000000000000000000000000000000000..5d1eefccbb1e723320da889f2874b79b12ce3d0e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_methods.py @@ -0,0 +1,2055 @@ +import numpy as np +import pytest + +from pandas.errors import SettingWithCopyWarning + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Period, + Series, + Timestamp, + date_range, + option_context, + period_range, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +def test_copy(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 
0.2, 0.3]}) + df_copy = df.copy() + + # the deep copy by default takes a shallow copy of the Index + assert df_copy.index is not df.index + assert df_copy.columns is not df.columns + assert df_copy.index.is_(df.index) + assert df_copy.columns.is_(df.columns) + + # the deep copy doesn't share memory + assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a")) + if using_copy_on_write: + assert not df_copy._mgr.blocks[0].refs.has_reference() + assert not df_copy._mgr.blocks[1].refs.has_reference() + + # mutating copy doesn't mutate original + df_copy.iloc[0, 0] = 0 + assert df.iloc[0, 0] == 1 + + +def test_copy_shallow(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_copy = df.copy(deep=False) + + # the shallow copy also makes a shallow copy of the index + if using_copy_on_write: + assert df_copy.index is not df.index + assert df_copy.columns is not df.columns + assert df_copy.index.is_(df.index) + assert df_copy.columns.is_(df.columns) + else: + assert df_copy.index is df.index + assert df_copy.columns is df.columns + + # the shallow copy still shares memory + assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a")) + if using_copy_on_write: + assert df_copy._mgr.blocks[0].refs.has_reference() + assert df_copy._mgr.blocks[1].refs.has_reference() + + if using_copy_on_write: + # mutating shallow copy doesn't mutate original + df_copy.iloc[0, 0] = 0 + assert df.iloc[0, 0] == 1 + # mutating triggered a copy-on-write -> no longer shares memory + assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a")) + # but still shares memory for the other columns/blocks + assert np.shares_memory(get_array(df_copy, "c"), get_array(df, "c")) + else: + # mutating shallow copy does mutate original + with tm.assert_cow_warning(warn_copy_on_write): + df_copy.iloc[0, 0] = 0 + assert df.iloc[0, 0] == 0 + # and still shares memory + assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a")) + + +@pytest.mark.parametrize("copy", [True, None, False]) +@pytest.mark.parametrize( + "method", + [ + lambda df, copy: df.rename(columns=str.lower, copy=copy), + lambda df, copy: df.reindex(columns=["a", "c"], copy=copy), + lambda df, copy: df.reindex_like(df, copy=copy), + lambda df, copy: df.align(df, copy=copy)[0], + lambda df, copy: df.set_axis(["a", "b", "c"], axis="index", copy=copy), + lambda df, copy: df.rename_axis(index="test", copy=copy), + lambda df, copy: df.rename_axis(columns="test", copy=copy), + lambda df, copy: df.astype({"b": "int64"}, copy=copy), + # lambda df, copy: df.swaplevel(0, 0, copy=copy), + lambda df, copy: df.swapaxes(0, 0, copy=copy), + lambda df, copy: df.truncate(0, 5, copy=copy), + lambda df, copy: df.infer_objects(copy=copy), + lambda df, copy: df.to_timestamp(copy=copy), + lambda df, copy: df.to_period(freq="D", copy=copy), + lambda df, copy: df.tz_localize("US/Central", copy=copy), + lambda df, copy: df.tz_convert("US/Central", copy=copy), + lambda df, copy: df.set_flags(allows_duplicate_labels=False, copy=copy), + ], + ids=[ + "rename", + "reindex", + "reindex_like", + "align", + "set_axis", + "rename_axis0", + "rename_axis1", + "astype", + # "swaplevel", # only series + "swapaxes", + "truncate", + "infer_objects", + "to_timestamp", + "to_period", + "tz_localize", + "tz_convert", + "set_flags", + ], +) +def test_methods_copy_keyword( + request, method, copy, using_copy_on_write, using_array_manager +): + index = None + if "to_timestamp" in request.node.callspec.id: +
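# to_timestamp can only start from a PeriodIndex, so build one for this case + # (likewise, the to_period/tz_localize cases below need a naive DatetimeIndex + # and tz_convert a tz-aware one) +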
index = period_range("2012-01-01", freq="D", periods=3) + elif "to_period" in request.node.callspec.id: + index = date_range("2012-01-01", freq="D", periods=3) + elif "tz_localize" in request.node.callspec.id: + index = date_range("2012-01-01", freq="D", periods=3) + elif "tz_convert" in request.node.callspec.id: + index = date_range("2012-01-01", freq="D", periods=3, tz="Europe/Brussels") + + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}, index=index) + + if "swapaxes" in request.node.callspec.id: + msg = "'DataFrame.swapaxes' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df2 = method(df, copy=copy) + else: + df2 = method(df, copy=copy) + + share_memory = using_copy_on_write or copy is False + + if request.node.callspec.id.startswith("reindex-"): + # TODO copy=False without CoW still returns a copy in this case + if not using_copy_on_write and not using_array_manager and copy is False: + share_memory = False + + if share_memory: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + +@pytest.mark.parametrize("copy", [True, None, False]) +@pytest.mark.parametrize( + "method", + [ + lambda ser, copy: ser.rename(index={0: 100}, copy=copy), + lambda ser, copy: ser.rename(None, copy=copy), + lambda ser, copy: ser.reindex(index=ser.index, copy=copy), + lambda ser, copy: ser.reindex_like(ser, copy=copy), + lambda ser, copy: ser.align(ser, copy=copy)[0], + lambda ser, copy: ser.set_axis(["a", "b", "c"], axis="index", copy=copy), + lambda ser, copy: ser.rename_axis(index="test", copy=copy), + lambda ser, copy: ser.astype("int64", copy=copy), + lambda ser, copy: ser.swaplevel(0, 1, copy=copy), + lambda ser, copy: ser.swapaxes(0, 0, copy=copy), + lambda ser, copy: ser.truncate(0, 5, copy=copy), + lambda ser, copy: ser.infer_objects(copy=copy), + lambda ser, copy: ser.to_timestamp(copy=copy), + lambda ser, copy: ser.to_period(freq="D", copy=copy), + lambda ser, copy: ser.tz_localize("US/Central", copy=copy), + lambda ser, copy: ser.tz_convert("US/Central", copy=copy), + lambda ser, copy: ser.set_flags(allows_duplicate_labels=False, copy=copy), + ], + ids=[ + "rename (dict)", + "rename", + "reindex", + "reindex_like", + "align", + "set_axis", + "rename_axis0", + "astype", + "swaplevel", + "swapaxes", + "truncate", + "infer_objects", + "to_timestamp", + "to_period", + "tz_localize", + "tz_convert", + "set_flags", + ], +) +def test_methods_series_copy_keyword(request, method, copy, using_copy_on_write): + index = None + if "to_timestamp" in request.node.callspec.id: + index = period_range("2012-01-01", freq="D", periods=3) + elif "to_period" in request.node.callspec.id: + index = date_range("2012-01-01", freq="D", periods=3) + elif "tz_localize" in request.node.callspec.id: + index = date_range("2012-01-01", freq="D", periods=3) + elif "tz_convert" in request.node.callspec.id: + index = date_range("2012-01-01", freq="D", periods=3, tz="Europe/Brussels") + elif "swaplevel" in request.node.callspec.id: + index = MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]]) + + ser = Series([1, 2, 3], index=index) + + if "swapaxes" in request.node.callspec.id: + msg = "'Series.swapaxes' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + ser2 = method(ser, copy=copy) + else: + ser2 = method(ser, copy=copy) + + share_memory = using_copy_on_write or copy is False + + if share_memory: + assert np.shares_memory(get_array(ser2), get_array(ser)) + else: 
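+ # without CoW, copy=True and the default copy=None both make an actual copy, + # so the result must not share memory with the input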
+ assert not np.shares_memory(get_array(ser2), get_array(ser)) + + +@pytest.mark.parametrize("copy", [True, None, False]) +def test_transpose_copy_keyword(using_copy_on_write, copy, using_array_manager): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + result = df.transpose(copy=copy) + share_memory = using_copy_on_write or copy is False or copy is None + share_memory = share_memory and not using_array_manager + + if share_memory: + assert np.shares_memory(get_array(df, "a"), get_array(result, 0)) + else: + assert not np.shares_memory(get_array(df, "a"), get_array(result, 0)) + + +# ----------------------------------------------------------------------------- +# DataFrame methods returning new DataFrame using shallow copy + + +def test_reset_index(using_copy_on_write): + # Case: resetting the index (i.e. adding a new column) + mutating the + # resulting dataframe + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}, index=[10, 11, 12] + ) + df_orig = df.copy() + df2 = df.reset_index() + df2._mgr._verify_integrity() + + if using_copy_on_write: + # still shares memory (df2 is a shallow copy) + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + # mutating df2 triggers a copy-on-write for that column / block + df2.iloc[0, 2] = 0 + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("index", [pd.RangeIndex(0, 2), Index([1, 2])]) +def test_reset_index_series_drop(using_copy_on_write, index): + ser = Series([1, 2], index=index) + ser_orig = ser.copy() + ser2 = ser.reset_index(drop=True) + if using_copy_on_write: + assert np.shares_memory(get_array(ser), get_array(ser2)) + assert not ser._mgr._has_no_reference(0) + else: + assert not np.shares_memory(get_array(ser), get_array(ser2)) + + ser2.iloc[0] = 100 + tm.assert_series_equal(ser, ser_orig) + + +def test_groupby_column_index_in_references(): + df = DataFrame( + {"A": ["a", "b", "c", "d"], "B": [1, 2, 3, 4], "C": ["a", "a", "b", "b"]} + ) + df = df.set_index("A") + key = df["C"] + result = df.groupby(key, observed=True).sum() + expected = df.groupby("C", observed=True).sum() + tm.assert_frame_equal(result, expected) + + +def test_rename_columns(using_copy_on_write): + # Case: renaming columns returns a new dataframe + # + afterwards modifying the result + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.rename(columns=str.upper) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "A"), get_array(df, "a")) + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "C"), get_array(df, "c")) + expected = DataFrame({"A": [0, 2, 3], "B": [4, 5, 6], "C": [0.1, 0.2, 0.3]}) + tm.assert_frame_equal(df2, expected) + tm.assert_frame_equal(df, df_orig) + + +def test_rename_columns_modify_parent(using_copy_on_write): + # Case: renaming columns returns a new dataframe + # + afterwards modifying the original (parent) dataframe + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df2 = df.rename(columns=str.upper) + df2_orig = df2.copy() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "A"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "A"), 
get_array(df, "a")) + df.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "C"), get_array(df, "c")) + expected = DataFrame({"a": [0, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + tm.assert_frame_equal(df, expected) + tm.assert_frame_equal(df2, df2_orig) + + +def test_pipe(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1.5}) + df_orig = df.copy() + + def testfunc(df): + return df + + df2 = df.pipe(testfunc) + + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column + df2.iloc[0, 0] = 0 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + expected = DataFrame({"a": [0, 2, 3], "b": 1.5}) + tm.assert_frame_equal(df, expected) + + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + +def test_pipe_modify_df(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1.5}) + df_orig = df.copy() + + def testfunc(df): + df.iloc[0, 0] = 100 + return df + + df2 = df.pipe(testfunc) + + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + expected = DataFrame({"a": [100, 2, 3], "b": 1.5}) + tm.assert_frame_equal(df, expected) + + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + +def test_reindex_columns(using_copy_on_write): + # Case: reindexing the column returns a new dataframe + # + afterwards modifying the result + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.reindex(columns=["a", "c"]) + + if using_copy_on_write: + # still shares memory (df2 is a shallow copy) + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + # mutating df2 triggers a copy-on-write for that column + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "index", + [ + lambda idx: idx, + lambda idx: idx.view(), + lambda idx: idx.copy(), + lambda idx: list(idx), + ], + ids=["identical", "view", "copy", "values"], +) +def test_reindex_rows(index, using_copy_on_write): + # Case: reindexing the rows with an index that matches the current index + # can use a shallow copy + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.reindex(index=index(df.index)) + + if using_copy_on_write: + # still shares memory (df2 is a shallow copy) + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + # mutating df2 triggers a copy-on-write for that column + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + tm.assert_frame_equal(df, df_orig) + + +def test_drop_on_column(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 
5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.drop(columns="a") + df2._mgr._verify_integrity() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + else: + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + tm.assert_frame_equal(df, df_orig) + + +def test_select_dtypes(using_copy_on_write): + # Case: selecting columns using `select_dtypes()` returns a new dataframe + # + afterwards modifying the result + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.select_dtypes("int64") + df2._mgr._verify_integrity() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column/block + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "filter_kwargs", [{"items": ["a"]}, {"like": "a"}, {"regex": "a"}] +) +def test_filter(using_copy_on_write, filter_kwargs): + # Case: selecting columns using `filter()` returns a new dataframe + # + afterwards modifying the result + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.filter(**filter_kwargs) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column/block + if using_copy_on_write: + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_shift_no_op(using_copy_on_write): + df = DataFrame( + [[1, 2], [3, 4], [5, 6]], + index=date_range("2020-01-01", "2020-01-03"), + columns=["a", "b"], + ) + df_orig = df.copy() + df2 = df.shift(periods=0) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + tm.assert_frame_equal(df2, df_orig) + + +def test_shift_index(using_copy_on_write): + df = DataFrame( + [[1, 2], [3, 4], [5, 6]], + index=date_range("2020-01-01", "2020-01-03"), + columns=["a", "b"], + ) + df2 = df.shift(periods=1, axis=0) + + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + +def test_shift_rows_freq(using_copy_on_write): + df = DataFrame( + [[1, 2], [3, 4], [5, 6]], + index=date_range("2020-01-01", "2020-01-03"), + columns=["a", "b"], + ) + df_orig = df.copy() + df_orig.index = date_range("2020-01-02", "2020-01-04") + df2 = df.shift(periods=1, freq="1D") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df.iloc[0, 0] = 0 + if 
using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + tm.assert_frame_equal(df2, df_orig) + + +def test_shift_columns(using_copy_on_write, warn_copy_on_write): + df = DataFrame( + [[1, 2], [3, 4], [5, 6]], columns=date_range("2020-01-01", "2020-01-02") + ) + df2 = df.shift(periods=1, axis=1) + + assert np.shares_memory(get_array(df2, "2020-01-02"), get_array(df, "2020-01-01")) + with tm.assert_cow_warning(warn_copy_on_write): + df.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory( + get_array(df2, "2020-01-02"), get_array(df, "2020-01-01") + ) + expected = DataFrame( + [[np.nan, 1], [np.nan, 3], [np.nan, 5]], + columns=date_range("2020-01-01", "2020-01-02"), + ) + tm.assert_frame_equal(df2, expected) + + +def test_pop(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + view_original = df[:] + result = df.pop("a") + + assert np.shares_memory(result.values, get_array(view_original, "a")) + assert np.shares_memory(get_array(df, "b"), get_array(view_original, "b")) + + if using_copy_on_write: + result.iloc[0] = 0 + assert not np.shares_memory(result.values, get_array(view_original, "a")) + with tm.assert_cow_warning(warn_copy_on_write): + df.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "b"), get_array(view_original, "b")) + tm.assert_frame_equal(view_original, df_orig) + else: + expected = DataFrame({"a": [1, 2, 3], "b": [0, 5, 6], "c": [0.1, 0.2, 0.3]}) + tm.assert_frame_equal(view_original, expected) + + +@pytest.mark.parametrize( + "func", + [ + lambda x, y: x.align(y), + lambda x, y: x.align(y.a, axis=0), + lambda x, y: x.align(y.a.iloc[slice(0, 1)], axis=1), + ], +) +def test_align_frame(using_copy_on_write, func): + df = DataFrame({"a": [1, 2, 3], "b": "a"}) + df_orig = df.copy() + df_changed = df[["b", "a"]].copy() + df2, _ = func(df, df_changed) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_align_series(using_copy_on_write): + ser = Series([1, 2]) + ser_orig = ser.copy() + ser_other = ser.copy() + ser2, ser_other_result = ser.align(ser_other) + + if using_copy_on_write: + assert np.shares_memory(ser2.values, ser.values) + assert np.shares_memory(ser_other_result.values, ser_other.values) + else: + assert not np.shares_memory(ser2.values, ser.values) + assert not np.shares_memory(ser_other_result.values, ser_other.values) + + ser2.iloc[0] = 0 + ser_other_result.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(ser2.values, ser.values) + assert not np.shares_memory(ser_other_result.values, ser_other.values) + tm.assert_series_equal(ser, ser_orig) + tm.assert_series_equal(ser_other, ser_orig) + + +def test_align_copy_false(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df_orig = df.copy() + df2, df3 = df.align(df, copy=False) + + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + if using_copy_on_write: + df2.loc[0, "a"] = 0 + tm.assert_frame_equal(df, df_orig) # Original is unchanged + + df3.loc[0, "a"] = 0 + tm.assert_frame_equal(df, df_orig) # Original is unchanged + + +def 
test_align_with_series_copy_false(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + ser = Series([1, 2, 3], name="x") + ser_orig = ser.copy() + df_orig = df.copy() + df2, ser2 = df.align(ser, copy=False, axis=0) + + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + assert np.shares_memory(get_array(ser, "x"), get_array(ser2, "x")) + + if using_copy_on_write: + df2.loc[0, "a"] = 0 + tm.assert_frame_equal(df, df_orig)  # Original is unchanged + + ser2.loc[0] = 0 + tm.assert_series_equal(ser, ser_orig)  # Original is unchanged + + +def test_to_frame(using_copy_on_write, warn_copy_on_write): + # Case: converting a Series to a DataFrame with to_frame + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + + df = ser[:].to_frame() + + # currently this always returns a "view" + assert np.shares_memory(ser.values, get_array(df, 0)) + + with tm.assert_cow_warning(warn_copy_on_write): + df.iloc[0, 0] = 0 + + if using_copy_on_write: + # mutating df triggers a copy-on-write for that column + assert not np.shares_memory(ser.values, get_array(df, 0)) + tm.assert_series_equal(ser, ser_orig) + else: + # but currently to_frame() actually returns a view -> mutates parent + expected = ser_orig.copy() + expected.iloc[0] = 0 + tm.assert_series_equal(ser, expected) + + # modify original series -> don't modify dataframe + df = ser[:].to_frame() + with tm.assert_cow_warning(warn_copy_on_write): + ser.iloc[0] = 0 + + if using_copy_on_write: + tm.assert_frame_equal(df, ser_orig.to_frame()) + else: + expected = ser_orig.copy().to_frame() + expected.iloc[0, 0] = 0 + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("ax", ["index", "columns"]) +def test_swapaxes_noop(using_copy_on_write, ax): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df_orig = df.copy() + msg = "'DataFrame.swapaxes' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df2 = df.swapaxes(ax, ax) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column/block + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_swapaxes_single_block(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["x", "y", "z"]) + df_orig = df.copy() + msg = "'DataFrame.swapaxes' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df2 = df.swapaxes("index", "columns") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "x"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "x"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column/block + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "x"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_swapaxes_read_only_array(): + df = DataFrame({"a": [1, 2], "b": 3}) + msg = "'DataFrame.swapaxes' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + df = df.swapaxes(axis1="index", axis2="columns") + df.iloc[0, 0] = 100 + expected = DataFrame({0: [100, 3], 1: [2, 3]}, index=["a", "b"]) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize( + "method, idx", + [ + (lambda df:
df.copy(deep=False).copy(deep=False), 0), + (lambda df: df.reset_index().reset_index(), 2), + (lambda df: df.rename(columns=str.upper).rename(columns=str.lower), 0), + (lambda df: df.copy(deep=False).select_dtypes(include="number"), 0), + ], + ids=["shallow-copy", "reset_index", "rename", "select_dtypes"], +) +def test_chained_methods(request, method, idx, using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + + # when not using CoW, only the copy() variant actually gives a view + df2_is_view = not using_copy_on_write and request.node.callspec.id == "shallow-copy" + + # modify df2 -> don't modify df + df2 = method(df) + with tm.assert_cow_warning(warn_copy_on_write and df2_is_view): + df2.iloc[0, idx] = 0 + if not df2_is_view: + tm.assert_frame_equal(df, df_orig) + + # modify df -> don't modify df2 + df2 = method(df) + with tm.assert_cow_warning(warn_copy_on_write and df2_is_view): + df.iloc[0, 0] = 0 + if not df2_is_view: + tm.assert_frame_equal(df2.iloc[:, idx:], df_orig) + + +@pytest.mark.parametrize("obj", [Series([1, 2], name="a"), DataFrame({"a": [1, 2]})]) +def test_to_timestamp(using_copy_on_write, obj): + obj.index = Index([Period("2012-1-1", freq="D"), Period("2012-1-2", freq="D")]) + + obj_orig = obj.copy() + obj2 = obj.to_timestamp() + + if using_copy_on_write: + assert np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + else: + assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + + # mutating obj2 triggers a copy-on-write for that column / block + obj2.iloc[0] = 0 + assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + tm.assert_equal(obj, obj_orig) + + +@pytest.mark.parametrize("obj", [Series([1, 2], name="a"), DataFrame({"a": [1, 2]})]) +def test_to_period(using_copy_on_write, obj): + obj.index = Index([Timestamp("2019-12-31"), Timestamp("2020-12-31")]) + + obj_orig = obj.copy() + obj2 = obj.to_period(freq="Y") + + if using_copy_on_write: + assert np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + else: + assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + + # mutating obj2 triggers a copy-on-write for that column / block + obj2.iloc[0] = 0 + assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + tm.assert_equal(obj, obj_orig) + + +def test_set_index(using_copy_on_write): + # GH 49473 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.set_index("a") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + else: + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + # mutating df2 triggers a copy-on-write for that column / block + df2.iloc[0, 1] = 0 + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + tm.assert_frame_equal(df, df_orig) + + +def test_set_index_mutating_parent_does_not_mutate_index(): + df = DataFrame({"a": [1, 2, 3], "b": 1}) + result = df.set_index("a") + expected = result.copy() + + df.iloc[0, 0] = 100 + tm.assert_frame_equal(result, expected) + + +def test_add_prefix(using_copy_on_write): + # GH 49473 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.add_prefix("CoW_") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "CoW_a"), get_array(df, "a")) + df2.iloc[0, 0] = 0 + + assert not np.shares_memory(get_array(df2, "CoW_a"), get_array(df, "a")) + + if using_copy_on_write: 
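+ # only the block that was written to got copied; the untouched "c" column + # still shares its buffer with the parent frame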
+ assert np.shares_memory(get_array(df2, "CoW_c"), get_array(df, "c")) + expected = DataFrame( + {"CoW_a": [0, 2, 3], "CoW_b": [4, 5, 6], "CoW_c": [0.1, 0.2, 0.3]} + ) + tm.assert_frame_equal(df2, expected) + tm.assert_frame_equal(df, df_orig) + + +def test_add_suffix(using_copy_on_write): + # GH 49473 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.add_suffix("_CoW") + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a_CoW"), get_array(df, "a")) + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "a_CoW"), get_array(df, "a")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c_CoW"), get_array(df, "c")) + expected = DataFrame( + {"a_CoW": [0, 2, 3], "b_CoW": [4, 5, 6], "c_CoW": [0.1, 0.2, 0.3]} + ) + tm.assert_frame_equal(df2, expected) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("axis, val", [(0, 5.5), (1, np.nan)]) +def test_dropna(using_copy_on_write, axis, val): + df = DataFrame({"a": [1, 2, 3], "b": [4, val, 6], "c": "d"}) + df_orig = df.copy() + df2 = df.dropna(axis=axis) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("val", [5, 5.5]) +def test_dropna_series(using_copy_on_write, val): + ser = Series([1, val, 4]) + ser_orig = ser.copy() + ser2 = ser.dropna() + + if using_copy_on_write: + assert np.shares_memory(ser2.values, ser.values) + else: + assert not np.shares_memory(ser2.values, ser.values) + + ser2.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(ser2.values, ser.values) + tm.assert_series_equal(ser, ser_orig) + + +@pytest.mark.parametrize( + "method", + [ + lambda df: df.head(), + lambda df: df.head(2), + lambda df: df.tail(), + lambda df: df.tail(3), + ], +) +def test_head_tail(method, using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = method(df) + df2._mgr._verify_integrity() + + if using_copy_on_write: + # We are explicitly deviating for CoW here to make an eager copy (avoids + # tracking references for very cheap ops) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + # modify df2 to trigger CoW for that block + with tm.assert_cow_warning(warn_copy_on_write): + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + # without CoW enabled, head and tail return views. Mutating df2 also mutates df. 
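+ # (head/tail are positional row slices, equivalent to df.iloc[:n], so + # without CoW the result is a plain view into the parent's blocks)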
+ assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + with tm.assert_cow_warning(warn_copy_on_write): + df2.iloc[0, 0] = 1 + tm.assert_frame_equal(df, df_orig) + + +def test_infer_objects(using_copy_on_write): + df = DataFrame({"a": [1, 2], "b": "c", "c": 1, "d": "x"}) + df_orig = df.copy() + df2 = df.infer_objects() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + df2.iloc[0, 0] = 0 + df2.iloc[0, 1] = "d" + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + tm.assert_frame_equal(df, df_orig) + + +def test_infer_objects_no_reference(using_copy_on_write): + df = DataFrame( + { + "a": [1, 2], + "b": "c", + "c": 1, + "d": Series( + [Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype="object" + ), + "e": "b", + } + ) + df = df.infer_objects() + + arr_a = get_array(df, "a") + arr_b = get_array(df, "b") + arr_d = get_array(df, "d") + + df.iloc[0, 0] = 0 + df.iloc[0, 1] = "d" + df.iloc[0, 3] = Timestamp("2018-12-31") + if using_copy_on_write: + assert np.shares_memory(arr_a, get_array(df, "a")) + # TODO(CoW): Block splitting causes references here + assert not np.shares_memory(arr_b, get_array(df, "b")) + assert np.shares_memory(arr_d, get_array(df, "d")) + + +def test_infer_objects_reference(using_copy_on_write): + df = DataFrame( + { + "a": [1, 2], + "b": "c", + "c": 1, + "d": Series( + [Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype="object" + ), + } + ) + view = df[:] # noqa: F841 + df = df.infer_objects() + + arr_a = get_array(df, "a") + arr_b = get_array(df, "b") + arr_d = get_array(df, "d") + + df.iloc[0, 0] = 0 + df.iloc[0, 1] = "d" + df.iloc[0, 3] = Timestamp("2018-12-31") + if using_copy_on_write: + assert not np.shares_memory(arr_a, get_array(df, "a")) + assert not np.shares_memory(arr_b, get_array(df, "b")) + assert np.shares_memory(arr_d, get_array(df, "d")) + + +@pytest.mark.parametrize( + "kwargs", + [ + {"before": "a", "after": "b", "axis": 1}, + {"before": 0, "after": 1, "axis": 0}, + ], +) +def test_truncate(using_copy_on_write, kwargs): + df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 2}) + df_orig = df.copy() + df2 = df.truncate(**kwargs) + df2._mgr._verify_integrity() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("method", ["assign", "drop_duplicates"]) +def test_assign_drop_duplicates(using_copy_on_write, method): + df = DataFrame({"a": [1, 2, 3]}) + df_orig = df.copy() + df2 = getattr(df, method)() + df2._mgr._verify_integrity() + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("obj", [Series([1, 2]), DataFrame({"a": [1, 2]})]) +def 
test_take(using_copy_on_write, obj): + # Check that no copy is made when we take all rows in original order + obj_orig = obj.copy() + obj2 = obj.take([0, 1]) + + if using_copy_on_write: + assert np.shares_memory(obj2.values, obj.values) + else: + assert not np.shares_memory(obj2.values, obj.values) + + obj2.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(obj2.values, obj.values) + tm.assert_equal(obj, obj_orig) + + +@pytest.mark.parametrize("obj", [Series([1, 2]), DataFrame({"a": [1, 2]})]) +def test_between_time(using_copy_on_write, obj): + obj.index = date_range("2018-04-09", periods=2, freq="1D20min") + obj_orig = obj.copy() + obj2 = obj.between_time("0:00", "1:00") + + if using_copy_on_write: + assert np.shares_memory(obj2.values, obj.values) + else: + assert not np.shares_memory(obj2.values, obj.values) + + obj2.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(obj2.values, obj.values) + tm.assert_equal(obj, obj_orig) + + +def test_reindex_like(using_copy_on_write): + df = DataFrame({"a": [1, 2], "b": "a"}) + other = DataFrame({"b": "a", "a": [1, 2]}) + + df_orig = df.copy() + df2 = df.reindex_like(other) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 1] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_sort_index(using_copy_on_write): + # GH 49473 + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + ser2 = ser.sort_index() + + if using_copy_on_write: + assert np.shares_memory(ser.values, ser2.values) + else: + assert not np.shares_memory(ser.values, ser2.values) + + # mutating ser triggers a copy-on-write for the column / block + ser2.iloc[0] = 0 + assert not np.shares_memory(ser2.values, ser.values) + tm.assert_series_equal(ser, ser_orig) + + +@pytest.mark.parametrize( + "obj, kwargs", + [(Series([1, 2, 3], name="a"), {}), (DataFrame({"a": [1, 2, 3]}), {"by": "a"})], +) +def test_sort_values(using_copy_on_write, obj, kwargs): + obj_orig = obj.copy() + obj2 = obj.sort_values(**kwargs) + + if using_copy_on_write: + assert np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + else: + assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + + # mutating df triggers a copy-on-write for the column / block + obj2.iloc[0] = 0 + assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a")) + tm.assert_equal(obj, obj_orig) + + +@pytest.mark.parametrize( + "obj, kwargs", + [(Series([1, 2, 3], name="a"), {}), (DataFrame({"a": [1, 2, 3]}), {"by": "a"})], +) +def test_sort_values_inplace(using_copy_on_write, obj, kwargs, warn_copy_on_write): + obj_orig = obj.copy() + view = obj[:] + obj.sort_values(inplace=True, **kwargs) + + assert np.shares_memory(get_array(obj, "a"), get_array(view, "a")) + + # mutating obj triggers a copy-on-write for the column / block + with tm.assert_cow_warning(warn_copy_on_write): + obj.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(obj, "a"), get_array(view, "a")) + tm.assert_equal(view, obj_orig) + else: + assert np.shares_memory(get_array(obj, "a"), get_array(view, "a")) + + +@pytest.mark.parametrize("decimals", [-1, 0, 1]) +def test_round(using_copy_on_write, warn_copy_on_write, decimals): + df = DataFrame({"a": [1, 2], "b": "c"}) + df_orig = df.copy() + df2 = df.round(decimals=decimals) + + if using_copy_on_write: + assert 
np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + # TODO: Make inplace by using out parameter of ndarray.round? + if decimals >= 0: + # Ensure lazy copy if no-op + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 1] = "d" + df2.iloc[0, 0] = 4 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_reorder_levels(using_copy_on_write): + index = MultiIndex.from_tuples( + [(1, 1), (1, 2), (2, 1), (2, 2)], names=["one", "two"] + ) + df = DataFrame({"a": [1, 2, 3, 4]}, index=index) + df_orig = df.copy() + df2 = df.reorder_levels(order=["two", "one"]) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_series_reorder_levels(using_copy_on_write): + index = MultiIndex.from_tuples( + [(1, 1), (1, 2), (2, 1), (2, 2)], names=["one", "two"] + ) + ser = Series([1, 2, 3, 4], index=index) + ser_orig = ser.copy() + ser2 = ser.reorder_levels(order=["two", "one"]) + + if using_copy_on_write: + assert np.shares_memory(ser2.values, ser.values) + else: + assert not np.shares_memory(ser2.values, ser.values) + + ser2.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(ser2.values, ser.values) + tm.assert_series_equal(ser, ser_orig) + + +@pytest.mark.parametrize("obj", [Series([1, 2, 3]), DataFrame({"a": [1, 2, 3]})]) +def test_swaplevel(using_copy_on_write, obj): + index = MultiIndex.from_tuples([(1, 1), (1, 2), (2, 1)], names=["one", "two"]) + obj.index = index + obj_orig = obj.copy() + obj2 = obj.swaplevel() + + if using_copy_on_write: + assert np.shares_memory(obj2.values, obj.values) + else: + assert not np.shares_memory(obj2.values, obj.values) + + obj2.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(obj2.values, obj.values) + tm.assert_equal(obj, obj_orig) + + +def test_frame_set_axis(using_copy_on_write): + # GH 49473 + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}) + df_orig = df.copy() + df2 = df.set_axis(["a", "b", "c"], axis="index") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column / block + df2.iloc[0, 0] = 0 + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_series_set_axis(using_copy_on_write): + # GH 49473 + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + ser2 = ser.set_axis(["a", "b", "c"], axis="index") + + if using_copy_on_write: + assert np.shares_memory(ser, ser2) + else: + assert not np.shares_memory(ser, ser2) + + # mutating ser triggers a copy-on-write for the column / block + ser2.iloc[0] = 0 + assert not np.shares_memory(ser2, ser) + tm.assert_series_equal(ser, ser_orig) + + +def test_set_flags(using_copy_on_write, warn_copy_on_write): + ser = Series([1, 2, 3]) + 
ser_orig = ser.copy() + ser2 = ser.set_flags(allows_duplicate_labels=False) + + assert np.shares_memory(ser, ser2) + + # mutating ser triggers a copy-on-write for the column / block + with tm.assert_cow_warning(warn_copy_on_write): + ser2.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(ser2, ser) + tm.assert_series_equal(ser, ser_orig) + else: + assert np.shares_memory(ser2, ser) + expected = Series([0, 2, 3]) + tm.assert_series_equal(ser, expected) + + +@pytest.mark.parametrize("kwargs", [{"mapper": "test"}, {"index": "test"}]) +def test_rename_axis(using_copy_on_write, kwargs): + df = DataFrame({"a": [1, 2, 3, 4]}, index=Index([1, 2, 3, 4], name="a")) + df_orig = df.copy() + df2 = df.rename_axis(**kwargs) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + df2.iloc[0, 0] = 0 + if using_copy_on_write: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize( + "func, tz", [("tz_convert", "Europe/Berlin"), ("tz_localize", None)] +) +def test_tz_convert_localize(using_copy_on_write, func, tz): + # GH 49473 + ser = Series( + [1, 2], index=date_range(start="2014-08-01 09:00", freq="h", periods=2, tz=tz) + ) + ser_orig = ser.copy() + ser2 = getattr(ser, func)("US/Central") + + if using_copy_on_write: + assert np.shares_memory(ser.values, ser2.values) + else: + assert not np.shares_memory(ser.values, ser2.values) + + # mutating ser triggers a copy-on-write for the column / block + ser2.iloc[0] = 0 + assert not np.shares_memory(ser2.values, ser.values) + tm.assert_series_equal(ser, ser_orig) + + +def test_droplevel(using_copy_on_write): + # GH 49473 + index = MultiIndex.from_tuples([(1, 1), (1, 2), (2, 1)], names=["one", "two"]) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}, index=index) + df_orig = df.copy() + df2 = df.droplevel(0) + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c")) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column / block + df2.iloc[0, 0] = 0 + + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "b"), get_array(df, "b")) + + tm.assert_frame_equal(df, df_orig) + + +def test_squeeze(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + df_orig = df.copy() + series = df.squeeze() + + # Should share memory regardless of CoW since squeeze is just an iloc + assert np.shares_memory(series.values, get_array(df, "a")) + + # mutating squeezed df triggers a copy-on-write for that column/block + with tm.assert_cow_warning(warn_copy_on_write): + series.iloc[0] = 0 + if using_copy_on_write: + assert not np.shares_memory(series.values, get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + else: + # Without CoW the original will be modified + assert np.shares_memory(series.values, get_array(df, "a")) + assert df.loc[0, "a"] == 0 + + +def test_items(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) + df_orig = df.copy() + + # Test this twice, since the second time, the item cache will be + # triggered, and we want 
to make sure it still works then. + for i in range(2): + for name, ser in df.items(): + assert np.shares_memory(get_array(ser, name), get_array(df, name)) + + # mutating df triggers a copy-on-write for that column / block + with tm.assert_cow_warning(warn_copy_on_write): + ser.iloc[0] = 0 + + if using_copy_on_write: + assert not np.shares_memory(get_array(ser, name), get_array(df, name)) + tm.assert_frame_equal(df, df_orig) + else: + # Original frame will be modified + assert df.loc[0, name] == 0 + + +@pytest.mark.parametrize("dtype", ["int64", "Int64"]) +def test_putmask(using_copy_on_write, dtype, warn_copy_on_write): + df = DataFrame({"a": [1, 2], "b": 1, "c": 2}, dtype=dtype) + view = df[:] + df_orig = df.copy() + with tm.assert_cow_warning(warn_copy_on_write): + df[df == df] = 5 + + if using_copy_on_write: + assert not np.shares_memory(get_array(view, "a"), get_array(df, "a")) + tm.assert_frame_equal(view, df_orig) + else: + # Without CoW the original will be modified + assert np.shares_memory(get_array(view, "a"), get_array(df, "a")) + assert view.iloc[0, 0] == 5 + + +@pytest.mark.parametrize("dtype", ["int64", "Int64"]) +def test_putmask_no_reference(using_copy_on_write, dtype): + df = DataFrame({"a": [1, 2], "b": 1, "c": 2}, dtype=dtype) + arr_a = get_array(df, "a") + df[df == df] = 5 + + if using_copy_on_write: + assert np.shares_memory(arr_a, get_array(df, "a")) + + +@pytest.mark.parametrize("dtype", ["float64", "Float64"]) +def test_putmask_aligns_rhs_no_reference(using_copy_on_write, dtype): + df = DataFrame({"a": [1.5, 2], "b": 1.5}, dtype=dtype) + arr_a = get_array(df, "a") + df[df == df] = DataFrame({"a": [5.5, 5]}) + + if using_copy_on_write: + assert np.shares_memory(arr_a, get_array(df, "a")) + + +@pytest.mark.parametrize( + "val, exp, warn", [(5.5, True, FutureWarning), (5, False, None)] +) +def test_putmask_dont_copy_some_blocks( + using_copy_on_write, val, exp, warn, warn_copy_on_write +): + df = DataFrame({"a": [1, 2], "b": 1, "c": 1.5}) + view = df[:] + df_orig = df.copy() + indexer = DataFrame( + [[True, False, False], [True, False, False]], columns=list("abc") + ) + if warn_copy_on_write: + with tm.assert_cow_warning(): + df[indexer] = val + else: + with tm.assert_produces_warning(warn, match="incompatible dtype"): + df[indexer] = val + + if using_copy_on_write: + assert not np.shares_memory(get_array(view, "a"), get_array(df, "a")) + # TODO(CoW): Could split blocks to avoid copying the whole block + assert np.shares_memory(get_array(view, "b"), get_array(df, "b")) is exp + assert np.shares_memory(get_array(view, "c"), get_array(df, "c")) + assert df._mgr._has_no_reference(1) is not exp + assert not df._mgr._has_no_reference(2) + tm.assert_frame_equal(view, df_orig) + elif val == 5: + # Without CoW the original will be modified, the other case upcasts, e.g. 
copy + assert np.shares_memory(get_array(view, "a"), get_array(df, "a")) + assert np.shares_memory(get_array(view, "c"), get_array(df, "c")) + assert view.iloc[0, 0] == 5 + + +@pytest.mark.parametrize("dtype", ["int64", "Int64"]) +@pytest.mark.parametrize( + "func", + [ + lambda ser: ser.where(ser > 0, 10), + lambda ser: ser.mask(ser <= 0, 10), + ], +) +def test_where_mask_noop(using_copy_on_write, dtype, func): + ser = Series([1, 2, 3], dtype=dtype) + ser_orig = ser.copy() + + result = func(ser) + + if using_copy_on_write: + assert np.shares_memory(get_array(ser), get_array(result)) + else: + assert not np.shares_memory(get_array(ser), get_array(result)) + + result.iloc[0] = 10 + if using_copy_on_write: + assert not np.shares_memory(get_array(ser), get_array(result)) + tm.assert_series_equal(ser, ser_orig) + + +@pytest.mark.parametrize("dtype", ["int64", "Int64"]) +@pytest.mark.parametrize( + "func", + [ + lambda ser: ser.where(ser < 0, 10), + lambda ser: ser.mask(ser >= 0, 10), + ], +) +def test_where_mask(using_copy_on_write, dtype, func): + ser = Series([1, 2, 3], dtype=dtype) + ser_orig = ser.copy() + + result = func(ser) + + assert not np.shares_memory(get_array(ser), get_array(result)) + tm.assert_series_equal(ser, ser_orig) + + +@pytest.mark.parametrize("dtype, val", [("int64", 10.5), ("Int64", 10)]) +@pytest.mark.parametrize( + "func", + [ + lambda df, val: df.where(df < 0, val), + lambda df, val: df.mask(df >= 0, val), + ], +) +def test_where_mask_noop_on_single_column(using_copy_on_write, dtype, val, func): + df = DataFrame({"a": [1, 2, 3], "b": [-4, -5, -6]}, dtype=dtype) + df_orig = df.copy() + + result = func(df, val) + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(result, "b")) + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + else: + assert not np.shares_memory(get_array(df, "b"), get_array(result, "b")) + + result.iloc[0, 1] = 10 + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "b"), get_array(result, "b")) + tm.assert_frame_equal(df, df_orig) + + +@pytest.mark.parametrize("func", ["mask", "where"]) +def test_chained_where_mask(using_copy_on_write, func): + df = DataFrame({"a": [1, 4, 2], "b": 1}) + df_orig = df.copy() + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + getattr(df["a"], func)(df["a"] > 2, 5, inplace=True) + tm.assert_frame_equal(df, df_orig) + + with tm.raises_chained_assignment_error(): + getattr(df[["a"]], func)(df["a"] > 2, 5, inplace=True) + tm.assert_frame_equal(df, df_orig) + else: + with tm.assert_produces_warning(FutureWarning, match="inplace method"): + getattr(df["a"], func)(df["a"] > 2, 5, inplace=True) + + with tm.assert_produces_warning(None): + with option_context("mode.chained_assignment", None): + getattr(df[["a"]], func)(df["a"] > 2, 5, inplace=True) + + with tm.assert_produces_warning(None): + with option_context("mode.chained_assignment", None): + getattr(df[df["a"] > 1], func)(df["a"] > 2, 5, inplace=True) + + +def test_asfreq_noop(using_copy_on_write): + df = DataFrame( + {"a": [0.0, None, 2.0, 3.0]}, + index=date_range("1/1/2000", periods=4, freq="min"), + ) + df_orig = df.copy() + df2 = df.asfreq(freq="min") + + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + # mutating df2 triggers a copy-on-write for that column / block + df2.iloc[0, 0] = 0 + + assert not np.shares_memory(get_array(df2, "a"), get_array(df, 
"a")) + tm.assert_frame_equal(df, df_orig) + + +def test_iterrows(using_copy_on_write): + df = DataFrame({"a": 0, "b": 1}, index=[1, 2, 3]) + df_orig = df.copy() + + for _, sub in df.iterrows(): + sub.iloc[0] = 100 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + + +def test_interpolate_creates_copy(using_copy_on_write, warn_copy_on_write): + # GH#51126 + df = DataFrame({"a": [1.5, np.nan, 3]}) + view = df[:] + expected = df.copy() + + with tm.assert_cow_warning(warn_copy_on_write): + df.ffill(inplace=True) + with tm.assert_cow_warning(warn_copy_on_write): + df.iloc[0, 0] = 100.5 + + if using_copy_on_write: + tm.assert_frame_equal(view, expected) + else: + expected = DataFrame({"a": [100.5, 1.5, 3]}) + tm.assert_frame_equal(view, expected) + + +def test_isetitem(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}) + df_orig = df.copy() + df2 = df.copy(deep=None) # Trigger a CoW + df2.isetitem(1, np.array([-1, -2, -3])) # This is inplace + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + assert np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + else: + assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + df2.loc[0, "a"] = 0 + tm.assert_frame_equal(df, df_orig) # Original is unchanged + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + else: + assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + + +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_isetitem_series(using_copy_on_write, dtype): + df = DataFrame({"a": [1, 2, 3], "b": np.array([4, 5, 6], dtype=dtype)}) + ser = Series([7, 8, 9]) + ser_orig = ser.copy() + df.isetitem(0, ser) + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "a"), get_array(ser)) + assert not df._mgr._has_no_reference(0) + + # mutating dataframe doesn't update series + df.loc[0, "a"] = 0 + tm.assert_series_equal(ser, ser_orig) + + # mutating series doesn't update dataframe + df = DataFrame({"a": [1, 2, 3], "b": np.array([4, 5, 6], dtype=dtype)}) + ser = Series([7, 8, 9]) + df.isetitem(0, ser) + + ser.loc[0] = 0 + expected = DataFrame({"a": [7, 8, 9], "b": np.array([4, 5, 6], dtype=dtype)}) + tm.assert_frame_equal(df, expected) + + +def test_isetitem_frame(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 2}) + rhs = DataFrame({"a": [4, 5, 6], "b": 2}) + df.isetitem([0, 1], rhs) + if using_copy_on_write: + assert np.shares_memory(get_array(df, "a"), get_array(rhs, "a")) + assert np.shares_memory(get_array(df, "b"), get_array(rhs, "b")) + assert not df._mgr._has_no_reference(0) + else: + assert not np.shares_memory(get_array(df, "a"), get_array(rhs, "a")) + assert not np.shares_memory(get_array(df, "b"), get_array(rhs, "b")) + expected = df.copy() + rhs.iloc[0, 0] = 100 + rhs.iloc[0, 1] = 100 + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("key", ["a", ["a"]]) +def test_get(using_copy_on_write, warn_copy_on_write, key): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df_orig = df.copy() + + result = df.get(key) + + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df, "a")) + result.iloc[0] = 0 + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + tm.assert_frame_equal(df, df_orig) + else: + # for non-CoW it depends on whether we got a 
Series or DataFrame, since that + # determines whether it is a view or a copy and whether a warning is raised + if warn_copy_on_write: + warn = FutureWarning if isinstance(key, str) else None + else: + warn = SettingWithCopyWarning if isinstance(key, list) else None + with option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(warn): + result.iloc[0] = 0 + + if isinstance(key, list): + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + +@pytest.mark.parametrize("axis, key", [(0, 0), (1, "a")]) +@pytest.mark.parametrize( + "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"] +) +def test_xs( + using_copy_on_write, warn_copy_on_write, using_array_manager, axis, key, dtype +): + single_block = (dtype == "int64") and not using_array_manager + is_view = single_block or (using_array_manager and axis == 1) + df = DataFrame( + {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)} + ) + df_orig = df.copy() + + result = df.xs(key, axis=axis) + + if axis == 1 or single_block: + assert np.shares_memory(get_array(df, "a"), get_array(result)) + elif using_copy_on_write: + assert result._mgr._has_no_reference(0) + + if using_copy_on_write or (is_view and not warn_copy_on_write): + result.iloc[0] = 0 + elif warn_copy_on_write: + with tm.assert_cow_warning(single_block or axis == 1): + result.iloc[0] = 0 + else: + with option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(SettingWithCopyWarning): + result.iloc[0] = 0 + + if using_copy_on_write or (not single_block and axis == 0): + tm.assert_frame_equal(df, df_orig) + else: + assert df.iloc[0, 0] == 0 + + +@pytest.mark.parametrize("axis", [0, 1]) +@pytest.mark.parametrize("key, level", [("l1", 0), (2, 1)]) +def test_xs_multiindex( + using_copy_on_write, warn_copy_on_write, using_array_manager, key, level, axis +): + arr = np.arange(18).reshape(6, 3) + index = MultiIndex.from_product([["l1", "l2"], [1, 2, 3]], names=["lev1", "lev2"]) + df = DataFrame(arr, index=index, columns=list("abc")) + if axis == 1: + df = df.transpose().copy() + df_orig = df.copy() + + result = df.xs(key, level=level, axis=axis) + + if level == 0: + assert np.shares_memory( + get_array(df, df.columns[0]), get_array(result, result.columns[0]) + ) + + if warn_copy_on_write: + warn = FutureWarning if level == 0 else None + elif not using_copy_on_write and not using_array_manager: + warn = SettingWithCopyWarning + else: + warn = None + with option_context("chained_assignment", "warn"): + with tm.assert_produces_warning(warn): + result.iloc[0, 0] = 0 + + tm.assert_frame_equal(df, df_orig) + + +def test_update_frame(using_copy_on_write, warn_copy_on_write): + df1 = DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]}) + df2 = DataFrame({"b": [100.0]}, index=[1]) + df1_orig = df1.copy() + view = df1[:] + + # TODO(CoW) better warning message?
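+ # update() writes into df1's existing blocks in place; the data is still + # shared with `view`, and that inplace write is what the warning flags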
+ with tm.assert_cow_warning(warn_copy_on_write): + df1.update(df2) + + expected = DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 100.0, 6.0]}) + tm.assert_frame_equal(df1, expected) + if using_copy_on_write: + # df1 is updated, but its view not + tm.assert_frame_equal(view, df1_orig) + assert np.shares_memory(get_array(df1, "a"), get_array(view, "a")) + assert not np.shares_memory(get_array(df1, "b"), get_array(view, "b")) + else: + tm.assert_frame_equal(view, expected) + + +def test_update_series(using_copy_on_write, warn_copy_on_write): + ser1 = Series([1.0, 2.0, 3.0]) + ser2 = Series([100.0], index=[1]) + ser1_orig = ser1.copy() + view = ser1[:] + + if warn_copy_on_write: + with tm.assert_cow_warning(): + ser1.update(ser2) + else: + ser1.update(ser2) + + expected = Series([1.0, 100.0, 3.0]) + tm.assert_series_equal(ser1, expected) + if using_copy_on_write: + # ser1 is updated, but its view not + tm.assert_series_equal(view, ser1_orig) + else: + tm.assert_series_equal(view, expected) + + +def test_update_chained_assignment(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + ser2 = Series([100.0], index=[1]) + df_orig = df.copy() + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].update(ser2) + tm.assert_frame_equal(df, df_orig) + + with tm.raises_chained_assignment_error(): + df[["a"]].update(ser2.to_frame()) + tm.assert_frame_equal(df, df_orig) + else: + with tm.assert_produces_warning(FutureWarning, match="inplace method"): + df["a"].update(ser2) + + with tm.assert_produces_warning(None): + with option_context("mode.chained_assignment", None): + df[["a"]].update(ser2.to_frame()) + + with tm.assert_produces_warning(None): + with option_context("mode.chained_assignment", None): + df[df["a"] > 1].update(ser2.to_frame()) + + +def test_inplace_arithmetic_series(using_copy_on_write): + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + data = get_array(ser) + ser *= 2 + if using_copy_on_write: + # https://github.com/pandas-dev/pandas/pull/55745 + # changed to NOT update inplace because there is no benefit (actual + # operation already done non-inplace). 
This was only for the optics + # of updating the backing array inplace, but we no longer want to make + # that guarantee + assert not np.shares_memory(get_array(ser), data) + tm.assert_numpy_array_equal(data, get_array(ser_orig)) + else: + assert np.shares_memory(get_array(ser), data) + tm.assert_numpy_array_equal(data, get_array(ser)) + + +def test_inplace_arithmetic_series_with_reference( + using_copy_on_write, warn_copy_on_write +): + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + view = ser[:] + with tm.assert_cow_warning(warn_copy_on_write): + ser *= 2 + if using_copy_on_write: + assert not np.shares_memory(get_array(ser), get_array(view)) + tm.assert_series_equal(ser_orig, view) + else: + assert np.shares_memory(get_array(ser), get_array(view)) + + +@pytest.mark.parametrize("copy", [True, False]) +def test_transpose(using_copy_on_write, copy, using_array_manager): + df = DataFrame({"a": [1, 2, 3], "b": 1}) + df_orig = df.copy() + result = df.transpose(copy=copy) + + if not copy and not using_array_manager or using_copy_on_write: + assert np.shares_memory(get_array(df, "a"), get_array(result, 0)) + else: + assert not np.shares_memory(get_array(df, "a"), get_array(result, 0)) + + result.iloc[0, 0] = 100 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + + +def test_transpose_different_dtypes(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1.5}) + df_orig = df.copy() + result = df.T + + assert not np.shares_memory(get_array(df, "a"), get_array(result, 0)) + result.iloc[0, 0] = 100 + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + + +def test_transpose_ea_single_column(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}, dtype="Int64") + result = df.T + + assert not np.shares_memory(get_array(df, "a"), get_array(result, 0)) + + +def test_transform_frame(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1}) + df_orig = df.copy() + + def func(ser): + ser.iloc[0] = 100 + return ser + + with tm.assert_cow_warning(warn_copy_on_write): + df.transform(func) + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + + +def test_transform_series(using_copy_on_write, warn_copy_on_write): + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + + def func(ser): + ser.iloc[0] = 100 + return ser + + with tm.assert_cow_warning(warn_copy_on_write): + ser.transform(func) + if using_copy_on_write: + tm.assert_series_equal(ser, ser_orig) + + +def test_count_read_only_array(): + df = DataFrame({"a": [1, 2], "b": 3}) + result = df.count() + result.iloc[0] = 100 + expected = Series([100, 2], index=["a", "b"]) + tm.assert_series_equal(result, expected) + + +def test_series_view(using_copy_on_write, warn_copy_on_write): + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + + with tm.assert_produces_warning(FutureWarning, match="is deprecated"): + ser2 = ser.view() + assert np.shares_memory(get_array(ser), get_array(ser2)) + if using_copy_on_write: + assert not ser2._mgr._has_no_reference(0) + + with tm.assert_cow_warning(warn_copy_on_write): + ser2.iloc[0] = 100 + + if using_copy_on_write: + tm.assert_series_equal(ser_orig, ser) + else: + expected = Series([100, 2, 3]) + tm.assert_series_equal(ser, expected) + + +def test_insert_series(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + ser = Series([1, 2, 3]) + ser_orig = ser.copy() + df.insert(loc=1, value=ser, column="b") + if using_copy_on_write: + assert np.shares_memory(get_array(ser), get_array(df, "b")) + assert not df._mgr._has_no_reference(1) + else: + assert not 
np.shares_memory(get_array(ser), get_array(df, "b")) + + df.iloc[0, 1] = 100 + tm.assert_series_equal(ser, ser_orig) + + +def test_eval(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1}) + df_orig = df.copy() + + result = df.eval("c = a+b") + if using_copy_on_write: + assert np.shares_memory(get_array(df, "a"), get_array(result, "a")) + else: + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + + result.iloc[0, 0] = 100 + tm.assert_frame_equal(df, df_orig) + + +def test_eval_inplace(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": 1}) + df_orig = df.copy() + df_view = df[:] + + df.eval("c = a+b", inplace=True) + assert np.shares_memory(get_array(df, "a"), get_array(df_view, "a")) + + with tm.assert_cow_warning(warn_copy_on_write): + df.iloc[0, 0] = 100 + if using_copy_on_write: + tm.assert_frame_equal(df_view, df_orig) + + +def test_apply_modify_row(using_copy_on_write, warn_copy_on_write): + # Case: applying a function on each row as a Series object, where the + # function mutates the row object (which needs to trigger CoW if row is a view) + df = DataFrame({"A": [1, 2], "B": [3, 4]}) + df_orig = df.copy() + + def transform(row): + row["B"] = 100 + return row + + with tm.assert_cow_warning(warn_copy_on_write): + df.apply(transform, axis=1) + + if using_copy_on_write: + tm.assert_frame_equal(df, df_orig) + else: + assert df.loc[0, "B"] == 100 + + # row Series is a copy + df = DataFrame({"A": [1, 2], "B": ["b", "c"]}) + df_orig = df.copy() + + with tm.assert_produces_warning(None): + df.apply(transform, axis=1) + + tm.assert_frame_equal(df, df_orig) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_replace.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_replace.py new file mode 100644 index 0000000000000000000000000000000000000000..6d16bc308388359b69e07d77a5fef153b4eb248f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_replace.py @@ -0,0 +1,481 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + DataFrame, + option_context, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + + +@pytest.mark.parametrize( + "replace_kwargs", + [ + {"to_replace": {"a": 1, "b": 4}, "value": -1}, + # Test CoW splits blocks to avoid copying unchanged columns + {"to_replace": {"a": 1}, "value": -1}, + {"to_replace": {"b": 4}, "value": -1}, + {"to_replace": {"b": {4: 1}}}, + # TODO: Add these in a further optimization + # We would need to see which columns got replaced in the mask + # which could be expensive + # {"to_replace": {"b": 1}}, + # 1 + ], +) +def test_replace(using_copy_on_write, replace_kwargs): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": ["foo", "bar", "baz"]}) + df_orig = df.copy() + + df_replaced = df.replace(**replace_kwargs) + + if using_copy_on_write: + if (df_replaced["b"] == df["b"]).all(): + assert np.shares_memory(get_array(df_replaced, "b"), get_array(df, "b")) + assert np.shares_memory(get_array(df_replaced, "c"), get_array(df, "c")) + + # mutating squeezed df triggers a copy-on-write for that column/block + df_replaced.loc[0, "c"] = -1 + if using_copy_on_write: + assert not np.shares_memory(get_array(df_replaced, "c"), get_array(df, "c")) + + if "a" in replace_kwargs["to_replace"]: + arr = get_array(df_replaced, "a") + df_replaced.loc[0, "a"] = 100 + assert np.shares_memory(get_array(df_replaced, "a"), arr) + tm.assert_frame_equal(df, df_orig) + + +def 
test_replace_regex_inplace_refs(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": ["aaa", "bbb"]}) + df_orig = df.copy() + view = df[:] + arr = get_array(df, "a") + with tm.assert_cow_warning(warn_copy_on_write): + df.replace(to_replace=r"^a.*$", value="new", inplace=True, regex=True) + if using_copy_on_write: + assert not np.shares_memory(arr, get_array(df, "a")) + assert df._mgr._has_no_reference(0) + tm.assert_frame_equal(view, df_orig) + else: + assert np.shares_memory(arr, get_array(df, "a")) + + +def test_replace_regex_inplace(using_copy_on_write): + df = DataFrame({"a": ["aaa", "bbb"]}) + arr = get_array(df, "a") + df.replace(to_replace=r"^a.*$", value="new", inplace=True, regex=True) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert np.shares_memory(arr, get_array(df, "a")) + + df_orig = df.copy() + df2 = df.replace(to_replace=r"^b.*$", value="new", regex=True) + tm.assert_frame_equal(df_orig, df) + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + +def test_replace_regex_inplace_no_op(using_copy_on_write): + df = DataFrame({"a": [1, 2]}) + arr = get_array(df, "a") + df.replace(to_replace=r"^a.$", value="new", inplace=True, regex=True) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert np.shares_memory(arr, get_array(df, "a")) + + df_orig = df.copy() + df2 = df.replace(to_replace=r"^x.$", value="new", regex=True) + tm.assert_frame_equal(df_orig, df) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + +def test_replace_mask_all_false_second_block(using_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3], "b": 100.5, "c": 1, "d": 2}) + df_orig = df.copy() + + df2 = df.replace(to_replace=1.5, value=55.5) + + if using_copy_on_write: + # TODO: Block splitting would allow us to avoid copying b + assert np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + else: + assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + df2.loc[0, "c"] = 1 + tm.assert_frame_equal(df, df_orig) # Original is unchanged + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + # TODO: This should split and not copy the whole block + # assert np.shares_memory(get_array(df, "d"), get_array(df2, "d")) + + +def test_replace_coerce_single_column(using_copy_on_write, using_array_manager): + df = DataFrame({"a": [1.5, 2, 3], "b": 100.5}) + df_orig = df.copy() + + df2 = df.replace(to_replace=1.5, value="a") + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + elif not using_array_manager: + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + if using_copy_on_write: + df2.loc[0, "b"] = 0.5 + tm.assert_frame_equal(df, df_orig) # Original is unchanged + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + + +def test_replace_to_replace_wrong_dtype(using_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3], "b": 100.5}) + df_orig = df.copy() + + df2 = df.replace(to_replace="xxx", value=1.5) + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert 
np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + else: + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + df2.loc[0, "b"] = 0.5 + tm.assert_frame_equal(df, df_orig) # Original is unchanged + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b")) + + +def test_replace_list_categorical(using_copy_on_write): + df = DataFrame({"a": ["a", "b", "c"]}, dtype="category") + arr = get_array(df, "a") + msg = ( + r"The behavior of Series\.replace \(and DataFrame.replace\) " + "with CategoricalDtype" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + df.replace(["c"], value="a", inplace=True) + assert np.shares_memory(arr.codes, get_array(df, "a").codes) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + df_orig = df.copy() + with tm.assert_produces_warning(FutureWarning, match=msg): + df2 = df.replace(["b"], value="a") + assert not np.shares_memory(arr.codes, get_array(df2, "a").codes) + + tm.assert_frame_equal(df, df_orig) + + +def test_replace_list_inplace_refs_categorical(using_copy_on_write): + df = DataFrame({"a": ["a", "b", "c"]}, dtype="category") + view = df[:] + df_orig = df.copy() + msg = ( + r"The behavior of Series\.replace \(and DataFrame.replace\) " + "with CategoricalDtype" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + df.replace(["c"], value="a", inplace=True) + if using_copy_on_write: + assert not np.shares_memory( + get_array(view, "a").codes, get_array(df, "a").codes + ) + tm.assert_frame_equal(df_orig, view) + else: + # This could be inplace + assert not np.shares_memory( + get_array(view, "a").codes, get_array(df, "a").codes + ) + + +@pytest.mark.parametrize("to_replace", [1.5, [1.5], []]) +def test_replace_inplace(using_copy_on_write, to_replace): + df = DataFrame({"a": [1.5, 2, 3]}) + arr_a = get_array(df, "a") + df.replace(to_replace=1.5, value=15.5, inplace=True) + + assert np.shares_memory(get_array(df, "a"), arr_a) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + +@pytest.mark.parametrize("to_replace", [1.5, [1.5]]) +def test_replace_inplace_reference(using_copy_on_write, to_replace, warn_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3]}) + arr_a = get_array(df, "a") + view = df[:] + with tm.assert_cow_warning(warn_copy_on_write): + df.replace(to_replace=to_replace, value=15.5, inplace=True) + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), arr_a) + assert df._mgr._has_no_reference(0) + assert view._mgr._has_no_reference(0) + else: + assert np.shares_memory(get_array(df, "a"), arr_a) + + +@pytest.mark.parametrize("to_replace", ["a", 100.5]) +def test_replace_inplace_reference_no_op(using_copy_on_write, to_replace): + df = DataFrame({"a": [1.5, 2, 3]}) + arr_a = get_array(df, "a") + view = df[:] + df.replace(to_replace=to_replace, value=15.5, inplace=True) + + assert np.shares_memory(get_array(df, "a"), arr_a) + if using_copy_on_write: + assert not df._mgr._has_no_reference(0) + assert not view._mgr._has_no_reference(0) + + +@pytest.mark.parametrize("to_replace", [1, [1]]) +@pytest.mark.parametrize("val", [1, 1.5]) +def test_replace_categorical_inplace_reference(using_copy_on_write, val, to_replace): + df = DataFrame({"a": Categorical([1, 2, 3])}) + df_orig = df.copy() + arr_a = get_array(df, "a") + view = df[:] + msg = ( + r"The behavior of Series\.replace \(and DataFrame.replace\) " + "with CategoricalDtype" + ) + warn = 
FutureWarning if val == 1.5 else None + with tm.assert_produces_warning(warn, match=msg): + df.replace(to_replace=to_replace, value=val, inplace=True) + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a").codes, arr_a.codes) + assert df._mgr._has_no_reference(0) + assert view._mgr._has_no_reference(0) + tm.assert_frame_equal(view, df_orig) + else: + assert np.shares_memory(get_array(df, "a").codes, arr_a.codes) + + +@pytest.mark.parametrize("val", [1, 1.5]) +def test_replace_categorical_inplace(using_copy_on_write, val): + df = DataFrame({"a": Categorical([1, 2, 3])}) + arr_a = get_array(df, "a") + msg = ( + r"The behavior of Series\.replace \(and DataFrame.replace\) " + "with CategoricalDtype" + ) + warn = FutureWarning if val == 1.5 else None + with tm.assert_produces_warning(warn, match=msg): + df.replace(to_replace=1, value=val, inplace=True) + + assert np.shares_memory(get_array(df, "a").codes, arr_a.codes) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + expected = DataFrame({"a": Categorical([val, 2, 3])}) + tm.assert_frame_equal(df, expected) + + +@pytest.mark.parametrize("val", [1, 1.5]) +def test_replace_categorical(using_copy_on_write, val): + df = DataFrame({"a": Categorical([1, 2, 3])}) + df_orig = df.copy() + msg = ( + r"The behavior of Series\.replace \(and DataFrame.replace\) " + "with CategoricalDtype" + ) + warn = FutureWarning if val == 1.5 else None + with tm.assert_produces_warning(warn, match=msg): + df2 = df.replace(to_replace=1, value=val) + + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert df2._mgr._has_no_reference(0) + assert not np.shares_memory(get_array(df, "a").codes, get_array(df2, "a").codes) + tm.assert_frame_equal(df, df_orig) + + arr_a = get_array(df2, "a").codes + df2.iloc[0, 0] = 2.0 + assert np.shares_memory(get_array(df2, "a").codes, arr_a) + + +@pytest.mark.parametrize("method", ["where", "mask"]) +def test_masking_inplace(using_copy_on_write, method, warn_copy_on_write): + df = DataFrame({"a": [1.5, 2, 3]}) + df_orig = df.copy() + arr_a = get_array(df, "a") + view = df[:] + + method = getattr(df, method) + if warn_copy_on_write: + with tm.assert_cow_warning(): + method(df["a"] > 1.6, -1, inplace=True) + else: + method(df["a"] > 1.6, -1, inplace=True) + + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), arr_a) + assert df._mgr._has_no_reference(0) + assert view._mgr._has_no_reference(0) + tm.assert_frame_equal(view, df_orig) + else: + assert np.shares_memory(get_array(df, "a"), arr_a) + + +def test_replace_empty_list(using_copy_on_write): + df = DataFrame({"a": [1, 2]}) + + df2 = df.replace([], []) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + assert not df._mgr._has_no_reference(0) + else: + assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + + arr_a = get_array(df, "a") + df.replace([], []) + if using_copy_on_write: + assert np.shares_memory(get_array(df, "a"), arr_a) + assert not df._mgr._has_no_reference(0) + assert not df2._mgr._has_no_reference(0) + + +@pytest.mark.parametrize("value", ["d", None]) +def test_replace_object_list_inplace(using_copy_on_write, value): + df = DataFrame({"a": ["a", "b", "c"]}) + arr = get_array(df, "a") + df.replace(["c"], value, inplace=True) + if using_copy_on_write or value is None: + assert np.shares_memory(arr, get_array(df, "a")) + else: + # This could be inplace + assert not np.shares_memory(arr, get_array(df, "a")) + if using_copy_on_write: + assert 
df._mgr._has_no_reference(0) + + +def test_replace_list_multiple_elements_inplace(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3]}) + arr = get_array(df, "a") + df.replace([1, 2], 4, inplace=True) + if using_copy_on_write: + assert np.shares_memory(arr, get_array(df, "a")) + assert df._mgr._has_no_reference(0) + else: + assert np.shares_memory(arr, get_array(df, "a")) + + +def test_replace_list_none(using_copy_on_write): + df = DataFrame({"a": ["a", "b", "c"]}) + + df_orig = df.copy() + df2 = df.replace(["b"], value=None) + tm.assert_frame_equal(df, df_orig) + + assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a")) + + +def test_replace_list_none_inplace_refs(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": ["a", "b", "c"]}) + arr = get_array(df, "a") + df_orig = df.copy() + view = df[:] + with tm.assert_cow_warning(warn_copy_on_write): + df.replace(["a"], value=None, inplace=True) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + assert not np.shares_memory(arr, get_array(df, "a")) + tm.assert_frame_equal(df_orig, view) + else: + assert np.shares_memory(arr, get_array(df, "a")) + + +def test_replace_columnwise_no_op_inplace(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}) + view = df[:] + df_orig = df.copy() + df.replace({"a": 10}, 100, inplace=True) + if using_copy_on_write: + assert np.shares_memory(get_array(view, "a"), get_array(df, "a")) + df.iloc[0, 0] = 100 + tm.assert_frame_equal(view, df_orig) + + +def test_replace_columnwise_no_op(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}) + df_orig = df.copy() + df2 = df.replace({"a": 10}, 100) + if using_copy_on_write: + assert np.shares_memory(get_array(df2, "a"), get_array(df, "a")) + df2.iloc[0, 0] = 100 + tm.assert_frame_equal(df, df_orig) + + +def test_replace_chained_assignment(using_copy_on_write): + df = DataFrame({"a": [1, np.nan, 2], "b": 1}) + df_orig = df.copy() + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + df["a"].replace(1, 100, inplace=True) + tm.assert_frame_equal(df, df_orig) + + with tm.raises_chained_assignment_error(): + df[["a"]].replace(1, 100, inplace=True) + tm.assert_frame_equal(df, df_orig) + else: + with tm.assert_produces_warning(None): + with option_context("mode.chained_assignment", None): + df[["a"]].replace(1, 100, inplace=True) + + with tm.assert_produces_warning(None): + with option_context("mode.chained_assignment", None): + df[df.a > 5].replace(1, 100, inplace=True) + + with tm.assert_produces_warning(FutureWarning, match="inplace method"): + df["a"].replace(1, 100, inplace=True) + + +def test_replace_listlike(using_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}) + df_orig = df.copy() + + result = df.replace([200, 201], [11, 11]) + if using_copy_on_write: + assert np.shares_memory(get_array(result, "a"), get_array(df, "a")) + else: + assert not np.shares_memory(get_array(result, "a"), get_array(df, "a")) + + result.iloc[0, 0] = 100 + tm.assert_frame_equal(df, df) + + result = df.replace([200, 2], [10, 10]) + assert not np.shares_memory(get_array(df, "a"), get_array(result, "a")) + tm.assert_frame_equal(df, df_orig) + + +def test_replace_listlike_inplace(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]}) + arr = get_array(df, "a") + df.replace([200, 2], [10, 11], inplace=True) + assert np.shares_memory(get_array(df, "a"), arr) + + view = df[:] + df_orig = df.copy() + with tm.assert_cow_warning(warn_copy_on_write): + 
df.replace([200, 3], [10, 11], inplace=True) + if using_copy_on_write: + assert not np.shares_memory(get_array(df, "a"), arr) + tm.assert_frame_equal(view, df_orig) + else: + assert np.shares_memory(get_array(df, "a"), arr) + tm.assert_frame_equal(df, view) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_setitem.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_setitem.py new file mode 100644 index 0000000000000000000000000000000000000000..bc3b939734534520f0cf7051dbc72989d0caf990 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_setitem.py @@ -0,0 +1,156 @@ +import numpy as np + +from pandas import ( + DataFrame, + Index, + MultiIndex, + RangeIndex, + Series, +) +import pandas._testing as tm +from pandas.tests.copy_view.util import get_array + +# ----------------------------------------------------------------------------- +# Copy/view behaviour for the values that are set in a DataFrame + + +def test_set_column_with_array(): + # Case: setting an array as a new column (df[col] = arr) copies that data + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + arr = np.array([1, 2, 3], dtype="int64") + + df["c"] = arr + + # the array data is copied + assert not np.shares_memory(get_array(df, "c"), arr) + # and thus modifying the array does not modify the DataFrame + arr[0] = 0 + tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c")) + + +def test_set_column_with_series(using_copy_on_write): + # Case: setting a series as a new column (df[col] = s) copies that data + # (with delayed copy with CoW) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + ser = Series([1, 2, 3]) + + df["c"] = ser + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "c"), get_array(ser)) + else: + # the series data is copied + assert not np.shares_memory(get_array(df, "c"), get_array(ser)) + + # and modifying the series does not modify the DataFrame + ser.iloc[0] = 0 + assert ser.iloc[0] == 0 + tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c")) + + +def test_set_column_with_index(using_copy_on_write): + # Case: setting an index as a new column (df[col] = idx) copies that data + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + idx = Index([1, 2, 3]) + + df["c"] = idx + + # the index data is copied + assert not np.shares_memory(get_array(df, "c"), idx.values) + + idx = RangeIndex(1, 4) + arr = idx.values + + df["d"] = idx + + assert not np.shares_memory(get_array(df, "d"), arr) + + +def test_set_columns_with_dataframe(using_copy_on_write): + # Case: setting a DataFrame as new columns copies that data + # (with delayed copy with CoW) + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + df2 = DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]}) + + df[["c", "d"]] = df2 + + if using_copy_on_write: + assert np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + else: + # the data is copied + assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c")) + + # and modifying the set DataFrame does not modify the original DataFrame + df2.iloc[0, 0] = 0 + tm.assert_series_equal(df["c"], Series([7, 8, 9], name="c")) + + +def test_setitem_series_no_copy(using_copy_on_write): + # Case: setting a Series as column into a DataFrame can delay copying that data + df = DataFrame({"a": [1, 2, 3]}) + rhs = Series([4, 5, 6]) + rhs_orig = rhs.copy() + + # adding a new column + df["b"] = rhs + if using_copy_on_write: + assert np.shares_memory(get_array(rhs), get_array(df, "b")) + + df.iloc[0, 1] = 100 + tm.assert_series_equal(rhs, 
rhs_orig) + + +def test_setitem_series_no_copy_single_block(using_copy_on_write): + # Overwriting an existing column that is a single block + df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]}) + rhs = Series([4, 5, 6]) + rhs_orig = rhs.copy() + + df["a"] = rhs + if using_copy_on_write: + assert np.shares_memory(get_array(rhs), get_array(df, "a")) + + df.iloc[0, 0] = 100 + tm.assert_series_equal(rhs, rhs_orig) + + +def test_setitem_series_no_copy_split_block(using_copy_on_write): + # Overwriting an existing column that is part of a larger block + df = DataFrame({"a": [1, 2, 3], "b": 1}) + rhs = Series([4, 5, 6]) + rhs_orig = rhs.copy() + + df["b"] = rhs + if using_copy_on_write: + assert np.shares_memory(get_array(rhs), get_array(df, "b")) + + df.iloc[0, 1] = 100 + tm.assert_series_equal(rhs, rhs_orig) + + +def test_setitem_series_column_midx_broadcasting(using_copy_on_write): + # Setting a Series to multiple columns will repeat the data + # (currently copying the data eagerly) + df = DataFrame( + [[1, 2, 3], [3, 4, 5]], + columns=MultiIndex.from_arrays([["a", "a", "b"], [1, 2, 3]]), + ) + rhs = Series([10, 11]) + df["a"] = rhs + assert not np.shares_memory(get_array(rhs), df._get_column_array(0)) + if using_copy_on_write: + assert df._mgr._has_no_reference(0) + + +def test_set_column_with_inplace_operator(using_copy_on_write, warn_copy_on_write): + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + + # this should not raise any warning + with tm.assert_produces_warning(None): + df["a"] += 1 + + # when it is not in a chain, then it should produce a warning + df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) + ser = df["a"] + with tm.assert_cow_warning(warn_copy_on_write): + ser += 1 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_util.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_util.py new file mode 100644 index 0000000000000000000000000000000000000000..ff55330d70b28c5459a4c0915dd93c8640a91add --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/test_util.py @@ -0,0 +1,14 @@ +import numpy as np + +from pandas import DataFrame +from pandas.tests.copy_view.util import get_array + + +def test_get_array_numpy(): + df = DataFrame({"a": [1, 2, 3]}) + assert np.shares_memory(get_array(df, "a"), get_array(df, "a")) + + +def test_get_array_masked(): + df = DataFrame({"a": [1, 2, 3]}, dtype="Int64") + assert np.shares_memory(get_array(df, "a"), get_array(df, "a")) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/copy_view/util.py b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/util.py new file mode 100644 index 0000000000000000000000000000000000000000..969334424936559767b0bca87093acfec52f9763 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/copy_view/util.py @@ -0,0 +1,30 @@ +from pandas import ( + Categorical, + Index, + Series, +) +from pandas.core.arrays import BaseMaskedArray + + +def get_array(obj, col=None): + """ + Helper method to get array for a DataFrame column or a Series. + + Equivalent of df[col].values, but without going through normal getitem, + which triggers tracking references / CoW (and we might be testing that + this is done by some other operation). 
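+
+ For masked arrays the raw "._data" ndarray is returned, and a
+ Categorical is returned as-is (matching the isinstance checks below).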
+ """ + if isinstance(obj, Index): + arr = obj._values + elif isinstance(obj, Series) and (col is None or obj.name == col): + arr = obj._values + else: + assert col is not None + icol = obj.columns.get_loc(col) + assert isinstance(icol, int) + arr = obj._get_column_array(icol) + if isinstance(arr, BaseMaskedArray): + return arr._data + elif isinstance(arr, Categorical): + return arr + return getattr(arr, "_ndarray", arr) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/tools/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/tools/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8264a5f4975db744d55e4d2f25ea8f31c3d81504 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_datetime.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_datetime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..805303c6bb9f1dd33de5585b19604976ac52520b Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_datetime.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_numeric.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_numeric.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98c2a37ea3efbb4dc7e07c9b09a98119f7446770 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_numeric.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_time.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_time.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cd2be63a4d81fb6d6817e6fbe2e58744c67d3e7f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_time.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_timedelta.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_timedelta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17faf34eb78f6405afaa77f6c2a1d65beaea2b97 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_timedelta.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/tools/test_to_datetime.py b/venv/lib/python3.10/site-packages/pandas/tests/tools/test_to_datetime.py new file mode 100644 index 0000000000000000000000000000000000000000..a1ed996dade8e976607537e30c322d470ffcae2d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/tools/test_to_datetime.py @@ -0,0 +1,3900 @@ +""" test to_datetime """ + +import calendar +from collections import deque +from datetime import ( + date, + datetime, + timedelta, + timezone, +) +from decimal import Decimal +import locale + +from dateutil.parser import parse +from dateutil.tz.tz 
import tzoffset +import numpy as np +import pytest +import pytz + +from pandas._libs import tslib +from pandas._libs.tslibs import ( + iNaT, + parsing, +) +from pandas.errors import ( + OutOfBoundsDatetime, + OutOfBoundsTimedelta, +) +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import is_datetime64_ns_dtype + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + NaT, + Series, + Timestamp, + date_range, + isna, + to_datetime, +) +import pandas._testing as tm +from pandas.core.arrays import DatetimeArray +from pandas.core.tools import datetimes as tools +from pandas.core.tools.datetimes import start_caching_at + +PARSING_ERR_MSG = ( + r"You might want to try:\n" + r" - passing `format` if your strings have a consistent format;\n" + r" - passing `format=\'ISO8601\'` if your strings are all ISO8601 " + r"but not necessarily in exactly the same format;\n" + r" - passing `format=\'mixed\'`, and the format will be inferred " + r"for each element individually. You might want to use `dayfirst` " + r"alongside this." +) + +pytestmark = pytest.mark.filterwarnings( + "ignore:errors='ignore' is deprecated:FutureWarning" +) + + +@pytest.fixture(params=[True, False]) +def cache(request): + """ + cache keyword to pass to to_datetime. + """ + return request.param + + +class TestTimeConversionFormats: + @pytest.mark.parametrize("readonly", [True, False]) + def test_to_datetime_readonly(self, readonly): + # GH#34857 + arr = np.array([], dtype=object) + if readonly: + arr.setflags(write=False) + result = to_datetime(arr) + expected = to_datetime([]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "format, expected", + [ + [ + "%d/%m/%Y", + [Timestamp("20000101"), Timestamp("20000201"), Timestamp("20000301")], + ], + [ + "%m/%d/%Y", + [Timestamp("20000101"), Timestamp("20000102"), Timestamp("20000103")], + ], + ], + ) + def test_to_datetime_format(self, cache, index_or_series, format, expected): + values = index_or_series(["1/1/2000", "1/2/2000", "1/3/2000"]) + result = to_datetime(values, format=format, cache=cache) + expected = index_or_series(expected) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "arg, expected, format", + [ + ["1/1/2000", "20000101", "%d/%m/%Y"], + ["1/1/2000", "20000101", "%m/%d/%Y"], + ["1/2/2000", "20000201", "%d/%m/%Y"], + ["1/2/2000", "20000102", "%m/%d/%Y"], + ["1/3/2000", "20000301", "%d/%m/%Y"], + ["1/3/2000", "20000103", "%m/%d/%Y"], + ], + ) + def test_to_datetime_format_scalar(self, cache, arg, expected, format): + result = to_datetime(arg, format=format, cache=cache) + expected = Timestamp(expected) + assert result == expected + + def test_to_datetime_format_YYYYMMDD(self, cache): + ser = Series([19801222, 19801222] + [19810105] * 5) + expected = Series([Timestamp(x) for x in ser.apply(str)]) + + result = to_datetime(ser, format="%Y%m%d", cache=cache) + tm.assert_series_equal(result, expected) + + result = to_datetime(ser.apply(str), format="%Y%m%d", cache=cache) + tm.assert_series_equal(result, expected) + + def test_to_datetime_format_YYYYMMDD_with_nat(self, cache): + # Explicit cast to float to explicit cast when setting np.nan + ser = Series([19801222, 19801222] + [19810105] * 5, dtype="float") + # with NaT + expected = Series( + [Timestamp("19801222"), Timestamp("19801222")] + [Timestamp("19810105")] * 5 + ) + expected[2] = np.nan + ser[2] = np.nan + + result = to_datetime(ser, format="%Y%m%d", cache=cache) + tm.assert_series_equal(result, expected) + + # string 
with NaT + ser2 = ser.apply(str) + ser2[2] = "nat" + with pytest.raises( + ValueError, + match=( + 'unconverted data remains when parsing with format "%Y%m%d": ".0", ' + "at position 0" + ), + ): + # https://github.com/pandas-dev/pandas/issues/50051 + to_datetime(ser2, format="%Y%m%d", cache=cache) + + def test_to_datetime_format_YYYYMM_with_nat(self, cache): + # https://github.com/pandas-dev/pandas/issues/50237 + # Explicit cast to float to explicit cast when setting np.nan + ser = Series([198012, 198012] + [198101] * 5, dtype="float") + expected = Series( + [Timestamp("19801201"), Timestamp("19801201")] + [Timestamp("19810101")] * 5 + ) + expected[2] = np.nan + ser[2] = np.nan + result = to_datetime(ser, format="%Y%m", cache=cache) + tm.assert_series_equal(result, expected) + + def test_to_datetime_format_YYYYMMDD_ignore(self, cache): + # coercion + # GH 7930, GH 14487 + ser = Series([20121231, 20141231, 99991231]) + result = to_datetime(ser, format="%Y%m%d", errors="ignore", cache=cache) + expected = Series( + [20121231, 20141231, 99991231], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + def test_to_datetime_format_YYYYMMDD_ignore_with_outofbounds(self, cache): + # https://github.com/pandas-dev/pandas/issues/26493 + result = to_datetime( + ["15010101", "20150101", np.nan], + format="%Y%m%d", + errors="ignore", + cache=cache, + ) + expected = Index(["15010101", "20150101", np.nan], dtype=object) + tm.assert_index_equal(result, expected) + + def test_to_datetime_format_YYYYMMDD_coercion(self, cache): + # coercion + # GH 7930 + ser = Series([20121231, 20141231, 99991231]) + result = to_datetime(ser, format="%Y%m%d", errors="coerce", cache=cache) + expected = Series(["20121231", "20141231", "NaT"], dtype="M8[ns]") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "input_s", + [ + # Null values with Strings + ["19801222", "20010112", None], + ["19801222", "20010112", np.nan], + ["19801222", "20010112", NaT], + ["19801222", "20010112", "NaT"], + # Null values with Integers + [19801222, 20010112, None], + [19801222, 20010112, np.nan], + [19801222, 20010112, NaT], + [19801222, 20010112, "NaT"], + ], + ) + def test_to_datetime_format_YYYYMMDD_with_none(self, input_s): + # GH 30011 + # format='%Y%m%d' + # with None + expected = Series([Timestamp("19801222"), Timestamp("20010112"), NaT]) + result = Series(to_datetime(input_s, format="%Y%m%d")) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "input_s, expected", + [ + # NaN before strings with invalid date values + [ + Series(["19801222", np.nan, "20010012", "10019999"]), + Series([Timestamp("19801222"), np.nan, np.nan, np.nan]), + ], + # NaN after strings with invalid date values + [ + Series(["19801222", "20010012", "10019999", np.nan]), + Series([Timestamp("19801222"), np.nan, np.nan, np.nan]), + ], + # NaN before integers with invalid date values + [ + Series([20190813, np.nan, 20010012, 20019999]), + Series([Timestamp("20190813"), np.nan, np.nan, np.nan]), + ], + # NaN after integers with invalid date values + [ + Series([20190813, 20010012, np.nan, 20019999]), + Series([Timestamp("20190813"), np.nan, np.nan, np.nan]), + ], + ], + ) + def test_to_datetime_format_YYYYMMDD_overflow(self, input_s, expected): + # GH 25512 + # format='%Y%m%d', errors='coerce' + result = to_datetime(input_s, format="%Y%m%d", errors="coerce") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "data, format, expected", + [ + ([pd.NA], "%Y%m%d%H%M%S", 
DatetimeIndex(["NaT"])), + ([pd.NA], None, DatetimeIndex(["NaT"])), + ( + [pd.NA, "20210202202020"], + "%Y%m%d%H%M%S", + DatetimeIndex(["NaT", "2021-02-02 20:20:20"]), + ), + (["201010", pd.NA], "%y%m%d", DatetimeIndex(["2020-10-10", "NaT"])), + (["201010", pd.NA], "%d%m%y", DatetimeIndex(["2010-10-20", "NaT"])), + ([None, np.nan, pd.NA], None, DatetimeIndex(["NaT", "NaT", "NaT"])), + ([None, np.nan, pd.NA], "%Y%m%d", DatetimeIndex(["NaT", "NaT", "NaT"])), + ], + ) + def test_to_datetime_with_NA(self, data, format, expected): + # GH#42957 + result = to_datetime(data, format=format) + tm.assert_index_equal(result, expected) + + def test_to_datetime_with_NA_with_warning(self): + # GH#42957 + result = to_datetime(["201010", pd.NA]) + expected = DatetimeIndex(["2010-10-20", "NaT"]) + tm.assert_index_equal(result, expected) + + def test_to_datetime_format_integer(self, cache): + # GH 10178 + ser = Series([2000, 2001, 2002]) + expected = Series([Timestamp(x) for x in ser.apply(str)]) + + result = to_datetime(ser, format="%Y", cache=cache) + tm.assert_series_equal(result, expected) + + ser = Series([200001, 200105, 200206]) + expected = Series([Timestamp(x[:4] + "-" + x[4:]) for x in ser.apply(str)]) + + result = to_datetime(ser, format="%Y%m", cache=cache) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "int_date, expected", + [ + # valid date, length == 8 + [20121030, datetime(2012, 10, 30)], + # short valid date, length == 6 + [199934, datetime(1999, 3, 4)], + # long integer date partially parsed to datetime(2012,1,1), length > 8 + [2012010101, 2012010101], + # invalid date partially parsed to datetime(2012,9,9), length == 8 + [20129930, 20129930], + # short integer date partially parsed to datetime(2012,9,9), length < 8 + [2012993, 2012993], + # short invalid date, length == 4 + [2121, 2121], + ], + ) + def test_int_to_datetime_format_YYYYMMDD_typeerror(self, int_date, expected): + # GH 26583 + result = to_datetime(int_date, format="%Y%m%d", errors="ignore") + assert result == expected + + def test_to_datetime_format_microsecond(self, cache): + month_abbr = calendar.month_abbr[4] + val = f"01-{month_abbr}-2011 00:00:01.978" + + format = "%d-%b-%Y %H:%M:%S.%f" + result = to_datetime(val, format=format, cache=cache) + exp = datetime.strptime(val, format) + assert result == exp + + @pytest.mark.parametrize( + "value, format, dt", + [ + ["01/10/2010 15:20", "%m/%d/%Y %H:%M", Timestamp("2010-01-10 15:20")], + ["01/10/2010 05:43", "%m/%d/%Y %I:%M", Timestamp("2010-01-10 05:43")], + [ + "01/10/2010 13:56:01", + "%m/%d/%Y %H:%M:%S", + Timestamp("2010-01-10 13:56:01"), + ], + # The 3 tests below are locale-dependent. + # They pass, except when the machine locale is zh_CN or it_IT . 
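+ # (the %p directive matches the locale's AM/PM strings, hence the
+ # xfail marks on the three params below)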
+ pytest.param( + "01/10/2010 08:14 PM", + "%m/%d/%Y %I:%M %p", + Timestamp("2010-01-10 20:14"), + marks=pytest.mark.xfail( + locale.getlocale()[0] in ("zh_CN", "it_IT"), + reason="fail on a CI build with LC_ALL=zh_CN.utf8/it_IT.utf8", + strict=False, + ), + ), + pytest.param( + "01/10/2010 07:40 AM", + "%m/%d/%Y %I:%M %p", + Timestamp("2010-01-10 07:40"), + marks=pytest.mark.xfail( + locale.getlocale()[0] in ("zh_CN", "it_IT"), + reason="fail on a CI build with LC_ALL=zh_CN.utf8/it_IT.utf8", + strict=False, + ), + ), + pytest.param( + "01/10/2010 09:12:56 AM", + "%m/%d/%Y %I:%M:%S %p", + Timestamp("2010-01-10 09:12:56"), + marks=pytest.mark.xfail( + locale.getlocale()[0] in ("zh_CN", "it_IT"), + reason="fail on a CI build with LC_ALL=zh_CN.utf8/it_IT.utf8", + strict=False, + ), + ), + ], + ) + def test_to_datetime_format_time(self, cache, value, format, dt): + assert to_datetime(value, format=format, cache=cache) == dt + + @td.skip_if_not_us_locale + def test_to_datetime_with_non_exact(self, cache): + # GH 10834 + # 8904 + # exact kw + ser = Series( + ["19MAY11", "foobar19MAY11", "19MAY11:00:00:00", "19MAY11 00:00:00Z"] + ) + result = to_datetime(ser, format="%d%b%y", exact=False, cache=cache) + expected = to_datetime( + ser.str.extract(r"(\d+\w+\d+)", expand=False), format="%d%b%y", cache=cache + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "format, expected", + [ + ("%Y-%m-%d", Timestamp(2000, 1, 3)), + ("%Y-%d-%m", Timestamp(2000, 3, 1)), + ("%Y-%m-%d %H", Timestamp(2000, 1, 3, 12)), + ("%Y-%d-%m %H", Timestamp(2000, 3, 1, 12)), + ("%Y-%m-%d %H:%M", Timestamp(2000, 1, 3, 12, 34)), + ("%Y-%d-%m %H:%M", Timestamp(2000, 3, 1, 12, 34)), + ("%Y-%m-%d %H:%M:%S", Timestamp(2000, 1, 3, 12, 34, 56)), + ("%Y-%d-%m %H:%M:%S", Timestamp(2000, 3, 1, 12, 34, 56)), + ("%Y-%m-%d %H:%M:%S.%f", Timestamp(2000, 1, 3, 12, 34, 56, 123456)), + ("%Y-%d-%m %H:%M:%S.%f", Timestamp(2000, 3, 1, 12, 34, 56, 123456)), + ( + "%Y-%m-%d %H:%M:%S.%f%z", + Timestamp(2000, 1, 3, 12, 34, 56, 123456, tz="UTC+01:00"), + ), + ( + "%Y-%d-%m %H:%M:%S.%f%z", + Timestamp(2000, 3, 1, 12, 34, 56, 123456, tz="UTC+01:00"), + ), + ], + ) + def test_non_exact_doesnt_parse_whole_string(self, cache, format, expected): + # https://github.com/pandas-dev/pandas/issues/50412 + # the formats alternate between ISO8601 and non-ISO8601 to check both paths + result = to_datetime( + "2000-01-03 12:34:56.123456+01:00", format=format, exact=False + ) + assert result == expected + + @pytest.mark.parametrize( + "arg", + [ + "2012-01-01 09:00:00.000000001", + "2012-01-01 09:00:00.000001", + "2012-01-01 09:00:00.001", + "2012-01-01 09:00:00.001000", + "2012-01-01 09:00:00.001000000", + ], + ) + def test_parse_nanoseconds_with_formula(self, cache, arg): + # GH8989 + # truncating the nanoseconds when a format was provided + expected = to_datetime(arg, cache=cache) + result = to_datetime(arg, format="%Y-%m-%d %H:%M:%S.%f", cache=cache) + assert result == expected + + @pytest.mark.parametrize( + "value,fmt,expected", + [ + ["2009324", "%Y%W%w", Timestamp("2009-08-13")], + ["2013020", "%Y%U%w", Timestamp("2013-01-13")], + ], + ) + def test_to_datetime_format_weeks(self, value, fmt, expected, cache): + assert to_datetime(value, format=fmt, cache=cache) == expected + + @pytest.mark.parametrize( + "fmt,dates,expected_dates", + [ + [ + "%Y-%m-%d %H:%M:%S %Z", + ["2010-01-01 12:00:00 UTC"] * 2, + [Timestamp("2010-01-01 12:00:00", tz="UTC")] * 2, + ], + [ + "%Y-%m-%d %H:%M:%S%z", + ["2010-01-01 12:00:00+0100"] * 2, + [ + 
Timestamp( + "2010-01-01 12:00:00", tzinfo=timezone(timedelta(minutes=60)) + ) + ] + * 2, + ], + [ + "%Y-%m-%d %H:%M:%S %z", + ["2010-01-01 12:00:00 +0100"] * 2, + [ + Timestamp( + "2010-01-01 12:00:00", tzinfo=timezone(timedelta(minutes=60)) + ) + ] + * 2, + ], + [ + "%Y-%m-%d %H:%M:%S %z", + ["2010-01-01 12:00:00 Z", "2010-01-01 12:00:00 Z"], + [ + Timestamp( + "2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0) + ), # pytz coerces to UTC + Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)), + ], + ], + ], + ) + def test_to_datetime_parse_tzname_or_tzoffset(self, fmt, dates, expected_dates): + # GH 13486 + result = to_datetime(dates, format=fmt) + expected = Index(expected_dates) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "fmt,dates,expected_dates", + [ + [ + "%Y-%m-%d %H:%M:%S %Z", + [ + "2010-01-01 12:00:00 UTC", + "2010-01-01 12:00:00 GMT", + "2010-01-01 12:00:00 US/Pacific", + ], + [ + Timestamp("2010-01-01 12:00:00", tz="UTC"), + Timestamp("2010-01-01 12:00:00", tz="GMT"), + Timestamp("2010-01-01 12:00:00", tz="US/Pacific"), + ], + ], + [ + "%Y-%m-%d %H:%M:%S %z", + ["2010-01-01 12:00:00 +0100", "2010-01-01 12:00:00 -0100"], + [ + Timestamp( + "2010-01-01 12:00:00", tzinfo=timezone(timedelta(minutes=60)) + ), + Timestamp( + "2010-01-01 12:00:00", tzinfo=timezone(timedelta(minutes=-60)) + ), + ], + ], + ], + ) + def test_to_datetime_parse_tzname_or_tzoffset_utc_false_deprecated( + self, fmt, dates, expected_dates + ): + # GH 13486, 50887 + msg = "parsing datetimes with mixed time zones will raise an error" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_datetime(dates, format=fmt) + expected = Index(expected_dates) + tm.assert_equal(result, expected) + + def test_to_datetime_parse_tzname_or_tzoffset_different_tz_to_utc(self): + # GH 32792 + dates = [ + "2010-01-01 12:00:00 +0100", + "2010-01-01 12:00:00 -0100", + "2010-01-01 12:00:00 +0300", + "2010-01-01 12:00:00 +0400", + ] + expected_dates = [ + "2010-01-01 11:00:00+00:00", + "2010-01-01 13:00:00+00:00", + "2010-01-01 09:00:00+00:00", + "2010-01-01 08:00:00+00:00", + ] + fmt = "%Y-%m-%d %H:%M:%S %z" + + result = to_datetime(dates, format=fmt, utc=True) + expected = DatetimeIndex(expected_dates) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "offset", ["+0", "-1foo", "UTCbar", ":10", "+01:000:01", ""] + ) + def test_to_datetime_parse_timezone_malformed(self, offset): + fmt = "%Y-%m-%d %H:%M:%S %z" + date = "2010-01-01 12:00:00 " + offset + + msg = "|".join( + [ + r'^time data ".*" doesn\'t match format ".*", at position 0. ' + f"{PARSING_ERR_MSG}$", + r'^unconverted data remains when parsing with format ".*": ".*", ' + f"at position 0. {PARSING_ERR_MSG}$", + ] + ) + with pytest.raises(ValueError, match=msg): + to_datetime([date], format=fmt) + + def test_to_datetime_parse_timezone_keeps_name(self): + # GH 21697 + fmt = "%Y-%m-%d %H:%M:%S %z" + arg = Index(["2010-01-01 12:00:00 Z"], name="foo") + result = to_datetime(arg, format=fmt) + expected = DatetimeIndex(["2010-01-01 12:00:00"], tz="UTC", name="foo") + tm.assert_index_equal(result, expected) + + +class TestToDatetime: + @pytest.mark.filterwarnings("ignore:Could not infer format") + def test_to_datetime_overflow(self): + # we should get an OutOfBoundsDatetime, NOT OverflowError + # TODO: Timestamp raises ValueError("could not convert string to Timestamp") + # can we make these more consistent? 
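+ # the string below parses to a point far outside the datetime64[ns]
+ # bounds (roughly years 1677-2262), hence OutOfBoundsDatetime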
+ arg = "08335394550" + msg = 'Parsing "08335394550" to datetime overflows, at position 0' + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(arg) + + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime([arg]) + + res = to_datetime(arg, errors="coerce") + assert res is NaT + res = to_datetime([arg], errors="coerce") + tm.assert_index_equal(res, Index([NaT])) + + res = to_datetime(arg, errors="ignore") + assert isinstance(res, str) and res == arg + res = to_datetime([arg], errors="ignore") + tm.assert_index_equal(res, Index([arg], dtype=object)) + + def test_to_datetime_mixed_datetime_and_string(self): + # GH#47018 adapted old doctest with new behavior + d1 = datetime(2020, 1, 1, 17, tzinfo=timezone(-timedelta(hours=1))) + d2 = datetime(2020, 1, 1, 18, tzinfo=timezone(-timedelta(hours=1))) + res = to_datetime(["2020-01-01 17:00 -0100", d2]) + expected = to_datetime([d1, d2]).tz_convert(timezone(timedelta(minutes=-60))) + tm.assert_index_equal(res, expected) + + def test_to_datetime_mixed_string_and_numeric(self): + # GH#55780 np.array(vals) would incorrectly cast the number to str + vals = ["2016-01-01", 0] + expected = DatetimeIndex([Timestamp(x) for x in vals]) + result = to_datetime(vals, format="mixed") + result2 = to_datetime(vals[::-1], format="mixed")[::-1] + result3 = DatetimeIndex(vals) + result4 = DatetimeIndex(vals[::-1])[::-1] + + tm.assert_index_equal(result, expected) + tm.assert_index_equal(result2, expected) + tm.assert_index_equal(result3, expected) + tm.assert_index_equal(result4, expected) + + @pytest.mark.parametrize( + "format", ["%Y-%m-%d", "%Y-%d-%m"], ids=["ISO8601", "non-ISO8601"] + ) + def test_to_datetime_mixed_date_and_string(self, format): + # https://github.com/pandas-dev/pandas/issues/50108 + d1 = date(2020, 1, 2) + res = to_datetime(["2020-01-01", d1], format=format) + expected = DatetimeIndex(["2020-01-01", "2020-01-02"], dtype="M8[ns]") + tm.assert_index_equal(res, expected) + + @pytest.mark.parametrize( + "fmt", + ["%Y-%d-%m %H:%M:%S%z", "%Y-%m-%d %H:%M:%S%z"], + ids=["non-ISO8601 format", "ISO8601 format"], + ) + @pytest.mark.parametrize( + "utc, args, expected", + [ + pytest.param( + True, + ["2000-01-01 01:00:00-08:00", "2000-01-01 02:00:00-08:00"], + DatetimeIndex( + ["2000-01-01 09:00:00+00:00", "2000-01-01 10:00:00+00:00"], + dtype="datetime64[ns, UTC]", + ), + id="all tz-aware, with utc", + ), + pytest.param( + False, + ["2000-01-01 01:00:00+00:00", "2000-01-01 02:00:00+00:00"], + DatetimeIndex( + ["2000-01-01 01:00:00+00:00", "2000-01-01 02:00:00+00:00"], + ), + id="all tz-aware, without utc", + ), + pytest.param( + True, + ["2000-01-01 01:00:00-08:00", "2000-01-01 02:00:00+00:00"], + DatetimeIndex( + ["2000-01-01 09:00:00+00:00", "2000-01-01 02:00:00+00:00"], + dtype="datetime64[ns, UTC]", + ), + id="all tz-aware, mixed offsets, with utc", + ), + pytest.param( + True, + ["2000-01-01 01:00:00", "2000-01-01 02:00:00+00:00"], + DatetimeIndex( + ["2000-01-01 01:00:00+00:00", "2000-01-01 02:00:00+00:00"], + dtype="datetime64[ns, UTC]", + ), + id="tz-aware string, naive pydatetime, with utc", + ), + ], + ) + @pytest.mark.parametrize( + "constructor", + [Timestamp, lambda x: Timestamp(x).to_pydatetime()], + ) + def test_to_datetime_mixed_datetime_and_string_with_format( + self, fmt, utc, args, expected, constructor + ): + # https://github.com/pandas-dev/pandas/issues/49298 + # https://github.com/pandas-dev/pandas/issues/50254 + # note: ISO8601 formats go down a fastpath, so we need to check both + # a ISO8601 format and a 
non-ISO8601 one + ts1 = constructor(args[0]) + ts2 = args[1] + result = to_datetime([ts1, ts2], format=fmt, utc=utc) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "fmt", + ["%Y-%d-%m %H:%M:%S%z", "%Y-%m-%d %H:%M:%S%z"], + ids=["non-ISO8601 format", "ISO8601 format"], + ) + @pytest.mark.parametrize( + "constructor", + [Timestamp, lambda x: Timestamp(x).to_pydatetime()], + ) + def test_to_datetime_mixed_datetime_and_string_with_format_mixed_offsets_utc_false( + self, fmt, constructor + ): + # https://github.com/pandas-dev/pandas/issues/49298 + # https://github.com/pandas-dev/pandas/issues/50254 + # note: ISO8601 formats go down a fastpath, so we need to check both + # a ISO8601 format and a non-ISO8601 one + args = ["2000-01-01 01:00:00", "2000-01-01 02:00:00+00:00"] + ts1 = constructor(args[0]) + ts2 = args[1] + msg = "parsing datetimes with mixed time zones will raise an error" + + expected = Index( + [ + Timestamp("2000-01-01 01:00:00"), + Timestamp("2000-01-01 02:00:00+0000", tz="UTC"), + ], + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_datetime([ts1, ts2], format=fmt, utc=False) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "fmt, expected", + [ + pytest.param( + "%Y-%m-%d %H:%M:%S%z", + Index( + [ + Timestamp("2000-01-01 09:00:00+0100", tz="UTC+01:00"), + Timestamp("2000-01-02 02:00:00+0200", tz="UTC+02:00"), + NaT, + ] + ), + id="ISO8601, non-UTC", + ), + pytest.param( + "%Y-%d-%m %H:%M:%S%z", + Index( + [ + Timestamp("2000-01-01 09:00:00+0100", tz="UTC+01:00"), + Timestamp("2000-02-01 02:00:00+0200", tz="UTC+02:00"), + NaT, + ] + ), + id="non-ISO8601, non-UTC", + ), + ], + ) + def test_to_datetime_mixed_offsets_with_none_tz(self, fmt, expected): + # https://github.com/pandas-dev/pandas/issues/50071 + msg = "parsing datetimes with mixed time zones will raise an error" + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_datetime( + ["2000-01-01 09:00:00+01:00", "2000-01-02 02:00:00+02:00", None], + format=fmt, + utc=False, + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "fmt, expected", + [ + pytest.param( + "%Y-%m-%d %H:%M:%S%z", + DatetimeIndex( + ["2000-01-01 08:00:00+00:00", "2000-01-02 00:00:00+00:00", "NaT"], + dtype="datetime64[ns, UTC]", + ), + id="ISO8601, UTC", + ), + pytest.param( + "%Y-%d-%m %H:%M:%S%z", + DatetimeIndex( + ["2000-01-01 08:00:00+00:00", "2000-02-01 00:00:00+00:00", "NaT"], + dtype="datetime64[ns, UTC]", + ), + id="non-ISO8601, UTC", + ), + ], + ) + def test_to_datetime_mixed_offsets_with_none(self, fmt, expected): + # https://github.com/pandas-dev/pandas/issues/50071 + result = to_datetime( + ["2000-01-01 09:00:00+01:00", "2000-01-02 02:00:00+02:00", None], + format=fmt, + utc=True, + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "fmt", + ["%Y-%d-%m %H:%M:%S%z", "%Y-%m-%d %H:%M:%S%z"], + ids=["non-ISO8601 format", "ISO8601 format"], + ) + @pytest.mark.parametrize( + "args", + [ + pytest.param( + ["2000-01-01 01:00:00-08:00", "2000-01-01 02:00:00-07:00"], + id="all tz-aware, mixed timezones, without utc", + ), + ], + ) + @pytest.mark.parametrize( + "constructor", + [Timestamp, lambda x: Timestamp(x).to_pydatetime()], + ) + def test_to_datetime_mixed_datetime_and_string_with_format_raises( + self, fmt, args, constructor + ): + # https://github.com/pandas-dev/pandas/issues/49298 + # note: ISO8601 formats go down a fastpath, so we need to check both + # a ISO8601 format and a non-ISO8601 one 
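+ # without utc=True, mixed fixed offsets cannot be represented in a
+ # single datetime64 dtype, hence the ValueError expected below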
+ ts1 = constructor(args[0]) + ts2 = constructor(args[1]) + with pytest.raises( + ValueError, match="cannot be converted to datetime64 unless utc=True" + ): + to_datetime([ts1, ts2], format=fmt, utc=False) + + def test_to_datetime_np_str(self): + # GH#32264 + # GH#48969 + value = np.str_("2019-02-04 10:18:46.297000+0000") + + ser = Series([value]) + + exp = Timestamp("2019-02-04 10:18:46.297000", tz="UTC") + + assert to_datetime(value) == exp + assert to_datetime(ser.iloc[0]) == exp + + res = to_datetime([value]) + expected = Index([exp]) + tm.assert_index_equal(res, expected) + + res = to_datetime(ser) + expected = Series(expected) + tm.assert_series_equal(res, expected) + + @pytest.mark.parametrize( + "s, _format, dt", + [ + ["2015-1-1", "%G-%V-%u", datetime(2014, 12, 29, 0, 0)], + ["2015-1-4", "%G-%V-%u", datetime(2015, 1, 1, 0, 0)], + ["2015-1-7", "%G-%V-%u", datetime(2015, 1, 4, 0, 0)], + ], + ) + def test_to_datetime_iso_week_year_format(self, s, _format, dt): + # See GH#16607 + assert to_datetime(s, format=_format) == dt + + @pytest.mark.parametrize( + "msg, s, _format", + [ + [ + "ISO week directive '%V' is incompatible with the year directive " + "'%Y'. Use the ISO year '%G' instead.", + "1999 50", + "%Y %V", + ], + [ + "ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 51", + "%G %V", + ], + [ + "ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 Monday", + "%G %A", + ], + [ + "ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 Mon", + "%G %a", + ], + [ + "ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 6", + "%G %w", + ], + [ + "ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 6", + "%G %u", + ], + [ + "ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", + "2051", + "%G", + ], + [ + "Day of the year directive '%j' is not compatible with ISO year " + "directive '%G'. Use '%Y' instead.", + "1999 51 6 256", + "%G %V %u %j", + ], + [ + "ISO week directive '%V' is incompatible with the year directive " + "'%Y'. Use the ISO year '%G' instead.", + "1999 51 Sunday", + "%Y %V %A", + ], + [ + "ISO week directive '%V' is incompatible with the year directive " + "'%Y'. Use the ISO year '%G' instead.", + "1999 51 Sun", + "%Y %V %a", + ], + [ + "ISO week directive '%V' is incompatible with the year directive " + "'%Y'. Use the ISO year '%G' instead.", + "1999 51 1", + "%Y %V %w", + ], + [ + "ISO week directive '%V' is incompatible with the year directive " + "'%Y'. 
Use the ISO year '%G' instead.", + "1999 51 1", + "%Y %V %u", + ], + [ + "ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", + "20", + "%V", + ], + [ + "ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 51 Sunday", + "%V %A", + ], + [ + "ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 51 Sun", + "%V %a", + ], + [ + "ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 51 1", + "%V %w", + ], + [ + "ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 51 1", + "%V %u", + ], + [ + "Day of the year directive '%j' is not compatible with ISO year " + "directive '%G'. Use '%Y' instead.", + "1999 50", + "%G %j", + ], + [ + "ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", + "20 Monday", + "%V %A", + ], + ], + ) + @pytest.mark.parametrize("errors", ["raise", "coerce", "ignore"]) + def test_error_iso_week_year(self, msg, s, _format, errors): + # See GH#16607, GH#50308 + # This test checks for errors thrown when giving the wrong format + # However, as discussed on PR#25541, overriding the locale + # causes a different error to be thrown due to the format being + # locale specific, but the test data is in english. + # Therefore, the tests only run when locale is not overwritten, + # as a sort of solution to this problem. + if locale.getlocale() != ("zh_CN", "UTF-8") and locale.getlocale() != ( + "it_IT", + "UTF-8", + ): + with pytest.raises(ValueError, match=msg): + to_datetime(s, format=_format, errors=errors) + + @pytest.mark.parametrize("tz", [None, "US/Central"]) + def test_to_datetime_dtarr(self, tz): + # DatetimeArray + dti = date_range("1965-04-03", periods=19, freq="2W", tz=tz) + arr = dti._data + + result = to_datetime(arr) + assert result is arr + + # Doesn't work on Windows since tzpath not set correctly + @td.skip_if_windows + @pytest.mark.parametrize("arg_class", [Series, Index]) + @pytest.mark.parametrize("utc", [True, False]) + @pytest.mark.parametrize("tz", [None, "US/Central"]) + def test_to_datetime_arrow(self, tz, utc, arg_class): + pa = pytest.importorskip("pyarrow") + + dti = date_range("1965-04-03", periods=19, freq="2W", tz=tz) + dti = arg_class(dti) + + dti_arrow = dti.astype(pd.ArrowDtype(pa.timestamp(unit="ns", tz=tz))) + + result = to_datetime(dti_arrow, utc=utc) + expected = to_datetime(dti, utc=utc).astype( + pd.ArrowDtype(pa.timestamp(unit="ns", tz=tz if not utc else "UTC")) + ) + if not utc and arg_class is not Series: + # Doesn't hold for utc=True, since that will astype + # to_datetime also returns a new object for series + assert result is dti_arrow + if arg_class is Series: + tm.assert_series_equal(result, expected) + else: + tm.assert_index_equal(result, expected) + + def test_to_datetime_pydatetime(self): + actual = to_datetime(datetime(2008, 1, 15)) + assert actual == datetime(2008, 1, 15) + + def test_to_datetime_YYYYMMDD(self): + actual = to_datetime("20080115") + assert actual == datetime(2008, 1, 15) + + def test_to_datetime_unparsable_ignore(self): + # unparsable + ser = "Month 1, 1999" + assert to_datetime(ser, errors="ignore") == ser + + @td.skip_if_windows # 
`tm.set_timezone` does not work on Windows + def test_to_datetime_now(self): + # See GH#18666 + with tm.set_timezone("US/Eastern"): + # GH#18705 + now = Timestamp("now").as_unit("ns") + pdnow = to_datetime("now") + pdnow2 = to_datetime(["now"])[0] + + # These should all be equal with infinite perf; this gives + # a generous margin of 10 seconds + assert abs(pdnow._value - now._value) < 1e10 + assert abs(pdnow2._value - now._value) < 1e10 + + assert pdnow.tzinfo is None + assert pdnow2.tzinfo is None + + @td.skip_if_windows # `tm.set_timezone` does not work on Windows + @pytest.mark.parametrize("tz", ["Pacific/Auckland", "US/Samoa"]) + def test_to_datetime_today(self, tz): + # See GH#18666 + # Test with one timezone far ahead of UTC and another far behind, so + # one of these will _almost_ always be in a different day from UTC. + # Unfortunately, between 12 and 1 AM Samoa time both of these + # timezones _and_ UTC will all be in the same day, so this test will + # not detect the regression introduced in #18666. + with tm.set_timezone(tz): + nptoday = np.datetime64("today").astype("datetime64[ns]").astype(np.int64) + pdtoday = to_datetime("today") + pdtoday2 = to_datetime(["today"])[0] + + tstoday = Timestamp("today").as_unit("ns") + tstoday2 = Timestamp.today().as_unit("ns") + + # These should all be equal with infinite perf; this gives + # a generous margin of 10 seconds + assert abs(pdtoday.normalize()._value - nptoday) < 1e10 + assert abs(pdtoday2.normalize()._value - nptoday) < 1e10 + assert abs(pdtoday._value - tstoday._value) < 1e10 + assert abs(pdtoday._value - tstoday2._value) < 1e10 + + assert pdtoday.tzinfo is None + assert pdtoday2.tzinfo is None + + @pytest.mark.parametrize("arg", ["now", "today"]) + def test_to_datetime_today_now_unicode_bytes(self, arg): + to_datetime([arg]) + + @pytest.mark.parametrize( + "format, expected_ds", + [ + ("%Y-%m-%d %H:%M:%S%z", "2020-01-03"), + ("%Y-%d-%m %H:%M:%S%z", "2020-03-01"), + (None, "2020-01-03"), + ], + ) + @pytest.mark.parametrize( + "string, attribute", + [ + ("now", "utcnow"), + ("today", "today"), + ], + ) + def test_to_datetime_now_with_format(self, format, expected_ds, string, attribute): + # https://github.com/pandas-dev/pandas/issues/50359 + result = to_datetime(["2020-01-03 00:00:00Z", string], format=format, utc=True) + expected = DatetimeIndex( + [expected_ds, getattr(Timestamp, attribute)()], dtype="datetime64[ns, UTC]" + ) + assert (expected - result).max().total_seconds() < 1 + + @pytest.mark.parametrize( + "dt", [np.datetime64("2000-01-01"), np.datetime64("2000-01-02")] + ) + def test_to_datetime_dt64s(self, cache, dt): + assert to_datetime(dt, cache=cache) == Timestamp(dt) + + @pytest.mark.parametrize( + "arg, format", + [ + ("2001-01-01", "%Y-%m-%d"), + ("01-01-2001", "%d-%m-%Y"), + ], + ) + def test_to_datetime_dt64s_and_str(self, arg, format): + # https://github.com/pandas-dev/pandas/issues/50036 + result = to_datetime([arg, np.datetime64("2020-01-01")], format=format) + expected = DatetimeIndex(["2001-01-01", "2020-01-01"]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "dt", [np.datetime64("1000-01-01"), np.datetime64("5000-01-02")] + ) + @pytest.mark.parametrize("errors", ["raise", "ignore", "coerce"]) + def test_to_datetime_dt64s_out_of_ns_bounds(self, cache, dt, errors): + # GH#50369 We cast to the nearest supported reso, i.e. 
"s" + ts = to_datetime(dt, errors=errors, cache=cache) + assert isinstance(ts, Timestamp) + assert ts.unit == "s" + assert ts.asm8 == dt + + ts = Timestamp(dt) + assert ts.unit == "s" + assert ts.asm8 == dt + + @pytest.mark.skip_ubsan + def test_to_datetime_dt64d_out_of_bounds(self, cache): + dt64 = np.datetime64(np.iinfo(np.int64).max, "D") + + msg = "Out of bounds second timestamp: 25252734927768524-07-27" + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp(dt64) + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(dt64, errors="raise", cache=cache) + + assert to_datetime(dt64, errors="coerce", cache=cache) is NaT + + @pytest.mark.parametrize("unit", ["s", "D"]) + def test_to_datetime_array_of_dt64s(self, cache, unit): + # https://github.com/pandas-dev/pandas/issues/31491 + # Need at least 50 to ensure cache is used. + dts = [ + np.datetime64("2000-01-01", unit), + np.datetime64("2000-01-02", unit), + ] * 30 + # Assuming all datetimes are in bounds, to_datetime() returns + # an array that is equal to Timestamp() parsing + result = to_datetime(dts, cache=cache) + if cache: + # FIXME: behavior should not depend on cache + expected = DatetimeIndex([Timestamp(x).asm8 for x in dts], dtype="M8[s]") + else: + expected = DatetimeIndex([Timestamp(x).asm8 for x in dts], dtype="M8[ns]") + + tm.assert_index_equal(result, expected) + + # A list of datetimes where the last one is out of bounds + dts_with_oob = dts + [np.datetime64("9999-01-01")] + + # As of GH#51978 we do not raise in this case + to_datetime(dts_with_oob, errors="raise") + + result = to_datetime(dts_with_oob, errors="coerce", cache=cache) + if not cache: + # FIXME: shouldn't depend on cache! + expected = DatetimeIndex( + [Timestamp(dts_with_oob[0]).asm8, Timestamp(dts_with_oob[1]).asm8] * 30 + + [NaT], + ) + else: + expected = DatetimeIndex(np.array(dts_with_oob, dtype="M8[s]")) + tm.assert_index_equal(result, expected) + + # With errors='ignore', out of bounds datetime64s + # are converted to their .item(), which depending on the version of + # numpy is either a python datetime.datetime or datetime.date + result = to_datetime(dts_with_oob, errors="ignore", cache=cache) + if not cache: + # FIXME: shouldn't depend on cache! 
+ expected = Index(dts_with_oob) + tm.assert_index_equal(result, expected) + + def test_out_of_bounds_errors_ignore(self): + # https://github.com/pandas-dev/pandas/issues/50587 + result = to_datetime(np.datetime64("9999-01-01"), errors="ignore") + expected = np.datetime64("9999-01-01") + assert result == expected + + def test_out_of_bounds_errors_ignore2(self): + # GH#12424 + msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = to_datetime( + Series(["2362-01-01", np.nan], dtype=object), errors="ignore" + ) + exp = Series(["2362-01-01", np.nan], dtype=object) + tm.assert_series_equal(res, exp) + + def test_to_datetime_tz(self, cache): + # xref 8260 + # uniform returns a DatetimeIndex + arr = [ + Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), + Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"), + ] + result = to_datetime(arr, cache=cache) + expected = DatetimeIndex( + ["2013-01-01 13:00:00", "2013-01-02 14:00:00"], tz="US/Pacific" + ) + tm.assert_index_equal(result, expected) + + def test_to_datetime_tz_mixed(self, cache): + # mixed tzs will raise if errors='raise' + # https://github.com/pandas-dev/pandas/issues/50585 + arr = [ + Timestamp("2013-01-01 13:00:00", tz="US/Pacific"), + Timestamp("2013-01-02 14:00:00", tz="US/Eastern"), + ] + msg = ( + "Tz-aware datetime.datetime cannot be " + "converted to datetime64 unless utc=True" + ) + with pytest.raises(ValueError, match=msg): + to_datetime(arr, cache=cache) + + depr_msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + result = to_datetime(arr, cache=cache, errors="ignore") + expected = Index( + [ + Timestamp("2013-01-01 13:00:00-08:00"), + Timestamp("2013-01-02 14:00:00-05:00"), + ], + dtype="object", + ) + tm.assert_index_equal(result, expected) + result = to_datetime(arr, cache=cache, errors="coerce") + expected = DatetimeIndex( + ["2013-01-01 13:00:00-08:00", "NaT"], dtype="datetime64[ns, US/Pacific]" + ) + tm.assert_index_equal(result, expected) + + def test_to_datetime_different_offsets(self, cache): + # inspired by asv timeseries.ToDatetimeNONISO8601 benchmark + # see GH-26097 for more + ts_string_1 = "March 1, 2018 12:00:00+0400" + ts_string_2 = "March 1, 2018 12:00:00+0500" + arr = [ts_string_1] * 5 + [ts_string_2] * 5 + expected = Index([parse(x) for x in arr]) + msg = "parsing datetimes with mixed time zones will raise an error" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_datetime(arr, cache=cache) + tm.assert_index_equal(result, expected) + + def test_to_datetime_tz_pytz(self, cache): + # see gh-8260 + us_eastern = pytz.timezone("US/Eastern") + arr = np.array( + [ + us_eastern.localize( + datetime(year=2000, month=1, day=1, hour=3, minute=0) + ), + us_eastern.localize( + datetime(year=2000, month=6, day=1, hour=3, minute=0) + ), + ], + dtype=object, + ) + result = to_datetime(arr, utc=True, cache=cache) + expected = DatetimeIndex( + ["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"], + dtype="datetime64[ns, UTC]", + freq=None, + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "init_constructor, end_constructor", + [ + (Index, DatetimeIndex), + (list, DatetimeIndex), + (np.array, DatetimeIndex), + (Series, Series), + ], + ) + def test_to_datetime_utc_true(self, cache, init_constructor, end_constructor): + # See gh-11934 & gh-6415 + data = ["20100102 121314", "20100102 121315"] + expected_data = [ + Timestamp("2010-01-02 12:13:14", tz="utc"), + 
Timestamp("2010-01-02 12:13:15", tz="utc"), + ] + + result = to_datetime( + init_constructor(data), format="%Y%m%d %H%M%S", utc=True, cache=cache + ) + expected = end_constructor(expected_data) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "scalar, expected", + [ + ["20100102 121314", Timestamp("2010-01-02 12:13:14", tz="utc")], + ["20100102 121315", Timestamp("2010-01-02 12:13:15", tz="utc")], + ], + ) + def test_to_datetime_utc_true_scalar(self, cache, scalar, expected): + # Test scalar case as well + result = to_datetime(scalar, format="%Y%m%d %H%M%S", utc=True, cache=cache) + assert result == expected + + def test_to_datetime_utc_true_with_series_single_value(self, cache): + # GH 15760 UTC=True with Series + ts = 1.5e18 + result = to_datetime(Series([ts]), utc=True, cache=cache) + expected = Series([Timestamp(ts, tz="utc")]) + tm.assert_series_equal(result, expected) + + def test_to_datetime_utc_true_with_series_tzaware_string(self, cache): + ts = "2013-01-01 00:00:00-01:00" + expected_ts = "2013-01-01 01:00:00" + data = Series([ts] * 3) + result = to_datetime(data, utc=True, cache=cache) + expected = Series([Timestamp(expected_ts, tz="utc")] * 3) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "date, dtype", + [ + ("2013-01-01 01:00:00", "datetime64[ns]"), + ("2013-01-01 01:00:00", "datetime64[ns, UTC]"), + ], + ) + def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype): + expected = Series( + [Timestamp("2013-01-01 01:00:00", tz="UTC")], dtype="M8[ns, UTC]" + ) + result = to_datetime(Series([date], dtype=dtype), utc=True, cache=cache) + tm.assert_series_equal(result, expected) + + def test_to_datetime_tz_psycopg2(self, request, cache): + # xref 8260 + psycopg2_tz = pytest.importorskip("psycopg2.tz") + + # misc cases + tz1 = psycopg2_tz.FixedOffsetTimezone(offset=-300, name=None) + tz2 = psycopg2_tz.FixedOffsetTimezone(offset=-240, name=None) + arr = np.array( + [ + datetime(2000, 1, 1, 3, 0, tzinfo=tz1), + datetime(2000, 6, 1, 3, 0, tzinfo=tz2), + ], + dtype=object, + ) + + result = to_datetime(arr, errors="coerce", utc=True, cache=cache) + expected = DatetimeIndex( + ["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"], + dtype="datetime64[ns, UTC]", + freq=None, + ) + tm.assert_index_equal(result, expected) + + # dtype coercion + i = DatetimeIndex( + ["2000-01-01 08:00:00"], + tz=psycopg2_tz.FixedOffsetTimezone(offset=-300, name=None), + ) + assert is_datetime64_ns_dtype(i) + + # tz coercion + result = to_datetime(i, errors="coerce", cache=cache) + tm.assert_index_equal(result, i) + + result = to_datetime(i, errors="coerce", utc=True, cache=cache) + expected = DatetimeIndex(["2000-01-01 13:00:00"], dtype="datetime64[ns, UTC]") + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("arg", [True, False]) + def test_datetime_bool(self, cache, arg): + # GH13176 + msg = r"dtype bool cannot be converted to datetime64\[ns\]" + with pytest.raises(TypeError, match=msg): + to_datetime(arg) + assert to_datetime(arg, errors="coerce", cache=cache) is NaT + assert to_datetime(arg, errors="ignore", cache=cache) is arg + + def test_datetime_bool_arrays_mixed(self, cache): + msg = f"{type(cache)} is not convertible to datetime" + with pytest.raises(TypeError, match=msg): + to_datetime([False, datetime.today()], cache=cache) + with pytest.raises( + ValueError, + match=( + r'^time data "True" doesn\'t match format "%Y%m%d", ' + f"at position 1. 
{PARSING_ERR_MSG}$" + ), + ): + to_datetime(["20130101", True], cache=cache) + tm.assert_index_equal( + to_datetime([0, False, NaT, 0.0], errors="coerce", cache=cache), + DatetimeIndex( + [to_datetime(0, cache=cache), NaT, NaT, to_datetime(0, cache=cache)] + ), + ) + + @pytest.mark.parametrize("arg", [bool, to_datetime]) + def test_datetime_invalid_datatype(self, arg): + # GH13176 + msg = "is not convertible to datetime" + with pytest.raises(TypeError, match=msg): + to_datetime(arg) + + @pytest.mark.parametrize("errors", ["coerce", "raise", "ignore"]) + def test_invalid_format_raises(self, errors): + # https://github.com/pandas-dev/pandas/issues/50255 + with pytest.raises( + ValueError, match="':' is a bad directive in format 'H%:M%:S%" + ): + to_datetime(["00:00:00"], format="H%:M%:S%", errors=errors) + + @pytest.mark.parametrize("value", ["a", "00:01:99"]) + @pytest.mark.parametrize("format", [None, "%H:%M:%S"]) + def test_datetime_invalid_scalar(self, value, format): + # GH24763 + res = to_datetime(value, errors="ignore", format=format) + assert res == value + + res = to_datetime(value, errors="coerce", format=format) + assert res is NaT + + msg = "|".join( + [ + r'^time data "a" doesn\'t match format "%H:%M:%S", at position 0. ' + f"{PARSING_ERR_MSG}$", + r'^Given date string "a" not likely a datetime, at position 0$', + r'^unconverted data remains when parsing with format "%H:%M:%S": "9", ' + f"at position 0. {PARSING_ERR_MSG}$", + r"^second must be in 0..59: 00:01:99, at position 0$", + ] + ) + with pytest.raises(ValueError, match=msg): + to_datetime(value, errors="raise", format=format) + + @pytest.mark.parametrize("value", ["3000/12/11 00:00:00"]) + @pytest.mark.parametrize("format", [None, "%H:%M:%S"]) + def test_datetime_outofbounds_scalar(self, value, format): + # GH24763 + res = to_datetime(value, errors="ignore", format=format) + assert res == value + + res = to_datetime(value, errors="coerce", format=format) + assert res is NaT + + if format is not None: + msg = r'^time data ".*" doesn\'t match format ".*", at position 0.' + with pytest.raises(ValueError, match=msg): + to_datetime(value, errors="raise", format=format) + else: + msg = "^Out of bounds .*, at position 0$" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(value, errors="raise", format=format) + + @pytest.mark.parametrize( + ("values"), [(["a"]), (["00:01:99"]), (["a", "b", "99:00:00"])] + ) + @pytest.mark.parametrize("format", [(None), ("%H:%M:%S")]) + def test_datetime_invalid_index(self, values, format): + # GH24763 + # Not great to have logic in tests, but this one's hard to + # parametrise over + if format is None and len(values) > 1: + warn = UserWarning + else: + warn = None + with tm.assert_produces_warning( + warn, match="Could not infer format", raise_on_extra_warnings=False + ): + res = to_datetime(values, errors="ignore", format=format) + tm.assert_index_equal(res, Index(values, dtype=object)) + + with tm.assert_produces_warning( + warn, match="Could not infer format", raise_on_extra_warnings=False + ): + res = to_datetime(values, errors="coerce", format=format) + tm.assert_index_equal(res, DatetimeIndex([NaT] * len(values))) + + msg = "|".join( + [ + r'^Given date string "a" not likely a datetime, at position 0$', + r'^time data "a" doesn\'t match format "%H:%M:%S", at position 0. ' + f"{PARSING_ERR_MSG}$", + r'^unconverted data remains when parsing with format "%H:%M:%S": "9", ' + f"at position 0. 
{PARSING_ERR_MSG}$", + r"^second must be in 0..59: 00:01:99, at position 0$", + ] + ) + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning( + warn, match="Could not infer format", raise_on_extra_warnings=False + ): + to_datetime(values, errors="raise", format=format) + + @pytest.mark.parametrize("utc", [True, None]) + @pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None]) + @pytest.mark.parametrize("constructor", [list, tuple, np.array, Index, deque]) + def test_to_datetime_cache(self, utc, format, constructor): + date = "20130101 00:00:00" + test_dates = [date] * 10**5 + data = constructor(test_dates) + + result = to_datetime(data, utc=utc, format=format, cache=True) + expected = to_datetime(data, utc=utc, format=format, cache=False) + + tm.assert_index_equal(result, expected) + + def test_to_datetime_from_deque(self): + # GH 29403 + result = to_datetime(deque([Timestamp("2010-06-02 09:30:00")] * 51)) + expected = to_datetime([Timestamp("2010-06-02 09:30:00")] * 51) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("utc", [True, None]) + @pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None]) + def test_to_datetime_cache_series(self, utc, format): + date = "20130101 00:00:00" + test_dates = [date] * 10**5 + data = Series(test_dates) + result = to_datetime(data, utc=utc, format=format, cache=True) + expected = to_datetime(data, utc=utc, format=format, cache=False) + tm.assert_series_equal(result, expected) + + def test_to_datetime_cache_scalar(self): + date = "20130101 00:00:00" + result = to_datetime(date, cache=True) + expected = Timestamp("20130101 00:00:00") + assert result == expected + + @pytest.mark.parametrize( + "datetimelikes,expected_values", + ( + ( + (None, np.nan) + (NaT,) * start_caching_at, + (NaT,) * (start_caching_at + 2), + ), + ( + (None, Timestamp("2012-07-26")) + (NaT,) * start_caching_at, + (NaT, Timestamp("2012-07-26")) + (NaT,) * start_caching_at, + ), + ( + (None,) + + (NaT,) * start_caching_at + + ("2012 July 26", Timestamp("2012-07-26")), + (NaT,) * (start_caching_at + 1) + + (Timestamp("2012-07-26"), Timestamp("2012-07-26")), + ), + ), + ) + def test_convert_object_to_datetime_with_cache( + self, datetimelikes, expected_values + ): + # GH#39882 + ser = Series( + datetimelikes, + dtype="object", + ) + result_series = to_datetime(ser, errors="coerce") + expected_series = Series( + expected_values, + dtype="datetime64[ns]", + ) + tm.assert_series_equal(result_series, expected_series) + + @pytest.mark.parametrize("cache", [True, False]) + @pytest.mark.parametrize( + "input", + [ + Series([NaT] * 20 + [None] * 20, dtype="object"), + Series([NaT] * 60 + [None] * 60, dtype="object"), + Series([None] * 20), + Series([None] * 60), + Series([""] * 20), + Series([""] * 60), + Series([pd.NA] * 20), + Series([pd.NA] * 60), + Series([np.nan] * 20), + Series([np.nan] * 60), + ], + ) + def test_to_datetime_converts_null_like_to_nat(self, cache, input): + # GH35888 + expected = Series([NaT] * len(input), dtype="M8[ns]") + result = to_datetime(input, cache=cache) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "date, format", + [ + ("2017-20", "%Y-%W"), + ("20 Sunday", "%W %A"), + ("20 Sun", "%W %a"), + ("2017-21", "%Y-%U"), + ("20 Sunday", "%U %A"), + ("20 Sun", "%U %a"), + ], + ) + def test_week_without_day_and_calendar_year(self, date, format): + # GH16774 + + msg = "Cannot use '%W' or '%U' without day and year" + with pytest.raises(ValueError, match=msg): + to_datetime(date, format=format) 
+ + def test_to_datetime_coerce(self): + # GH 26122 + ts_strings = [ + "March 1, 2018 12:00:00+0400", + "March 1, 2018 12:00:00+0500", + "20100240", + ] + msg = "parsing datetimes with mixed time zones will raise an error" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_datetime(ts_strings, errors="coerce") + expected = Index( + [ + datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 14400)), + datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 18000)), + NaT, + ] + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "string_arg, format", + [("March 1, 2018", "%B %d, %Y"), ("2018-03-01", "%Y-%m-%d")], + ) + @pytest.mark.parametrize( + "outofbounds", + [ + datetime(9999, 1, 1), + date(9999, 1, 1), + np.datetime64("9999-01-01"), + "January 1, 9999", + "9999-01-01", + ], + ) + def test_to_datetime_coerce_oob(self, string_arg, format, outofbounds): + # https://github.com/pandas-dev/pandas/issues/50255 + ts_strings = [string_arg, outofbounds] + result = to_datetime(ts_strings, errors="coerce", format=format) + expected = DatetimeIndex([datetime(2018, 3, 1), NaT]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "errors, expected", + [ + ("coerce", Index([NaT, NaT])), + ("ignore", Index(["200622-12-31", "111111-24-11"], dtype=object)), + ], + ) + def test_to_datetime_malformed_no_raise(self, errors, expected): + # GH 28299 + # GH 48633 + ts_strings = ["200622-12-31", "111111-24-11"] + with tm.assert_produces_warning( + UserWarning, match="Could not infer format", raise_on_extra_warnings=False + ): + result = to_datetime(ts_strings, errors=errors) + tm.assert_index_equal(result, expected) + + def test_to_datetime_malformed_raise(self): + # GH 48633 + ts_strings = ["200622-12-31", "111111-24-11"] + msg = ( + 'Parsed string "200622-12-31" gives an invalid tzoffset, which must ' + r"be between -timedelta\(hours=24\) and timedelta\(hours=24\), " + "at position 0" + ) + with pytest.raises( + ValueError, + match=msg, + ): + with tm.assert_produces_warning( + UserWarning, match="Could not infer format" + ): + to_datetime( + ts_strings, + errors="raise", + ) + + def test_iso_8601_strings_with_same_offset(self): + # GH 17697, 11736 + ts_str = "2015-11-18 15:30:00+05:30" + result = to_datetime(ts_str) + expected = Timestamp(ts_str) + assert result == expected + + expected = DatetimeIndex([Timestamp(ts_str)] * 2) + result = to_datetime([ts_str] * 2) + tm.assert_index_equal(result, expected) + + result = DatetimeIndex([ts_str] * 2) + tm.assert_index_equal(result, expected) + + def test_iso_8601_strings_with_different_offsets(self): + # GH 17697, 11736, 50887 + ts_strings = ["2015-11-18 15:30:00+05:30", "2015-11-18 16:30:00+06:30", NaT] + msg = "parsing datetimes with mixed time zones will raise an error" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_datetime(ts_strings) + expected = np.array( + [ + datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 19800)), + datetime(2015, 11, 18, 16, 30, tzinfo=tzoffset(None, 23400)), + NaT, + ], + dtype=object, + ) + # GH 21864 + expected = Index(expected) + tm.assert_index_equal(result, expected) + + def test_iso_8601_strings_with_different_offsets_utc(self): + ts_strings = ["2015-11-18 15:30:00+05:30", "2015-11-18 16:30:00+06:30", NaT] + result = to_datetime(ts_strings, utc=True) + expected = DatetimeIndex( + [Timestamp(2015, 11, 18, 10), Timestamp(2015, 11, 18, 10), NaT], tz="UTC" + ) + tm.assert_index_equal(result, expected) + + def 
test_mixed_offsets_with_native_datetime_raises(self): + # GH 25978 + + vals = [ + "nan", + Timestamp("1990-01-01"), + "2015-03-14T16:15:14.123-08:00", + "2019-03-04T21:56:32.620-07:00", + None, + "today", + "now", + ] + ser = Series(vals) + assert all(ser[i] is vals[i] for i in range(len(vals))) # GH#40111 + + now = Timestamp("now") + today = Timestamp("today") + msg = "parsing datetimes with mixed time zones will raise an error" + with tm.assert_produces_warning(FutureWarning, match=msg): + mixed = to_datetime(ser) + expected = Series( + [ + "NaT", + Timestamp("1990-01-01"), + Timestamp("2015-03-14T16:15:14.123-08:00").to_pydatetime(), + Timestamp("2019-03-04T21:56:32.620-07:00").to_pydatetime(), + None, + ], + dtype=object, + ) + tm.assert_series_equal(mixed[:-2], expected) + # we'll check mixed[-1] and mixed[-2] match now and today to within + # call-timing tolerances + assert (now - mixed.iloc[-1]).total_seconds() <= 0.1 + assert (today - mixed.iloc[-2]).total_seconds() <= 0.1 + + with pytest.raises(ValueError, match="Tz-aware datetime.datetime"): + to_datetime(mixed) + + def test_non_iso_strings_with_tz_offset(self): + result = to_datetime(["March 1, 2018 12:00:00+0400"] * 2) + expected = DatetimeIndex( + [datetime(2018, 3, 1, 12, tzinfo=timezone(timedelta(minutes=240)))] * 2 + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "ts, expected", + [ + (Timestamp("2018-01-01"), Timestamp("2018-01-01", tz="UTC")), + ( + Timestamp("2018-01-01", tz="US/Pacific"), + Timestamp("2018-01-01 08:00", tz="UTC"), + ), + ], + ) + def test_timestamp_utc_true(self, ts, expected): + # GH 24415 + result = to_datetime(ts, utc=True) + assert result == expected + + @pytest.mark.parametrize("dt_str", ["00010101", "13000101", "30000101", "99990101"]) + def test_to_datetime_with_format_out_of_bounds(self, dt_str): + # GH 9107 + msg = "Out of bounds nanosecond timestamp" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(dt_str, format="%Y%m%d") + + def test_to_datetime_utc(self): + arr = np.array([parse("2012-06-13T01:39:00Z")], dtype=object) + + result = to_datetime(arr, utc=True) + assert result.tz is timezone.utc + + def test_to_datetime_fixed_offset(self): + from pandas.tests.indexes.datetimes.test_timezones import FixedOffset + + fixed_off = FixedOffset(-420, "-07:00") + + dates = [ + datetime(2000, 1, 1, tzinfo=fixed_off), + datetime(2000, 1, 2, tzinfo=fixed_off), + datetime(2000, 1, 3, tzinfo=fixed_off), + ] + result = to_datetime(dates) + assert result.tz == fixed_off + + @pytest.mark.parametrize( + "date", + [ + ["2020-10-26 00:00:00+06:00", "2020-10-26 00:00:00+01:00"], + ["2020-10-26 00:00:00+06:00", Timestamp("2018-01-01", tz="US/Pacific")], + [ + "2020-10-26 00:00:00+06:00", + datetime(2020, 1, 1, 18, tzinfo=pytz.timezone("Australia/Melbourne")), + ], + ], + ) + def test_to_datetime_mixed_offsets_with_utc_false_deprecated(self, date): + # GH 50887 + msg = "parsing datetimes with mixed time zones will raise an error" + with tm.assert_produces_warning(FutureWarning, match=msg): + to_datetime(date, utc=False) + + +class TestToDatetimeUnit: + @pytest.mark.parametrize("unit", ["Y", "M"]) + @pytest.mark.parametrize("item", [150, float(150)]) + def test_to_datetime_month_or_year_unit_int(self, cache, unit, item, request): + # GH#50870 Note we have separate tests that pd.Timestamp gets these right + ts = Timestamp(item, unit=unit) + expected = DatetimeIndex([ts], dtype="M8[ns]") + + result = to_datetime([item], unit=unit, cache=cache) + 
tm.assert_index_equal(result, expected) + + result = to_datetime(np.array([item], dtype=object), unit=unit, cache=cache) + tm.assert_index_equal(result, expected) + + result = to_datetime(np.array([item]), unit=unit, cache=cache) + tm.assert_index_equal(result, expected) + + # with a nan! + result = to_datetime(np.array([item, np.nan]), unit=unit, cache=cache) + assert result.isna()[1] + tm.assert_index_equal(result[:1], expected) + + @pytest.mark.parametrize("unit", ["Y", "M"]) + def test_to_datetime_month_or_year_unit_non_round_float(self, cache, unit): + # GH#50301 + # Match Timestamp behavior in disallowing non-round floats with + # Y or M unit + warn_msg = "strings will be parsed as datetime strings" + msg = f"Conversion of non-round float with unit={unit} is ambiguous" + with pytest.raises(ValueError, match=msg): + to_datetime([1.5], unit=unit, errors="raise") + with pytest.raises(ValueError, match=msg): + to_datetime(np.array([1.5]), unit=unit, errors="raise") + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + to_datetime(["1.5"], unit=unit, errors="raise") + + # with errors="ignore" we also end up raising within the Timestamp + # constructor; this may not be ideal + with pytest.raises(ValueError, match=msg): + to_datetime([1.5], unit=unit, errors="ignore") + + res = to_datetime([1.5], unit=unit, errors="coerce") + expected = Index([NaT], dtype="M8[ns]") + tm.assert_index_equal(res, expected) + + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = to_datetime(["1.5"], unit=unit, errors="coerce") + tm.assert_index_equal(res, expected) + + # round floats are OK + res = to_datetime([1.0], unit=unit) + expected = to_datetime([1], unit=unit) + tm.assert_index_equal(res, expected) + + def test_unit(self, cache): + # GH 11758 + # test proper behavior with errors + msg = "cannot specify both format and unit" + with pytest.raises(ValueError, match=msg): + to_datetime([1], unit="D", format="%Y%m%d", cache=cache) + + def test_unit_str(self, cache): + # GH 57051 + # Test that strs aren't dropping precision to 32-bit accidentally. 
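+ # e.g. np.float32(1704660000) rounds to 1704659968.0, so a 32-bit float + # detour would silently shift the parsed timestamp by 32 seconds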
+ with tm.assert_produces_warning(FutureWarning): + res = to_datetime(["1704660000"], unit="s", origin="unix") + expected = to_datetime([1704660000], unit="s", origin="unix") + tm.assert_index_equal(res, expected) + + def test_unit_array_mixed_nans(self, cache): + values = [11111111111111111, 1, 1.0, iNaT, NaT, np.nan, "NaT", ""] + result = to_datetime(values, unit="D", errors="ignore", cache=cache) + expected = Index( + [ + 11111111111111111, + Timestamp("1970-01-02"), + Timestamp("1970-01-02"), + NaT, + NaT, + NaT, + NaT, + NaT, + ], + dtype=object, + ) + tm.assert_index_equal(result, expected) + + result = to_datetime(values, unit="D", errors="coerce", cache=cache) + expected = DatetimeIndex( + ["NaT", "1970-01-02", "1970-01-02", "NaT", "NaT", "NaT", "NaT", "NaT"], + dtype="M8[ns]", + ) + tm.assert_index_equal(result, expected) + + msg = "cannot convert input 11111111111111111 with the unit 'D'" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(values, unit="D", errors="raise", cache=cache) + + def test_unit_array_mixed_nans_large_int(self, cache): + values = [1420043460000000000000000, iNaT, NaT, np.nan, "NaT"] + + result = to_datetime(values, errors="ignore", unit="s", cache=cache) + expected = Index([1420043460000000000000000, NaT, NaT, NaT, NaT], dtype=object) + tm.assert_index_equal(result, expected) + + result = to_datetime(values, errors="coerce", unit="s", cache=cache) + expected = DatetimeIndex(["NaT", "NaT", "NaT", "NaT", "NaT"], dtype="M8[ns]") + tm.assert_index_equal(result, expected) + + msg = "cannot convert input 1420043460000000000000000 with the unit 's'" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(values, errors="raise", unit="s", cache=cache) + + def test_to_datetime_invalid_str_not_out_of_bounds_valuerror(self, cache): + # if we have a string, then we raise a ValueError + # and NOT an OutOfBoundsDatetime + msg = "non convertible value foo with the unit 's'" + with pytest.raises(ValueError, match=msg): + to_datetime("foo", errors="raise", unit="s", cache=cache) + + @pytest.mark.parametrize("error", ["raise", "coerce", "ignore"]) + def test_unit_consistency(self, cache, error): + # consistency of conversions + expected = Timestamp("1970-05-09 14:25:11") + result = to_datetime(11111111, unit="s", errors=error, cache=cache) + assert result == expected + assert isinstance(result, Timestamp) + + @pytest.mark.parametrize("errors", ["ignore", "raise", "coerce"]) + @pytest.mark.parametrize("dtype", ["float64", "int64"]) + def test_unit_with_numeric(self, cache, errors, dtype): + # GH 13180 + # coercions from floats/ints are ok + expected = DatetimeIndex( + ["2015-06-19 05:33:20", "2015-05-27 22:33:20"], dtype="M8[ns]" + ) + arr = np.array([1.434692e18, 1.432766e18]).astype(dtype) + result = to_datetime(arr, errors=errors, cache=cache) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "exp, arr, warning", + [ + [ + ["NaT", "2015-06-19 05:33:20", "2015-05-27 22:33:20"], + ["foo", 1.434692e18, 1.432766e18], + UserWarning, + ], + [ + ["2015-06-19 05:33:20", "2015-05-27 22:33:20", "NaT", "NaT"], + [1.434692e18, 1.432766e18, "foo", "NaT"], + None, + ], + ], + ) + def test_unit_with_numeric_coerce(self, cache, exp, arr, warning): + # but we want to make sure that we are coercing + # if we have ints/strings + expected = DatetimeIndex(exp, dtype="M8[ns]") + with tm.assert_produces_warning(warning, match="Could not infer format"): + result = to_datetime(arr, errors="coerce", cache=cache) + tm.assert_index_equal(result, 
expected) + + @pytest.mark.parametrize( + "arr", + [ + [Timestamp("20130101"), 1.434692e18, 1.432766e18], + [1.434692e18, 1.432766e18, Timestamp("20130101")], + ], + ) + def test_unit_mixed(self, cache, arr): + # GH#50453 pre-2.0 with mixed numeric/datetimes and errors="coerce" + # the numeric entries would be coerced to NaT, was never clear exactly + # why. + # mixed integers/datetimes + expected = Index([Timestamp(x) for x in arr], dtype="M8[ns]") + result = to_datetime(arr, errors="coerce", cache=cache) + tm.assert_index_equal(result, expected) + + # GH#49037 pre-2.0 this raised, but it always worked with Series, + # was never clear why it was disallowed + result = to_datetime(arr, errors="raise", cache=cache) + tm.assert_index_equal(result, expected) + + result = DatetimeIndex(arr) + tm.assert_index_equal(result, expected) + + def test_unit_rounding(self, cache): + # GH 14156 & GH 20445: argument will incur floating point errors + # but no premature rounding + value = 1434743731.8770001 + result = to_datetime(value, unit="s", cache=cache) + expected = Timestamp("2015-06-19 19:55:31.877000093") + assert result == expected + + alt = Timestamp(value, unit="s") + assert alt == result + + def test_unit_ignore_keeps_name(self, cache): + # GH 21697 + expected = Index([15e9] * 2, name="name") + result = to_datetime(expected, errors="ignore", unit="s", cache=cache) + tm.assert_index_equal(result, expected) + + def test_to_datetime_errors_ignore_utc_true(self): + # GH#23758 + result = to_datetime([1], unit="s", utc=True, errors="ignore") + expected = DatetimeIndex(["1970-01-01 00:00:01"], dtype="M8[ns, UTC]") + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("dtype", [int, float]) + def test_to_datetime_unit(self, dtype): + epoch = 1370745748 + ser = Series([epoch + t for t in range(20)]).astype(dtype) + result = to_datetime(ser, unit="s") + expected = Series( + [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) + for t in range(20) + ], + dtype="M8[ns]", + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("null", [iNaT, np.nan]) + def test_to_datetime_unit_with_nulls(self, null): + epoch = 1370745748 + ser = Series([epoch + t for t in range(20)] + [null]) + result = to_datetime(ser, unit="s") + expected = Series( + [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] + + [NaT], + dtype="M8[ns]", + ) + tm.assert_series_equal(result, expected) + + def test_to_datetime_unit_fractional_seconds(self): + # GH13834 + epoch = 1370745748 + ser = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float) + result = to_datetime(ser, unit="s") + expected = Series( + [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) + for t in np.arange(0, 2, 0.25) + ] + + [NaT], + dtype="M8[ns]", + ) + # GH20455 argument will incur floating point errors but no premature rounding + result = result.round("ms") + tm.assert_series_equal(result, expected) + + def test_to_datetime_unit_na_values(self): + result = to_datetime([1, 2, "NaT", NaT, np.nan], unit="D") + expected = DatetimeIndex( + [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3, + dtype="M8[ns]", + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("bad_val", ["foo", 111111111]) + def test_to_datetime_unit_invalid(self, bad_val): + msg = f"{bad_val} with the unit 'D'" + with pytest.raises(ValueError, match=msg): + to_datetime([1, 2, bad_val], unit="D") + + @pytest.mark.parametrize("bad_val", ["foo", 111111111]) + def 
test_to_timestamp_unit_coerce(self, bad_val): + # coerce we can process + expected = DatetimeIndex( + [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1, + dtype="M8[ns]", + ) + result = to_datetime([1, 2, bad_val], unit="D", errors="coerce") + tm.assert_index_equal(result, expected) + + def test_float_to_datetime_raise_near_bounds(self): + # GH50183 + msg = "cannot convert input with unit 'D'" + oneday_in_ns = 1e9 * 60 * 60 * 24 + tsmax_in_days = 2**63 / oneday_in_ns # 2**63 ns, in days + # just in bounds + should_succeed = Series( + [0, tsmax_in_days - 0.005, -tsmax_in_days + 0.005], dtype=float + ) + expected = (should_succeed * oneday_in_ns).astype(np.int64) + for error_mode in ["raise", "coerce", "ignore"]: + result1 = to_datetime(should_succeed, unit="D", errors=error_mode) + # Cast to `np.float64` so that `rtol` and inexact checking kick in + # (`check_exact` doesn't take place for integer dtypes) + tm.assert_almost_equal( + result1.astype(np.int64).astype(np.float64), + expected.astype(np.float64), + rtol=1e-10, + ) + # just out of bounds + should_fail1 = Series([0, tsmax_in_days + 0.005], dtype=float) + should_fail2 = Series([0, -tsmax_in_days - 0.005], dtype=float) + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(should_fail1, unit="D", errors="raise") + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(should_fail2, unit="D", errors="raise") + + +class TestToDatetimeDataFrame: + @pytest.fixture + def df(self): + return DataFrame( + { + "year": [2015, 2016], + "month": [2, 3], + "day": [4, 5], + "hour": [6, 7], + "minute": [58, 59], + "second": [10, 11], + "ms": [1, 1], + "us": [2, 2], + "ns": [3, 3], + } + ) + + def test_dataframe(self, df, cache): + result = to_datetime( + {"year": df["year"], "month": df["month"], "day": df["day"]}, cache=cache + ) + expected = Series( + [Timestamp("20150204 00:00:00"), Timestamp("20160305 00:0:00")] + ) + tm.assert_series_equal(result, expected) + + # dict-like + result = to_datetime(df[["year", "month", "day"]].to_dict(), cache=cache) + tm.assert_series_equal(result, expected) + + def test_dataframe_dict_with_constructable(self, df, cache): + # dict but with constructable + df2 = df[["year", "month", "day"]].to_dict() + df2["month"] = 2 + result = to_datetime(df2, cache=cache) + expected2 = Series( + [Timestamp("20150204 00:00:00"), Timestamp("20160205 00:0:00")] + ) + tm.assert_series_equal(result, expected2) + + @pytest.mark.parametrize( + "unit", + [ + { + "year": "years", + "month": "months", + "day": "days", + "hour": "hours", + "minute": "minutes", + "second": "seconds", + }, + { + "year": "year", + "month": "month", + "day": "day", + "hour": "hour", + "minute": "minute", + "second": "second", + }, + ], + ) + def test_dataframe_field_aliases_column_subset(self, df, cache, unit): + # unit mappings + result = to_datetime(df[list(unit.keys())].rename(columns=unit), cache=cache) + expected = Series( + [Timestamp("20150204 06:58:10"), Timestamp("20160305 07:59:11")], + dtype="M8[ns]", + ) + tm.assert_series_equal(result, expected) + + def test_dataframe_field_aliases(self, df, cache): + d = { + "year": "year", + "month": "month", + "day": "day", + "hour": "hour", + "minute": "minute", + "second": "second", + "ms": "ms", + "us": "us", + "ns": "ns", + } + + result = to_datetime(df.rename(columns=d), cache=cache) + expected = Series( + [ + Timestamp("20150204 06:58:10.001002003"), + Timestamp("20160305 07:59:11.001002003"), + ] + ) + tm.assert_series_equal(result, expected) + + def 
test_dataframe_str_dtype(self, df, cache): + # string columns are coerced back to int before assembly + result = to_datetime(df.astype(str), cache=cache) + expected = Series( + [ + Timestamp("20150204 06:58:10.001002003"), + Timestamp("20160305 07:59:11.001002003"), + ] + ) + tm.assert_series_equal(result, expected) + + def test_dataframe_coerce(self, cache): + # passing errors='coerce' + df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]}) + + msg = ( + r'^cannot assemble the datetimes: time data ".+" doesn\'t ' + r'match format "%Y%m%d", at position 1\.' + ) + with pytest.raises(ValueError, match=msg): + to_datetime(df2, cache=cache) + + result = to_datetime(df2, errors="coerce", cache=cache) + expected = Series([Timestamp("20150204 00:00:00"), NaT]) + tm.assert_series_equal(result, expected) + + def test_dataframe_extra_keys_raises(self, df, cache): + # extra columns + msg = r"extra keys have been passed to the datetime assemblage: \[foo\]" + with pytest.raises(ValueError, match=msg): + df2 = df.copy() + df2["foo"] = 1 + to_datetime(df2, cache=cache) + + @pytest.mark.parametrize( + "cols", + [ + ["year"], + ["year", "month"], + ["year", "month", "second"], + ["month", "day"], + ["year", "day", "second"], + ], + ) + def test_dataframe_missing_keys_raises(self, df, cache, cols): + # not enough keys + msg = ( + r"to assemble mappings requires at least that \[year, month, " + r"day\] be specified: \[.+\] is missing" + ) + with pytest.raises(ValueError, match=msg): + to_datetime(df[cols], cache=cache) + + def test_dataframe_duplicate_columns_raises(self, cache): + # duplicates + msg = "cannot assemble with duplicate keys" + df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]}) + df2.columns = ["year", "year", "day"] + with pytest.raises(ValueError, match=msg): + to_datetime(df2, cache=cache) + + df2 = DataFrame( + {"year": [2015, 2016], "month": [2, 20], "day": [4, 5], "hour": [4, 5]} + ) + df2.columns = ["year", "month", "day", "day"] + with pytest.raises(ValueError, match=msg): + to_datetime(df2, cache=cache) + + def test_dataframe_int16(self, cache): + # GH#13451 + df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}) + + # int16 + result = to_datetime(df.astype("int16"), cache=cache) + expected = Series( + [Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")] + ) + tm.assert_series_equal(result, expected) + + def test_dataframe_mixed(self, cache): + # mixed dtypes + df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}) + df["month"] = df["month"].astype("int8") + df["day"] = df["day"].astype("int8") + result = to_datetime(df, cache=cache) + expected = Series( + [Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")] + ) + tm.assert_series_equal(result, expected) + + def test_dataframe_float(self, cache): + # float + df = DataFrame({"year": [2000, 2001], "month": [1.5, 1], "day": [1, 1]}) + msg = ( + r"^cannot assemble the datetimes: unconverted data remains when parsing " + r'with format ".*": "1", at position 0.' 
+ ) + with pytest.raises(ValueError, match=msg): + to_datetime(df, cache=cache) + + def test_dataframe_utc_true(self): + # GH#23760 + df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}) + result = to_datetime(df, utc=True) + expected = Series( + np.array(["2015-02-04", "2016-03-05"], dtype="datetime64[ns]") + ).dt.tz_localize("UTC") + tm.assert_series_equal(result, expected) + + +class TestToDatetimeMisc: + def test_to_datetime_barely_out_of_bounds(self): + # GH#19529 + # GH#19382 close enough to bounds that dropping nanos would result + # in an in-bounds datetime + arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object) + + msg = "^Out of bounds nanosecond timestamp: .*, at position 0" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(arr) + + @pytest.mark.parametrize( + "arg, exp_str", + [ + ["2012-01-01 00:00:00", "2012-01-01 00:00:00"], + ["20121001", "2012-10-01"], # bad iso 8601 + ], + ) + def test_to_datetime_iso8601(self, cache, arg, exp_str): + result = to_datetime([arg], cache=cache) + exp = Timestamp(exp_str) + assert result[0] == exp + + @pytest.mark.parametrize( + "input, format", + [ + ("2012", "%Y-%m"), + ("2012-01", "%Y-%m-%d"), + ("2012-01-01", "%Y-%m-%d %H"), + ("2012-01-01 10", "%Y-%m-%d %H:%M"), + ("2012-01-01 10:00", "%Y-%m-%d %H:%M:%S"), + ("2012-01-01 10:00:00", "%Y-%m-%d %H:%M:%S.%f"), + ("2012-01-01 10:00:00.123", "%Y-%m-%d %H:%M:%S.%f%z"), + (0, "%Y-%m-%d"), + ], + ) + @pytest.mark.parametrize("exact", [True, False]) + def test_to_datetime_iso8601_fails(self, input, format, exact): + # https://github.com/pandas-dev/pandas/issues/12649 + # `format` is longer than the string, so this fails regardless of `exact` + with pytest.raises( + ValueError, + match=( + rf"time data \"{input}\" doesn't match format " + rf"\"{format}\", at position 0" + ), + ): + to_datetime(input, format=format, exact=exact) + + @pytest.mark.parametrize( + "input, format", + [ + ("2012-01-01", "%Y-%m"), + ("2012-01-01 10", "%Y-%m-%d"), + ("2012-01-01 10:00", "%Y-%m-%d %H"), + ("2012-01-01 10:00:00", "%Y-%m-%d %H:%M"), + (0, "%Y-%m-%d"), + ], + ) + def test_to_datetime_iso8601_exact_fails(self, input, format): + # https://github.com/pandas-dev/pandas/issues/12649 + # `format` is shorter than the date string, so only fails with `exact=True` + msg = "|".join( + [ + '^unconverted data remains when parsing with format ".*": ".*"' + f", at position 0. {PARSING_ERR_MSG}$", + f'^time data ".*" doesn\'t match format ".*", at position 0. 
' + f"{PARSING_ERR_MSG}$", + ] + ) + with pytest.raises( + ValueError, + match=(msg), + ): + to_datetime(input, format=format) + + @pytest.mark.parametrize( + "input, format", + [ + ("2012-01-01", "%Y-%m"), + ("2012-01-01 00", "%Y-%m-%d"), + ("2012-01-01 00:00", "%Y-%m-%d %H"), + ("2012-01-01 00:00:00", "%Y-%m-%d %H:%M"), + ], + ) + def test_to_datetime_iso8601_non_exact(self, input, format): + # https://github.com/pandas-dev/pandas/issues/12649 + expected = Timestamp(2012, 1, 1) + result = to_datetime(input, format=format, exact=False) + assert result == expected + + @pytest.mark.parametrize( + "input, format", + [ + ("2020-01", "%Y/%m"), + ("2020-01-01", "%Y/%m/%d"), + ("2020-01-01 00", "%Y/%m/%dT%H"), + ("2020-01-01T00", "%Y/%m/%d %H"), + ("2020-01-01 00:00", "%Y/%m/%dT%H:%M"), + ("2020-01-01T00:00", "%Y/%m/%d %H:%M"), + ("2020-01-01 00:00:00", "%Y/%m/%dT%H:%M:%S"), + ("2020-01-01T00:00:00", "%Y/%m/%d %H:%M:%S"), + ], + ) + def test_to_datetime_iso8601_separator(self, input, format): + # https://github.com/pandas-dev/pandas/issues/12649 + with pytest.raises( + ValueError, + match=( + rf"time data \"{input}\" doesn\'t match format " + rf"\"{format}\", at position 0" + ), + ): + to_datetime(input, format=format) + + @pytest.mark.parametrize( + "input, format", + [ + ("2020-01", "%Y-%m"), + ("2020-01-01", "%Y-%m-%d"), + ("2020-01-01 00", "%Y-%m-%d %H"), + ("2020-01-01T00", "%Y-%m-%dT%H"), + ("2020-01-01 00:00", "%Y-%m-%d %H:%M"), + ("2020-01-01T00:00", "%Y-%m-%dT%H:%M"), + ("2020-01-01 00:00:00", "%Y-%m-%d %H:%M:%S"), + ("2020-01-01T00:00:00", "%Y-%m-%dT%H:%M:%S"), + ("2020-01-01T00:00:00.000", "%Y-%m-%dT%H:%M:%S.%f"), + ("2020-01-01T00:00:00.000000", "%Y-%m-%dT%H:%M:%S.%f"), + ("2020-01-01T00:00:00.000000000", "%Y-%m-%dT%H:%M:%S.%f"), + ], + ) + def test_to_datetime_iso8601_valid(self, input, format): + # https://github.com/pandas-dev/pandas/issues/12649 + expected = Timestamp(2020, 1, 1) + result = to_datetime(input, format=format) + assert result == expected + + @pytest.mark.parametrize( + "input, format", + [ + ("2020-1", "%Y-%m"), + ("2020-1-1", "%Y-%m-%d"), + ("2020-1-1 0", "%Y-%m-%d %H"), + ("2020-1-1T0", "%Y-%m-%dT%H"), + ("2020-1-1 0:0", "%Y-%m-%d %H:%M"), + ("2020-1-1T0:0", "%Y-%m-%dT%H:%M"), + ("2020-1-1 0:0:0", "%Y-%m-%d %H:%M:%S"), + ("2020-1-1T0:0:0", "%Y-%m-%dT%H:%M:%S"), + ("2020-1-1T0:0:0.000", "%Y-%m-%dT%H:%M:%S.%f"), + ("2020-1-1T0:0:0.000000", "%Y-%m-%dT%H:%M:%S.%f"), + ("2020-1-1T0:0:0.000000000", "%Y-%m-%dT%H:%M:%S.%f"), + ], + ) + def test_to_datetime_iso8601_non_padded(self, input, format): + # https://github.com/pandas-dev/pandas/issues/21422 + expected = Timestamp(2020, 1, 1) + result = to_datetime(input, format=format) + assert result == expected + + @pytest.mark.parametrize( + "input, format", + [ + ("2020-01-01T00:00:00.000000000+00:00", "%Y-%m-%dT%H:%M:%S.%f%z"), + ("2020-01-01T00:00:00+00:00", "%Y-%m-%dT%H:%M:%S%z"), + ("2020-01-01T00:00:00Z", "%Y-%m-%dT%H:%M:%S%z"), + ], + ) + def test_to_datetime_iso8601_with_timezone_valid(self, input, format): + # https://github.com/pandas-dev/pandas/issues/12649 + expected = Timestamp(2020, 1, 1, tzinfo=pytz.UTC) + result = to_datetime(input, format=format) + assert result == expected + + def test_to_datetime_default(self, cache): + rs = to_datetime("2001", cache=cache) + xp = datetime(2001, 1, 1) + assert rs == xp + + @pytest.mark.xfail(reason="fails to enforce dayfirst=True, which would raise") + def test_to_datetime_respects_dayfirst(self, cache): + # dayfirst is essentially broken + + # The msg here is not important 
since it isn't actually raised yet. + msg = "Invalid date specified" + with pytest.raises(ValueError, match=msg): + # if dayfirst is respected, then this would parse as month=13, which + # would raise + with tm.assert_produces_warning(UserWarning, match="Provide format"): + to_datetime("01-13-2012", dayfirst=True, cache=cache) + + def test_to_datetime_on_datetime64_series(self, cache): + # #2699 + ser = Series(date_range("1/1/2000", periods=10)) + + result = to_datetime(ser, cache=cache) + assert result[0] == ser[0] + + def test_to_datetime_with_space_in_series(self, cache): + # GH 6428 + ser = Series(["10/18/2006", "10/18/2008", " "]) + msg = ( + r'^time data " " doesn\'t match format "%m/%d/%Y", ' + rf"at position 2. {PARSING_ERR_MSG}$" + ) + with pytest.raises(ValueError, match=msg): + to_datetime(ser, errors="raise", cache=cache) + result_coerce = to_datetime(ser, errors="coerce", cache=cache) + expected_coerce = Series([datetime(2006, 10, 18), datetime(2008, 10, 18), NaT]) + tm.assert_series_equal(result_coerce, expected_coerce) + result_ignore = to_datetime(ser, errors="ignore", cache=cache) + tm.assert_series_equal(result_ignore, ser) + + @td.skip_if_not_us_locale + def test_to_datetime_with_apply(self, cache): + # this is only locale tested with US/None locales + # GH 5195 + # with a format and coerce a single item to_datetime fails + td = Series(["May 04", "Jun 02", "Dec 11"], index=[1, 2, 3]) + expected = to_datetime(td, format="%b %y", cache=cache) + result = td.apply(to_datetime, format="%b %y", cache=cache) + tm.assert_series_equal(result, expected) + + def test_to_datetime_timezone_name(self): + # https://github.com/pandas-dev/pandas/issues/49748 + result = to_datetime("2020-01-01 00:00:00UTC", format="%Y-%m-%d %H:%M:%S%Z") + expected = Timestamp(2020, 1, 1).tz_localize("UTC") + assert result == expected + + @td.skip_if_not_us_locale + @pytest.mark.parametrize("errors", ["raise", "coerce", "ignore"]) + def test_to_datetime_with_apply_with_empty_str(self, cache, errors): + # this is only locale tested with US/None locales + # GH 5195, GH50251 + # with a format and coerce a single item to_datetime fails + td = Series(["May 04", "Jun 02", ""], index=[1, 2, 3]) + expected = to_datetime(td, format="%b %y", errors=errors, cache=cache) + + result = td.apply( + lambda x: to_datetime(x, format="%b %y", errors="coerce", cache=cache) + ) + tm.assert_series_equal(result, expected) + + def test_to_datetime_empty_str(self, cache): + # empty string + result = to_datetime("", cache=cache) + assert result is NaT + + def test_to_datetime_empty_str_list(self, cache): + result = to_datetime(["", ""], cache=cache) + assert isna(result).all() + + def test_to_datetime_zero(self, cache): + # ints + result = Timestamp(0) + expected = to_datetime(0, cache=cache) + assert result == expected + + def test_to_datetime_strings(self, cache): + # GH 3888 (strings) + expected = to_datetime(["2012"], cache=cache)[0] + result = to_datetime("2012", cache=cache) + assert result == expected + + def test_to_datetime_strings_variation(self, cache): + array = ["2012", "20120101", "20120101 12:01:01"] + expected = [to_datetime(dt_str, cache=cache) for dt_str in array] + result = [Timestamp(date_str) for date_str in array] + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize("result", [Timestamp("2012"), to_datetime("2012")]) + def test_to_datetime_strings_vs_constructor(self, result): + expected = Timestamp(2012, 1, 1) + assert result == expected + + def test_to_datetime_unprocessable_input(self, 
cache): + # GH 4928 + # GH 21864 + result = to_datetime([1, "1"], errors="ignore", cache=cache) + + expected = Index(np.array([1, "1"], dtype="O")) + tm.assert_equal(result, expected) + msg = '^Given date string "1" not likely a datetime, at position 1$' + with pytest.raises(ValueError, match=msg): + to_datetime([1, "1"], errors="raise", cache=cache) + + def test_to_datetime_unhashable_input(self, cache): + series = Series([["a"]] * 100) + result = to_datetime(series, errors="ignore", cache=cache) + tm.assert_series_equal(series, result) + + def test_to_datetime_other_datetime64_units(self): + # 5/25/2012 + scalar = np.int64(1337904000000000).view("M8[us]") + as_obj = scalar.astype("O") + + index = DatetimeIndex([scalar]) + assert index[0] == scalar.astype("O") + + value = Timestamp(scalar) + assert value == as_obj + + def test_to_datetime_list_of_integers(self): + rng = date_range("1/1/2000", periods=20) + rng = DatetimeIndex(rng.values) + + ints = list(rng.asi8) + + result = DatetimeIndex(ints) + + tm.assert_index_equal(rng, result) + + def test_to_datetime_overflow(self): + # gh-17637 + # we are overflowing Timedelta range here + msg = "Cannot cast 139999 days 00:00:00 to unit='ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + date_range(start="1/1/1700", freq="B", periods=100000) + + def test_string_invalid_operation(self, cache): + invalid = np.array(["87156549591102612381000001219H5"], dtype=object) + # GH #51084 + + with pytest.raises(ValueError, match="Unknown datetime string format"): + to_datetime(invalid, errors="raise", cache=cache) + + def test_string_na_nat_conversion(self, cache): + # GH #999, #858 + + strings = np.array(["1/1/2000", "1/2/2000", np.nan, "1/4/2000"], dtype=object) + + expected = np.empty(4, dtype="M8[ns]") + for i, val in enumerate(strings): + if isna(val): + expected[i] = iNaT + else: + expected[i] = parse(val) + + result = tslib.array_to_datetime(strings)[0] + tm.assert_almost_equal(result, expected) + + result2 = to_datetime(strings, cache=cache) + assert isinstance(result2, DatetimeIndex) + tm.assert_numpy_array_equal(result, result2.values) + + def test_string_na_nat_conversion_malformed(self, cache): + malformed = np.array(["1/100/2000", np.nan], dtype=object) + + # GH 10636, default is now 'raise' + msg = r"Unknown datetime string format" + with pytest.raises(ValueError, match=msg): + to_datetime(malformed, errors="raise", cache=cache) + + result = to_datetime(malformed, errors="ignore", cache=cache) + # GH 21864 + expected = Index(malformed, dtype=object) + tm.assert_index_equal(result, expected) + + with pytest.raises(ValueError, match=msg): + to_datetime(malformed, errors="raise", cache=cache) + + def test_string_na_nat_conversion_with_name(self, cache): + idx = ["a", "b", "c", "d", "e"] + series = Series( + ["1/1/2000", np.nan, "1/3/2000", np.nan, "1/5/2000"], index=idx, name="foo" + ) + dseries = Series( + [ + to_datetime("1/1/2000", cache=cache), + np.nan, + to_datetime("1/3/2000", cache=cache), + np.nan, + to_datetime("1/5/2000", cache=cache), + ], + index=idx, + name="foo", + ) + + result = to_datetime(series, cache=cache) + dresult = to_datetime(dseries, cache=cache) + + expected = Series(np.empty(5, dtype="M8[ns]"), index=idx) + for i in range(5): + x = series.iloc[i] + if isna(x): + expected.iloc[i] = NaT + else: + expected.iloc[i] = to_datetime(x, cache=cache) + + tm.assert_series_equal(result, expected, check_names=False) + assert result.name == "foo" + + tm.assert_series_equal(dresult, expected, 
check_names=False) + assert dresult.name == "foo" + + @pytest.mark.parametrize( + "unit", + ["h", "m", "s", "ms", "us", "ns"], + ) + def test_dti_constructor_numpy_timeunits(self, cache, unit): + # GH 9114 + dtype = np.dtype(f"M8[{unit}]") + base = to_datetime(["2000-01-01T00:00", "2000-01-02T00:00", "NaT"], cache=cache) + + values = base.values.astype(dtype) + + if unit in ["h", "m"]: + # we cast to closest supported unit + unit = "s" + exp_dtype = np.dtype(f"M8[{unit}]") + expected = DatetimeIndex(base.astype(exp_dtype)) + assert expected.dtype == exp_dtype + + tm.assert_index_equal(DatetimeIndex(values), expected) + tm.assert_index_equal(to_datetime(values, cache=cache), expected) + + def test_dayfirst(self, cache): + # GH 5917 + arr = ["10/02/2014", "11/02/2014", "12/02/2014"] + expected = DatetimeIndex( + [datetime(2014, 2, 10), datetime(2014, 2, 11), datetime(2014, 2, 12)] + ) + idx1 = DatetimeIndex(arr, dayfirst=True) + idx2 = DatetimeIndex(np.array(arr), dayfirst=True) + idx3 = to_datetime(arr, dayfirst=True, cache=cache) + idx4 = to_datetime(np.array(arr), dayfirst=True, cache=cache) + idx5 = DatetimeIndex(Index(arr), dayfirst=True) + idx6 = DatetimeIndex(Series(arr), dayfirst=True) + tm.assert_index_equal(expected, idx1) + tm.assert_index_equal(expected, idx2) + tm.assert_index_equal(expected, idx3) + tm.assert_index_equal(expected, idx4) + tm.assert_index_equal(expected, idx5) + tm.assert_index_equal(expected, idx6) + + def test_dayfirst_warnings_valid_input(self): + # GH 12585 + warning_msg = ( + "Parsing dates in .* format when dayfirst=.* was specified. " + "Pass `dayfirst=.*` or specify a format to silence this warning." + ) + + # CASE 1: valid input + arr = ["31/12/2014", "10/03/2011"] + expected = DatetimeIndex( + ["2014-12-31", "2011-03-10"], dtype="datetime64[ns]", freq=None + ) + + # A. dayfirst arg correct, no warning + res1 = to_datetime(arr, dayfirst=True) + tm.assert_index_equal(expected, res1) + + # B. dayfirst arg incorrect, warning + with tm.assert_produces_warning(UserWarning, match=warning_msg): + res2 = to_datetime(arr, dayfirst=False) + tm.assert_index_equal(expected, res2) + + def test_dayfirst_warnings_invalid_input(self): + # CASE 2: invalid input + # cannot consistently process with single format + # ValueError *always* raised + + # first in DD/MM/YYYY, second in MM/DD/YYYY + arr = ["31/12/2014", "03/30/2011"] + + with pytest.raises( + ValueError, + match=( + r'^time data "03/30/2011" doesn\'t match format ' + rf'"%d/%m/%Y", at position 1. 
{PARSING_ERR_MSG}$' + ), + ): + to_datetime(arr, dayfirst=True) + + @pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray._from_sequence]) + def test_to_datetime_dta_tz(self, klass): + # GH#27733 + dti = date_range("2015-04-05", periods=3).rename("foo") + expected = dti.tz_localize("UTC") + + obj = klass(dti) + expected = klass(expected) + + result = to_datetime(obj, utc=True) + tm.assert_equal(result, expected) + + +class TestGuessDatetimeFormat: + @pytest.mark.parametrize( + "test_list", + [ + [ + "2011-12-30 00:00:00.000000", + "2011-12-30 00:00:00.000000", + "2011-12-30 00:00:00.000000", + ], + [np.nan, np.nan, "2011-12-30 00:00:00.000000"], + ["", "2011-12-30 00:00:00.000000"], + ["NaT", "2011-12-30 00:00:00.000000"], + ["2011-12-30 00:00:00.000000", "random_string"], + ["now", "2011-12-30 00:00:00.000000"], + ["today", "2011-12-30 00:00:00.000000"], + ], + ) + def test_guess_datetime_format_for_array(self, test_list): + expected_format = "%Y-%m-%d %H:%M:%S.%f" + test_array = np.array(test_list, dtype=object) + assert tools._guess_datetime_format_for_array(test_array) == expected_format + + @td.skip_if_not_us_locale + def test_guess_datetime_format_for_array_all_nans(self): + format_for_string_of_nans = tools._guess_datetime_format_for_array( + np.array([np.nan, np.nan, np.nan], dtype="O") + ) + assert format_for_string_of_nans is None + + +class TestToDatetimeInferFormat: + @pytest.mark.parametrize( + "test_format", ["%m-%d-%Y", "%m/%d/%Y %H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S.%f"] + ) + def test_to_datetime_infer_datetime_format_consistent_format( + self, cache, test_format + ): + ser = Series(date_range("20000101", periods=50, freq="h")) + + s_as_dt_strings = ser.apply(lambda x: x.strftime(test_format)) + + with_format = to_datetime(s_as_dt_strings, format=test_format, cache=cache) + without_format = to_datetime(s_as_dt_strings, cache=cache) + + # Whether the format is explicitly passed, or + # it is inferred, the results should all be the same + tm.assert_series_equal(with_format, without_format) + + def test_to_datetime_inconsistent_format(self, cache): + data = ["01/01/2011 00:00:00", "01-02-2011 00:00:00", "2011-01-03T00:00:00"] + ser = Series(np.array(data)) + msg = ( + r'^time data "01-02-2011 00:00:00" doesn\'t match format ' + rf'"%m/%d/%Y %H:%M:%S", at position 1. 
{PARSING_ERR_MSG}$' + ) + with pytest.raises(ValueError, match=msg): + to_datetime(ser, cache=cache) + + def test_to_datetime_consistent_format(self, cache): + data = ["Jan/01/2011", "Feb/01/2011", "Mar/01/2011"] + ser = Series(np.array(data)) + result = to_datetime(ser, cache=cache) + expected = Series( + ["2011-01-01", "2011-02-01", "2011-03-01"], dtype="datetime64[ns]" + ) + tm.assert_series_equal(result, expected) + + def test_to_datetime_series_with_nans(self, cache): + ser = Series( + np.array( + ["01/01/2011 00:00:00", np.nan, "01/03/2011 00:00:00", np.nan], + dtype=object, + ) + ) + result = to_datetime(ser, cache=cache) + expected = Series( + ["2011-01-01", NaT, "2011-01-03", NaT], dtype="datetime64[ns]" + ) + tm.assert_series_equal(result, expected) + + def test_to_datetime_series_start_with_nans(self, cache): + ser = Series( + np.array( + [ + np.nan, + np.nan, + "01/01/2011 00:00:00", + "01/02/2011 00:00:00", + "01/03/2011 00:00:00", + ], + dtype=object, + ) + ) + + result = to_datetime(ser, cache=cache) + expected = Series( + [NaT, NaT, "2011-01-01", "2011-01-02", "2011-01-03"], dtype="datetime64[ns]" + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "tz_name, offset", + [("UTC", 0), ("UTC-3", 180), ("UTC+3", -180)], + ) + def test_infer_datetime_format_tz_name(self, tz_name, offset): + # GH 33133 + ser = Series([f"2019-02-02 08:07:13 {tz_name}"]) + result = to_datetime(ser) + tz = timezone(timedelta(minutes=offset)) + expected = Series([Timestamp("2019-02-02 08:07:13").tz_localize(tz)]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "ts,zero_tz", + [ + ("2019-02-02 08:07:13", "Z"), + ("2019-02-02 08:07:13", ""), + ("2019-02-02 08:07:13.012345", "Z"), + ("2019-02-02 08:07:13.012345", ""), + ], + ) + def test_infer_datetime_format_zero_tz(self, ts, zero_tz): + # GH 41047 + ser = Series([ts + zero_tz]) + result = to_datetime(ser) + tz = pytz.utc if zero_tz == "Z" else None + expected = Series([Timestamp(ts, tz=tz)]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("format", [None, "%Y-%m-%d"]) + def test_to_datetime_iso8601_noleading_0s(self, cache, format): + # GH 11871 + ser = Series(["2014-1-1", "2014-2-2", "2015-3-3"]) + expected = Series( + [ + Timestamp("2014-01-01"), + Timestamp("2014-02-02"), + Timestamp("2015-03-03"), + ] + ) + result = to_datetime(ser, format=format, cache=cache) + tm.assert_series_equal(result, expected) + + def test_parse_dates_infer_datetime_format_warning(self): + # GH 49024 + with tm.assert_produces_warning( + UserWarning, + match="The argument 'infer_datetime_format' is deprecated", + ): + to_datetime(["10-10-2000"], infer_datetime_format=True) + + +class TestDaysInMonth: + # tests for issue #10154 + + @pytest.mark.parametrize( + "arg, format", + [ + ["2015-02-29", None], + ["2015-02-29", "%Y-%m-%d"], + ["2015-02-32", "%Y-%m-%d"], + ["2015-04-31", "%Y-%m-%d"], + ], + ) + def test_day_not_in_month_coerce(self, cache, arg, format): + assert isna(to_datetime(arg, errors="coerce", format=format, cache=cache)) + + def test_day_not_in_month_raise(self, cache): + msg = "day is out of range for month: 2015-02-29, at position 0" + with pytest.raises(ValueError, match=msg): + to_datetime("2015-02-29", errors="raise", cache=cache) + + @pytest.mark.parametrize( + "arg, format, msg", + [ + ( + "2015-02-29", + "%Y-%m-%d", + f"^day is out of range for month, at position 0. {PARSING_ERR_MSG}$", + ), + ( + "2015-29-02", + "%Y-%d-%m", + f"^day is out of range for month, at position 0. 
{PARSING_ERR_MSG}$", + ), + ( + "2015-02-32", + "%Y-%m-%d", + '^unconverted data remains when parsing with format "%Y-%m-%d": "2", ' + f"at position 0. {PARSING_ERR_MSG}$", + ), + ( + "2015-32-02", + "%Y-%d-%m", + '^time data "2015-32-02" doesn\'t match format "%Y-%d-%m", ' + f"at position 0. {PARSING_ERR_MSG}$", + ), + ( + "2015-04-31", + "%Y-%m-%d", + f"^day is out of range for month, at position 0. {PARSING_ERR_MSG}$", + ), + ( + "2015-31-04", + "%Y-%d-%m", + f"^day is out of range for month, at position 0. {PARSING_ERR_MSG}$", + ), + ], + ) + def test_day_not_in_month_raise_value(self, cache, arg, format, msg): + # https://github.com/pandas-dev/pandas/issues/50462 + with pytest.raises(ValueError, match=msg): + to_datetime(arg, errors="raise", format=format, cache=cache) + + @pytest.mark.parametrize( + "expected, format", + [ + ["2015-02-29", None], + ["2015-02-29", "%Y-%m-%d"], + ["2015-02-29", "%Y-%m-%d"], + ["2015-04-31", "%Y-%m-%d"], + ], + ) + def test_day_not_in_month_ignore(self, cache, expected, format): + result = to_datetime(expected, errors="ignore", format=format, cache=cache) + assert result == expected + + +class TestDatetimeParsingWrappers: + @pytest.mark.parametrize( + "date_str, expected", + [ + ("2011-01-01", datetime(2011, 1, 1)), + ("2Q2005", datetime(2005, 4, 1)), + ("2Q05", datetime(2005, 4, 1)), + ("2005Q1", datetime(2005, 1, 1)), + ("05Q1", datetime(2005, 1, 1)), + ("2011Q3", datetime(2011, 7, 1)), + ("11Q3", datetime(2011, 7, 1)), + ("3Q2011", datetime(2011, 7, 1)), + ("3Q11", datetime(2011, 7, 1)), + # quarterly without space + ("2000Q4", datetime(2000, 10, 1)), + ("00Q4", datetime(2000, 10, 1)), + ("4Q2000", datetime(2000, 10, 1)), + ("4Q00", datetime(2000, 10, 1)), + ("2000q4", datetime(2000, 10, 1)), + ("2000-Q4", datetime(2000, 10, 1)), + ("00-Q4", datetime(2000, 10, 1)), + ("4Q-2000", datetime(2000, 10, 1)), + ("4Q-00", datetime(2000, 10, 1)), + ("00q4", datetime(2000, 10, 1)), + ("2005", datetime(2005, 1, 1)), + ("2005-11", datetime(2005, 11, 1)), + ("2005 11", datetime(2005, 11, 1)), + ("11-2005", datetime(2005, 11, 1)), + ("11 2005", datetime(2005, 11, 1)), + ("200511", datetime(2020, 5, 11)), + ("20051109", datetime(2005, 11, 9)), + ("20051109 10:15", datetime(2005, 11, 9, 10, 15)), + ("20051109 08H", datetime(2005, 11, 9, 8, 0)), + ("2005-11-09 10:15", datetime(2005, 11, 9, 10, 15)), + ("2005-11-09 08H", datetime(2005, 11, 9, 8, 0)), + ("2005/11/09 10:15", datetime(2005, 11, 9, 10, 15)), + ("2005/11/09 10:15:32", datetime(2005, 11, 9, 10, 15, 32)), + ("2005/11/09 10:15:32 AM", datetime(2005, 11, 9, 10, 15, 32)), + ("2005/11/09 10:15:32 PM", datetime(2005, 11, 9, 22, 15, 32)), + ("2005/11/09 08H", datetime(2005, 11, 9, 8, 0)), + ("Thu Sep 25 10:36:28 2003", datetime(2003, 9, 25, 10, 36, 28)), + ("Thu Sep 25 2003", datetime(2003, 9, 25)), + ("Sep 25 2003", datetime(2003, 9, 25)), + ("January 1 2014", datetime(2014, 1, 1)), + # GH#10537 + ("2014-06", datetime(2014, 6, 1)), + ("06-2014", datetime(2014, 6, 1)), + ("2014-6", datetime(2014, 6, 1)), + ("6-2014", datetime(2014, 6, 1)), + ("20010101 12", datetime(2001, 1, 1, 12)), + ("20010101 1234", datetime(2001, 1, 1, 12, 34)), + ("20010101 123456", datetime(2001, 1, 1, 12, 34, 56)), + ], + ) + def test_parsers(self, date_str, expected, cache): + # dateutil >= 2.5.0 defaults to yearfirst=True + # https://github.com/dateutil/dateutil/issues/217 + yearfirst = True + + result1, _ = parsing.parse_datetime_string_with_reso( + date_str, yearfirst=yearfirst + ) + result2 = to_datetime(date_str, yearfirst=yearfirst) 
+ result3 = to_datetime([date_str], yearfirst=yearfirst) + # result5 is used below + result4 = to_datetime( + np.array([date_str], dtype=object), yearfirst=yearfirst, cache=cache + ) + result6 = DatetimeIndex([date_str], yearfirst=yearfirst) + # result7 is used below + result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst) + result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst) + + for res in [result1, result2]: + assert res == expected + for res in [result3, result4, result6, result8, result9]: + exp = DatetimeIndex([Timestamp(expected)]) + tm.assert_index_equal(res, exp) + + # these really need to have yearfirst, but we don't support + if not yearfirst: + result5 = Timestamp(date_str) + assert result5 == expected + result7 = date_range(date_str, freq="S", periods=1, yearfirst=yearfirst) + assert result7 == expected + + def test_na_values_with_cache( + self, cache, unique_nulls_fixture, unique_nulls_fixture2 + ): + # GH22305 + expected = Index([NaT, NaT], dtype="datetime64[ns]") + result = to_datetime([unique_nulls_fixture, unique_nulls_fixture2], cache=cache) + tm.assert_index_equal(result, expected) + + def test_parsers_nat(self): + # Test that each of several string-accepting methods return pd.NaT + result1, _ = parsing.parse_datetime_string_with_reso("NaT") + result2 = to_datetime("NaT") + result3 = Timestamp("NaT") + result4 = DatetimeIndex(["NaT"])[0] + assert result1 is NaT + assert result2 is NaT + assert result3 is NaT + assert result4 is NaT + + @pytest.mark.parametrize( + "date_str, dayfirst, yearfirst, expected", + [ + ("10-11-12", False, False, datetime(2012, 10, 11)), + ("10-11-12", True, False, datetime(2012, 11, 10)), + ("10-11-12", False, True, datetime(2010, 11, 12)), + ("10-11-12", True, True, datetime(2010, 12, 11)), + ("20/12/21", False, False, datetime(2021, 12, 20)), + ("20/12/21", True, False, datetime(2021, 12, 20)), + ("20/12/21", False, True, datetime(2020, 12, 21)), + ("20/12/21", True, True, datetime(2020, 12, 21)), + ], + ) + def test_parsers_dayfirst_yearfirst( + self, cache, date_str, dayfirst, yearfirst, expected + ): + # OK + # 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00 + # 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2012-10-11 00:00:00 + # 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00 + + # OK + # 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00 + # 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00 + # 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00 + + # bug fix in 2.5.2 + # 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00 + # 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00 + # 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00 + + # OK + # 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00 + # 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00 + # 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00 + + # OK + # 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00 + # 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00 + # 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00 + + # OK + # 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00 + # 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00 + # 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00 + + # revert of bug in 2.5.2 + # 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00 + # 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must 
be in 1..12 + # 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00 + + # OK + # 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00 + # 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00 + # 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00 + + # str : dayfirst, yearfirst, expected + + # compare with dateutil result + dateutil_result = parse(date_str, dayfirst=dayfirst, yearfirst=yearfirst) + assert dateutil_result == expected + + result1, _ = parsing.parse_datetime_string_with_reso( + date_str, dayfirst=dayfirst, yearfirst=yearfirst + ) + + # we don't support dayfirst/yearfirst here: + if not dayfirst and not yearfirst: + result2 = Timestamp(date_str) + assert result2 == expected + + result3 = to_datetime( + date_str, dayfirst=dayfirst, yearfirst=yearfirst, cache=cache + ) + + result4 = DatetimeIndex([date_str], dayfirst=dayfirst, yearfirst=yearfirst)[0] + + assert result1 == expected + assert result3 == expected + assert result4 == expected + + @pytest.mark.parametrize( + "date_str, exp_def", + [["10:15", datetime(1, 1, 1, 10, 15)], ["9:05", datetime(1, 1, 1, 9, 5)]], + ) + def test_parsers_timestring(self, date_str, exp_def): + # must be the same as dateutil result + exp_now = parse(date_str) + + result1, _ = parsing.parse_datetime_string_with_reso(date_str) + result2 = to_datetime(date_str) + result3 = to_datetime([date_str]) + result4 = Timestamp(date_str) + result5 = DatetimeIndex([date_str])[0] + # parse time string return time string based on default date + # others are not, and can't be changed because it is used in + # time series plot + assert result1 == exp_def + assert result2 == exp_now + assert result3 == exp_now + assert result4 == exp_now + assert result5 == exp_now + + @pytest.mark.parametrize( + "dt_string, tz, dt_string_repr", + [ + ( + "2013-01-01 05:45+0545", + timezone(timedelta(minutes=345)), + "Timestamp('2013-01-01 05:45:00+0545', tz='UTC+05:45')", + ), + ( + "2013-01-01 05:30+0530", + timezone(timedelta(minutes=330)), + "Timestamp('2013-01-01 05:30:00+0530', tz='UTC+05:30')", + ), + ], + ) + def test_parsers_timezone_minute_offsets_roundtrip( + self, cache, dt_string, tz, dt_string_repr + ): + # GH11708 + base = to_datetime("2013-01-01 00:00:00", cache=cache) + base = base.tz_localize("UTC").tz_convert(tz) + dt_time = to_datetime(dt_string, cache=cache) + assert base == dt_time + assert dt_string_repr == repr(dt_time) + + +@pytest.fixture(params=["D", "s", "ms", "us", "ns"]) +def units(request): + """Day and some time units. + + * D + * s + * ms + * us + * ns + """ + return request.param + + +@pytest.fixture +def epoch_1960(): + """Timestamp at 1960-01-01.""" + return Timestamp("1960-01-01") + + +@pytest.fixture +def units_from_epochs(): + return list(range(5)) + + +@pytest.fixture(params=["timestamp", "pydatetime", "datetime64", "str_1960"]) +def epochs(epoch_1960, request): + """Timestamp at 1960-01-01 in various forms. 
+ + * Timestamp + * datetime.datetime + * numpy.datetime64 + * str + """ + assert request.param in {"timestamp", "pydatetime", "datetime64", "str_1960"} + if request.param == "timestamp": + return epoch_1960 + elif request.param == "pydatetime": + return epoch_1960.to_pydatetime() + elif request.param == "datetime64": + return epoch_1960.to_datetime64() + else: + return str(epoch_1960) + + +@pytest.fixture +def julian_dates(): + return date_range("2014-1-1", periods=10).to_julian_date().values + + +class TestOrigin: + def test_origin_and_unit(self): + # GH#42624 + ts = to_datetime(1, unit="s", origin=1) + expected = Timestamp("1970-01-01 00:00:02") + assert ts == expected + + ts = to_datetime(1, unit="s", origin=1_000_000_000) + expected = Timestamp("2001-09-09 01:46:41") + assert ts == expected + + def test_julian(self, julian_dates): + # gh-11276, gh-11745 + # for origin as julian + + result = Series(to_datetime(julian_dates, unit="D", origin="julian")) + expected = Series( + to_datetime(julian_dates - Timestamp(0).to_julian_date(), unit="D") + ) + tm.assert_series_equal(result, expected) + + def test_unix(self): + result = Series(to_datetime([0, 1, 2], unit="D", origin="unix")) + expected = Series( + [Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")], + dtype="M8[ns]", + ) + tm.assert_series_equal(result, expected) + + def test_julian_round_trip(self): + result = to_datetime(2456658, origin="julian", unit="D") + assert result.to_julian_date() == 2456658 + + # out-of-bounds + msg = "1 is Out of Bounds for origin='julian'" + with pytest.raises(ValueError, match=msg): + to_datetime(1, origin="julian", unit="D") + + def test_invalid_unit(self, units, julian_dates): + # checking for invalid combination of origin='julian' and unit != D + if units != "D": + msg = "unit must be 'D' for origin='julian'" + with pytest.raises(ValueError, match=msg): + to_datetime(julian_dates, unit=units, origin="julian") + + @pytest.mark.parametrize("unit", ["ns", "D"]) + def test_invalid_origin(self, unit): + # need to have a numeric specified + msg = "it must be numeric with a unit specified" + with pytest.raises(ValueError, match=msg): + to_datetime("2005-01-01", origin="1960-01-01", unit=unit) + + def test_epoch(self, units, epochs, epoch_1960, units_from_epochs): + expected = Series( + [pd.Timedelta(x, unit=units) + epoch_1960 for x in units_from_epochs] + ) + + result = Series(to_datetime(units_from_epochs, unit=units, origin=epochs)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "origin, exc", + [ + ("random_string", ValueError), + ("epoch", ValueError), + ("13-24-1990", ValueError), + (datetime(1, 1, 1), OutOfBoundsDatetime), + ], + ) + def test_invalid_origins(self, origin, exc, units, units_from_epochs): + msg = "|".join( + [ + f"origin {origin} is Out of Bounds", + f"origin {origin} cannot be converted to a Timestamp", + "Cannot cast .* to unit='ns' without overflow", + ] + ) + with pytest.raises(exc, match=msg): + to_datetime(units_from_epochs, unit=units, origin=origin) + + def test_invalid_origins_tzinfo(self): + # GH16842 + with pytest.raises(ValueError, match="must be tz-naive"): + to_datetime(1, unit="D", origin=datetime(2000, 1, 1, tzinfo=pytz.utc)) + + def test_incorrect_value_exception(self): + # GH47495 + msg = ( + "Unknown datetime string format, unable to parse: yesterday, at position 1" + ) + with pytest.raises(ValueError, match=msg): + to_datetime(["today", "yesterday"]) + + @pytest.mark.parametrize( + "format, warning", + [ + (None, 
UserWarning), + ("%Y-%m-%d %H:%M:%S", None), + ("%Y-%d-%m %H:%M:%S", None), + ], + ) + def test_to_datetime_out_of_bounds_with_format_arg(self, format, warning): + # see gh-23830 + msg = r"^Out of bounds nanosecond timestamp: 2417-10-10 00:00:00, at position 0" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime("2417-10-10 00:00:00", format=format) + + @pytest.mark.parametrize( + "arg, origin, expected_str", + [ + [200 * 365, "unix", "2169-11-13 00:00:00"], + [200 * 365, "1870-01-01", "2069-11-13 00:00:00"], + [300 * 365, "1870-01-01", "2169-10-20 00:00:00"], + ], + ) + def test_processing_order(self, arg, origin, expected_str): + # make sure we handle out-of-bounds *before* + # constructing the dates + + result = to_datetime(arg, unit="D", origin=origin) + expected = Timestamp(expected_str) + assert result == expected + + result = to_datetime(200 * 365, unit="D", origin="1870-01-01") + expected = Timestamp("2069-11-13 00:00:00") + assert result == expected + + result = to_datetime(300 * 365, unit="D", origin="1870-01-01") + expected = Timestamp("2169-10-20 00:00:00") + assert result == expected + + @pytest.mark.parametrize( + "offset,utc,exp", + [ + ["Z", True, "2019-01-01T00:00:00.000Z"], + ["Z", None, "2019-01-01T00:00:00.000Z"], + ["-01:00", True, "2019-01-01T01:00:00.000Z"], + ["-01:00", None, "2019-01-01T00:00:00.000-01:00"], + ], + ) + def test_arg_tz_ns_unit(self, offset, utc, exp): + # GH 25546 + arg = "2019-01-01T00:00:00.000" + offset + result = to_datetime([arg], unit="ns", utc=utc) + expected = to_datetime([exp]).as_unit("ns") + tm.assert_index_equal(result, expected) + + +class TestShouldCache: + @pytest.mark.parametrize( + "listlike,do_caching", + [ + ([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], False), + ([1, 1, 1, 1, 4, 5, 6, 7, 8, 9], True), + ], + ) + def test_should_cache(self, listlike, do_caching): + assert ( + tools.should_cache(listlike, check_count=len(listlike), unique_share=0.7) + == do_caching + ) + + @pytest.mark.parametrize( + "unique_share,check_count, err_message", + [ + (0.5, 11, r"check_count must be in next bounds: \[0; len\(arg\)\]"), + (10, 2, r"unique_share must be in next bounds: \(0; 1\)"), + ], + ) + def test_should_cache_errors(self, unique_share, check_count, err_message): + arg = [5] * 10 + + with pytest.raises(AssertionError, match=err_message): + tools.should_cache(arg, unique_share, check_count) + + @pytest.mark.parametrize( + "listlike", + [ + (deque([Timestamp("2010-06-02 09:30:00")] * 51)), + ([Timestamp("2010-06-02 09:30:00")] * 51), + (tuple([Timestamp("2010-06-02 09:30:00")] * 51)), + ], + ) + def test_no_slicing_errors_in_should_cache(self, listlike): + # GH#29403 + assert tools.should_cache(listlike) is True + + +def test_nullable_integer_to_datetime(): + # Test for #30050 + ser = Series([1, 2, None, 2**61, None]) + ser = ser.astype("Int64") + ser_copy = ser.copy() + + res = to_datetime(ser, unit="ns") + + expected = Series( + [ + np.datetime64("1970-01-01 00:00:00.000000001"), + np.datetime64("1970-01-01 00:00:00.000000002"), + np.datetime64("NaT"), + np.datetime64("2043-01-25 23:56:49.213693952"), + np.datetime64("NaT"), + ] + ) + tm.assert_series_equal(res, expected) + # Check that ser isn't mutated + tm.assert_series_equal(ser, ser_copy) + + +@pytest.mark.parametrize("klass", [np.array, list]) +def test_na_to_datetime(nulls_fixture, klass): + if isinstance(nulls_fixture, Decimal): + with pytest.raises(TypeError, match="not convertible to datetime"): + to_datetime(klass([nulls_fixture])) + + else: + result = 
to_datetime(klass([nulls_fixture])) + + assert result[0] is NaT + + +@pytest.mark.parametrize("errors", ["raise", "coerce", "ignore"]) +@pytest.mark.parametrize( + "args, format", + [ + (["03/24/2016", "03/25/2016", ""], "%m/%d/%Y"), + (["2016-03-24", "2016-03-25", ""], "%Y-%m-%d"), + ], + ids=["non-ISO8601", "ISO8601"], +) +def test_empty_string_datetime(errors, args, format): + # GH13044, GH50251 + td = Series(args) + + # coerce empty string to pd.NaT + result = to_datetime(td, format=format, errors=errors) + expected = Series(["2016-03-24", "2016-03-25", NaT], dtype="datetime64[ns]") + tm.assert_series_equal(expected, result) + + +def test_empty_string_datetime_coerce__unit(): + # GH13044 + # coerce empty string to pd.NaT + result = to_datetime([1, ""], unit="s", errors="coerce") + expected = DatetimeIndex(["1970-01-01 00:00:01", "NaT"], dtype="datetime64[ns]") + tm.assert_index_equal(expected, result) + + # verify that no exception is raised even when errors='raise' is set + result = to_datetime([1, ""], unit="s", errors="raise") + tm.assert_index_equal(expected, result) + + +@pytest.mark.parametrize("cache", [True, False]) +def test_to_datetime_monotonic_increasing_index(cache): + # GH28238 + cstart = start_caching_at + times = date_range(Timestamp("1980"), periods=cstart, freq="YS") + times = times.to_frame(index=False, name="DT").sample(n=cstart, random_state=1) + times.index = times.index.to_series().astype(float) / 1000 + result = to_datetime(times.iloc[:, 0], cache=cache) + expected = times.iloc[:, 0] + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "series_length", + [40, start_caching_at, (start_caching_at + 1), (start_caching_at + 5)], +) +def test_to_datetime_cache_coerce_50_lines_outofbounds(series_length): + # GH#45319 + ser = Series( + [datetime.fromisoformat("1446-04-12 00:00:00+00:00")] + + ([datetime.fromisoformat("1991-10-20 00:00:00+00:00")] * series_length), + dtype=object, + ) + result1 = to_datetime(ser, errors="coerce", utc=True) + + expected1 = Series( + [NaT] + ([Timestamp("1991-10-20 00:00:00+00:00")] * series_length) + ) + + tm.assert_series_equal(result1, expected1) + + result2 = to_datetime(ser, errors="ignore", utc=True) + + expected2 = Series( + [datetime.fromisoformat("1446-04-12 00:00:00+00:00")] + + ([datetime.fromisoformat("1991-10-20 00:00:00+00:00")] * series_length) + ) + + tm.assert_series_equal(result2, expected2) + + with pytest.raises(OutOfBoundsDatetime, match="Out of bounds nanosecond timestamp"): + to_datetime(ser, errors="raise", utc=True) + + +def test_to_datetime_format_f_parse_nanos(): + # GH 48767 + timestamp = "15/02/2020 02:03:04.123456789" + timestamp_format = "%d/%m/%Y %H:%M:%S.%f" + result = to_datetime(timestamp, format=timestamp_format) + expected = Timestamp( + year=2020, + month=2, + day=15, + hour=2, + minute=3, + second=4, + microsecond=123456, + nanosecond=789, + ) + assert result == expected + + +def test_to_datetime_mixed_iso8601(): + # https://github.com/pandas-dev/pandas/issues/50411 + result = to_datetime(["2020-01-01", "2020-01-01 05:00:00"], format="ISO8601") + expected = DatetimeIndex(["2020-01-01 00:00:00", "2020-01-01 05:00:00"]) + tm.assert_index_equal(result, expected) + + +def test_to_datetime_mixed_other(): + # https://github.com/pandas-dev/pandas/issues/50411 + result = to_datetime(["01/11/2000", "12 January 2000"], format="mixed") + expected = DatetimeIndex(["2000-01-11", "2000-01-12"]) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("exact", [True, False]) 
+@pytest.mark.parametrize("format", ["ISO8601", "mixed"]) +def test_to_datetime_mixed_or_iso_exact(exact, format): + msg = "Cannot use 'exact' when 'format' is 'mixed' or 'ISO8601'" + with pytest.raises(ValueError, match=msg): + to_datetime(["2020-01-01"], exact=exact, format=format) + + +def test_to_datetime_mixed_not_necessarily_iso8601_raise(): + # https://github.com/pandas-dev/pandas/issues/50411 + with pytest.raises( + ValueError, match="Time data 01-01-2000 is not ISO8601 format, at position 1" + ): + to_datetime(["2020-01-01", "01-01-2000"], format="ISO8601") + + +@pytest.mark.parametrize( + ("errors", "expected"), + [ + ("coerce", DatetimeIndex(["2020-01-01 00:00:00", NaT])), + ("ignore", Index(["2020-01-01", "01-01-2000"], dtype=object)), + ], +) +def test_to_datetime_mixed_not_necessarily_iso8601_coerce(errors, expected): + # https://github.com/pandas-dev/pandas/issues/50411 + result = to_datetime(["2020-01-01", "01-01-2000"], format="ISO8601", errors=errors) + tm.assert_index_equal(result, expected) + + +def test_ignoring_unknown_tz_deprecated(): + # GH#18702, GH#51476 + dtstr = "2014 Jan 9 05:15 FAKE" + msg = 'un-recognized timezone "FAKE". Dropping unrecognized timezones is deprecated' + with tm.assert_produces_warning(FutureWarning, match=msg): + res = Timestamp(dtstr) + assert res == Timestamp(dtstr[:-5]) + + with tm.assert_produces_warning(FutureWarning): + res = to_datetime(dtstr) + assert res == to_datetime(dtstr[:-5]) + with tm.assert_produces_warning(FutureWarning): + res = to_datetime([dtstr]) + tm.assert_index_equal(res, to_datetime([dtstr[:-5]])) + + +def test_from_numeric_arrow_dtype(any_numeric_ea_dtype): + # GH 52425 + pytest.importorskip("pyarrow") + ser = Series([1, 2], dtype=f"{any_numeric_ea_dtype.lower()}[pyarrow]") + result = to_datetime(ser) + expected = Series([1, 2], dtype="datetime64[ns]") + tm.assert_series_equal(result, expected) + + +def test_to_datetime_with_empty_str_utc_false_format_mixed(): + # GH 50887 + vals = ["2020-01-01 00:00+00:00", ""] + result = to_datetime(vals, format="mixed") + expected = Index([Timestamp("2020-01-01 00:00+00:00"), "NaT"], dtype="M8[ns, UTC]") + tm.assert_index_equal(result, expected) + + # Check that a couple of other similar paths work the same way + alt = to_datetime(vals) + tm.assert_index_equal(alt, expected) + alt2 = DatetimeIndex(vals) + tm.assert_index_equal(alt2, expected) + + +def test_to_datetime_with_empty_str_utc_false_offsets_and_format_mixed(): + # GH 50887 + msg = "parsing datetimes with mixed time zones will raise an error" + + with tm.assert_produces_warning(FutureWarning, match=msg): + to_datetime( + ["2020-01-01 00:00+00:00", "2020-01-01 00:00+02:00", ""], format="mixed" + ) + + +def test_to_datetime_mixed_tzs_mixed_types(): + # GH#55793, GH#55693 mismatched tzs but one is str and other is + # datetime object + ts = Timestamp("2016-01-02 03:04:05", tz="US/Pacific") + dtstr = "2023-10-30 15:06+01" + arr = [ts, dtstr] + + msg = ( + "Mixed timezones detected. 
pass utc=True in to_datetime or tz='UTC' " + "in DatetimeIndex to convert to a common timezone" + ) + with pytest.raises(ValueError, match=msg): + to_datetime(arr) + with pytest.raises(ValueError, match=msg): + to_datetime(arr, format="mixed") + with pytest.raises(ValueError, match=msg): + DatetimeIndex(arr) + + +def test_to_datetime_mixed_types_matching_tzs(): + # GH#55793 + dtstr = "2023-11-01 09:22:03-07:00" + ts = Timestamp(dtstr) + arr = [ts, dtstr] + res1 = to_datetime(arr) + res2 = to_datetime(arr[::-1])[::-1] + res3 = to_datetime(arr, format="mixed") + res4 = DatetimeIndex(arr) + + expected = DatetimeIndex([ts, ts]) + tm.assert_index_equal(res1, expected) + tm.assert_index_equal(res2, expected) + tm.assert_index_equal(res3, expected) + tm.assert_index_equal(res4, expected) + + +dtstr = "2020-01-01 00:00+00:00" +ts = Timestamp(dtstr) + + +@pytest.mark.filterwarnings("ignore:Could not infer format:UserWarning") +@pytest.mark.parametrize( + "aware_val", + [dtstr, Timestamp(dtstr)], + ids=lambda x: type(x).__name__, +) +@pytest.mark.parametrize( + "naive_val", + [dtstr[:-6], ts.tz_localize(None), ts.date(), ts.asm8, ts.value, float(ts.value)], + ids=lambda x: type(x).__name__, +) +@pytest.mark.parametrize("naive_first", [True, False]) +def test_to_datetime_mixed_awareness_mixed_types(aware_val, naive_val, naive_first): + # GH#55793, GH#55693 + # Empty string parses to NaT + vals = [aware_val, naive_val, ""] + + vec = vals + if naive_first: + # alas, the behavior is order-dependent, so we test both ways + vec = [naive_val, aware_val, ""] + + # both_strs-> paths that were previously already deprecated with warning + # issued in _array_to_datetime_object + both_strs = isinstance(aware_val, str) and isinstance(naive_val, str) + has_numeric = isinstance(naive_val, (int, float)) + + depr_msg = "In a future version of pandas, parsing datetimes with mixed time zones" + + first_non_null = next(x for x in vec if x != "") + # if first_non_null is a not a string, _guess_datetime_format_for_array + # doesn't guess a format so we don't go through array_strptime + if not isinstance(first_non_null, str): + # that case goes through array_strptime which has different behavior + msg = "Cannot mix tz-aware with tz-naive values" + if naive_first and isinstance(aware_val, Timestamp): + if isinstance(naive_val, Timestamp): + msg = "Tz-aware datetime.datetime cannot be converted to datetime64" + with pytest.raises(ValueError, match=msg): + to_datetime(vec) + else: + with pytest.raises(ValueError, match=msg): + to_datetime(vec) + + # No warning/error with utc=True + to_datetime(vec, utc=True) + + elif has_numeric and vec.index(aware_val) < vec.index(naive_val): + msg = "time data .* doesn't match format" + with pytest.raises(ValueError, match=msg): + to_datetime(vec) + with pytest.raises(ValueError, match=msg): + to_datetime(vec, utc=True) + + elif both_strs and vec.index(aware_val) < vec.index(naive_val): + msg = r"time data \"2020-01-01 00:00\" doesn't match format" + with pytest.raises(ValueError, match=msg): + to_datetime(vec) + with pytest.raises(ValueError, match=msg): + to_datetime(vec, utc=True) + + elif both_strs and vec.index(naive_val) < vec.index(aware_val): + msg = "unconverted data remains when parsing with format" + with pytest.raises(ValueError, match=msg): + to_datetime(vec) + with pytest.raises(ValueError, match=msg): + to_datetime(vec, utc=True) + + else: + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + to_datetime(vec) + + # No warning/error with utc=True + 
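+        # (utc=True localizes naive values and converts aware ones to UTC,
+        # so no mixed-timezone result can arise)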
to_datetime(vec, utc=True) + + if both_strs: + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + to_datetime(vec, format="mixed") + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + msg = "DatetimeIndex has mixed timezones" + with pytest.raises(TypeError, match=msg): + DatetimeIndex(vec) + else: + msg = "Cannot mix tz-aware with tz-naive values" + if naive_first and isinstance(aware_val, Timestamp): + if isinstance(naive_val, Timestamp): + msg = "Tz-aware datetime.datetime cannot be converted to datetime64" + with pytest.raises(ValueError, match=msg): + to_datetime(vec, format="mixed") + with pytest.raises(ValueError, match=msg): + DatetimeIndex(vec) + else: + with pytest.raises(ValueError, match=msg): + to_datetime(vec, format="mixed") + with pytest.raises(ValueError, match=msg): + DatetimeIndex(vec) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/tools/test_to_numeric.py b/venv/lib/python3.10/site-packages/pandas/tests/tools/test_to_numeric.py new file mode 100644 index 0000000000000000000000000000000000000000..c452382ec572bd24cf704c445f24f9af87947141 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/tools/test_to_numeric.py @@ -0,0 +1,978 @@ +import decimal + +import numpy as np +from numpy import iinfo +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + ArrowDtype, + DataFrame, + Index, + Series, + option_context, + to_numeric, +) +import pandas._testing as tm + + +@pytest.fixture(params=[None, "ignore", "raise", "coerce"]) +def errors(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def signed(request): + return request.param + + +@pytest.fixture(params=[lambda x: x, str], ids=["identity", "str"]) +def transform(request): + return request.param + + +@pytest.fixture(params=[47393996303418497800, 100000000000000000000]) +def large_val(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def multiple_elts(request): + return request.param + + +@pytest.fixture( + params=[ + (lambda x: Index(x, name="idx"), tm.assert_index_equal), + (lambda x: Series(x, name="ser"), tm.assert_series_equal), + (lambda x: np.array(Index(x).values), tm.assert_numpy_array_equal), + ] +) +def transform_assert_equal(request): + return request.param + + +@pytest.mark.parametrize( + "input_kwargs,result_kwargs", + [ + ({}, {"dtype": np.int64}), + ({"errors": "coerce", "downcast": "integer"}, {"dtype": np.int8}), + ], +) +def test_empty(input_kwargs, result_kwargs): + # see gh-16302 + ser = Series([], dtype=object) + result = to_numeric(ser, **input_kwargs) + + expected = Series([], **result_kwargs) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] +) +@pytest.mark.parametrize("last_val", ["7", 7]) +def test_series(last_val, infer_string): + with option_context("future.infer_string", infer_string): + ser = Series(["1", "-3.14", last_val]) + result = to_numeric(ser) + + expected = Series([1, -3.14, 7]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "data", + [ + [1, 3, 4, 5], + [1.0, 3.0, 4.0, 5.0], + # Bool is regarded as numeric. 
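+        # An all-bool Series round-trips unchanged through to_numeric,
+        # as asserted below.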
+ [True, False, True, True], + ], +) +def test_series_numeric(data): + ser = Series(data, index=list("ABCD"), name="EFG") + + result = to_numeric(ser) + tm.assert_series_equal(result, ser) + + +@pytest.mark.parametrize( + "data,msg", + [ + ([1, -3.14, "apple"], 'Unable to parse string "apple" at position 2'), + ( + ["orange", 1, -3.14, "apple"], + 'Unable to parse string "orange" at position 0', + ), + ], +) +def test_error(data, msg): + ser = Series(data) + + with pytest.raises(ValueError, match=msg): + to_numeric(ser, errors="raise") + + +@pytest.mark.parametrize( + "errors,exp_data", [("ignore", [1, -3.14, "apple"]), ("coerce", [1, -3.14, np.nan])] +) +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_ignore_error(errors, exp_data): + ser = Series([1, -3.14, "apple"]) + result = to_numeric(ser, errors=errors) + + expected = Series(exp_data) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "errors,exp", + [ + ("raise", 'Unable to parse string "apple" at position 2'), + ("ignore", [True, False, "apple"]), + # Coerces to float. + ("coerce", [1.0, 0.0, np.nan]), + ], +) +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_bool_handling(errors, exp): + ser = Series([True, False, "apple"]) + + if isinstance(exp, str): + with pytest.raises(ValueError, match=exp): + to_numeric(ser, errors=errors) + else: + result = to_numeric(ser, errors=errors) + expected = Series(exp) + + tm.assert_series_equal(result, expected) + + +def test_list(): + ser = ["1", "-3.14", "7"] + res = to_numeric(ser) + + expected = np.array([1, -3.14, 7]) + tm.assert_numpy_array_equal(res, expected) + + +@pytest.mark.parametrize( + "data,arr_kwargs", + [ + ([1, 3, 4, 5], {"dtype": np.int64}), + ([1.0, 3.0, 4.0, 5.0], {}), + # Boolean is regarded as numeric. + ([True, False, True, True], {}), + ], +) +def test_list_numeric(data, arr_kwargs): + result = to_numeric(data) + expected = np.array(data, **arr_kwargs) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("kwargs", [{"dtype": "O"}, {}]) +def test_numeric(kwargs): + data = [1, -3.14, 7] + + ser = Series(data, **kwargs) + result = to_numeric(ser) + + expected = Series(data) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "columns", + [ + # One column. + "a", + # Multiple columns. 
+ ["a", "b"], + ], +) +def test_numeric_df_columns(columns): + # see gh-14827 + df = DataFrame( + { + "a": [1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), "0.1"], + "b": [1.0, 2.0, 3.0, 4.0], + } + ) + + expected = DataFrame({"a": [1.2, 3.14, np.inf, 0.1], "b": [1.0, 2.0, 3.0, 4.0]}) + + df_copy = df.copy() + df_copy[columns] = df_copy[columns].apply(to_numeric) + + tm.assert_frame_equal(df_copy, expected) + + +@pytest.mark.parametrize( + "data,exp_data", + [ + ( + [[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1], + [[3.14, 1.0], 1.6, 0.1], + ), + ([np.array([decimal.Decimal(3.14), 1.0]), 0.1], [[3.14, 1.0], 0.1]), + ], +) +def test_numeric_embedded_arr_likes(data, exp_data): + # Test to_numeric with embedded lists and arrays + df = DataFrame({"a": data}) + df["a"] = df["a"].apply(to_numeric) + + expected = DataFrame({"a": exp_data}) + tm.assert_frame_equal(df, expected) + + +def test_all_nan(): + ser = Series(["a", "b", "c"]) + result = to_numeric(ser, errors="coerce") + + expected = Series([np.nan, np.nan, np.nan]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_type_check(errors): + # see gh-11776 + df = DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]}) + kwargs = {"errors": errors} if errors is not None else {} + with pytest.raises(TypeError, match="1-d array"): + to_numeric(df, **kwargs) + + +@pytest.mark.parametrize("val", [1, 1.1, 20001]) +def test_scalar(val, signed, transform): + val = -val if signed else val + assert to_numeric(transform(val)) == float(val) + + +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_really_large_scalar(large_val, signed, transform, errors): + # see gh-24910 + kwargs = {"errors": errors} if errors is not None else {} + val = -large_val if signed else large_val + + val = transform(val) + val_is_string = isinstance(val, str) + + if val_is_string and errors in (None, "raise"): + msg = "Integer out of range. at position 0" + with pytest.raises(ValueError, match=msg): + to_numeric(val, **kwargs) + else: + expected = float(val) if (errors == "coerce" and val_is_string) else val + tm.assert_almost_equal(to_numeric(val, **kwargs), expected) + + +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_really_large_in_arr(large_val, signed, transform, multiple_elts, errors): + # see gh-24910 + kwargs = {"errors": errors} if errors is not None else {} + val = -large_val if signed else large_val + val = transform(val) + + extra_elt = "string" + arr = [val] + multiple_elts * [extra_elt] + + val_is_string = isinstance(val, str) + coercing = errors == "coerce" + + if errors in (None, "raise") and (val_is_string or multiple_elts): + if val_is_string: + msg = "Integer out of range. 
at position 0" + else: + msg = 'Unable to parse string "string" at position 1' + + with pytest.raises(ValueError, match=msg): + to_numeric(arr, **kwargs) + else: + result = to_numeric(arr, **kwargs) + + exp_val = float(val) if (coercing and val_is_string) else val + expected = [exp_val] + + if multiple_elts: + if coercing: + expected.append(np.nan) + exp_dtype = float + else: + expected.append(extra_elt) + exp_dtype = object + else: + exp_dtype = float if isinstance(exp_val, (int, float)) else object + + tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype)) + + +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_really_large_in_arr_consistent(large_val, signed, multiple_elts, errors): + # see gh-24910 + # + # Even if we discover that we have to hold float, does not mean + # we should be lenient on subsequent elements that fail to be integer. + kwargs = {"errors": errors} if errors is not None else {} + arr = [str(-large_val if signed else large_val)] + + if multiple_elts: + arr.insert(0, large_val) + + if errors in (None, "raise"): + index = int(multiple_elts) + msg = f"Integer out of range. at position {index}" + + with pytest.raises(ValueError, match=msg): + to_numeric(arr, **kwargs) + else: + result = to_numeric(arr, **kwargs) + + if errors == "coerce": + expected = [float(i) for i in arr] + exp_dtype = float + else: + expected = arr + exp_dtype = object + + tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype)) + + +@pytest.mark.parametrize( + "errors,checker", + [ + ("raise", 'Unable to parse string "fail" at position 0'), + ("ignore", lambda x: x == "fail"), + ("coerce", lambda x: np.isnan(x)), + ], +) +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_scalar_fail(errors, checker): + scalar = "fail" + + if isinstance(checker, str): + with pytest.raises(ValueError, match=checker): + to_numeric(scalar, errors=errors) + else: + assert checker(to_numeric(scalar, errors=errors)) + + +@pytest.mark.parametrize("data", [[1, 2, 3], [1.0, np.nan, 3, np.nan]]) +def test_numeric_dtypes(data, transform_assert_equal): + transform, assert_equal = transform_assert_equal + data = transform(data) + + result = to_numeric(data) + assert_equal(result, data) + + +@pytest.mark.parametrize( + "data,exp", + [ + (["1", "2", "3"], np.array([1, 2, 3], dtype="int64")), + (["1.5", "2.7", "3.4"], np.array([1.5, 2.7, 3.4])), + ], +) +def test_str(data, exp, transform_assert_equal): + transform, assert_equal = transform_assert_equal + result = to_numeric(transform(data)) + + expected = transform(exp) + assert_equal(result, expected) + + +def test_datetime_like(tz_naive_fixture, transform_assert_equal): + transform, assert_equal = transform_assert_equal + idx = pd.date_range("20130101", periods=3, tz=tz_naive_fixture) + + result = to_numeric(transform(idx)) + expected = transform(idx.asi8) + assert_equal(result, expected) + + +def test_timedelta(transform_assert_equal): + transform, assert_equal = transform_assert_equal + idx = pd.timedelta_range("1 days", periods=3, freq="D") + + result = to_numeric(transform(idx)) + expected = transform(idx.asi8) + assert_equal(result, expected) + + +def test_period(request, transform_assert_equal): + transform, assert_equal = transform_assert_equal + + idx = pd.period_range("2011-01", periods=3, freq="M", name="") + inp = transform(idx) + + if not isinstance(inp, Index): + request.applymarker( + pytest.mark.xfail(reason="Missing PeriodDtype support in to_numeric") + ) + 
result = to_numeric(inp) + expected = transform(idx.asi8) + assert_equal(result, expected) + + +@pytest.mark.parametrize( + "errors,expected", + [ + ("raise", "Invalid object type at position 0"), + ("ignore", Series([[10.0, 2], 1.0, "apple"])), + ("coerce", Series([np.nan, 1.0, np.nan])), + ], +) +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_non_hashable(errors, expected): + # see gh-13324 + ser = Series([[10.0, 2], 1.0, "apple"]) + + if isinstance(expected, str): + with pytest.raises(TypeError, match=expected): + to_numeric(ser, errors=errors) + else: + result = to_numeric(ser, errors=errors) + tm.assert_series_equal(result, expected) + + +def test_downcast_invalid_cast(): + # see gh-13352 + data = ["1", 2, 3] + invalid_downcast = "unsigned-integer" + msg = "invalid downcasting method provided" + + with pytest.raises(ValueError, match=msg): + to_numeric(data, downcast=invalid_downcast) + + +def test_errors_invalid_value(): + # see gh-26466 + data = ["1", 2, 3] + invalid_error_value = "invalid" + msg = "invalid error value specified" + + with pytest.raises(ValueError, match=msg): + to_numeric(data, errors=invalid_error_value) + + +@pytest.mark.parametrize( + "data", + [ + ["1", 2, 3], + [1, 2, 3], + np.array(["1970-01-02", "1970-01-03", "1970-01-04"], dtype="datetime64[D]"), + ], +) +@pytest.mark.parametrize( + "kwargs,exp_dtype", + [ + # Basic function tests. + ({}, np.int64), + ({"downcast": None}, np.int64), + # Support below np.float32 is rare and far between. + ({"downcast": "float"}, np.dtype(np.float32).char), + # Basic dtype support. + ({"downcast": "unsigned"}, np.dtype(np.typecodes["UnsignedInteger"][0])), + ], +) +def test_downcast_basic(data, kwargs, exp_dtype): + # see gh-13352 + result = to_numeric(data, **kwargs) + expected = np.array([1, 2, 3], dtype=exp_dtype) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("signed_downcast", ["integer", "signed"]) +@pytest.mark.parametrize( + "data", + [ + ["1", 2, 3], + [1, 2, 3], + np.array(["1970-01-02", "1970-01-03", "1970-01-04"], dtype="datetime64[D]"), + ], +) +def test_signed_downcast(data, signed_downcast): + # see gh-13352 + smallest_int_dtype = np.dtype(np.typecodes["Integer"][0]) + expected = np.array([1, 2, 3], dtype=smallest_int_dtype) + + res = to_numeric(data, downcast=signed_downcast) + tm.assert_numpy_array_equal(res, expected) + + +def test_ignore_downcast_invalid_data(): + # If we can't successfully cast the given + # data to a numeric dtype, do not bother + # with the downcast parameter. + data = ["foo", 2, 3] + expected = np.array(data, dtype=object) + + msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = to_numeric(data, errors="ignore", downcast="unsigned") + tm.assert_numpy_array_equal(res, expected) + + +def test_ignore_downcast_neg_to_unsigned(): + # Cannot cast to an unsigned integer + # because we have a negative number. 
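+    # The requested unsigned downcast is skipped silently rather than
+    # raising, so the result keeps the default int64 dtype.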
+ data = ["-1", 2, 3] + expected = np.array([-1, 2, 3], dtype=np.int64) + + res = to_numeric(data, downcast="unsigned") + tm.assert_numpy_array_equal(res, expected) + + +# Warning in 32 bit platforms +@pytest.mark.filterwarnings("ignore:invalid value encountered in cast:RuntimeWarning") +@pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"]) +@pytest.mark.parametrize( + "data,expected", + [ + (["1.1", 2, 3], np.array([1.1, 2, 3], dtype=np.float64)), + ( + [10000.0, 20000, 3000, 40000.36, 50000, 50000.00], + np.array( + [10000.0, 20000, 3000, 40000.36, 50000, 50000.00], dtype=np.float64 + ), + ), + ], +) +def test_ignore_downcast_cannot_convert_float(data, expected, downcast): + # Cannot cast to an integer (signed or unsigned) + # because we have a float number. + res = to_numeric(data, downcast=downcast) + tm.assert_numpy_array_equal(res, expected) + + +@pytest.mark.parametrize( + "downcast,expected_dtype", + [("integer", np.int16), ("signed", np.int16), ("unsigned", np.uint16)], +) +def test_downcast_not8bit(downcast, expected_dtype): + # the smallest integer dtype need not be np.(u)int8 + data = ["256", 257, 258] + + expected = np.array([256, 257, 258], dtype=expected_dtype) + res = to_numeric(data, downcast=downcast) + tm.assert_numpy_array_equal(res, expected) + + +@pytest.mark.parametrize( + "dtype,downcast,min_max", + [ + ("int8", "integer", [iinfo(np.int8).min, iinfo(np.int8).max]), + ("int16", "integer", [iinfo(np.int16).min, iinfo(np.int16).max]), + ("int32", "integer", [iinfo(np.int32).min, iinfo(np.int32).max]), + ("int64", "integer", [iinfo(np.int64).min, iinfo(np.int64).max]), + ("uint8", "unsigned", [iinfo(np.uint8).min, iinfo(np.uint8).max]), + ("uint16", "unsigned", [iinfo(np.uint16).min, iinfo(np.uint16).max]), + ("uint32", "unsigned", [iinfo(np.uint32).min, iinfo(np.uint32).max]), + ("uint64", "unsigned", [iinfo(np.uint64).min, iinfo(np.uint64).max]), + ("int16", "integer", [iinfo(np.int8).min, iinfo(np.int8).max + 1]), + ("int32", "integer", [iinfo(np.int16).min, iinfo(np.int16).max + 1]), + ("int64", "integer", [iinfo(np.int32).min, iinfo(np.int32).max + 1]), + ("int16", "integer", [iinfo(np.int8).min - 1, iinfo(np.int16).max]), + ("int32", "integer", [iinfo(np.int16).min - 1, iinfo(np.int32).max]), + ("int64", "integer", [iinfo(np.int32).min - 1, iinfo(np.int64).max]), + ("uint16", "unsigned", [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]), + ("uint32", "unsigned", [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]), + ("uint64", "unsigned", [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]), + ], +) +def test_downcast_limits(dtype, downcast, min_max): + # see gh-14404: test the limits of each downcast. 
+ series = to_numeric(Series(min_max), downcast=downcast) + assert series.dtype == dtype + + +def test_downcast_float64_to_float32(): + # GH-43693: Check float64 preservation when >= 16,777,217 + series = Series([16777217.0, np.finfo(np.float64).max, np.nan], dtype=np.float64) + result = to_numeric(series, downcast="float") + + assert series.dtype == result.dtype + + +@pytest.mark.parametrize( + "ser,expected", + [ + ( + Series([0, 9223372036854775808]), + Series([0, 9223372036854775808], dtype=np.uint64), + ) + ], +) +def test_downcast_uint64(ser, expected): + # see gh-14422: + # BUG: to_numeric doesn't work uint64 numbers + + result = to_numeric(ser, downcast="unsigned") + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "data,exp_data", + [ + ( + [200, 300, "", "NaN", 30000000000000000000], + [200, 300, np.nan, np.nan, 30000000000000000000], + ), + ( + ["12345678901234567890", "1234567890", "ITEM"], + [12345678901234567890, 1234567890, np.nan], + ), + ], +) +def test_coerce_uint64_conflict(data, exp_data): + # see gh-17007 and gh-17125 + # + # Still returns float despite the uint64-nan conflict, + # which would normally force the casting to object. + result = to_numeric(Series(data), errors="coerce") + expected = Series(exp_data, dtype=float) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "errors,exp", + [ + ("ignore", Series(["12345678901234567890", "1234567890", "ITEM"])), + ("raise", "Unable to parse string"), + ], +) +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_non_coerce_uint64_conflict(errors, exp): + # see gh-17007 and gh-17125 + # + # For completeness. + ser = Series(["12345678901234567890", "1234567890", "ITEM"]) + + if isinstance(exp, str): + with pytest.raises(ValueError, match=exp): + to_numeric(ser, errors=errors) + else: + result = to_numeric(ser, errors=errors) + tm.assert_series_equal(result, ser) + + +@pytest.mark.parametrize("dc1", ["integer", "float", "unsigned"]) +@pytest.mark.parametrize("dc2", ["integer", "float", "unsigned"]) +def test_downcast_empty(dc1, dc2): + # GH32493 + + tm.assert_numpy_array_equal( + to_numeric([], downcast=dc1), + to_numeric([], downcast=dc2), + check_dtype=False, + ) + + +def test_failure_to_convert_uint64_string_to_NaN(): + # GH 32394 + result = to_numeric("uint64", errors="coerce") + assert np.isnan(result) + + ser = Series([32, 64, np.nan]) + result = to_numeric(Series(["32", "64", "uint64"]), errors="coerce") + tm.assert_series_equal(result, ser) + + +@pytest.mark.parametrize( + "strrep", + [ + "243.164", + "245.968", + "249.585", + "259.745", + "265.742", + "272.567", + "279.196", + "280.366", + "275.034", + "271.351", + "272.889", + "270.627", + "280.828", + "290.383", + "308.153", + "319.945", + "336.0", + "344.09", + "351.385", + "356.178", + "359.82", + "361.03", + "367.701", + "380.812", + "387.98", + "391.749", + "391.171", + "385.97", + "385.345", + "386.121", + "390.996", + "399.734", + "413.073", + "421.532", + "430.221", + "437.092", + "439.746", + "446.01", + "451.191", + "460.463", + "469.779", + "472.025", + "479.49", + "474.864", + "467.54", + "471.978", + ], +) +def test_precision_float_conversion(strrep): + # GH 31364 + result = to_numeric(strrep) + + assert result == float(strrep) + + +@pytest.mark.parametrize( + "values, expected", + [ + (["1", "2", None], Series([1, 2, np.nan], dtype="Int64")), + (["1", "2", "3"], Series([1, 2, 3], dtype="Int64")), + (["1", "2", 3], Series([1, 2, 3], dtype="Int64")), + (["1", 
"2", 3.5], Series([1, 2, 3.5], dtype="Float64")), + (["1", None, 3.5], Series([1, np.nan, 3.5], dtype="Float64")), + (["1", "2", "3.5"], Series([1, 2, 3.5], dtype="Float64")), + ], +) +def test_to_numeric_from_nullable_string(values, nullable_string_dtype, expected): + # https://github.com/pandas-dev/pandas/issues/37262 + s = Series(values, dtype=nullable_string_dtype) + result = to_numeric(s) + tm.assert_series_equal(result, expected) + + +def test_to_numeric_from_nullable_string_coerce(nullable_string_dtype): + # GH#52146 + values = ["a", "1"] + ser = Series(values, dtype=nullable_string_dtype) + result = to_numeric(ser, errors="coerce") + expected = Series([pd.NA, 1], dtype="Int64") + tm.assert_series_equal(result, expected) + + +def test_to_numeric_from_nullable_string_ignore(nullable_string_dtype): + # GH#52146 + values = ["a", "1"] + ser = Series(values, dtype=nullable_string_dtype) + expected = ser.copy() + msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_numeric(ser, errors="ignore") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "data, input_dtype, downcast, expected_dtype", + ( + ([1, 1], "Int64", "integer", "Int8"), + ([1.0, pd.NA], "Float64", "integer", "Int8"), + ([1.0, 1.1], "Float64", "integer", "Float64"), + ([1, pd.NA], "Int64", "integer", "Int8"), + ([450, 300], "Int64", "integer", "Int16"), + ([1, 1], "Float64", "integer", "Int8"), + ([np.iinfo(np.int64).max - 1, 1], "Int64", "integer", "Int64"), + ([1, 1], "Int64", "signed", "Int8"), + ([1.0, 1.0], "Float32", "signed", "Int8"), + ([1.0, 1.1], "Float64", "signed", "Float64"), + ([1, pd.NA], "Int64", "signed", "Int8"), + ([450, -300], "Int64", "signed", "Int16"), + ([np.iinfo(np.uint64).max - 1, 1], "UInt64", "signed", "UInt64"), + ([1, 1], "Int64", "unsigned", "UInt8"), + ([1.0, 1.0], "Float32", "unsigned", "UInt8"), + ([1.0, 1.1], "Float64", "unsigned", "Float64"), + ([1, pd.NA], "Int64", "unsigned", "UInt8"), + ([450, -300], "Int64", "unsigned", "Int64"), + ([-1, -1], "Int32", "unsigned", "Int32"), + ([1, 1], "Float64", "float", "Float32"), + ([1, 1.1], "Float64", "float", "Float32"), + ([1, 1], "Float32", "float", "Float32"), + ([1, 1.1], "Float32", "float", "Float32"), + ), +) +def test_downcast_nullable_numeric(data, input_dtype, downcast, expected_dtype): + arr = pd.array(data, dtype=input_dtype) + result = to_numeric(arr, downcast=downcast) + expected = pd.array(data, dtype=expected_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_downcast_nullable_mask_is_copied(): + # GH38974 + + arr = pd.array([1, 2, pd.NA], dtype="Int64") + + result = to_numeric(arr, downcast="integer") + expected = pd.array([1, 2, pd.NA], dtype="Int8") + tm.assert_extension_array_equal(result, expected) + + arr[1] = pd.NA # should not modify result + tm.assert_extension_array_equal(result, expected) + + +def test_to_numeric_scientific_notation(): + # GH 15898 + result = to_numeric("1.7e+308") + expected = np.float64(1.7e308) + assert result == expected + + +@pytest.mark.parametrize("val", [9876543210.0, 2.0**128]) +def test_to_numeric_large_float_not_downcast_to_float_32(val): + # GH 19729 + expected = Series([val]) + result = to_numeric(expected, downcast="float") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "val, dtype", [(1, "Int64"), (1.5, "Float64"), (True, "boolean")] +) +def test_to_numeric_dtype_backend(val, dtype): + # GH#50505 + ser = Series([val], dtype=object) + result = 
to_numeric(ser, dtype_backend="numpy_nullable") + expected = Series([val], dtype=dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "val, dtype", + [ + (1, "Int64"), + (1.5, "Float64"), + (True, "boolean"), + (1, "int64[pyarrow]"), + (1.5, "float64[pyarrow]"), + (True, "bool[pyarrow]"), + ], +) +def test_to_numeric_dtype_backend_na(val, dtype): + # GH#50505 + if "pyarrow" in dtype: + pytest.importorskip("pyarrow") + dtype_backend = "pyarrow" + else: + dtype_backend = "numpy_nullable" + ser = Series([val, None], dtype=object) + result = to_numeric(ser, dtype_backend=dtype_backend) + expected = Series([val, pd.NA], dtype=dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "val, dtype, downcast", + [ + (1, "Int8", "integer"), + (1.5, "Float32", "float"), + (1, "Int8", "signed"), + (1, "int8[pyarrow]", "integer"), + (1.5, "float[pyarrow]", "float"), + (1, "int8[pyarrow]", "signed"), + ], +) +def test_to_numeric_dtype_backend_downcasting(val, dtype, downcast): + # GH#50505 + if "pyarrow" in dtype: + pytest.importorskip("pyarrow") + dtype_backend = "pyarrow" + else: + dtype_backend = "numpy_nullable" + ser = Series([val, None], dtype=object) + result = to_numeric(ser, dtype_backend=dtype_backend, downcast=downcast) + expected = Series([val, pd.NA], dtype=dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "smaller, dtype_backend", + [["UInt8", "numpy_nullable"], ["uint8[pyarrow]", "pyarrow"]], +) +def test_to_numeric_dtype_backend_downcasting_uint(smaller, dtype_backend): + # GH#50505 + if dtype_backend == "pyarrow": + pytest.importorskip("pyarrow") + ser = Series([1, pd.NA], dtype="UInt64") + result = to_numeric(ser, dtype_backend=dtype_backend, downcast="unsigned") + expected = Series([1, pd.NA], dtype=smaller) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype", + [ + "Int64", + "UInt64", + "Float64", + "boolean", + "int64[pyarrow]", + "uint64[pyarrow]", + "float64[pyarrow]", + "bool[pyarrow]", + ], +) +def test_to_numeric_dtype_backend_already_nullable(dtype): + # GH#50505 + if "pyarrow" in dtype: + pytest.importorskip("pyarrow") + ser = Series([1, pd.NA], dtype=dtype) + result = to_numeric(ser, dtype_backend="numpy_nullable") + expected = Series([1, pd.NA], dtype=dtype) + tm.assert_series_equal(result, expected) + + +def test_to_numeric_dtype_backend_error(dtype_backend): + # GH#50505 + ser = Series(["a", "b", ""]) + expected = ser.copy() + with pytest.raises(ValueError, match="Unable to parse string"): + to_numeric(ser, dtype_backend=dtype_backend) + + msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_numeric(ser, dtype_backend=dtype_backend, errors="ignore") + tm.assert_series_equal(result, expected) + + result = to_numeric(ser, dtype_backend=dtype_backend, errors="coerce") + if dtype_backend == "pyarrow": + dtype = "double[pyarrow]" + else: + dtype = "Float64" + expected = Series([np.nan, np.nan, np.nan], dtype=dtype) + tm.assert_series_equal(result, expected) + + +def test_invalid_dtype_backend(): + ser = Series([1, 2, 3]) + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." 
+ ) + with pytest.raises(ValueError, match=msg): + to_numeric(ser, dtype_backend="numpy") + + +def test_coerce_pyarrow_backend(): + # GH 52588 + pa = pytest.importorskip("pyarrow") + ser = Series(list("12x"), dtype=ArrowDtype(pa.string())) + result = to_numeric(ser, errors="coerce", dtype_backend="pyarrow") + expected = Series([1, 2, None], dtype=ArrowDtype(pa.int64())) + tm.assert_series_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/tools/test_to_time.py b/venv/lib/python3.10/site-packages/pandas/tests/tools/test_to_time.py new file mode 100644 index 0000000000000000000000000000000000000000..b673bd9c2ec7168971ae0ed802336e4f03ff63a7 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/tools/test_to_time.py @@ -0,0 +1,72 @@ +from datetime import time +import locale + +import numpy as np +import pytest + +from pandas.compat import PY311 + +from pandas import Series +import pandas._testing as tm +from pandas.core.tools.times import to_time + +# The tests marked with this are locale-dependent. +# They pass, except when the machine locale is zh_CN or it_IT. +fails_on_non_english = pytest.mark.xfail( + locale.getlocale()[0] in ("zh_CN", "it_IT"), + reason="fail on a CI build with LC_ALL=zh_CN.utf8/it_IT.utf8", + strict=False, +) + + +class TestToTime: + @pytest.mark.parametrize( + "time_string", + [ + "14:15", + "1415", + pytest.param("2:15pm", marks=fails_on_non_english), + pytest.param("0215pm", marks=fails_on_non_english), + "14:15:00", + "141500", + pytest.param("2:15:00pm", marks=fails_on_non_english), + pytest.param("021500pm", marks=fails_on_non_english), + time(14, 15), + ], + ) + def test_parsers_time(self, time_string): + # GH#11818 + assert to_time(time_string) == time(14, 15) + + def test_odd_format(self): + new_string = "14.15" + msg = r"Cannot convert arg \['14\.15'\] to a time" + if not PY311: + with pytest.raises(ValueError, match=msg): + to_time(new_string) + assert to_time(new_string, format="%H.%M") == time(14, 15) + + def test_arraylike(self): + arg = ["14:15", "20:20"] + expected_arr = [time(14, 15), time(20, 20)] + assert to_time(arg) == expected_arr + assert to_time(arg, format="%H:%M") == expected_arr + assert to_time(arg, infer_time_format=True) == expected_arr + assert to_time(arg, format="%I:%M%p", errors="coerce") == [None, None] + + msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = to_time(arg, format="%I:%M%p", errors="ignore") + tm.assert_numpy_array_equal(res, np.array(arg, dtype=np.object_)) + + msg = "Cannot convert.+to a time with given format" + with pytest.raises(ValueError, match=msg): + to_time(arg, format="%I:%M%p", errors="raise") + + tm.assert_series_equal( + to_time(Series(arg, name="test")), Series(expected_arr, name="test") + ) + + res = to_time(np.array(arg)) + assert isinstance(res, list) + assert res == expected_arr diff --git a/venv/lib/python3.10/site-packages/pandas/tests/tools/test_to_timedelta.py b/venv/lib/python3.10/site-packages/pandas/tests/tools/test_to_timedelta.py new file mode 100644 index 0000000000000000000000000000000000000000..b67694f1c58c7016221ed629358e8867b2a1534a --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/tools/test_to_timedelta.py @@ -0,0 +1,340 @@ +from datetime import ( + time, + timedelta, +) + +import numpy as np +import pytest + +from pandas.compat import IS64 +from pandas.errors import OutOfBoundsTimedelta + +import pandas as pd +from pandas import ( + Series, + TimedeltaIndex, + isna, + 
to_timedelta, +) +import pandas._testing as tm +from pandas.core.arrays import TimedeltaArray + + +class TestTimedeltas: + def test_to_timedelta_dt64_raises(self): + # Passing datetime64-dtype data to TimedeltaIndex is no longer + # supported GH#29794 + msg = r"dtype datetime64\[ns\] cannot be converted to timedelta64\[ns\]" + + ser = Series([pd.NaT]) + with pytest.raises(TypeError, match=msg): + to_timedelta(ser) + with pytest.raises(TypeError, match=msg): + ser.to_frame().apply(to_timedelta) + + @pytest.mark.parametrize("readonly", [True, False]) + def test_to_timedelta_readonly(self, readonly): + # GH#34857 + arr = np.array([], dtype=object) + if readonly: + arr.setflags(write=False) + result = to_timedelta(arr) + expected = to_timedelta([]) + tm.assert_index_equal(result, expected) + + def test_to_timedelta_null(self): + result = to_timedelta(["", ""]) + assert isna(result).all() + + def test_to_timedelta_same_np_timedelta64(self): + # pass thru + result = to_timedelta(np.array([np.timedelta64(1, "s")])) + expected = pd.Index(np.array([np.timedelta64(1, "s")])) + tm.assert_index_equal(result, expected) + + def test_to_timedelta_series(self): + # Series + expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)]) + result = to_timedelta(Series(["1d", "1days 00:00:01"])) + tm.assert_series_equal(result, expected) + + def test_to_timedelta_units(self): + # with units + result = TimedeltaIndex( + [np.timedelta64(0, "ns"), np.timedelta64(10, "s").astype("m8[ns]")] + ) + expected = to_timedelta([0, 10], unit="s") + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "dtype, unit", + [ + ["int64", "s"], + ["int64", "m"], + ["int64", "h"], + ["timedelta64[s]", "s"], + ["timedelta64[D]", "D"], + ], + ) + def test_to_timedelta_units_dtypes(self, dtype, unit): + # arrays of various dtypes + arr = np.array([1] * 5, dtype=dtype) + result = to_timedelta(arr, unit=unit) + exp_dtype = "m8[ns]" if dtype == "int64" else "m8[s]" + expected = TimedeltaIndex([np.timedelta64(1, unit)] * 5, dtype=exp_dtype) + tm.assert_index_equal(result, expected) + + def test_to_timedelta_oob_non_nano(self): + arr = np.array([pd.NaT._value + 1], dtype="timedelta64[m]") + + msg = ( + "Cannot convert -9223372036854775807 minutes to " + r"timedelta64\[s\] without overflow" + ) + with pytest.raises(OutOfBoundsTimedelta, match=msg): + to_timedelta(arr) + + with pytest.raises(OutOfBoundsTimedelta, match=msg): + TimedeltaIndex(arr) + + with pytest.raises(OutOfBoundsTimedelta, match=msg): + TimedeltaArray._from_sequence(arr, dtype="m8[s]") + + @pytest.mark.parametrize( + "arg", [np.arange(10).reshape(2, 5), pd.DataFrame(np.arange(10).reshape(2, 5))] + ) + @pytest.mark.parametrize("errors", ["ignore", "raise", "coerce"]) + @pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") + def test_to_timedelta_dataframe(self, arg, errors): + # GH 11776 + with pytest.raises(TypeError, match="1-d array"): + to_timedelta(arg, errors=errors) + + def test_to_timedelta_invalid_errors(self): + # bad value for errors parameter + msg = "errors must be one of" + with pytest.raises(ValueError, match=msg): + to_timedelta(["foo"], errors="never") + + @pytest.mark.parametrize("arg", [[1, 2], 1]) + def test_to_timedelta_invalid_unit(self, arg): + # these will error + msg = "invalid unit abbreviation: foo" + with pytest.raises(ValueError, match=msg): + to_timedelta(arg, unit="foo") + + def test_to_timedelta_time(self): + # time not supported ATM + msg = ( + "Value must be Timedelta, string, integer, 
float, timedelta or convertible" + ) + with pytest.raises(ValueError, match=msg): + to_timedelta(time(second=1)) + assert to_timedelta(time(second=1), errors="coerce") is pd.NaT + + def test_to_timedelta_bad_value(self): + msg = "Could not convert 'foo' to NumPy timedelta" + with pytest.raises(ValueError, match=msg): + to_timedelta(["foo", "bar"]) + + def test_to_timedelta_bad_value_coerce(self): + tm.assert_index_equal( + TimedeltaIndex([pd.NaT, pd.NaT]), + to_timedelta(["foo", "bar"], errors="coerce"), + ) + + tm.assert_index_equal( + TimedeltaIndex(["1 day", pd.NaT, "1 min"]), + to_timedelta(["1 day", "bar", "1 min"], errors="coerce"), + ) + + def test_to_timedelta_invalid_errors_ignore(self): + # gh-13613: these should not error because errors='ignore' + msg = "errors='ignore' is deprecated" + invalid_data = "apple" + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_timedelta(invalid_data, errors="ignore") + assert invalid_data == result + + invalid_data = ["apple", "1 days"] + expected = np.array(invalid_data, dtype=object) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_timedelta(invalid_data, errors="ignore") + tm.assert_numpy_array_equal(expected, result) + + invalid_data = pd.Index(["apple", "1 days"]) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_timedelta(invalid_data, errors="ignore") + tm.assert_index_equal(invalid_data, result) + + invalid_data = Series(["apple", "1 days"]) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_timedelta(invalid_data, errors="ignore") + tm.assert_series_equal(invalid_data, result) + + @pytest.mark.parametrize( + "val, errors", + [ + ("1M", True), + ("1 M", True), + ("1Y", True), + ("1 Y", True), + ("1y", True), + ("1 y", True), + ("1m", False), + ("1 m", False), + ("1 day", False), + ("2day", False), + ], + ) + def test_unambiguous_timedelta_values(self, val, errors): + # GH36666 Deprecate use of strings denoting units with 'M', 'Y', 'm' or 'y' + # in pd.to_timedelta + msg = "Units 'M', 'Y' and 'y' do not represent unambiguous timedelta" + if errors: + with pytest.raises(ValueError, match=msg): + to_timedelta(val) + else: + # check it doesn't raise + to_timedelta(val) + + def test_to_timedelta_via_apply(self): + # GH 5458 + expected = Series([np.timedelta64(1, "s")]) + result = Series(["00:00:01"]).apply(to_timedelta) + tm.assert_series_equal(result, expected) + + result = Series([to_timedelta("00:00:01")]) + tm.assert_series_equal(result, expected) + + def test_to_timedelta_inference_without_warning(self): + # GH#41731 inference produces a warning in the Series constructor, + # but _not_ in to_timedelta + vals = ["00:00:01", pd.NaT] + with tm.assert_produces_warning(None): + result = to_timedelta(vals) + + expected = TimedeltaIndex([pd.Timedelta(seconds=1), pd.NaT]) + tm.assert_index_equal(result, expected) + + def test_to_timedelta_on_missing_values(self): + # GH5438 + timedelta_NaT = np.timedelta64("NaT") + + actual = to_timedelta(Series(["00:00:01", np.nan])) + expected = Series( + [np.timedelta64(1000000000, "ns"), timedelta_NaT], + dtype=f"{tm.ENDIAN}m8[ns]", + ) + tm.assert_series_equal(actual, expected) + + ser = Series(["00:00:01", pd.NaT], dtype="m8[ns]") + actual = to_timedelta(ser) + tm.assert_series_equal(actual, expected) + + @pytest.mark.parametrize("val", [np.nan, pd.NaT, pd.NA]) + def test_to_timedelta_on_missing_values_scalar(self, val): + actual = to_timedelta(val) + assert actual._value == 
np.timedelta64("NaT").astype("int64") + + @pytest.mark.parametrize("val", [np.nan, pd.NaT, pd.NA]) + def test_to_timedelta_on_missing_values_list(self, val): + actual = to_timedelta([val]) + assert actual[0]._value == np.timedelta64("NaT").astype("int64") + + @pytest.mark.xfail(not IS64, reason="Floating point error") + def test_to_timedelta_float(self): + # https://github.com/pandas-dev/pandas/issues/25077 + arr = np.arange(0, 1, 1e-6)[-10:] + result = to_timedelta(arr, unit="s") + expected_asi8 = np.arange(999990000, 10**9, 1000, dtype="int64") + tm.assert_numpy_array_equal(result.asi8, expected_asi8) + + def test_to_timedelta_coerce_strings_unit(self): + arr = np.array([1, 2, "error"], dtype=object) + result = to_timedelta(arr, unit="ns", errors="coerce") + expected = to_timedelta([1, 2, pd.NaT], unit="ns") + tm.assert_index_equal(result, expected) + + def test_to_timedelta_ignore_strings_unit(self): + arr = np.array([1, 2, "error"], dtype=object) + msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_timedelta(arr, unit="ns", errors="ignore") + tm.assert_numpy_array_equal(result, arr) + + @pytest.mark.parametrize( + "expected_val, result_val", [[timedelta(days=2), 2], [None, None]] + ) + def test_to_timedelta_nullable_int64_dtype(self, expected_val, result_val): + # GH 35574 + expected = Series([timedelta(days=1), expected_val]) + result = to_timedelta(Series([1, result_val], dtype="Int64"), unit="days") + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + ("input", "expected"), + [ + ("8:53:08.71800000001", "8:53:08.718"), + ("8:53:08.718001", "8:53:08.718001"), + ("8:53:08.7180000001", "8:53:08.7180000001"), + ("-8:53:08.71800000001", "-8:53:08.718"), + ("8:53:08.7180000089", "8:53:08.718000008"), + ], + ) + @pytest.mark.parametrize("func", [pd.Timedelta, to_timedelta]) + def test_to_timedelta_precision_over_nanos(self, input, expected, func): + # GH: 36738 + expected = pd.Timedelta(expected) + result = func(input) + assert result == expected + + def test_to_timedelta_zerodim(self, fixed_now_ts): + # ndarray.item() incorrectly returns int for dt64[ns] and td64[ns] + dt64 = fixed_now_ts.to_datetime64() + arg = np.array(dt64) + + msg = ( + "Value must be Timedelta, string, integer, float, timedelta " + "or convertible, not datetime64" + ) + with pytest.raises(ValueError, match=msg): + to_timedelta(arg) + + arg2 = arg.view("m8[ns]") + result = to_timedelta(arg2) + assert isinstance(result, pd.Timedelta) + assert result._value == dt64.view("i8") + + def test_to_timedelta_numeric_ea(self, any_numeric_ea_dtype): + # GH#48796 + ser = Series([1, pd.NA], dtype=any_numeric_ea_dtype) + result = to_timedelta(ser) + expected = Series([pd.Timedelta(1, unit="ns"), pd.NaT]) + tm.assert_series_equal(result, expected) + + def test_to_timedelta_fraction(self): + result = to_timedelta(1.0 / 3, unit="h") + expected = pd.Timedelta("0 days 00:19:59.999999998") + assert result == expected + + +def test_from_numeric_arrow_dtype(any_numeric_ea_dtype): + # GH 52425 + pytest.importorskip("pyarrow") + ser = Series([1, 2], dtype=f"{any_numeric_ea_dtype.lower()}[pyarrow]") + result = to_timedelta(ser) + expected = Series([1, 2], dtype="timedelta64[ns]") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("unit", ["ns", "ms"]) +def test_from_timedelta_arrow_dtype(unit): + # GH 54298 + pytest.importorskip("pyarrow") + expected = Series([timedelta(1)], dtype=f"duration[{unit}][pyarrow]") + result = 
to_timedelta(expected) + tm.assert_series_equal(result, expected)
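As a quick, self-contained illustration of the downcast semantics exercised by the to_numeric tests above (a minimal sketch using only behavior asserted in those tests; not part of the diff itself):

import numpy as np
import pandas as pd

# downcast="integer" picks the smallest integer dtype that can hold the parsed values
assert pd.to_numeric(["1", 2, 3], downcast="integer").dtype == np.int8
# a negative value makes downcast="unsigned" a no-op: the default int64 is kept
assert pd.to_numeric(["-1", 2, 3], downcast="unsigned").dtype == np.int64
# floats that are not losslessly representable as integers are never downcast
assert pd.to_numeric(["1.1", 2, 3], downcast="integer").dtype == np.float64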