diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/common.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..b608df1554154f4723a0147ea02c04c780839c65
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/common.py
@@ -0,0 +1,155 @@
+"""
+Assertion helpers for arithmetic tests.
+"""
+import numpy as np
+import pytest
+
+from pandas import (
+    DataFrame,
+    Index,
+    Series,
+    array,
+)
+import pandas._testing as tm
+from pandas.core.arrays import (
+    BooleanArray,
+    NumpyExtensionArray,
+)
+
+
+def assert_cannot_add(left, right, msg="cannot add"):
+    """
+    Helper to assert that left and right cannot be added.
+
+    Parameters
+    ----------
+    left : object
+    right : object
+    msg : str, default "cannot add"
+    """
+    with pytest.raises(TypeError, match=msg):
+        left + right
+    with pytest.raises(TypeError, match=msg):
+        right + left
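As an editorial aside, a minimal sketch (not part of the patch; the test name is hypothetical) of the pattern in which assert_cannot_add is consumed, here checking that integer addition to a DatetimeIndex raises in recent pandas:

import pytest
import pandas as pd


def assert_cannot_add(left, right, msg="cannot add"):
    # Same contract as the helper above: addition must raise in both orders.
    with pytest.raises(TypeError, match=msg):
        left + right
    with pytest.raises(TypeError, match=msg):
        right + left


def test_dti_add_int_raises():
    dti = pd.date_range("2016-01-01", periods=3)
    # Integer addition to datetime64 data raises TypeError in recent pandas.
    assert_cannot_add(dti, 1, msg="Addition/subtraction of integers")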
+def assert_invalid_addsub_type(left, right, msg=None):
+    """
+    Helper to assert that left and right can be neither added nor subtracted.
+
+    Parameters
+    ----------
+    left : object
+    right : object
+    msg : str or None, default None
+    """
+    with pytest.raises(TypeError, match=msg):
+        left + right
+    with pytest.raises(TypeError, match=msg):
+        right + left
+    with pytest.raises(TypeError, match=msg):
+        left - right
+    with pytest.raises(TypeError, match=msg):
+        right - left
+
+
+def get_upcast_box(left, right, is_cmp: bool = False):
+    """
+    Get the box to use for 'expected' in an arithmetic or comparison operation.
+
+    Parameters
+    ----------
+    left : Any
+    right : Any
+    is_cmp : bool, default False
+        Whether the operation is a comparison method.
+    """
+
+    if isinstance(left, DataFrame) or isinstance(right, DataFrame):
+        return DataFrame
+    if isinstance(left, Series) or isinstance(right, Series):
+        if is_cmp and isinstance(left, Index):
+            # Index does not defer for comparisons
+            return np.array
+        return Series
+    if isinstance(left, Index) or isinstance(right, Index):
+        if is_cmp:
+            return np.array
+        return Index
+    return tm.to_array
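A quick editorial illustration of the upcasting rule get_upcast_box encodes (a sketch, not part of the patch): the higher-priority box wins, and Index comparisons return plain ndarrays.

import numpy as np
import pandas as pd

idx = pd.Index([1, 2, 3])
ser = pd.Series([4, 5, 6])

# Index defers to Series for arithmetic, so expected results are boxed
# as Series.
assert isinstance(idx + ser, pd.Series)

# Index comparisons do not return an Index; they produce a boolean ndarray.
assert isinstance(idx == idx, np.ndarray)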
+def assert_invalid_comparison(left, right, box):
+    """
+    Assert that comparison operations with mismatched types behave correctly.
+
+    Parameters
+    ----------
+    left : np.ndarray, ExtensionArray, Index, or Series
+    right : object
+    box : {pd.DataFrame, pd.Series, pd.Index, pd.array, tm.to_array}
+    """
+    # Not for tznaive-tzaware comparison
+
+    # Note: not quite the same as how we do this for tm.box_expected
+    xbox = box if box not in [Index, array] else np.array
+
+    def xbox2(x):
+        # Eventually we'd like this to be tighter, but for now we'll
+        # just exclude NumpyExtensionArray[bool]
+        if isinstance(x, NumpyExtensionArray):
+            return x._ndarray
+        if isinstance(x, BooleanArray):
+            # NB: we are assuming no pd.NAs for now
+            return x.astype(bool)
+        return x
+
+    # rev_box: box to use for reversed comparisons
+    rev_box = xbox
+    if isinstance(right, Index) and isinstance(left, Series):
+        rev_box = np.array
+
+    result = xbox2(left == right)
+    expected = xbox(np.zeros(result.shape, dtype=np.bool_))
+
+    tm.assert_equal(result, expected)
+
+    result = xbox2(right == left)
+    tm.assert_equal(result, rev_box(expected))
+
+    result = xbox2(left != right)
+    tm.assert_equal(result, ~expected)
+
+    result = xbox2(right != left)
+    tm.assert_equal(result, rev_box(~expected))
+
+    msg = "|".join(
+        [
+            "Invalid comparison between",
+            "Cannot compare type",
+            "not supported between",
+            "invalid type promotion",
+            (
+                # GH#36706 npdev 1.20.0 2020-09-28
+                r"The DTypes <class 'numpy\.dtype\[datetime64\]'> and "
+                r"<class 'numpy\.dtype\[int64\]'> do not have a common DType. "
+                "For example they cannot be stored in a single array unless the "
+                "dtype is `object`."
+            ),
+        ]
+    )
+    with pytest.raises(TypeError, match=msg):
+        left < right
+    with pytest.raises(TypeError, match=msg):
+        left <= right
+    with pytest.raises(TypeError, match=msg):
+        left > right
+    with pytest.raises(TypeError, match=msg):
+        left >= right
+    with pytest.raises(TypeError, match=msg):
+        right < left
+    with pytest.raises(TypeError, match=msg):
+        right <= left
+    with pytest.raises(TypeError, match=msg):
+        right > left
+    with pytest.raises(TypeError, match=msg):
+        right >= left
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/conftest.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7703b34a5e38e7a3887d727b0a8c954016ad836
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/conftest.py
@@ -0,0 +1,139 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import Index
+
+
+@pytest.fixture(params=[1, np.array(1, dtype=np.int64)])
+def one(request):
+    """
+    Several variants of integer value 1. The zero-dim integer array
+    behaves like an integer.
+
+    This fixture can be used to check that datetimelike indexes handle
+    addition and subtraction of integers and zero-dimensional arrays
+    of integers.
+
+    Examples
+    --------
+    >>> dti = pd.date_range('2016-01-01', periods=2, freq='h')
+    >>> dti
+    DatetimeIndex(['2016-01-01 00:00:00', '2016-01-01 01:00:00'],
+                  dtype='datetime64[ns]', freq='h')
+    >>> dti + one
+    DatetimeIndex(['2016-01-01 01:00:00', '2016-01-01 02:00:00'],
+                  dtype='datetime64[ns]', freq='h')
+    """
+    return request.param
+
+
+zeros = [
+    box_cls([0] * 5, dtype=dtype)
+    for box_cls in [Index, np.array, pd.array]
+    for dtype in [np.int64, np.uint64, np.float64]
+]
+zeros.extend([box_cls([-0.0] * 5, dtype=np.float64) for box_cls in [Index, np.array]])
+zeros.extend([np.array(0, dtype=dtype) for dtype in [np.int64, np.uint64, np.float64]])
+zeros.extend([np.array(-0.0, dtype=np.float64)])
+zeros.extend([0, 0.0, -0.0])
+
+
+@pytest.fixture(params=zeros)
+def zero(request):
+    """
+    Several types of scalar zeros and length 5 vectors of zeros.
+
+    This fixture can be used to check that numeric-dtype indexes handle
+    division by any zero numeric-dtype.
+
+    Uses vector of length 5 for broadcasting with `numeric_idx` fixture,
+    which creates numeric-dtype vectors also of length 5.
+
+    Examples
+    --------
+    >>> arr = RangeIndex(5)
+    >>> arr / zeros
+    Index([nan, inf, inf, inf, inf], dtype='float64')
+    """
+    return request.param
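The zero fixture's docstring example can be checked directly. An editorial sketch (not part of the patch) using the scalar variant, since pandas Index arithmetic returns inf/nan here rather than raising:

import numpy as np
import pandas as pd

arr = pd.RangeIndex(5)
result = arr / 0  # stands in for any variant the `zero` fixture yields

# 0/0 is nan; n/0 for n > 0 is inf.
expected = pd.Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype="float64")
pd.testing.assert_index_equal(result, expected)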
+ """ + return request.param + + +@pytest.fixture( + params=[ + pd.offsets.Day(3), + pd.offsets.Hour(72), + pd.Timedelta(days=3).to_pytimedelta(), + pd.Timedelta("72:00:00"), + np.timedelta64(3, "D"), + np.timedelta64(72, "h"), + ], + ids=lambda x: type(x).__name__, +) +def three_days(request): + """ + Several timedelta-like and DateOffset objects that each represent + a 3-day timedelta + """ + return request.param + + +@pytest.fixture( + params=[ + pd.offsets.Hour(2), + pd.offsets.Minute(120), + pd.Timedelta(hours=2).to_pytimedelta(), + pd.Timedelta(seconds=2 * 3600), + np.timedelta64(2, "h"), + np.timedelta64(120, "m"), + ], + ids=lambda x: type(x).__name__, +) +def two_hours(request): + """ + Several timedelta-like and DateOffset objects that each represent + a 2-hour timedelta + """ + return request.param + + +_common_mismatch = [ + pd.offsets.YearBegin(2), + pd.offsets.MonthBegin(1), + pd.offsets.Minute(), +] + + +@pytest.fixture( + params=[ + np.timedelta64(4, "h"), + pd.Timedelta(hours=23).to_pytimedelta(), + pd.Timedelta("23:00:00"), + ] + + _common_mismatch +) +def not_daily(request): + """ + Several timedelta-like and DateOffset instances that are _not_ + compatible with Daily frequencies. + """ + return request.param diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_array_ops.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_array_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..2c347d965bbf7353a6a4e81ca955341f8041b6de --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_array_ops.py @@ -0,0 +1,39 @@ +import operator + +import numpy as np +import pytest + +import pandas._testing as tm +from pandas.core.ops.array_ops import ( + comparison_op, + na_logical_op, +) + + +def test_na_logical_op_2d(): + left = np.arange(8).reshape(4, 2) + right = left.astype(object) + right[0, 0] = np.nan + + # Check that we fall back to the vec_binop branch + with pytest.raises(TypeError, match="unsupported operand type"): + operator.or_(left, right) + + result = na_logical_op(left, right, operator.or_) + expected = right + tm.assert_numpy_array_equal(result, expected) + + +def test_object_comparison_2d(): + left = np.arange(9).reshape(3, 3).astype(object) + right = left.T + + result = comparison_op(left, right, operator.eq) + expected = np.eye(3).astype(bool) + tm.assert_numpy_array_equal(result, expected) + + # Ensure that cython doesn't raise on non-writeable arg, which + # we can get from np.broadcast_to + right.flags.writeable = False + result = comparison_op(left, right, operator.ne) + tm.assert_numpy_array_equal(result, ~expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_categorical.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..d6f3a13ce670596a12ca10b9e8d02d69d63c96fb --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_categorical.py @@ -0,0 +1,25 @@ +import numpy as np + +from pandas import ( + Categorical, + Series, +) +import pandas._testing as tm + + +class TestCategoricalComparisons: + def test_categorical_nan_equality(self): + cat = Series(Categorical(["a", "b", "c", np.nan])) + expected = Series([True, True, True, False]) + result = cat == cat + tm.assert_series_equal(result, expected) + + def test_categorical_tuple_equality(self): + # GH 18050 + ser = Series([(0, 0), (0, 1), 
(0, 0), (1, 0), (1, 1)]) + expected = Series([True, False, True, False, False]) + result = ser == (0, 0) + tm.assert_series_equal(result, expected) + + result = ser.astype("category") == (0, 0) + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_datetime64.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_datetime64.py new file mode 100644 index 0000000000000000000000000000000000000000..a468449efd507fae37f3fcb15f64a3e1bf551f93 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_datetime64.py @@ -0,0 +1,2469 @@ +# Arithmetic tests for DataFrame/Series/Index/Array classes that should +# behave identically. +# Specifically for datetime64 and datetime64tz dtypes +from datetime import ( + datetime, + time, + timedelta, +) +from itertools import ( + product, + starmap, +) +import operator + +import numpy as np +import pytest +import pytz + +from pandas._libs.tslibs.conversion import localize_pydatetime +from pandas._libs.tslibs.offsets import shift_months +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + DateOffset, + DatetimeIndex, + NaT, + Period, + Series, + Timedelta, + TimedeltaIndex, + Timestamp, + date_range, +) +import pandas._testing as tm +from pandas.core import roperator +from pandas.tests.arithmetic.common import ( + assert_cannot_add, + assert_invalid_addsub_type, + assert_invalid_comparison, + get_upcast_box, +) + +# ------------------------------------------------------------------ +# Comparisons + + +class TestDatetime64ArrayLikeComparisons: + # Comparison tests for datetime64 vectors fully parametrized over + # DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison + # tests will eventually end up here. + + def test_compare_zerodim(self, tz_naive_fixture, box_with_array): + # Test comparison with zero-dimensional array is unboxed + tz = tz_naive_fixture + box = box_with_array + dti = date_range("20130101", periods=3, tz=tz) + + other = np.array(dti.to_numpy()[0]) + + dtarr = tm.box_expected(dti, box) + xbox = get_upcast_box(dtarr, other, True) + result = dtarr <= other + expected = np.array([True, False, False]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "other", + [ + "foo", + -1, + 99, + 4.0, + object(), + timedelta(days=2), + # GH#19800, GH#19301 datetime.date comparison raises to + # match DatetimeIndex/Timestamp. 
This also matches the behavior + # of stdlib datetime.datetime + datetime(2001, 1, 1).date(), + # GH#19301 None and NaN are *not* cast to NaT for comparisons + None, + np.nan, + ], + ) + def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array): + # GH#22074, GH#15966 + tz = tz_naive_fixture + + rng = date_range("1/1/2000", periods=10, tz=tz) + dtarr = tm.box_expected(rng, box_with_array) + assert_invalid_comparison(dtarr, other, box_with_array) + + @pytest.mark.parametrize( + "other", + [ + # GH#4968 invalid date/int comparisons + list(range(10)), + np.arange(10), + np.arange(10).astype(np.float32), + np.arange(10).astype(object), + pd.timedelta_range("1ns", periods=10).array, + np.array(pd.timedelta_range("1ns", periods=10)), + list(pd.timedelta_range("1ns", periods=10)), + pd.timedelta_range("1 Day", periods=10).astype(object), + pd.period_range("1971-01-01", freq="D", periods=10).array, + pd.period_range("1971-01-01", freq="D", periods=10).astype(object), + ], + ) + def test_dt64arr_cmp_arraylike_invalid( + self, other, tz_naive_fixture, box_with_array + ): + tz = tz_naive_fixture + + dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data + obj = tm.box_expected(dta, box_with_array) + assert_invalid_comparison(obj, other, box_with_array) + + def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture): + tz = tz_naive_fixture + + dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data + + other = np.array([0, 1, 2, dta[3], Timedelta(days=1)]) + result = dta == other + expected = np.array([False, False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = dta != other + tm.assert_numpy_array_equal(result, ~expected) + + msg = "Invalid comparison between|Cannot compare type|not supported between" + with pytest.raises(TypeError, match=msg): + dta < other + with pytest.raises(TypeError, match=msg): + dta > other + with pytest.raises(TypeError, match=msg): + dta <= other + with pytest.raises(TypeError, match=msg): + dta >= other + + def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array): + # GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly + tz = tz_naive_fixture + box = box_with_array + + ts = Timestamp("2021-01-01", tz=tz) + ser = Series([ts, NaT]) + + obj = tm.box_expected(ser, box) + xbox = get_upcast_box(obj, ts, True) + + expected = Series([True, False], dtype=np.bool_) + expected = tm.box_expected(expected, xbox) + + result = obj == ts + tm.assert_equal(result, expected) + + +class TestDatetime64SeriesComparison: + # TODO: moved from tests.series.test_operators; needs cleanup + + @pytest.mark.parametrize( + "pair", + [ + ( + [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")], + [NaT, NaT, Timestamp("2011-01-03")], + ), + ( + [Timedelta("1 days"), NaT, Timedelta("3 days")], + [NaT, NaT, Timedelta("3 days")], + ), + ( + [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")], + [NaT, NaT, Period("2011-03", freq="M")], + ), + ], + ) + @pytest.mark.parametrize("reverse", [True, False]) + @pytest.mark.parametrize("dtype", [None, object]) + @pytest.mark.parametrize( + "op, expected", + [ + (operator.eq, Series([False, False, True])), + (operator.ne, Series([True, True, False])), + (operator.lt, Series([False, False, False])), + (operator.gt, Series([False, False, False])), + (operator.ge, Series([False, False, True])), + (operator.le, Series([False, False, True])), + ], + ) + def test_nat_comparisons( + self, + dtype, + index_or_series, + reverse, + pair, + op, + expected, + 
): + box = index_or_series + lhs, rhs = pair + if reverse: + # add lhs / rhs switched data + lhs, rhs = rhs, lhs + + left = Series(lhs, dtype=dtype) + right = box(rhs, dtype=dtype) + + result = op(left, right) + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "data", + [ + [Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")], + [Timedelta("1 days"), NaT, Timedelta("3 days")], + [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")], + ], + ) + @pytest.mark.parametrize("dtype", [None, object]) + def test_nat_comparisons_scalar(self, dtype, data, box_with_array): + box = box_with_array + + left = Series(data, dtype=dtype) + left = tm.box_expected(left, box) + xbox = get_upcast_box(left, NaT, True) + + expected = [False, False, False] + expected = tm.box_expected(expected, xbox) + if box is pd.array and dtype is object: + expected = pd.array(expected, dtype="bool") + + tm.assert_equal(left == NaT, expected) + tm.assert_equal(NaT == left, expected) + + expected = [True, True, True] + expected = tm.box_expected(expected, xbox) + if box is pd.array and dtype is object: + expected = pd.array(expected, dtype="bool") + tm.assert_equal(left != NaT, expected) + tm.assert_equal(NaT != left, expected) + + expected = [False, False, False] + expected = tm.box_expected(expected, xbox) + if box is pd.array and dtype is object: + expected = pd.array(expected, dtype="bool") + tm.assert_equal(left < NaT, expected) + tm.assert_equal(NaT > left, expected) + tm.assert_equal(left <= NaT, expected) + tm.assert_equal(NaT >= left, expected) + + tm.assert_equal(left > NaT, expected) + tm.assert_equal(NaT < left, expected) + tm.assert_equal(left >= NaT, expected) + tm.assert_equal(NaT <= left, expected) + + @pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)]) + def test_series_comparison_scalars(self, val): + series = Series(date_range("1/1/2000", periods=10)) + + result = series > val + expected = Series([x > val for x in series]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")] + ) + def test_timestamp_compare_series(self, left, right): + # see gh-4982 + # Make sure we can compare Timestamps on the right AND left hand side. 
+ ser = Series(date_range("20010101", periods=10), name="dates") + s_nat = ser.copy(deep=True) + + ser[0] = Timestamp("nat") + ser[3] = Timestamp("nat") + + left_f = getattr(operator, left) + right_f = getattr(operator, right) + + # No NaT + expected = left_f(ser, Timestamp("20010109")) + result = right_f(Timestamp("20010109"), ser) + tm.assert_series_equal(result, expected) + + # NaT + expected = left_f(ser, Timestamp("nat")) + result = right_f(Timestamp("nat"), ser) + tm.assert_series_equal(result, expected) + + # Compare to Timestamp with series containing NaT + expected = left_f(s_nat, Timestamp("20010109")) + result = right_f(Timestamp("20010109"), s_nat) + tm.assert_series_equal(result, expected) + + # Compare to NaT with series containing NaT + expected = left_f(s_nat, NaT) + result = right_f(NaT, s_nat) + tm.assert_series_equal(result, expected) + + def test_dt64arr_timestamp_equality(self, box_with_array): + # GH#11034 + box = box_with_array + + ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT]) + ser = tm.box_expected(ser, box) + xbox = get_upcast_box(ser, ser, True) + + result = ser != ser + expected = tm.box_expected([False, False, True], xbox) + tm.assert_equal(result, expected) + + if box is pd.DataFrame: + # alignment for frame vs series comparisons deprecated + # in GH#46795 enforced 2.0 + with pytest.raises(ValueError, match="not aligned"): + ser != ser[0] + + else: + result = ser != ser[0] + expected = tm.box_expected([False, True, True], xbox) + tm.assert_equal(result, expected) + + if box is pd.DataFrame: + # alignment for frame vs series comparisons deprecated + # in GH#46795 enforced 2.0 + with pytest.raises(ValueError, match="not aligned"): + ser != ser[2] + else: + result = ser != ser[2] + expected = tm.box_expected([True, True, True], xbox) + tm.assert_equal(result, expected) + + result = ser == ser + expected = tm.box_expected([True, True, False], xbox) + tm.assert_equal(result, expected) + + if box is pd.DataFrame: + # alignment for frame vs series comparisons deprecated + # in GH#46795 enforced 2.0 + with pytest.raises(ValueError, match="not aligned"): + ser == ser[0] + else: + result = ser == ser[0] + expected = tm.box_expected([True, False, False], xbox) + tm.assert_equal(result, expected) + + if box is pd.DataFrame: + # alignment for frame vs series comparisons deprecated + # in GH#46795 enforced 2.0 + with pytest.raises(ValueError, match="not aligned"): + ser == ser[2] + else: + result = ser == ser[2] + expected = tm.box_expected([False, False, False], xbox) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "datetimelike", + [ + Timestamp("20130101"), + datetime(2013, 1, 1), + np.datetime64("2013-01-01T00:00", "ns"), + ], + ) + @pytest.mark.parametrize( + "op,expected", + [ + (operator.lt, [True, False, False, False]), + (operator.le, [True, True, False, False]), + (operator.eq, [False, True, False, False]), + (operator.gt, [False, False, False, True]), + ], + ) + def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected): + # GH#17965, test for ability to compare datetime64[ns] columns + # to datetimelike + ser = Series( + [ + Timestamp("20120101"), + Timestamp("20130101"), + np.nan, + Timestamp("20130103"), + ], + name="A", + ) + result = op(ser, datetimelike) + expected = Series(expected, name="A") + tm.assert_series_equal(result, expected) + + +class TestDatetimeIndexComparisons: + # TODO: moved from tests.indexes.test_base; parametrize and de-duplicate + def test_comparators(self, 
comparison_op): + index = date_range("2020-01-01", periods=10) + element = index[len(index) // 2] + element = Timestamp(element).to_datetime64() + + arr = np.array(index) + arr_result = comparison_op(arr, element) + index_result = comparison_op(index, element) + + assert isinstance(index_result, np.ndarray) + tm.assert_numpy_array_equal(arr_result, index_result) + + @pytest.mark.parametrize( + "other", + [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")], + ) + def test_dti_cmp_datetimelike(self, other, tz_naive_fixture): + tz = tz_naive_fixture + dti = date_range("2016-01-01", periods=2, tz=tz) + if tz is not None: + if isinstance(other, np.datetime64): + pytest.skip(f"{type(other).__name__} is not tz aware") + other = localize_pydatetime(other, dti.tzinfo) + + result = dti == other + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = dti > other + expected = np.array([False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = dti >= other + expected = np.array([True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = dti < other + expected = np.array([False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = dti <= other + expected = np.array([True, False]) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("dtype", [None, object]) + def test_dti_cmp_nat(self, dtype, box_with_array): + left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")]) + right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")]) + + left = tm.box_expected(left, box_with_array) + right = tm.box_expected(right, box_with_array) + xbox = get_upcast_box(left, right, True) + + lhs, rhs = left, right + if dtype is object: + lhs, rhs = left.astype(object), right.astype(object) + + result = rhs == lhs + expected = np.array([False, False, True]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) + + result = lhs != rhs + expected = np.array([True, True, False]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) + + expected = np.array([False, False, False]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(lhs == NaT, expected) + tm.assert_equal(NaT == rhs, expected) + + expected = np.array([True, True, True]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(lhs != NaT, expected) + tm.assert_equal(NaT != lhs, expected) + + expected = np.array([False, False, False]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(lhs < NaT, expected) + tm.assert_equal(NaT > lhs, expected) + + def test_dti_cmp_nat_behaves_like_float_cmp_nan(self): + fidx1 = pd.Index([1.0, np.nan, 3.0, np.nan, 5.0, 7.0]) + fidx2 = pd.Index([2.0, 3.0, np.nan, np.nan, 6.0, 7.0]) + + didx1 = DatetimeIndex( + ["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"] + ) + didx2 = DatetimeIndex( + ["2014-02-01", "2014-03-01", NaT, NaT, "2014-06-01", "2014-07-01"] + ) + darr = np.array( + [ + np.datetime64("2014-02-01 00:00"), + np.datetime64("2014-03-01 00:00"), + np.datetime64("nat"), + np.datetime64("nat"), + np.datetime64("2014-06-01 00:00"), + np.datetime64("2014-07-01 00:00"), + ] + ) + + cases = [(fidx1, fidx2), (didx1, didx2), (didx1, darr)] + + # Check pd.NaT is handles as the same as np.nan + with tm.assert_produces_warning(None): + for idx1, idx2 in cases: + result = idx1 < idx2 + expected = np.array([True, False, False, False, True, False]) + tm.assert_numpy_array_equal(result, 
expected) + + result = idx2 > idx1 + expected = np.array([True, False, False, False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 <= idx2 + expected = np.array([True, False, False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx2 >= idx1 + expected = np.array([True, False, False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 == idx2 + expected = np.array([False, False, False, False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 != idx2 + expected = np.array([True, True, True, True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + with tm.assert_produces_warning(None): + for idx1, val in [(fidx1, np.nan), (didx1, NaT)]: + result = idx1 < val + expected = np.array([False, False, False, False, False, False]) + tm.assert_numpy_array_equal(result, expected) + result = idx1 > val + tm.assert_numpy_array_equal(result, expected) + + result = idx1 <= val + tm.assert_numpy_array_equal(result, expected) + result = idx1 >= val + tm.assert_numpy_array_equal(result, expected) + + result = idx1 == val + tm.assert_numpy_array_equal(result, expected) + + result = idx1 != val + expected = np.array([True, True, True, True, True, True]) + tm.assert_numpy_array_equal(result, expected) + + # Check pd.NaT is handles as the same as np.nan + with tm.assert_produces_warning(None): + for idx1, val in [(fidx1, 3), (didx1, datetime(2014, 3, 1))]: + result = idx1 < val + expected = np.array([True, False, False, False, False, False]) + tm.assert_numpy_array_equal(result, expected) + result = idx1 > val + expected = np.array([False, False, False, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 <= val + expected = np.array([True, False, True, False, False, False]) + tm.assert_numpy_array_equal(result, expected) + result = idx1 >= val + expected = np.array([False, False, True, False, True, True]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 == val + expected = np.array([False, False, True, False, False, False]) + tm.assert_numpy_array_equal(result, expected) + + result = idx1 != val + expected = np.array([True, True, False, True, True, True]) + tm.assert_numpy_array_equal(result, expected) + + def test_comparison_tzawareness_compat(self, comparison_op, box_with_array): + # GH#18162 + op = comparison_op + box = box_with_array + + dr = date_range("2016-01-01", periods=6) + dz = dr.tz_localize("US/Pacific") + + dr = tm.box_expected(dr, box) + dz = tm.box_expected(dz, box) + + if box is pd.DataFrame: + tolist = lambda x: x.astype(object).values.tolist()[0] + else: + tolist = list + + if op not in [operator.eq, operator.ne]: + msg = ( + r"Invalid comparison between dtype=datetime64\[ns.*\] " + "and (Timestamp|DatetimeArray|list|ndarray)" + ) + with pytest.raises(TypeError, match=msg): + op(dr, dz) + + with pytest.raises(TypeError, match=msg): + op(dr, tolist(dz)) + with pytest.raises(TypeError, match=msg): + op(dr, np.array(tolist(dz), dtype=object)) + with pytest.raises(TypeError, match=msg): + op(dz, dr) + + with pytest.raises(TypeError, match=msg): + op(dz, tolist(dr)) + with pytest.raises(TypeError, match=msg): + op(dz, np.array(tolist(dr), dtype=object)) + + # The aware==aware and naive==naive comparisons should *not* raise + assert np.all(dr == dr) + assert np.all(dr == tolist(dr)) + assert np.all(tolist(dr) == dr) + assert np.all(np.array(tolist(dr), dtype=object) == dr) + assert np.all(dr == 
np.array(tolist(dr), dtype=object)) + + assert np.all(dz == dz) + assert np.all(dz == tolist(dz)) + assert np.all(tolist(dz) == dz) + assert np.all(np.array(tolist(dz), dtype=object) == dz) + assert np.all(dz == np.array(tolist(dz), dtype=object)) + + def test_comparison_tzawareness_compat_scalars(self, comparison_op, box_with_array): + # GH#18162 + op = comparison_op + + dr = date_range("2016-01-01", periods=6) + dz = dr.tz_localize("US/Pacific") + + dr = tm.box_expected(dr, box_with_array) + dz = tm.box_expected(dz, box_with_array) + + # Check comparisons against scalar Timestamps + ts = Timestamp("2000-03-14 01:59") + ts_tz = Timestamp("2000-03-14 01:59", tz="Europe/Amsterdam") + + assert np.all(dr > ts) + msg = r"Invalid comparison between dtype=datetime64\[ns.*\] and Timestamp" + if op not in [operator.eq, operator.ne]: + with pytest.raises(TypeError, match=msg): + op(dr, ts_tz) + + assert np.all(dz > ts_tz) + if op not in [operator.eq, operator.ne]: + with pytest.raises(TypeError, match=msg): + op(dz, ts) + + if op not in [operator.eq, operator.ne]: + # GH#12601: Check comparison against Timestamps and DatetimeIndex + with pytest.raises(TypeError, match=msg): + op(ts, dz) + + @pytest.mark.parametrize( + "other", + [datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")], + ) + # Bug in NumPy? https://github.com/numpy/numpy/issues/13841 + # Raising in __eq__ will fallback to NumPy, which warns, fails, + # then re-raises the original exception. So we just need to ignore. + @pytest.mark.filterwarnings("ignore:elementwise comp:DeprecationWarning") + def test_scalar_comparison_tzawareness( + self, comparison_op, other, tz_aware_fixture, box_with_array + ): + op = comparison_op + tz = tz_aware_fixture + dti = date_range("2016-01-01", periods=2, tz=tz) + + dtarr = tm.box_expected(dti, box_with_array) + xbox = get_upcast_box(dtarr, other, True) + if op in [operator.eq, operator.ne]: + exbool = op is operator.ne + expected = np.array([exbool, exbool], dtype=bool) + expected = tm.box_expected(expected, xbox) + + result = op(dtarr, other) + tm.assert_equal(result, expected) + + result = op(other, dtarr) + tm.assert_equal(result, expected) + else: + msg = ( + r"Invalid comparison between dtype=datetime64\[ns, .*\] " + f"and {type(other).__name__}" + ) + with pytest.raises(TypeError, match=msg): + op(dtarr, other) + with pytest.raises(TypeError, match=msg): + op(other, dtarr) + + def test_nat_comparison_tzawareness(self, comparison_op): + # GH#19276 + # tzaware DatetimeIndex should not raise when compared to NaT + op = comparison_op + + dti = DatetimeIndex( + ["2014-01-01", NaT, "2014-03-01", NaT, "2014-05-01", "2014-07-01"] + ) + expected = np.array([op == operator.ne] * len(dti)) + result = op(dti, NaT) + tm.assert_numpy_array_equal(result, expected) + + result = op(dti.tz_localize("US/Pacific"), NaT) + tm.assert_numpy_array_equal(result, expected) + + def test_dti_cmp_str(self, tz_naive_fixture): + # GH#22074 + # regardless of tz, we expect these comparisons are valid + tz = tz_naive_fixture + rng = date_range("1/1/2000", periods=10, tz=tz) + other = "1/1/2000" + + result = rng == other + expected = np.array([True] + [False] * 9) + tm.assert_numpy_array_equal(result, expected) + + result = rng != other + expected = np.array([False] + [True] * 9) + tm.assert_numpy_array_equal(result, expected) + + result = rng < other + expected = np.array([False] * 10) + tm.assert_numpy_array_equal(result, expected) + + result = rng <= other + expected = np.array([True] + [False] * 9) + 
tm.assert_numpy_array_equal(result, expected) + + result = rng > other + expected = np.array([False] + [True] * 9) + tm.assert_numpy_array_equal(result, expected) + + result = rng >= other + expected = np.array([True] * 10) + tm.assert_numpy_array_equal(result, expected) + + def test_dti_cmp_list(self): + rng = date_range("1/1/2000", periods=10) + + result = rng == list(rng) + expected = rng == rng + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "other", + [ + pd.timedelta_range("1D", periods=10), + pd.timedelta_range("1D", periods=10).to_series(), + pd.timedelta_range("1D", periods=10).asi8.view("m8[ns]"), + ], + ids=lambda x: type(x).__name__, + ) + def test_dti_cmp_tdi_tzawareness(self, other): + # GH#22074 + # reversion test that we _don't_ call _assert_tzawareness_compat + # when comparing against TimedeltaIndex + dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo") + + result = dti == other + expected = np.array([False] * 10) + tm.assert_numpy_array_equal(result, expected) + + result = dti != other + expected = np.array([True] * 10) + tm.assert_numpy_array_equal(result, expected) + msg = "Invalid comparison between" + with pytest.raises(TypeError, match=msg): + dti < other + with pytest.raises(TypeError, match=msg): + dti <= other + with pytest.raises(TypeError, match=msg): + dti > other + with pytest.raises(TypeError, match=msg): + dti >= other + + def test_dti_cmp_object_dtype(self): + # GH#22074 + dti = date_range("2000-01-01", periods=10, tz="Asia/Tokyo") + + other = dti.astype("O") + + result = dti == other + expected = np.array([True] * 10) + tm.assert_numpy_array_equal(result, expected) + + other = dti.tz_localize(None) + result = dti != other + tm.assert_numpy_array_equal(result, expected) + + other = np.array(list(dti[:5]) + [Timedelta(days=1)] * 5) + result = dti == other + expected = np.array([True] * 5 + [False] * 5) + tm.assert_numpy_array_equal(result, expected) + msg = ">=' not supported between instances of 'Timestamp' and 'Timedelta'" + with pytest.raises(TypeError, match=msg): + dti >= other + + +# ------------------------------------------------------------------ +# Arithmetic + + +class TestDatetime64Arithmetic: + # This class is intended for "finished" tests that are fully parametrized + # over DataFrame/Series/Index/DatetimeArray + + # ------------------------------------------------------------- + # Addition/Subtraction of timedelta-like + + @pytest.mark.arm_slow + def test_dt64arr_add_timedeltalike_scalar( + self, tz_naive_fixture, two_hours, box_with_array + ): + # GH#22005, GH#22163 check DataFrame doesn't raise TypeError + tz = tz_naive_fixture + + rng = date_range("2000-01-01", "2000-02-01", tz=tz) + expected = date_range("2000-01-01 02:00", "2000-02-01 02:00", tz=tz) + + rng = tm.box_expected(rng, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = rng + two_hours + tm.assert_equal(result, expected) + + result = two_hours + rng + tm.assert_equal(result, expected) + + rng += two_hours + tm.assert_equal(rng, expected) + + def test_dt64arr_sub_timedeltalike_scalar( + self, tz_naive_fixture, two_hours, box_with_array + ): + tz = tz_naive_fixture + + rng = date_range("2000-01-01", "2000-02-01", tz=tz) + expected = date_range("1999-12-31 22:00", "2000-01-31 22:00", tz=tz) + + rng = tm.box_expected(rng, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = rng - two_hours + tm.assert_equal(result, expected) + + rng -= two_hours + tm.assert_equal(rng, expected) + 
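An editorial sketch of the round-trip these timedelta-scalar tests pin down (not part of the patch): adding a timedelta-like scalar shifts every element, and subtracting it restores the original values.

import pandas as pd

rng = pd.date_range("2000-01-01", "2000-02-01")
shifted = rng + pd.Timedelta(hours=2)

assert shifted[0] == pd.Timestamp("2000-01-01 02:00")
# Subtraction undoes the shift; Tick-like offsets preserve freq.
assert (shifted - pd.Timedelta(hours=2)).equals(rng)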
+ def test_dt64_array_sub_dt_with_different_timezone(self, box_with_array): + t1 = date_range("20130101", periods=3).tz_localize("US/Eastern") + t1 = tm.box_expected(t1, box_with_array) + t2 = Timestamp("20130101").tz_localize("CET") + tnaive = Timestamp(20130101) + + result = t1 - t2 + expected = TimedeltaIndex( + ["0 days 06:00:00", "1 days 06:00:00", "2 days 06:00:00"] + ) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + result = t2 - t1 + expected = TimedeltaIndex( + ["-1 days +18:00:00", "-2 days +18:00:00", "-3 days +18:00:00"] + ) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + msg = "Cannot subtract tz-naive and tz-aware datetime-like objects" + with pytest.raises(TypeError, match=msg): + t1 - tnaive + + with pytest.raises(TypeError, match=msg): + tnaive - t1 + + def test_dt64_array_sub_dt64_array_with_different_timezone(self, box_with_array): + t1 = date_range("20130101", periods=3).tz_localize("US/Eastern") + t1 = tm.box_expected(t1, box_with_array) + t2 = date_range("20130101", periods=3).tz_localize("CET") + t2 = tm.box_expected(t2, box_with_array) + tnaive = date_range("20130101", periods=3) + + result = t1 - t2 + expected = TimedeltaIndex( + ["0 days 06:00:00", "0 days 06:00:00", "0 days 06:00:00"] + ) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + result = t2 - t1 + expected = TimedeltaIndex( + ["-1 days +18:00:00", "-1 days +18:00:00", "-1 days +18:00:00"] + ) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + msg = "Cannot subtract tz-naive and tz-aware datetime-like objects" + with pytest.raises(TypeError, match=msg): + t1 - tnaive + + with pytest.raises(TypeError, match=msg): + tnaive - t1 + + def test_dt64arr_add_sub_td64_nat(self, box_with_array, tz_naive_fixture): + # GH#23320 special handling for timedelta64("NaT") + tz = tz_naive_fixture + + dti = date_range("1994-04-01", periods=9, tz=tz, freq="QS") + other = np.timedelta64("NaT") + expected = DatetimeIndex(["NaT"] * 9, tz=tz).as_unit("ns") + + obj = tm.box_expected(dti, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = obj + other + tm.assert_equal(result, expected) + result = other + obj + tm.assert_equal(result, expected) + result = obj - other + tm.assert_equal(result, expected) + msg = "cannot subtract" + with pytest.raises(TypeError, match=msg): + other - obj + + def test_dt64arr_add_sub_td64ndarray(self, tz_naive_fixture, box_with_array): + tz = tz_naive_fixture + dti = date_range("2016-01-01", periods=3, tz=tz) + tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"]) + tdarr = tdi.values + + expected = date_range("2015-12-31", "2016-01-02", periods=3, tz=tz) + + dtarr = tm.box_expected(dti, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = dtarr + tdarr + tm.assert_equal(result, expected) + result = tdarr + dtarr + tm.assert_equal(result, expected) + + expected = date_range("2016-01-02", "2016-01-04", periods=3, tz=tz) + expected = tm.box_expected(expected, box_with_array) + + result = dtarr - tdarr + tm.assert_equal(result, expected) + msg = "cannot subtract|(bad|unsupported) operand type for unary" + with pytest.raises(TypeError, match=msg): + tdarr - dtarr + + # ----------------------------------------------------------------- + # Subtraction of datetime-like scalars + + @pytest.mark.parametrize( + "ts", + [ + Timestamp("2013-01-01"), + 
Timestamp("2013-01-01").to_pydatetime(), + Timestamp("2013-01-01").to_datetime64(), + # GH#7996, GH#22163 ensure non-nano datetime64 is converted to nano + # for DataFrame operation + np.datetime64("2013-01-01", "D"), + ], + ) + def test_dt64arr_sub_dtscalar(self, box_with_array, ts): + # GH#8554, GH#22163 DataFrame op should _not_ return dt64 dtype + idx = date_range("2013-01-01", periods=3)._with_freq(None) + idx = tm.box_expected(idx, box_with_array) + + expected = TimedeltaIndex(["0 Days", "1 Day", "2 Days"]) + expected = tm.box_expected(expected, box_with_array) + + result = idx - ts + tm.assert_equal(result, expected) + + result = ts - idx + tm.assert_equal(result, -expected) + tm.assert_equal(result, -expected) + + def test_dt64arr_sub_timestamp_tzaware(self, box_with_array): + ser = date_range("2014-03-17", periods=2, freq="D", tz="US/Eastern") + ser = ser._with_freq(None) + ts = ser[0] + + ser = tm.box_expected(ser, box_with_array) + + delta_series = Series([np.timedelta64(0, "D"), np.timedelta64(1, "D")]) + expected = tm.box_expected(delta_series, box_with_array) + + tm.assert_equal(ser - ts, expected) + tm.assert_equal(ts - ser, -expected) + + def test_dt64arr_sub_NaT(self, box_with_array, unit): + # GH#18808 + dti = DatetimeIndex([NaT, Timestamp("19900315")]).as_unit(unit) + ser = tm.box_expected(dti, box_with_array) + + result = ser - NaT + expected = Series([NaT, NaT], dtype=f"timedelta64[{unit}]") + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + dti_tz = dti.tz_localize("Asia/Tokyo") + ser_tz = tm.box_expected(dti_tz, box_with_array) + + result = ser_tz - NaT + expected = Series([NaT, NaT], dtype=f"timedelta64[{unit}]") + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(result, expected) + + # ------------------------------------------------------------- + # Subtraction of datetime-like array-like + + def test_dt64arr_sub_dt64object_array(self, box_with_array, tz_naive_fixture): + dti = date_range("2016-01-01", periods=3, tz=tz_naive_fixture) + expected = dti - dti + + obj = tm.box_expected(dti, box_with_array) + expected = tm.box_expected(expected, box_with_array).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + result = obj - obj.astype(object) + tm.assert_equal(result, expected) + + def test_dt64arr_naive_sub_dt64ndarray(self, box_with_array): + dti = date_range("2016-01-01", periods=3, tz=None) + dt64vals = dti.values + + dtarr = tm.box_expected(dti, box_with_array) + + expected = dtarr - dtarr + result = dtarr - dt64vals + tm.assert_equal(result, expected) + result = dt64vals - dtarr + tm.assert_equal(result, expected) + + def test_dt64arr_aware_sub_dt64ndarray_raises( + self, tz_aware_fixture, box_with_array + ): + tz = tz_aware_fixture + dti = date_range("2016-01-01", periods=3, tz=tz) + dt64vals = dti.values + + dtarr = tm.box_expected(dti, box_with_array) + msg = "Cannot subtract tz-naive and tz-aware datetime" + with pytest.raises(TypeError, match=msg): + dtarr - dt64vals + with pytest.raises(TypeError, match=msg): + dt64vals - dtarr + + # ------------------------------------------------------------- + # Addition of datetime-like others (invalid) + + def test_dt64arr_add_dtlike_raises(self, tz_naive_fixture, box_with_array): + # GH#22163 ensure DataFrame doesn't cast Timestamp to i8 + # GH#9631 + tz = tz_naive_fixture + + dti = date_range("2016-01-01", periods=3, tz=tz) + if tz is None: + dti2 = dti.tz_localize("US/Eastern") + else: + dti2 = dti.tz_localize(None) + dtarr = 
tm.box_expected(dti, box_with_array) + + assert_cannot_add(dtarr, dti.values) + assert_cannot_add(dtarr, dti) + assert_cannot_add(dtarr, dtarr) + assert_cannot_add(dtarr, dti[0]) + assert_cannot_add(dtarr, dti[0].to_pydatetime()) + assert_cannot_add(dtarr, dti[0].to_datetime64()) + assert_cannot_add(dtarr, dti2[0]) + assert_cannot_add(dtarr, dti2[0].to_pydatetime()) + assert_cannot_add(dtarr, np.datetime64("2011-01-01", "D")) + + # ------------------------------------------------------------- + # Other Invalid Addition/Subtraction + + # Note: freq here includes both Tick and non-Tick offsets; this is + # relevant because historically integer-addition was allowed if we had + # a freq. + @pytest.mark.parametrize("freq", ["h", "D", "W", "2ME", "MS", "QE", "B", None]) + @pytest.mark.parametrize("dtype", [None, "uint8"]) + def test_dt64arr_addsub_intlike( + self, request, dtype, index_or_series_or_array, freq, tz_naive_fixture + ): + # GH#19959, GH#19123, GH#19012 + # GH#55860 use index_or_series_or_array instead of box_with_array + # bc DataFrame alignment makes it inapplicable + tz = tz_naive_fixture + + if freq is None: + dti = DatetimeIndex(["NaT", "2017-04-05 06:07:08"], tz=tz) + else: + dti = date_range("2016-01-01", periods=2, freq=freq, tz=tz) + + obj = index_or_series_or_array(dti) + other = np.array([4, -1]) + if dtype is not None: + other = other.astype(dtype) + + msg = "|".join( + [ + "Addition/subtraction of integers", + "cannot subtract DatetimeArray from", + # IntegerArray + "can only perform ops with numeric values", + "unsupported operand type.*Categorical", + r"unsupported operand type\(s\) for -: 'int' and 'Timestamp'", + ] + ) + assert_invalid_addsub_type(obj, 1, msg) + assert_invalid_addsub_type(obj, np.int64(2), msg) + assert_invalid_addsub_type(obj, np.array(3, dtype=np.int64), msg) + assert_invalid_addsub_type(obj, other, msg) + assert_invalid_addsub_type(obj, np.array(other), msg) + assert_invalid_addsub_type(obj, pd.array(other), msg) + assert_invalid_addsub_type(obj, pd.Categorical(other), msg) + assert_invalid_addsub_type(obj, pd.Index(other), msg) + assert_invalid_addsub_type(obj, Series(other), msg) + + @pytest.mark.parametrize( + "other", + [ + 3.14, + np.array([2.0, 3.0]), + # GH#13078 datetime +/- Period is invalid + Period("2011-01-01", freq="D"), + # https://github.com/pandas-dev/pandas/issues/10329 + time(1, 2, 3), + ], + ) + @pytest.mark.parametrize("dti_freq", [None, "D"]) + def test_dt64arr_add_sub_invalid(self, dti_freq, other, box_with_array): + dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq) + dtarr = tm.box_expected(dti, box_with_array) + msg = "|".join( + [ + "unsupported operand type", + "cannot (add|subtract)", + "cannot use operands with types", + "ufunc '?(add|subtract)'? 
cannot use operands with types", + "Concatenation operation is not implemented for NumPy arrays", + ] + ) + assert_invalid_addsub_type(dtarr, other, msg) + + @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "h"]) + @pytest.mark.parametrize("dti_freq", [None, "D"]) + def test_dt64arr_add_sub_parr( + self, dti_freq, pi_freq, box_with_array, box_with_array2 + ): + # GH#20049 subtracting PeriodIndex should raise TypeError + dti = DatetimeIndex(["2011-01-01", "2011-01-02"], freq=dti_freq) + pi = dti.to_period(pi_freq) + + dtarr = tm.box_expected(dti, box_with_array) + parr = tm.box_expected(pi, box_with_array2) + msg = "|".join( + [ + "cannot (add|subtract)", + "unsupported operand", + "descriptor.*requires", + "ufunc.*cannot use operands", + ] + ) + assert_invalid_addsub_type(dtarr, parr, msg) + + @pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") + def test_dt64arr_addsub_time_objects_raises(self, box_with_array, tz_naive_fixture): + # https://github.com/pandas-dev/pandas/issues/10329 + + tz = tz_naive_fixture + + obj1 = date_range("2012-01-01", periods=3, tz=tz) + obj2 = [time(i, i, i) for i in range(3)] + + obj1 = tm.box_expected(obj1, box_with_array) + obj2 = tm.box_expected(obj2, box_with_array) + + msg = "|".join( + [ + "unsupported operand", + "cannot subtract DatetimeArray from ndarray", + ] + ) + # pandas.errors.PerformanceWarning: Non-vectorized DateOffset being + # applied to Series or DatetimeIndex + # we aren't testing that here, so ignore. + assert_invalid_addsub_type(obj1, obj2, msg=msg) + + # ------------------------------------------------------------- + # Other invalid operations + + @pytest.mark.parametrize( + "dt64_series", + [ + Series([Timestamp("19900315"), Timestamp("19900315")]), + Series([NaT, Timestamp("19900315")]), + Series([NaT, NaT], dtype="datetime64[ns]"), + ], + ) + @pytest.mark.parametrize("one", [1, 1.0, np.array(1)]) + def test_dt64_mul_div_numeric_invalid(self, one, dt64_series, box_with_array): + obj = tm.box_expected(dt64_series, box_with_array) + + msg = "cannot perform .* with this index type" + + # multiplication + with pytest.raises(TypeError, match=msg): + obj * one + with pytest.raises(TypeError, match=msg): + one * obj + + # division + with pytest.raises(TypeError, match=msg): + obj / one + with pytest.raises(TypeError, match=msg): + one / obj + + +class TestDatetime64DateOffsetArithmetic: + # ------------------------------------------------------------- + # Tick DateOffsets + + # TODO: parametrize over timezone? 
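Before the Tick cases, an editorial illustration (a sketch, not part of the patch): Tick offsets such as Second(5) are fixed durations, so they agree with the equivalent Timedelta, which is what the smoke tests below rely on.

import pandas as pd

ser = pd.Series([pd.Timestamp("20130101 9:01"), pd.Timestamp("20130101 9:02")])

via_offset = ser + pd.offsets.Second(5)
via_timedelta = ser + pd.Timedelta(seconds=5)

# Both paths land on 09:01:05 and 09:02:05.
pd.testing.assert_series_equal(via_offset, via_timedelta)
assert via_offset.iloc[0] == pd.Timestamp("2013-01-01 09:01:05")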
+ @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) + def test_dt64arr_series_add_tick_DateOffset(self, box_with_array, unit): + # GH#4532 + # operate with pd.offsets + ser = Series( + [Timestamp("20130101 9:01"), Timestamp("20130101 9:02")] + ).dt.as_unit(unit) + expected = Series( + [Timestamp("20130101 9:01:05"), Timestamp("20130101 9:02:05")] + ).dt.as_unit(unit) + + ser = tm.box_expected(ser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = ser + pd.offsets.Second(5) + tm.assert_equal(result, expected) + + result2 = pd.offsets.Second(5) + ser + tm.assert_equal(result2, expected) + + def test_dt64arr_series_sub_tick_DateOffset(self, box_with_array): + # GH#4532 + # operate with pd.offsets + ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")]) + expected = Series( + [Timestamp("20130101 9:00:55"), Timestamp("20130101 9:01:55")] + ) + + ser = tm.box_expected(ser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = ser - pd.offsets.Second(5) + tm.assert_equal(result, expected) + + result2 = -pd.offsets.Second(5) + ser + tm.assert_equal(result2, expected) + msg = "(bad|unsupported) operand type for unary" + with pytest.raises(TypeError, match=msg): + pd.offsets.Second(5) - ser + + @pytest.mark.parametrize( + "cls_name", ["Day", "Hour", "Minute", "Second", "Milli", "Micro", "Nano"] + ) + def test_dt64arr_add_sub_tick_DateOffset_smoke(self, cls_name, box_with_array): + # GH#4532 + # smoke tests for valid DateOffsets + ser = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")]) + ser = tm.box_expected(ser, box_with_array) + + offset_cls = getattr(pd.offsets, cls_name) + ser + offset_cls(5) + offset_cls(5) + ser + ser - offset_cls(5) + + def test_dti_add_tick_tzaware(self, tz_aware_fixture, box_with_array): + # GH#21610, GH#22163 ensure DataFrame doesn't return object-dtype + tz = tz_aware_fixture + if tz == "US/Pacific": + dates = date_range("2012-11-01", periods=3, tz=tz) + offset = dates + pd.offsets.Hour(5) + assert dates[0] + pd.offsets.Hour(5) == offset[0] + + dates = date_range("2010-11-01 00:00", periods=3, tz=tz, freq="h") + expected = DatetimeIndex( + ["2010-11-01 05:00", "2010-11-01 06:00", "2010-11-01 07:00"], + freq="h", + tz=tz, + ).as_unit("ns") + + dates = tm.box_expected(dates, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + for scalar in [pd.offsets.Hour(5), np.timedelta64(5, "h"), timedelta(hours=5)]: + offset = dates + scalar + tm.assert_equal(offset, expected) + offset = scalar + dates + tm.assert_equal(offset, expected) + + roundtrip = offset - scalar + tm.assert_equal(roundtrip, dates) + + msg = "|".join( + ["bad operand type for unary -", "cannot subtract DatetimeArray"] + ) + with pytest.raises(TypeError, match=msg): + scalar - dates + + # ------------------------------------------------------------- + # RelativeDelta DateOffsets + + @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) + def test_dt64arr_add_sub_relativedelta_offsets(self, box_with_array, unit): + # GH#10699 + vec = DatetimeIndex( + [ + Timestamp("2000-01-05 00:15:00"), + Timestamp("2000-01-31 00:23:00"), + Timestamp("2000-01-01"), + Timestamp("2000-03-31"), + Timestamp("2000-02-29"), + Timestamp("2000-12-31"), + Timestamp("2000-05-15"), + Timestamp("2001-06-15"), + ] + ).as_unit(unit) + vec = tm.box_expected(vec, box_with_array) + vec_items = vec.iloc[0] if box_with_array is pd.DataFrame else vec + + # DateOffset relativedelta fastpath + relative_kwargs = [ + 
("years", 2), + ("months", 5), + ("days", 3), + ("hours", 5), + ("minutes", 10), + ("seconds", 2), + ("microseconds", 5), + ] + for i, (offset_unit, value) in enumerate(relative_kwargs): + off = DateOffset(**{offset_unit: value}) + + exp_unit = unit + if offset_unit == "microseconds" and unit != "ns": + exp_unit = "us" + + # TODO(GH#55564): as_unit will be unnecessary + expected = DatetimeIndex([x + off for x in vec_items]).as_unit(exp_unit) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(expected, vec + off) + + expected = DatetimeIndex([x - off for x in vec_items]).as_unit(exp_unit) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(expected, vec - off) + + off = DateOffset(**dict(relative_kwargs[: i + 1])) + + expected = DatetimeIndex([x + off for x in vec_items]).as_unit(exp_unit) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(expected, vec + off) + + expected = DatetimeIndex([x - off for x in vec_items]).as_unit(exp_unit) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(expected, vec - off) + msg = "(bad|unsupported) operand type for unary" + with pytest.raises(TypeError, match=msg): + off - vec + + # ------------------------------------------------------------- + # Non-Tick, Non-RelativeDelta DateOffsets + + # TODO: redundant with test_dt64arr_add_sub_DateOffset? that includes + # tz-aware cases which this does not + @pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning") + @pytest.mark.parametrize( + "cls_and_kwargs", + [ + "YearBegin", + ("YearBegin", {"month": 5}), + "YearEnd", + ("YearEnd", {"month": 5}), + "MonthBegin", + "MonthEnd", + "SemiMonthEnd", + "SemiMonthBegin", + "Week", + ("Week", {"weekday": 3}), + "Week", + ("Week", {"weekday": 6}), + "BusinessDay", + "BDay", + "QuarterEnd", + "QuarterBegin", + "CustomBusinessDay", + "CDay", + "CBMonthEnd", + "CBMonthBegin", + "BMonthBegin", + "BMonthEnd", + "BusinessHour", + "BYearBegin", + "BYearEnd", + "BQuarterBegin", + ("LastWeekOfMonth", {"weekday": 2}), + ( + "FY5253Quarter", + { + "qtr_with_extra_week": 1, + "startingMonth": 1, + "weekday": 2, + "variation": "nearest", + }, + ), + ("FY5253", {"weekday": 0, "startingMonth": 2, "variation": "nearest"}), + ("WeekOfMonth", {"weekday": 2, "week": 2}), + "Easter", + ("DateOffset", {"day": 4}), + ("DateOffset", {"month": 5}), + ], + ) + @pytest.mark.parametrize("normalize", [True, False]) + @pytest.mark.parametrize("n", [0, 5]) + @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) + @pytest.mark.parametrize("tz", [None, "US/Central"]) + def test_dt64arr_add_sub_DateOffsets( + self, box_with_array, n, normalize, cls_and_kwargs, unit, tz + ): + # GH#10699 + # assert vectorized operation matches pointwise operations + + if isinstance(cls_and_kwargs, tuple): + # If cls_name param is a tuple, then 2nd entry is kwargs for + # the offset constructor + cls_name, kwargs = cls_and_kwargs + else: + cls_name = cls_and_kwargs + kwargs = {} + + if n == 0 and cls_name in [ + "WeekOfMonth", + "LastWeekOfMonth", + "FY5253Quarter", + "FY5253", + ]: + # passing n = 0 is invalid for these offset classes + return + + vec = ( + DatetimeIndex( + [ + Timestamp("2000-01-05 00:15:00"), + Timestamp("2000-01-31 00:23:00"), + Timestamp("2000-01-01"), + Timestamp("2000-03-31"), + Timestamp("2000-02-29"), + Timestamp("2000-12-31"), + Timestamp("2000-05-15"), + Timestamp("2001-06-15"), + ] + ) + .as_unit(unit) + .tz_localize(tz) + ) + vec = tm.box_expected(vec, box_with_array) + vec_items = 
vec.iloc[0] if box_with_array is pd.DataFrame else vec + + offset_cls = getattr(pd.offsets, cls_name) + offset = offset_cls(n, normalize=normalize, **kwargs) + + # TODO(GH#55564): as_unit will be unnecessary + expected = DatetimeIndex([x + offset for x in vec_items]).as_unit(unit) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(expected, vec + offset) + tm.assert_equal(expected, offset + vec) + + expected = DatetimeIndex([x - offset for x in vec_items]).as_unit(unit) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(expected, vec - offset) + + expected = DatetimeIndex([offset + x for x in vec_items]).as_unit(unit) + expected = tm.box_expected(expected, box_with_array) + tm.assert_equal(expected, offset + vec) + msg = "(bad|unsupported) operand type for unary" + with pytest.raises(TypeError, match=msg): + offset - vec + + @pytest.mark.parametrize( + "other", + [ + np.array([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)]), + np.array([pd.offsets.DateOffset(years=1), pd.offsets.MonthEnd()]), + np.array( # matching offsets + [pd.offsets.DateOffset(years=1), pd.offsets.DateOffset(years=1)] + ), + ], + ) + @pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub]) + def test_dt64arr_add_sub_offset_array( + self, tz_naive_fixture, box_with_array, op, other + ): + # GH#18849 + # GH#10699 array of offsets + + tz = tz_naive_fixture + dti = date_range("2017-01-01", periods=2, tz=tz) + dtarr = tm.box_expected(dti, box_with_array) + + expected = DatetimeIndex([op(dti[n], other[n]) for n in range(len(dti))]) + expected = tm.box_expected(expected, box_with_array).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res = op(dtarr, other) + tm.assert_equal(res, expected) + + # Same thing but boxing other + other = tm.box_expected(other, box_with_array) + if box_with_array is pd.array and op is roperator.radd: + # We expect a NumpyExtensionArray, not ndarray[object] here + expected = pd.array(expected, dtype=object) + with tm.assert_produces_warning(PerformanceWarning): + res = op(dtarr, other) + tm.assert_equal(res, expected) + + @pytest.mark.parametrize( + "op, offset, exp, exp_freq", + [ + ( + "__add__", + DateOffset(months=3, days=10), + [ + Timestamp("2014-04-11"), + Timestamp("2015-04-11"), + Timestamp("2016-04-11"), + Timestamp("2017-04-11"), + ], + None, + ), + ( + "__add__", + DateOffset(months=3), + [ + Timestamp("2014-04-01"), + Timestamp("2015-04-01"), + Timestamp("2016-04-01"), + Timestamp("2017-04-01"), + ], + "YS-APR", + ), + ( + "__sub__", + DateOffset(months=3, days=10), + [ + Timestamp("2013-09-21"), + Timestamp("2014-09-21"), + Timestamp("2015-09-21"), + Timestamp("2016-09-21"), + ], + None, + ), + ( + "__sub__", + DateOffset(months=3), + [ + Timestamp("2013-10-01"), + Timestamp("2014-10-01"), + Timestamp("2015-10-01"), + Timestamp("2016-10-01"), + ], + "YS-OCT", + ), + ], + ) + def test_dti_add_sub_nonzero_mth_offset( + self, op, offset, exp, exp_freq, tz_aware_fixture, box_with_array + ): + # GH 26258 + tz = tz_aware_fixture + date = date_range(start="01 Jan 2014", end="01 Jan 2017", freq="YS", tz=tz) + date = tm.box_expected(date, box_with_array, False) + mth = getattr(date, op) + result = mth(offset) + + expected = DatetimeIndex(exp, tz=tz).as_unit("ns") + expected = tm.box_expected(expected, box_with_array, False) + tm.assert_equal(result, expected) + + def test_dt64arr_series_add_DateOffset_with_milli(self): + # GH 57529 + dti = DatetimeIndex( + [ + "2000-01-01 00:00:00.012345678", + "2000-01-31 
00:00:00.012345678", + "2000-02-29 00:00:00.012345678", + ], + dtype="datetime64[ns]", + ) + result = dti + DateOffset(milliseconds=4) + expected = DatetimeIndex( + [ + "2000-01-01 00:00:00.016345678", + "2000-01-31 00:00:00.016345678", + "2000-02-29 00:00:00.016345678", + ], + dtype="datetime64[ns]", + ) + tm.assert_index_equal(result, expected) + + result = dti + DateOffset(days=1, milliseconds=4) + expected = DatetimeIndex( + [ + "2000-01-02 00:00:00.016345678", + "2000-02-01 00:00:00.016345678", + "2000-03-01 00:00:00.016345678", + ], + dtype="datetime64[ns]", + ) + tm.assert_index_equal(result, expected) + + +class TestDatetime64OverflowHandling: + # TODO: box + de-duplicate + + def test_dt64_overflow_masking(self, box_with_array): + # GH#25317 + left = Series([Timestamp("1969-12-31")], dtype="M8[ns]") + right = Series([NaT]) + + left = tm.box_expected(left, box_with_array) + right = tm.box_expected(right, box_with_array) + + expected = TimedeltaIndex([NaT], dtype="m8[ns]") + expected = tm.box_expected(expected, box_with_array) + + result = left - right + tm.assert_equal(result, expected) + + def test_dt64_series_arith_overflow(self): + # GH#12534, fixed by GH#19024 + dt = Timestamp("1700-01-31") + td = Timedelta("20000 Days") + dti = date_range("1949-09-30", freq="100YE", periods=4) + ser = Series(dti) + msg = "Overflow in int64 addition" + with pytest.raises(OverflowError, match=msg): + ser - dt + with pytest.raises(OverflowError, match=msg): + dt - ser + with pytest.raises(OverflowError, match=msg): + ser + td + with pytest.raises(OverflowError, match=msg): + td + ser + + ser.iloc[-1] = NaT + expected = Series( + ["2004-10-03", "2104-10-04", "2204-10-04", "NaT"], dtype="datetime64[ns]" + ) + res = ser + td + tm.assert_series_equal(res, expected) + res = td + ser + tm.assert_series_equal(res, expected) + + ser.iloc[1:] = NaT + expected = Series(["91279 Days", "NaT", "NaT", "NaT"], dtype="timedelta64[ns]") + res = ser - dt + tm.assert_series_equal(res, expected) + res = dt - ser + tm.assert_series_equal(res, -expected) + + def test_datetimeindex_sub_timestamp_overflow(self): + dtimax = pd.to_datetime(["2021-12-28 17:19", Timestamp.max]).as_unit("ns") + dtimin = pd.to_datetime(["2021-12-28 17:19", Timestamp.min]).as_unit("ns") + + tsneg = Timestamp("1950-01-01").as_unit("ns") + ts_neg_variants = [ + tsneg, + tsneg.to_pydatetime(), + tsneg.to_datetime64().astype("datetime64[ns]"), + tsneg.to_datetime64().astype("datetime64[D]"), + ] + + tspos = Timestamp("1980-01-01").as_unit("ns") + ts_pos_variants = [ + tspos, + tspos.to_pydatetime(), + tspos.to_datetime64().astype("datetime64[ns]"), + tspos.to_datetime64().astype("datetime64[D]"), + ] + msg = "Overflow in int64 addition" + for variant in ts_neg_variants: + with pytest.raises(OverflowError, match=msg): + dtimax - variant + + expected = Timestamp.max._value - tspos._value + for variant in ts_pos_variants: + res = dtimax - variant + assert res[1]._value == expected + + expected = Timestamp.min._value - tsneg._value + for variant in ts_neg_variants: + res = dtimin - variant + assert res[1]._value == expected + + for variant in ts_pos_variants: + with pytest.raises(OverflowError, match=msg): + dtimin - variant + + def test_datetimeindex_sub_datetimeindex_overflow(self): + # GH#22492, GH#22508 + dtimax = pd.to_datetime(["2021-12-28 17:19", Timestamp.max]).as_unit("ns") + dtimin = pd.to_datetime(["2021-12-28 17:19", Timestamp.min]).as_unit("ns") + + ts_neg = pd.to_datetime(["1950-01-01", "1950-01-01"]).as_unit("ns") + ts_pos = 
pd.to_datetime(["1980-01-01", "1980-01-01"]).as_unit("ns") + + # General tests + expected = Timestamp.max._value - ts_pos[1]._value + result = dtimax - ts_pos + assert result[1]._value == expected + + expected = Timestamp.min._value - ts_neg[1]._value + result = dtimin - ts_neg + assert result[1]._value == expected + msg = "Overflow in int64 addition" + with pytest.raises(OverflowError, match=msg): + dtimax - ts_neg + + with pytest.raises(OverflowError, match=msg): + dtimin - ts_pos + + # Edge cases + tmin = pd.to_datetime([Timestamp.min]) + t1 = tmin + Timedelta.max + Timedelta("1us") + with pytest.raises(OverflowError, match=msg): + t1 - tmin + + tmax = pd.to_datetime([Timestamp.max]) + t2 = tmax + Timedelta.min - Timedelta("1us") + with pytest.raises(OverflowError, match=msg): + tmax - t2 + + +class TestTimestampSeriesArithmetic: + def test_empty_series_add_sub(self, box_with_array): + # GH#13844 + a = Series(dtype="M8[ns]") + b = Series(dtype="m8[ns]") + a = box_with_array(a) + b = box_with_array(b) + tm.assert_equal(a, a + b) + tm.assert_equal(a, a - b) + tm.assert_equal(a, b + a) + msg = "cannot subtract" + with pytest.raises(TypeError, match=msg): + b - a + + def test_operators_datetimelike(self): + # ## timedelta64 ### + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + # ## datetime64 ### + dt1 = Series( + [ + Timestamp("20111230"), + Timestamp("20120101"), + Timestamp("20120103"), + ] + ) + dt1.iloc[2] = np.nan + dt2 = Series( + [ + Timestamp("20111231"), + Timestamp("20120102"), + Timestamp("20120104"), + ] + ) + dt1 - dt2 + dt2 - dt1 + + # datetime64 with timetimedelta + dt1 + td1 + td1 + dt1 + dt1 - td1 + + # timetimedelta with datetime64 + td1 + dt1 + dt1 + td1 + + def test_dt64ser_sub_datetime_dtype(self, unit): + ts = Timestamp(datetime(1993, 1, 7, 13, 30, 00)) + dt = datetime(1993, 6, 22, 13, 30) + ser = Series([ts], dtype=f"M8[{unit}]") + result = ser - dt + + # the expected unit is the max of `unit` and the unit imputed to `dt`, + # which is "us" + exp_unit = tm.get_finest_unit(unit, "us") + assert result.dtype == f"timedelta64[{exp_unit}]" + + # ------------------------------------------------------------- + # TODO: This next block of tests came from tests.series.test_operators, + # needs to be de-duplicated and parametrized over `box` classes + + @pytest.mark.parametrize( + "left, right, op_fail", + [ + [ + [Timestamp("20111230"), Timestamp("20120101"), NaT], + [Timestamp("20111231"), Timestamp("20120102"), Timestamp("20120104")], + ["__sub__", "__rsub__"], + ], + [ + [Timestamp("20111230"), Timestamp("20120101"), NaT], + [timedelta(minutes=5, seconds=3), timedelta(minutes=5, seconds=3), NaT], + ["__add__", "__radd__", "__sub__"], + ], + [ + [ + Timestamp("20111230", tz="US/Eastern"), + Timestamp("20111230", tz="US/Eastern"), + NaT, + ], + [timedelta(minutes=5, seconds=3), NaT, timedelta(minutes=5, seconds=3)], + ["__add__", "__radd__", "__sub__"], + ], + ], + ) + def test_operators_datetimelike_invalid( + self, left, right, op_fail, all_arithmetic_operators + ): + # these are all TypeError ops + op_str = all_arithmetic_operators + arg1 = Series(left) + arg2 = Series(right) + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + op = getattr(arg1, op_str, None) + # Previously, _validate_for_numeric_binop in core/indexes/base.py + # did this for us. 
+ if op_str not in op_fail: + with pytest.raises( + TypeError, match="operate|[cC]annot|unsupported operand" + ): + op(arg2) + else: + # Smoke test + op(arg2) + + def test_sub_single_tz(self, unit): + # GH#12290 + s1 = Series([Timestamp("2016-02-10", tz="America/Sao_Paulo")]).dt.as_unit(unit) + s2 = Series([Timestamp("2016-02-08", tz="America/Sao_Paulo")]).dt.as_unit(unit) + result = s1 - s2 + expected = Series([Timedelta("2days")]).dt.as_unit(unit) + tm.assert_series_equal(result, expected) + result = s2 - s1 + expected = Series([Timedelta("-2days")]).dt.as_unit(unit) + tm.assert_series_equal(result, expected) + + def test_dt64tz_series_sub_dtitz(self): + # GH#19071 subtracting tzaware DatetimeIndex from tzaware Series + # (with same tz) raises, fixed by #19024 + dti = date_range("1999-09-30", periods=10, tz="US/Pacific") + ser = Series(dti) + expected = Series(TimedeltaIndex(["0days"] * 10)) + + res = dti - ser + tm.assert_series_equal(res, expected) + res = ser - dti + tm.assert_series_equal(res, expected) + + def test_sub_datetime_compat(self, unit): + # see GH#14088 + ser = Series([datetime(2016, 8, 23, 12, tzinfo=pytz.utc), NaT]).dt.as_unit(unit) + dt = datetime(2016, 8, 22, 12, tzinfo=pytz.utc) + # The datetime object has "us" so we upcast lower units + exp_unit = tm.get_finest_unit(unit, "us") + exp = Series([Timedelta("1 days"), NaT]).dt.as_unit(exp_unit) + result = ser - dt + tm.assert_series_equal(result, exp) + result2 = ser - Timestamp(dt) + tm.assert_series_equal(result2, exp) + + def test_dt64_series_add_mixed_tick_DateOffset(self): + # GH#4532 + # operate with pd.offsets + s = Series([Timestamp("20130101 9:01"), Timestamp("20130101 9:02")]) + + result = s + pd.offsets.Milli(5) + result2 = pd.offsets.Milli(5) + s + expected = Series( + [Timestamp("20130101 9:01:00.005"), Timestamp("20130101 9:02:00.005")] + ) + tm.assert_series_equal(result, expected) + tm.assert_series_equal(result2, expected) + + result = s + pd.offsets.Minute(5) + pd.offsets.Milli(5) + expected = Series( + [Timestamp("20130101 9:06:00.005"), Timestamp("20130101 9:07:00.005")] + ) + tm.assert_series_equal(result, expected) + + def test_datetime64_ops_nat(self, unit): + # GH#11349 + datetime_series = Series([NaT, Timestamp("19900315")]).dt.as_unit(unit) + nat_series_dtype_timestamp = Series([NaT, NaT], dtype=f"datetime64[{unit}]") + single_nat_dtype_datetime = Series([NaT], dtype=f"datetime64[{unit}]") + + # subtraction + tm.assert_series_equal(-NaT + datetime_series, nat_series_dtype_timestamp) + msg = "bad operand type for unary -: 'DatetimeArray'" + with pytest.raises(TypeError, match=msg): + -single_nat_dtype_datetime + datetime_series + + tm.assert_series_equal( + -NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp + ) + with pytest.raises(TypeError, match=msg): + -single_nat_dtype_datetime + nat_series_dtype_timestamp + + # addition + tm.assert_series_equal( + nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp + ) + tm.assert_series_equal( + NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp + ) + + tm.assert_series_equal( + nat_series_dtype_timestamp + NaT, nat_series_dtype_timestamp + ) + tm.assert_series_equal( + NaT + nat_series_dtype_timestamp, nat_series_dtype_timestamp + ) + + # ------------------------------------------------------------- + # Timezone-Centric Tests + + def test_operators_datetimelike_with_timezones(self): + tz = "US/Eastern" + dt1 = Series(date_range("2000-01-01 09:00:00", periods=5, tz=tz), name="foo") + dt2 = dt1.copy() + dt2.iloc[2] = np.nan + 
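+        # expected values below are built by computing on tz-naive data and
+        # re-localizing; that works here because none of these timedeltas
+        # cross a DST transition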
+ td1 = Series(pd.timedelta_range("1 days 1 min", periods=5, freq="h")) + td2 = td1.copy() + td2.iloc[1] = np.nan + assert td2._values.freq is None + + result = dt1 + td1[0] + exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt2 + td2[0] + exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + # odd numpy behavior with scalar timedeltas + result = td1[0] + dt1 + exp = (dt1.dt.tz_localize(None) + td1[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = td2[0] + dt2 + exp = (dt2.dt.tz_localize(None) + td2[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt1 - td1[0] + exp = (dt1.dt.tz_localize(None) - td1[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + msg = "(bad|unsupported) operand type for unary" + with pytest.raises(TypeError, match=msg): + td1[0] - dt1 + + result = dt2 - td2[0] + exp = (dt2.dt.tz_localize(None) - td2[0]).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + with pytest.raises(TypeError, match=msg): + td2[0] - dt2 + + result = dt1 + td1 + exp = (dt1.dt.tz_localize(None) + td1).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt2 + td2 + exp = (dt2.dt.tz_localize(None) + td2).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt1 - td1 + exp = (dt1.dt.tz_localize(None) - td1).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + + result = dt2 - td2 + exp = (dt2.dt.tz_localize(None) - td2).dt.tz_localize(tz) + tm.assert_series_equal(result, exp) + msg = "cannot (add|subtract)" + with pytest.raises(TypeError, match=msg): + td1 - dt1 + with pytest.raises(TypeError, match=msg): + td2 - dt2 + + +class TestDatetimeIndexArithmetic: + # ------------------------------------------------------------- + # Binary operations DatetimeIndex and TimedeltaIndex/array + + def test_dti_add_tdi(self, tz_naive_fixture): + # GH#17558 + tz = tz_naive_fixture + dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10) + tdi = pd.timedelta_range("0 days", periods=10) + expected = date_range("2017-01-01", periods=10, tz=tz) + expected = expected._with_freq(None) + + # add with TimedeltaIndex + result = dti + tdi + tm.assert_index_equal(result, expected) + + result = tdi + dti + tm.assert_index_equal(result, expected) + + # add with timedelta64 array + result = dti + tdi.values + tm.assert_index_equal(result, expected) + + result = tdi.values + dti + tm.assert_index_equal(result, expected) + + def test_dti_iadd_tdi(self, tz_naive_fixture): + # GH#17558 + tz = tz_naive_fixture + dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10) + tdi = pd.timedelta_range("0 days", periods=10) + expected = date_range("2017-01-01", periods=10, tz=tz) + expected = expected._with_freq(None) + + # iadd with TimedeltaIndex + result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10) + result += tdi + tm.assert_index_equal(result, expected) + + result = pd.timedelta_range("0 days", periods=10) + result += dti + tm.assert_index_equal(result, expected) + + # iadd with timedelta64 array + result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10) + result += tdi.values + tm.assert_index_equal(result, expected) + + result = pd.timedelta_range("0 days", periods=10) + result += dti + tm.assert_index_equal(result, expected) + + def test_dti_sub_tdi(self, tz_naive_fixture): + # GH#17558 + tz = tz_naive_fixture + dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10) + tdi = 
pd.timedelta_range("0 days", periods=10) + expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D") + expected = expected._with_freq(None) + + # sub with TimedeltaIndex + result = dti - tdi + tm.assert_index_equal(result, expected) + + msg = "cannot subtract .*TimedeltaArray" + with pytest.raises(TypeError, match=msg): + tdi - dti + + # sub with timedelta64 array + result = dti - tdi.values + tm.assert_index_equal(result, expected) + + msg = "cannot subtract a datelike from a TimedeltaArray" + with pytest.raises(TypeError, match=msg): + tdi.values - dti + + def test_dti_isub_tdi(self, tz_naive_fixture, unit): + # GH#17558 + tz = tz_naive_fixture + dti = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10).as_unit(unit) + tdi = pd.timedelta_range("0 days", periods=10, unit=unit) + expected = date_range("2017-01-01", periods=10, tz=tz, freq="-1D", unit=unit) + expected = expected._with_freq(None) + + # isub with TimedeltaIndex + result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10).as_unit(unit) + result -= tdi + tm.assert_index_equal(result, expected) + + # DTA.__isub__ GH#43904 + dta = dti._data.copy() + dta -= tdi + tm.assert_datetime_array_equal(dta, expected._data) + + out = dti._data.copy() + np.subtract(out, tdi, out=out) + tm.assert_datetime_array_equal(out, expected._data) + + msg = "cannot subtract a datelike from a TimedeltaArray" + with pytest.raises(TypeError, match=msg): + tdi -= dti + + # isub with timedelta64 array + result = DatetimeIndex([Timestamp("2017-01-01", tz=tz)] * 10).as_unit(unit) + result -= tdi.values + tm.assert_index_equal(result, expected) + + with pytest.raises(TypeError, match=msg): + tdi.values -= dti + + with pytest.raises(TypeError, match=msg): + tdi._values -= dti + + # ------------------------------------------------------------- + # Binary Operations DatetimeIndex and datetime-like + # TODO: A couple other tests belong in this section. Move them in + # A PR where there isn't already a giant diff. + + # ------------------------------------------------------------- + + def test_dta_add_sub_index(self, tz_naive_fixture): + # Check that DatetimeArray defers to Index classes + dti = date_range("20130101", periods=3, tz=tz_naive_fixture) + dta = dti.array + result = dta - dti + expected = dti - dti + tm.assert_index_equal(result, expected) + + tdi = result + result = dta + tdi + expected = dti + tdi + tm.assert_index_equal(result, expected) + + result = dta - tdi + expected = dti - tdi + tm.assert_index_equal(result, expected) + + def test_sub_dti_dti(self, unit): + # previously performed setop (deprecated in 0.16.0), now changed to + # return subtraction -> TimeDeltaIndex (GH ...) 
+ + dti = date_range("20130101", periods=3, unit=unit) + dti_tz = date_range("20130101", periods=3, unit=unit).tz_localize("US/Eastern") + expected = TimedeltaIndex([0, 0, 0]).as_unit(unit) + + result = dti - dti + tm.assert_index_equal(result, expected) + + result = dti_tz - dti_tz + tm.assert_index_equal(result, expected) + msg = "Cannot subtract tz-naive and tz-aware datetime-like objects" + with pytest.raises(TypeError, match=msg): + dti_tz - dti + + with pytest.raises(TypeError, match=msg): + dti - dti_tz + + # isub + dti -= dti + tm.assert_index_equal(dti, expected) + + # different length raises ValueError + dti1 = date_range("20130101", periods=3, unit=unit) + dti2 = date_range("20130101", periods=4, unit=unit) + msg = "cannot add indices of unequal length" + with pytest.raises(ValueError, match=msg): + dti1 - dti2 + + # NaN propagation + dti1 = DatetimeIndex(["2012-01-01", np.nan, "2012-01-03"]).as_unit(unit) + dti2 = DatetimeIndex(["2012-01-02", "2012-01-03", np.nan]).as_unit(unit) + expected = TimedeltaIndex(["1 days", np.nan, np.nan]).as_unit(unit) + result = dti2 - dti1 + tm.assert_index_equal(result, expected) + + # ------------------------------------------------------------------- + # TODO: Most of this block is moved from series or frame tests, needs + # cleanup, box-parametrization, and de-duplication + + @pytest.mark.parametrize("op", [operator.add, operator.sub]) + def test_timedelta64_equal_timedelta_supported_ops(self, op, box_with_array): + ser = Series( + [ + Timestamp("20130301"), + Timestamp("20130228 23:00:00"), + Timestamp("20130228 22:00:00"), + Timestamp("20130228 21:00:00"), + ] + ) + obj = box_with_array(ser) + + intervals = ["D", "h", "m", "s", "us"] + + def timedelta64(*args): + # see casting notes in NumPy gh-12927 + return np.sum(list(starmap(np.timedelta64, zip(args, intervals)))) + + for d, h, m, s, us in product(*([range(2)] * 5)): + nptd = timedelta64(d, h, m, s, us) + pytd = timedelta(days=d, hours=h, minutes=m, seconds=s, microseconds=us) + lhs = op(obj, nptd) + rhs = op(obj, pytd) + + tm.assert_equal(lhs, rhs) + + def test_ops_nat_mixed_datetime64_timedelta64(self): + # GH#11349 + timedelta_series = Series([NaT, Timedelta("1s")]) + datetime_series = Series([NaT, Timestamp("19900315")]) + nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]") + nat_series_dtype_timestamp = Series([NaT, NaT], dtype="datetime64[ns]") + single_nat_dtype_datetime = Series([NaT], dtype="datetime64[ns]") + single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]") + + # subtraction + tm.assert_series_equal( + datetime_series - single_nat_dtype_datetime, nat_series_dtype_timedelta + ) + + tm.assert_series_equal( + datetime_series - single_nat_dtype_timedelta, nat_series_dtype_timestamp + ) + tm.assert_series_equal( + -single_nat_dtype_timedelta + datetime_series, nat_series_dtype_timestamp + ) + + # without a Series wrapping the NaT, it is ambiguous + # whether it is a datetime64 or timedelta64 + # defaults to interpreting it as timedelta64 + tm.assert_series_equal( + nat_series_dtype_timestamp - single_nat_dtype_datetime, + nat_series_dtype_timedelta, + ) + + tm.assert_series_equal( + nat_series_dtype_timestamp - single_nat_dtype_timedelta, + nat_series_dtype_timestamp, + ) + tm.assert_series_equal( + -single_nat_dtype_timedelta + nat_series_dtype_timestamp, + nat_series_dtype_timestamp, + ) + msg = "cannot subtract a datelike" + with pytest.raises(TypeError, match=msg): + timedelta_series - single_nat_dtype_datetime + + # addition + 
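+        # adding a timedelta64 NaT to a datetime64 NaT Series keeps the
+        # datetime64 dtype in either operand order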
tm.assert_series_equal( + nat_series_dtype_timestamp + single_nat_dtype_timedelta, + nat_series_dtype_timestamp, + ) + tm.assert_series_equal( + single_nat_dtype_timedelta + nat_series_dtype_timestamp, + nat_series_dtype_timestamp, + ) + + tm.assert_series_equal( + nat_series_dtype_timestamp + single_nat_dtype_timedelta, + nat_series_dtype_timestamp, + ) + tm.assert_series_equal( + single_nat_dtype_timedelta + nat_series_dtype_timestamp, + nat_series_dtype_timestamp, + ) + + tm.assert_series_equal( + nat_series_dtype_timedelta + single_nat_dtype_datetime, + nat_series_dtype_timestamp, + ) + tm.assert_series_equal( + single_nat_dtype_datetime + nat_series_dtype_timedelta, + nat_series_dtype_timestamp, + ) + + def test_ufunc_coercions(self, unit): + idx = date_range("2011-01-01", periods=3, freq="2D", name="x", unit=unit) + + delta = np.timedelta64(1, "D") + exp = date_range("2011-01-02", periods=3, freq="2D", name="x", unit=unit) + for result in [idx + delta, np.add(idx, delta)]: + assert isinstance(result, DatetimeIndex) + tm.assert_index_equal(result, exp) + assert result.freq == "2D" + + exp = date_range("2010-12-31", periods=3, freq="2D", name="x", unit=unit) + + for result in [idx - delta, np.subtract(idx, delta)]: + assert isinstance(result, DatetimeIndex) + tm.assert_index_equal(result, exp) + assert result.freq == "2D" + + # When adding/subtracting an ndarray (which has no .freq), the result + # does not infer freq + idx = idx._with_freq(None) + delta = np.array( + [np.timedelta64(1, "D"), np.timedelta64(2, "D"), np.timedelta64(3, "D")] + ) + exp = DatetimeIndex( + ["2011-01-02", "2011-01-05", "2011-01-08"], name="x" + ).as_unit(unit) + + for result in [idx + delta, np.add(idx, delta)]: + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + exp = DatetimeIndex( + ["2010-12-31", "2011-01-01", "2011-01-02"], name="x" + ).as_unit(unit) + for result in [idx - delta, np.subtract(idx, delta)]: + assert isinstance(result, DatetimeIndex) + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + def test_dti_add_series(self, tz_naive_fixture, names): + # GH#13905 + tz = tz_naive_fixture + index = DatetimeIndex( + ["2016-06-28 05:30", "2016-06-28 05:31"], tz=tz, name=names[0] + ).as_unit("ns") + ser = Series([Timedelta(seconds=5)] * 2, index=index, name=names[1]) + expected = Series(index + Timedelta(seconds=5), index=index, name=names[2]) + + # passing name arg isn't enough when names[2] is None + expected.name = names[2] + assert expected.dtype == index.dtype + result = ser + index + tm.assert_series_equal(result, expected) + result2 = index + ser + tm.assert_series_equal(result2, expected) + + expected = index + Timedelta(seconds=5) + result3 = ser.values + index + tm.assert_index_equal(result3, expected) + result4 = index + ser.values + tm.assert_index_equal(result4, expected) + + @pytest.mark.parametrize("op", [operator.add, roperator.radd, operator.sub]) + def test_dti_addsub_offset_arraylike( + self, tz_naive_fixture, names, op, index_or_series + ): + # GH#18849, GH#19744 + other_box = index_or_series + + tz = tz_naive_fixture + dti = date_range("2017-01-01", periods=2, tz=tz, name=names[0]) + other = other_box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)], name=names[1]) + + xbox = get_upcast_box(dti, other) + + with tm.assert_produces_warning(PerformanceWarning): + res = op(dti, other) + + expected = DatetimeIndex( + [op(dti[n], other[n]) for n in range(len(dti))], name=names[2], freq="infer" + ) + expected = tm.box_expected(expected, 
xbox).astype(object) + tm.assert_equal(res, expected) + + @pytest.mark.parametrize("other_box", [pd.Index, np.array]) + def test_dti_addsub_object_arraylike( + self, tz_naive_fixture, box_with_array, other_box + ): + tz = tz_naive_fixture + + dti = date_range("2017-01-01", periods=2, tz=tz) + dtarr = tm.box_expected(dti, box_with_array) + other = other_box([pd.offsets.MonthEnd(), Timedelta(days=4)]) + xbox = get_upcast_box(dtarr, other) + + expected = DatetimeIndex(["2017-01-31", "2017-01-06"], tz=tz_naive_fixture) + expected = tm.box_expected(expected, xbox).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + result = dtarr + other + tm.assert_equal(result, expected) + + expected = DatetimeIndex(["2016-12-31", "2016-12-29"], tz=tz_naive_fixture) + expected = tm.box_expected(expected, xbox).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + result = dtarr - other + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize("years", [-1, 0, 1]) +@pytest.mark.parametrize("months", [-2, 0, 2]) +@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) +def test_shift_months(years, months, unit): + dti = DatetimeIndex( + [ + Timestamp("2000-01-05 00:15:00"), + Timestamp("2000-01-31 00:23:00"), + Timestamp("2000-01-01"), + Timestamp("2000-02-29"), + Timestamp("2000-12-31"), + ] + ).as_unit(unit) + shifted = shift_months(dti.asi8, years * 12 + months, reso=dti._data._creso) + shifted_dt64 = shifted.view(f"M8[{dti.unit}]") + actual = DatetimeIndex(shifted_dt64) + + raw = [x + pd.offsets.DateOffset(years=years, months=months) for x in dti] + expected = DatetimeIndex(raw).as_unit(dti.unit) + tm.assert_index_equal(actual, expected) + + +def test_dt64arr_addsub_object_dtype_2d(): + # block-wise DataFrame operations will require operating on 2D + # DatetimeArray/TimedeltaArray, so check that specifically. + dti = date_range("1994-02-13", freq="2W", periods=4) + dta = dti._data.reshape((4, 1)) + + other = np.array([[pd.offsets.Day(n)] for n in range(4)]) + assert other.shape == dta.shape + + with tm.assert_produces_warning(PerformanceWarning): + result = dta + other + with tm.assert_produces_warning(PerformanceWarning): + expected = (dta[:, 0] + other[:, 0]).reshape(-1, 1) + + tm.assert_numpy_array_equal(result, expected) + + with tm.assert_produces_warning(PerformanceWarning): + # Case where we expect to get a TimedeltaArray back + result2 = dta - dta.astype(object) + + assert result2.shape == (4, 1) + assert all(td._value == 0 for td in result2.ravel()) + + +def test_non_nano_dt64_addsub_np_nat_scalars(): + # GH 52295 + ser = Series([1233242342344, 232432434324, 332434242344], dtype="datetime64[ms]") + result = ser - np.datetime64("nat", "ms") + expected = Series([NaT] * 3, dtype="timedelta64[ms]") + tm.assert_series_equal(result, expected) + + result = ser + np.timedelta64("nat", "ms") + expected = Series([NaT] * 3, dtype="datetime64[ms]") + tm.assert_series_equal(result, expected) + + +def test_non_nano_dt64_addsub_np_nat_scalars_unitless(): + # GH 52295 + # TODO: Can we default to the ser unit? 
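+    # a unitless np.datetime64("nat") or np.timedelta64("nat") carries no
+    # resolution, so the result falls back to nanoseconds instead of
+    # keeping the Series' "ms" unit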
+ ser = Series([1233242342344, 232432434324, 332434242344], dtype="datetime64[ms]") + result = ser - np.datetime64("nat") + expected = Series([NaT] * 3, dtype="timedelta64[ns]") + tm.assert_series_equal(result, expected) + + result = ser + np.timedelta64("nat") + expected = Series([NaT] * 3, dtype="datetime64[ns]") + tm.assert_series_equal(result, expected) + + +def test_non_nano_dt64_addsub_np_nat_scalars_unsupported_unit(): + # GH 52295 + ser = Series([12332, 23243, 33243], dtype="datetime64[s]") + result = ser - np.datetime64("nat", "D") + expected = Series([NaT] * 3, dtype="timedelta64[s]") + tm.assert_series_equal(result, expected) + + result = ser + np.timedelta64("nat", "D") + expected = Series([NaT] * 3, dtype="datetime64[s]") + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_interval.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_interval.py new file mode 100644 index 0000000000000000000000000000000000000000..0e316cf419cb0d3be489f474a9c6d889e668e7c9 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_interval.py @@ -0,0 +1,306 @@ +import operator + +import numpy as np +import pytest + +from pandas.core.dtypes.common import is_list_like + +import pandas as pd +from pandas import ( + Categorical, + Index, + Interval, + IntervalIndex, + Period, + Series, + Timedelta, + Timestamp, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays import ( + BooleanArray, + IntervalArray, +) +from pandas.tests.arithmetic.common import get_upcast_box + + +@pytest.fixture( + params=[ + (Index([0, 2, 4, 4]), Index([1, 3, 5, 8])), + (Index([0.0, 1.0, 2.0, np.nan]), Index([1.0, 2.0, 3.0, np.nan])), + ( + timedelta_range("0 days", periods=3).insert(3, pd.NaT), + timedelta_range("1 day", periods=3).insert(3, pd.NaT), + ), + ( + date_range("20170101", periods=3).insert(3, pd.NaT), + date_range("20170102", periods=3).insert(3, pd.NaT), + ), + ( + date_range("20170101", periods=3, tz="US/Eastern").insert(3, pd.NaT), + date_range("20170102", periods=3, tz="US/Eastern").insert(3, pd.NaT), + ), + ], + ids=lambda x: str(x[0].dtype), +) +def left_right_dtypes(request): + """ + Fixture for building an IntervalArray from various dtypes + """ + return request.param + + +@pytest.fixture +def interval_array(left_right_dtypes): + """ + Fixture to generate an IntervalArray of various dtypes containing NA if possible + """ + left, right = left_right_dtypes + return IntervalArray.from_arrays(left, right) + + +def create_categorical_intervals(left, right, closed="right"): + return Categorical(IntervalIndex.from_arrays(left, right, closed)) + + +def create_series_intervals(left, right, closed="right"): + return Series(IntervalArray.from_arrays(left, right, closed)) + + +def create_series_categorical_intervals(left, right, closed="right"): + return Series(Categorical(IntervalIndex.from_arrays(left, right, closed))) + + +class TestComparison: + @pytest.fixture(params=[operator.eq, operator.ne]) + def op(self, request): + return request.param + + @pytest.fixture( + params=[ + IntervalArray.from_arrays, + IntervalIndex.from_arrays, + create_categorical_intervals, + create_series_intervals, + create_series_categorical_intervals, + ], + ids=[ + "IntervalArray", + "IntervalIndex", + "Categorical[Interval]", + "Series[Interval]", + "Series[Categorical[Interval]]", + ], + ) + def interval_constructor(self, request): + """ + Fixture for all 
pandas native interval constructors. + To be used as the LHS of IntervalArray comparisons. + """ + return request.param + + def elementwise_comparison(self, op, interval_array, other): + """ + Helper that performs elementwise comparisons between `array` and `other` + """ + other = other if is_list_like(other) else [other] * len(interval_array) + expected = np.array([op(x, y) for x, y in zip(interval_array, other)]) + if isinstance(other, Series): + return Series(expected, index=other.index) + return expected + + def test_compare_scalar_interval(self, op, interval_array): + # matches first interval + other = interval_array[0] + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + # matches on a single endpoint but not both + other = Interval(interval_array.left[0], interval_array.right[1]) + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_scalar_interval_mixed_closed(self, op, closed, other_closed): + interval_array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed) + other = Interval(0, 1, closed=other_closed) + + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_scalar_na(self, op, interval_array, nulls_fixture, box_with_array): + box = box_with_array + obj = tm.box_expected(interval_array, box) + result = op(obj, nulls_fixture) + + if nulls_fixture is pd.NA: + # GH#31882 + exp = np.ones(interval_array.shape, dtype=bool) + expected = BooleanArray(exp, exp) + else: + expected = self.elementwise_comparison(op, interval_array, nulls_fixture) + + if not (box is Index and nulls_fixture is pd.NA): + # don't cast expected from BooleanArray to ndarray[object] + xbox = get_upcast_box(obj, nulls_fixture, True) + expected = tm.box_expected(expected, xbox) + + tm.assert_equal(result, expected) + + rev = op(nulls_fixture, obj) + tm.assert_equal(rev, expected) + + @pytest.mark.parametrize( + "other", + [ + 0, + 1.0, + True, + "foo", + Timestamp("2017-01-01"), + Timestamp("2017-01-01", tz="US/Eastern"), + Timedelta("0 days"), + Period("2017-01-01", "D"), + ], + ) + def test_compare_scalar_other(self, op, interval_array, other): + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_list_like_interval(self, op, interval_array, interval_constructor): + # same endpoints + other = interval_constructor(interval_array.left, interval_array.right) + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_equal(result, expected) + + # different endpoints + other = interval_constructor( + interval_array.left[::-1], interval_array.right[::-1] + ) + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_equal(result, expected) + + # all nan endpoints + other = interval_constructor([np.nan] * 4, [np.nan] * 4) + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_equal(result, expected) + + def test_compare_list_like_interval_mixed_closed( + self, op, interval_constructor, closed, other_closed + ): + interval_array = IntervalArray.from_arrays(range(2), range(1, 3), 
closed=closed) + other = interval_constructor(range(2), range(1, 3), closed=other_closed) + + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "other", + [ + ( + Interval(0, 1), + Interval(Timedelta("1 day"), Timedelta("2 days")), + Interval(4, 5, "both"), + Interval(10, 20, "neither"), + ), + (0, 1.5, Timestamp("20170103"), np.nan), + ( + Timestamp("20170102", tz="US/Eastern"), + Timedelta("2 days"), + "baz", + pd.NaT, + ), + ], + ) + def test_compare_list_like_object(self, op, interval_array, other): + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + def test_compare_list_like_nan(self, op, interval_array, nulls_fixture): + other = [nulls_fixture] * 4 + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "other", + [ + np.arange(4, dtype="int64"), + np.arange(4, dtype="float64"), + date_range("2017-01-01", periods=4), + date_range("2017-01-01", periods=4, tz="US/Eastern"), + timedelta_range("0 days", periods=4), + period_range("2017-01-01", periods=4, freq="D"), + Categorical(list("abab")), + Categorical(date_range("2017-01-01", periods=4)), + pd.array(list("abcd")), + pd.array(["foo", 3.14, None, object()], dtype=object), + ], + ids=lambda x: str(x.dtype), + ) + def test_compare_list_like_other(self, op, interval_array, other): + result = op(interval_array, other) + expected = self.elementwise_comparison(op, interval_array, other) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("length", [1, 3, 5]) + @pytest.mark.parametrize("other_constructor", [IntervalArray, list]) + def test_compare_length_mismatch_errors(self, op, other_constructor, length): + interval_array = IntervalArray.from_arrays(range(4), range(1, 5)) + other = other_constructor([Interval(0, 1)] * length) + with pytest.raises(ValueError, match="Lengths must match to compare"): + op(interval_array, other) + + @pytest.mark.parametrize( + "constructor, expected_type, assert_func", + [ + (IntervalIndex, np.array, tm.assert_numpy_array_equal), + (Series, Series, tm.assert_series_equal), + ], + ) + def test_index_series_compat(self, op, constructor, expected_type, assert_func): + # IntervalIndex/Series that rely on IntervalArray for comparisons + breaks = range(4) + index = constructor(IntervalIndex.from_breaks(breaks)) + + # scalar comparisons + other = index[0] + result = op(index, other) + expected = expected_type(self.elementwise_comparison(op, index, other)) + assert_func(result, expected) + + other = breaks[0] + result = op(index, other) + expected = expected_type(self.elementwise_comparison(op, index, other)) + assert_func(result, expected) + + # list-like comparisons + other = IntervalArray.from_breaks(breaks) + result = op(index, other) + expected = expected_type(self.elementwise_comparison(op, index, other)) + assert_func(result, expected) + + other = [index[0], breaks[0], "foo"] + result = op(index, other) + expected = expected_type(self.elementwise_comparison(op, index, other)) + assert_func(result, expected) + + @pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None]) + def test_comparison_operations(self, scalars): + # GH #28981 + expected = Series([False, False]) + s = Series([Interval(0, 1), Interval(1, 2)], dtype="interval") + result = 
s == scalars + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_numeric.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_numeric.py new file mode 100644 index 0000000000000000000000000000000000000000..d8c1786b6b422c32a0f396b43f51d75b2b3ffe25 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_numeric.py @@ -0,0 +1,1567 @@ +# Arithmetic tests for DataFrame/Series/Index/Array classes that should +# behave identically. +# Specifically for numeric dtypes +from __future__ import annotations + +from collections import abc +from datetime import timedelta +from decimal import Decimal +import operator + +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + RangeIndex, + Series, + Timedelta, + TimedeltaIndex, + array, + date_range, +) +import pandas._testing as tm +from pandas.core import ops +from pandas.core.computation import expressions as expr +from pandas.tests.arithmetic.common import ( + assert_invalid_addsub_type, + assert_invalid_comparison, +) + + +@pytest.fixture(autouse=True, params=[0, 1000000], ids=["numexpr", "python"]) +def switch_numexpr_min_elements(request, monkeypatch): + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", request.param) + yield request.param + + +@pytest.fixture(params=[Index, Series, tm.to_array]) +def box_pandas_1d_array(request): + """ + Fixture to test behavior for Index, Series and tm.to_array classes + """ + return request.param + + +@pytest.fixture( + params=[ + # TODO: add more dtypes here + Index(np.arange(5, dtype="float64")), + Index(np.arange(5, dtype="int64")), + Index(np.arange(5, dtype="uint64")), + RangeIndex(5), + ], + ids=lambda x: type(x).__name__, +) +def numeric_idx(request): + """ + Several types of numeric-dtypes Index objects + """ + return request.param + + +@pytest.fixture( + params=[Index, Series, tm.to_array, np.array, list], ids=lambda x: x.__name__ +) +def box_1d_array(request): + """ + Fixture to test behavior for Index, Series, tm.to_array, numpy Array and list + classes + """ + return request.param + + +def adjust_negative_zero(zero, expected): + """ + Helper to adjust the expected result if we are dividing by -0.0 + as opposed to 0.0 + """ + if np.signbit(np.array(zero)).any(): + # All entries in the `zero` fixture should be either + # all-negative or no-negative. 
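+        # np.signbit is used rather than a comparison because
+        # np.signbit(-0.0) is True even though -0.0 == 0.0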
+ assert np.signbit(np.array(zero)).all() + + expected *= -1 + + return expected + + +def compare_op(series, other, op): + left = np.abs(series) if op in (ops.rpow, operator.pow) else series + right = np.abs(other) if op in (ops.rpow, operator.pow) else other + + cython_or_numpy = op(left, right) + python = left.combine(right, op) + if isinstance(other, Series) and not other.index.equals(series.index): + python.index = python.index._with_freq(None) + tm.assert_series_equal(cython_or_numpy, python) + + +# TODO: remove this kludge once mypy stops giving false positives here +# List comprehension has incompatible type List[PandasObject]; expected List[RangeIndex] +# See GH#29725 +_ldtypes = ["i1", "i2", "i4", "i8", "u1", "u2", "u4", "u8", "f2", "f4", "f8"] +lefts: list[Index | Series] = [RangeIndex(10, 40, 10)] +lefts.extend([Series([10, 20, 30], dtype=dtype) for dtype in _ldtypes]) +lefts.extend([Index([10, 20, 30], dtype=dtype) for dtype in _ldtypes if dtype != "f2"]) + +# ------------------------------------------------------------------ +# Comparisons + + +class TestNumericComparisons: + def test_operator_series_comparison_zerorank(self): + # GH#13006 + result = np.float64(0) > Series([1, 2, 3]) + expected = 0.0 > Series([1, 2, 3]) + tm.assert_series_equal(result, expected) + result = Series([1, 2, 3]) < np.float64(0) + expected = Series([1, 2, 3]) < 0.0 + tm.assert_series_equal(result, expected) + result = np.array([0, 1, 2])[0] > Series([0, 1, 2]) + expected = 0.0 > Series([1, 2, 3]) + tm.assert_series_equal(result, expected) + + def test_df_numeric_cmp_dt64_raises(self, box_with_array, fixed_now_ts): + # GH#8932, GH#22163 + ts = fixed_now_ts + obj = np.array(range(5)) + obj = tm.box_expected(obj, box_with_array) + + assert_invalid_comparison(obj, ts, box_with_array) + + def test_compare_invalid(self): + # GH#8058 + # ops testing + a = Series(np.random.default_rng(2).standard_normal(5), name=0) + b = Series(np.random.default_rng(2).standard_normal(5)) + b.name = pd.Timestamp("2000-01-01") + tm.assert_series_equal(a / b, 1 / (b / a)) + + def test_numeric_cmp_string_numexpr_path(self, box_with_array, monkeypatch): + # GH#36377, GH#35700 + box = box_with_array + xbox = box if box is not Index else np.ndarray + + obj = Series(np.random.default_rng(2).standard_normal(51)) + obj = tm.box_expected(obj, box, transpose=False) + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 50) + result = obj == "a" + + expected = Series(np.zeros(51, dtype=bool)) + expected = tm.box_expected(expected, xbox, transpose=False) + tm.assert_equal(result, expected) + + with monkeypatch.context() as m: + m.setattr(expr, "_MIN_ELEMENTS", 50) + result = obj != "a" + tm.assert_equal(result, ~expected) + + msg = "Invalid comparison between dtype=float64 and str" + with pytest.raises(TypeError, match=msg): + obj < "a" + + +# ------------------------------------------------------------------ +# Numeric dtypes Arithmetic with Datetime/Timedelta Scalar + + +class TestNumericArraylikeArithmeticWithDatetimeLike: + @pytest.mark.parametrize("box_cls", [np.array, Index, Series]) + @pytest.mark.parametrize( + "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype) + ) + def test_mul_td64arr(self, left, box_cls): + # GH#22390 + right = np.array([1, 2, 3], dtype="m8[s]") + right = box_cls(right) + + expected = TimedeltaIndex(["10s", "40s", "90s"], dtype=right.dtype) + + if isinstance(left, Series) or box_cls is Series: + expected = Series(expected) + assert expected.dtype == right.dtype + + result = left * 
right + tm.assert_equal(result, expected) + + result = right * left + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("box_cls", [np.array, Index, Series]) + @pytest.mark.parametrize( + "left", lefts, ids=lambda x: type(x).__name__ + str(x.dtype) + ) + def test_div_td64arr(self, left, box_cls): + # GH#22390 + right = np.array([10, 40, 90], dtype="m8[s]") + right = box_cls(right) + + expected = TimedeltaIndex(["1s", "2s", "3s"], dtype=right.dtype) + if isinstance(left, Series) or box_cls is Series: + expected = Series(expected) + assert expected.dtype == right.dtype + + result = right / left + tm.assert_equal(result, expected) + + result = right // left + tm.assert_equal(result, expected) + + # (true_) needed for min-versions build 2022-12-26 + msg = "ufunc '(true_)?divide' cannot use operands with types" + with pytest.raises(TypeError, match=msg): + left / right + + msg = "ufunc 'floor_divide' cannot use operands with types" + with pytest.raises(TypeError, match=msg): + left // right + + # TODO: also test Tick objects; + # see test_numeric_arr_rdiv_tdscalar for note on these failing + @pytest.mark.parametrize( + "scalar_td", + [ + Timedelta(days=1), + Timedelta(days=1).to_timedelta64(), + Timedelta(days=1).to_pytimedelta(), + Timedelta(days=1).to_timedelta64().astype("timedelta64[s]"), + Timedelta(days=1).to_timedelta64().astype("timedelta64[ms]"), + ], + ids=lambda x: type(x).__name__, + ) + def test_numeric_arr_mul_tdscalar(self, scalar_td, numeric_idx, box_with_array): + # GH#19333 + box = box_with_array + index = numeric_idx + expected = TimedeltaIndex([Timedelta(days=n) for n in range(len(index))]) + if isinstance(scalar_td, np.timedelta64): + dtype = scalar_td.dtype + expected = expected.astype(dtype) + elif type(scalar_td) is timedelta: + expected = expected.astype("m8[us]") + + index = tm.box_expected(index, box) + expected = tm.box_expected(expected, box) + + result = index * scalar_td + tm.assert_equal(result, expected) + + commute = scalar_td * index + tm.assert_equal(commute, expected) + + @pytest.mark.parametrize( + "scalar_td", + [ + Timedelta(days=1), + Timedelta(days=1).to_timedelta64(), + Timedelta(days=1).to_pytimedelta(), + ], + ids=lambda x: type(x).__name__, + ) + @pytest.mark.parametrize("dtype", [np.int64, np.float64]) + def test_numeric_arr_mul_tdscalar_numexpr_path( + self, dtype, scalar_td, box_with_array + ): + # GH#44772 for the float64 case + box = box_with_array + + arr_i8 = np.arange(2 * 10**4).astype(np.int64, copy=False) + arr = arr_i8.astype(dtype, copy=False) + obj = tm.box_expected(arr, box, transpose=False) + + expected = arr_i8.view("timedelta64[D]").astype("timedelta64[ns]") + if type(scalar_td) is timedelta: + expected = expected.astype("timedelta64[us]") + + expected = tm.box_expected(expected, box, transpose=False) + + result = obj * scalar_td + tm.assert_equal(result, expected) + + result = scalar_td * obj + tm.assert_equal(result, expected) + + def test_numeric_arr_rdiv_tdscalar(self, three_days, numeric_idx, box_with_array): + box = box_with_array + + index = numeric_idx[1:3] + + expected = TimedeltaIndex(["3 Days", "36 Hours"]) + if isinstance(three_days, np.timedelta64): + dtype = three_days.dtype + if dtype < np.dtype("m8[s]"): + # i.e. 
resolution is lower -> use lowest supported resolution + dtype = np.dtype("m8[s]") + expected = expected.astype(dtype) + elif type(three_days) is timedelta: + expected = expected.astype("m8[us]") + elif isinstance( + three_days, + (pd.offsets.Day, pd.offsets.Hour, pd.offsets.Minute, pd.offsets.Second), + ): + # closest reso is Second + expected = expected.astype("m8[s]") + + index = tm.box_expected(index, box) + expected = tm.box_expected(expected, box) + + result = three_days / index + tm.assert_equal(result, expected) + + msg = "cannot use operands with types dtype" + with pytest.raises(TypeError, match=msg): + index / three_days + + @pytest.mark.parametrize( + "other", + [ + Timedelta(hours=31), + Timedelta(hours=31).to_pytimedelta(), + Timedelta(hours=31).to_timedelta64(), + Timedelta(hours=31).to_timedelta64().astype("m8[h]"), + np.timedelta64("NaT"), + np.timedelta64("NaT", "D"), + pd.offsets.Minute(3), + pd.offsets.Second(0), + # GH#28080 numeric+datetimelike should raise; Timestamp used + # to raise NullFrequencyError but that behavior was removed in 1.0 + pd.Timestamp("2021-01-01", tz="Asia/Tokyo"), + pd.Timestamp("2021-01-01"), + pd.Timestamp("2021-01-01").to_pydatetime(), + pd.Timestamp("2021-01-01", tz="UTC").to_pydatetime(), + pd.Timestamp("2021-01-01").to_datetime64(), + np.datetime64("NaT", "ns"), + pd.NaT, + ], + ids=repr, + ) + def test_add_sub_datetimedeltalike_invalid( + self, numeric_idx, other, box_with_array + ): + box = box_with_array + + left = tm.box_expected(numeric_idx, box) + msg = "|".join( + [ + "unsupported operand type", + "Addition/subtraction of integers and integer-arrays", + "Instead of adding/subtracting", + "cannot use operands with types dtype", + "Concatenation operation is not implemented for NumPy arrays", + "Cannot (add|subtract) NaT (to|from) ndarray", + # pd.array vs np.datetime64 case + r"operand type\(s\) all returned NotImplemented from __array_ufunc__", + "can only perform ops with numeric values", + "cannot subtract DatetimeArray from ndarray", + # pd.Timedelta(1) + Index([0, 1, 2]) + "Cannot add or subtract Timedelta from integers", + ] + ) + assert_invalid_addsub_type(left, other, msg) + + +# ------------------------------------------------------------------ +# Arithmetic + + +class TestDivisionByZero: + def test_div_zero(self, zero, numeric_idx): + idx = numeric_idx + + expected = Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64) + # We only adjust for Index, because Series does not yet apply + # the adjustment correctly. + expected2 = adjust_negative_zero(zero, expected) + + result = idx / zero + tm.assert_index_equal(result, expected2) + ser_compat = Series(idx).astype("i8") / np.array(zero).astype("i8") + tm.assert_series_equal(ser_compat, Series(expected)) + + def test_floordiv_zero(self, zero, numeric_idx): + idx = numeric_idx + + expected = Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64) + # We only adjust for Index, because Series does not yet apply + # the adjustment correctly. 
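+        # dividing by a negative zero flips the sign of the result, e.g.
+        # 1 / -0.0 is -inf while 1 / 0.0 is inf, hence the adjustment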
+ expected2 = adjust_negative_zero(zero, expected) + + result = idx // zero + tm.assert_index_equal(result, expected2) + ser_compat = Series(idx).astype("i8") // np.array(zero).astype("i8") + tm.assert_series_equal(ser_compat, Series(expected)) + + def test_mod_zero(self, zero, numeric_idx): + idx = numeric_idx + + expected = Index([np.nan, np.nan, np.nan, np.nan, np.nan], dtype=np.float64) + result = idx % zero + tm.assert_index_equal(result, expected) + ser_compat = Series(idx).astype("i8") % np.array(zero).astype("i8") + tm.assert_series_equal(ser_compat, Series(result)) + + def test_divmod_zero(self, zero, numeric_idx): + idx = numeric_idx + + exleft = Index([np.nan, np.inf, np.inf, np.inf, np.inf], dtype=np.float64) + exright = Index([np.nan, np.nan, np.nan, np.nan, np.nan], dtype=np.float64) + exleft = adjust_negative_zero(zero, exleft) + + result = divmod(idx, zero) + tm.assert_index_equal(result[0], exleft) + tm.assert_index_equal(result[1], exright) + + @pytest.mark.parametrize("op", [operator.truediv, operator.floordiv]) + def test_div_negative_zero(self, zero, numeric_idx, op): + # Check that -1 / -0.0 returns np.inf, not -np.inf + if numeric_idx.dtype == np.uint64: + pytest.skip(f"Div by negative 0 not relevant for {numeric_idx.dtype}") + idx = numeric_idx - 3 + + expected = Index([-np.inf, -np.inf, -np.inf, np.nan, np.inf], dtype=np.float64) + expected = adjust_negative_zero(zero, expected) + + result = op(idx, zero) + tm.assert_index_equal(result, expected) + + # ------------------------------------------------------------------ + + @pytest.mark.parametrize("dtype1", [np.int64, np.float64, np.uint64]) + def test_ser_div_ser( + self, + switch_numexpr_min_elements, + dtype1, + any_real_numpy_dtype, + ): + # no longer do integer div for any ops, but deal with the 0's + dtype2 = any_real_numpy_dtype + + first = Series([3, 4, 5, 8], name="first").astype(dtype1) + second = Series([0, 0, 0, 3], name="second").astype(dtype2) + + with np.errstate(all="ignore"): + expected = Series( + first.values.astype(np.float64) / second.values, + dtype="float64", + name=None, + ) + expected.iloc[0:3] = np.inf + if first.dtype == "int64" and second.dtype == "float32": + # when using numexpr, the casting rules are slightly different + # and int64/float32 combo results in float32 instead of float64 + if expr.USE_NUMEXPR and switch_numexpr_min_elements == 0: + expected = expected.astype("float32") + + result = first / second + tm.assert_series_equal(result, expected) + assert not result.equals(second / first) + + @pytest.mark.parametrize("dtype1", [np.int64, np.float64, np.uint64]) + def test_ser_divmod_zero(self, dtype1, any_real_numpy_dtype): + # GH#26987 + dtype2 = any_real_numpy_dtype + left = Series([1, 1]).astype(dtype1) + right = Series([0, 2]).astype(dtype2) + + # GH#27321 pandas convention is to set 1 // 0 to np.inf, as opposed + # to numpy which sets to np.nan; patch `expected[0]` below + expected = left // right, left % right + expected = list(expected) + expected[0] = expected[0].astype(np.float64) + expected[0][0] = np.inf + result = divmod(left, right) + + tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + + # rdivmod case + result = divmod(left.values, right) + tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + + def test_ser_divmod_inf(self): + left = Series([np.inf, 1.0]) + right = Series([np.inf, 2.0]) + + expected = left // right, left % right + result = divmod(left, right) + + 
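+ # Editor's note (illustrative, not part of the original test): with
+ # infinities, numpy and Python floats already agree with pandas, so no
+ # convention patch like the GH#27321 one above appears to be needed:
+ #     >>> np.inf // np.inf, np.inf % np.inf
+ #     (nan, nan)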
tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + + # rdivmod case + result = divmod(left.values, right) + tm.assert_series_equal(result[0], expected[0]) + tm.assert_series_equal(result[1], expected[1]) + + def test_rdiv_zero_compat(self): + # GH#8674 + zero_array = np.array([0] * 5) + data = np.random.default_rng(2).standard_normal(5) + expected = Series([0.0] * 5) + + result = zero_array / Series(data) + tm.assert_series_equal(result, expected) + + result = Series(zero_array) / data + tm.assert_series_equal(result, expected) + + result = Series(zero_array) / Series(data) + tm.assert_series_equal(result, expected) + + def test_div_zero_inf_signs(self): + # GH#9144, inf signing + ser = Series([-1, 0, 1], name="first") + expected = Series([-np.inf, np.nan, np.inf], name="first") + + result = ser / 0 + tm.assert_series_equal(result, expected) + + def test_rdiv_zero(self): + # GH#9144 + ser = Series([-1, 0, 1], name="first") + expected = Series([0.0, np.nan, 0.0], name="first") + + result = 0 / ser + tm.assert_series_equal(result, expected) + + def test_floordiv_div(self): + # GH#9144 + ser = Series([-1, 0, 1], name="first") + + result = ser // 0 + expected = Series([-np.inf, np.nan, np.inf], name="first") + tm.assert_series_equal(result, expected) + + def test_df_div_zero_df(self): + # integer div, but deal with the 0's (GH#9144) + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + result = df / df + + first = Series([1.0, 1.0, 1.0, 1.0]) + second = Series([np.nan, np.nan, np.nan, 1]) + expected = pd.DataFrame({"first": first, "second": second}) + tm.assert_frame_equal(result, expected) + + def test_df_div_zero_array(self): + # integer div, but deal with the 0's (GH#9144) + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + + first = Series([1.0, 1.0, 1.0, 1.0]) + second = Series([np.nan, np.nan, np.nan, 1]) + expected = pd.DataFrame({"first": first, "second": second}) + + with np.errstate(all="ignore"): + arr = df.values.astype("float") / df.values + result = pd.DataFrame(arr, index=df.index, columns=df.columns) + tm.assert_frame_equal(result, expected) + + def test_df_div_zero_int(self): + # integer div, but deal with the 0's (GH#9144) + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + + result = df / 0 + expected = pd.DataFrame(np.inf, index=df.index, columns=df.columns) + expected.iloc[0:3, 1] = np.nan + tm.assert_frame_equal(result, expected) + + # numpy has a slightly different (wrong) treatment + with np.errstate(all="ignore"): + arr = df.values.astype("float64") / 0 + result2 = pd.DataFrame(arr, index=df.index, columns=df.columns) + tm.assert_frame_equal(result2, expected) + + def test_df_div_zero_series_does_not_commute(self): + # integer div, but deal with the 0's (GH#9144) + df = pd.DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + ser = df[0] + res = ser / df + res2 = df / ser + assert not res.fillna(0).equals(res2.fillna(0)) + + # ------------------------------------------------------------------ + # Mod By Zero + + def test_df_mod_zero_df(self, using_array_manager): + # GH#3590, modulo as ints + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + # this is technically wrong, as the integer portion is coerced to float + first = Series([0, 0, 0, 0]) + if not using_array_manager: + # INFO(ArrayManager) BlockManager doesn't preserve dtype per column + # while ArrayManager performs the op column-wise and thus preserves + # dtype if possible + first =
first.astype("float64") + second = Series([np.nan, np.nan, np.nan, 0]) + expected = pd.DataFrame({"first": first, "second": second}) + result = df % df + tm.assert_frame_equal(result, expected) + + # GH#38939 If we don't pass copy=False, df is consolidated and + # result["first"] is float64 instead of int64 + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}, copy=False) + first = Series([0, 0, 0, 0], dtype="int64") + second = Series([np.nan, np.nan, np.nan, 0]) + expected = pd.DataFrame({"first": first, "second": second}) + result = df % df + tm.assert_frame_equal(result, expected) + + def test_df_mod_zero_array(self): + # GH#3590, modulo as ints + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + + # this is technically wrong, as the integer portion is coerced to float + first = Series([0, 0, 0, 0], dtype="float64") + second = Series([np.nan, np.nan, np.nan, 0]) + expected = pd.DataFrame({"first": first, "second": second}) + + # numpy has a slightly different (wrong) treatment + with np.errstate(all="ignore"): + arr = df.values % df.values + result2 = pd.DataFrame(arr, index=df.index, columns=df.columns, dtype="float64") + result2.iloc[0:3, 1] = np.nan + tm.assert_frame_equal(result2, expected) + + def test_df_mod_zero_int(self): + # GH#3590, modulo as ints + df = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + + result = df % 0 + expected = pd.DataFrame(np.nan, index=df.index, columns=df.columns) + tm.assert_frame_equal(result, expected) + + # numpy has a slightly different (wrong) treatment + with np.errstate(all="ignore"): + arr = df.values.astype("float64") % 0 + result2 = pd.DataFrame(arr, index=df.index, columns=df.columns) + tm.assert_frame_equal(result2, expected) + + def test_df_mod_zero_series_does_not_commute(self): + # GH#3590, modulo as ints + # not commutative with series + df = pd.DataFrame(np.random.default_rng(2).standard_normal((10, 5))) + ser = df[0] + res = ser % df + res2 = df % ser + assert not res.fillna(0).equals(res2.fillna(0)) + + +class TestMultiplicationDivision: + # __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__ + # for non-timestamp/timedelta/period dtypes + + def test_divide_decimal(self, box_with_array): + # resolves issue GH#9787 + box = box_with_array + ser = Series([Decimal(10)]) + expected = Series([Decimal(5)]) + + ser = tm.box_expected(ser, box) + expected = tm.box_expected(expected, box) + + result = ser / Decimal(2) + + tm.assert_equal(result, expected) + + result = ser // Decimal(2) + tm.assert_equal(result, expected) + + def test_div_equiv_binop(self): + # Test Series.div as well as Series.__div__ + # float/integer issue + # GH#7785 + first = Series([1, 0], name="first") + second = Series([-0.01, -0.02], name="second") + expected = Series([-0.01, -np.inf]) + + result = second.div(first) + tm.assert_series_equal(result, expected, check_names=False) + + result = second / first + tm.assert_series_equal(result, expected) + + def test_div_int(self, numeric_idx): + idx = numeric_idx + result = idx / 1 + expected = idx.astype("float64") + tm.assert_index_equal(result, expected) + + result = idx / 2 + expected = Index(idx.values / 2) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("op", [operator.mul, ops.rmul, operator.floordiv]) + def test_mul_int_identity(self, op, numeric_idx, box_with_array): + idx = numeric_idx + idx = tm.box_expected(idx, box_with_array) + + result = op(idx, 1) + tm.assert_equal(result, idx) + + def test_mul_int_array(self,
numeric_idx): + idx = numeric_idx + didx = idx * idx + + result = idx * np.array(5, dtype="int64") + tm.assert_index_equal(result, idx * 5) + + arr_dtype = "uint64" if idx.dtype == np.uint64 else "int64" + result = idx * np.arange(5, dtype=arr_dtype) + tm.assert_index_equal(result, didx) + + def test_mul_int_series(self, numeric_idx): + idx = numeric_idx + didx = idx * idx + + arr_dtype = "uint64" if idx.dtype == np.uint64 else "int64" + result = idx * Series(np.arange(5, dtype=arr_dtype)) + tm.assert_series_equal(result, Series(didx)) + + def test_mul_float_series(self, numeric_idx): + idx = numeric_idx + rng5 = np.arange(5, dtype="float64") + + result = idx * Series(rng5 + 0.1) + expected = Series(rng5 * (rng5 + 0.1)) + tm.assert_series_equal(result, expected) + + def test_mul_index(self, numeric_idx): + idx = numeric_idx + + result = idx * idx + tm.assert_index_equal(result, idx**2) + + def test_mul_datelike_raises(self, numeric_idx): + idx = numeric_idx + msg = "cannot perform __rmul__ with this index type" + with pytest.raises(TypeError, match=msg): + idx * date_range("20130101", periods=5) + + def test_mul_size_mismatch_raises(self, numeric_idx): + idx = numeric_idx + msg = "operands could not be broadcast together" + with pytest.raises(ValueError, match=msg): + idx * idx[0:3] + with pytest.raises(ValueError, match=msg): + idx * np.array([1, 2]) + + @pytest.mark.parametrize("op", [operator.pow, ops.rpow]) + def test_pow_float(self, op, numeric_idx, box_with_array): + # test power calculations both ways, GH#14973 + box = box_with_array + idx = numeric_idx + expected = Index(op(idx.values, 2.0)) + + idx = tm.box_expected(idx, box) + expected = tm.box_expected(expected, box) + + result = op(idx, 2.0) + tm.assert_equal(result, expected) + + def test_modulo(self, numeric_idx, box_with_array): + # GH#9244 + box = box_with_array + idx = numeric_idx + expected = Index(idx.values % 2) + + idx = tm.box_expected(idx, box) + expected = tm.box_expected(expected, box) + + result = idx % 2 + tm.assert_equal(result, expected) + + def test_divmod_scalar(self, numeric_idx): + idx = numeric_idx + + result = divmod(idx, 2) + with np.errstate(all="ignore"): + div, mod = divmod(idx.values, 2) + + expected = Index(div), Index(mod) + for r, e in zip(result, expected): + tm.assert_index_equal(r, e) + + def test_divmod_ndarray(self, numeric_idx): + idx = numeric_idx + other = np.ones(idx.values.shape, dtype=idx.values.dtype) * 2 + + result = divmod(idx, other) + with np.errstate(all="ignore"): + div, mod = divmod(idx.values, other) + + expected = Index(div), Index(mod) + for r, e in zip(result, expected): + tm.assert_index_equal(r, e) + + def test_divmod_series(self, numeric_idx): + idx = numeric_idx + other = np.ones(idx.values.shape, dtype=idx.values.dtype) * 2 + + result = divmod(idx, Series(other)) + with np.errstate(all="ignore"): + div, mod = divmod(idx.values, other) + + expected = Series(div), Series(mod) + for r, e in zip(result, expected): + tm.assert_series_equal(r, e) + + @pytest.mark.parametrize("other", [np.nan, 7, -23, 2.718, -3.14, np.inf]) + def test_ops_np_scalar(self, other): + vals = np.random.default_rng(2).standard_normal((5, 3)) + f = lambda x: pd.DataFrame( + x, index=list("ABCDE"), columns=["jim", "joe", "jolie"] + ) + + df = f(vals) + + tm.assert_frame_equal(df / np.array(other), f(vals / other)) + tm.assert_frame_equal(np.array(other) * df, f(vals * other)) + tm.assert_frame_equal(df + np.array(other), f(vals + other)) + tm.assert_frame_equal(np.array(other) - df, f(other - vals)) + 
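+ # Editor's sketch (illustrative, not from the original suite): the
+ # test above relies on a zero-dimensional np.array being unboxed and
+ # broadcast exactly like the bare scalar, e.g.:
+ #     >>> import numpy as np, pandas as pd
+ #     >>> df = pd.DataFrame({"a": [1.0, 2.0]})
+ #     >>> (df * np.array(2.0)).equals(df * 2.0)
+ #     True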
+ # TODO: This came from series.test.test_operators, needs cleanup + def test_operators_frame(self): + # rpow does not work with DataFrame + ts = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + ts.name = "ts" + + df = pd.DataFrame({"A": ts}) + + tm.assert_series_equal(ts + ts, ts + df["A"], check_names=False) + tm.assert_series_equal(ts**ts, ts ** df["A"], check_names=False) + tm.assert_series_equal(ts < ts, ts < df["A"], check_names=False) + tm.assert_series_equal(ts / ts, ts / df["A"], check_names=False) + + # TODO: this came from tests.series.test_analytics, needs cleanup and + # de-duplication with test_modulo above + def test_modulo2(self): + with np.errstate(all="ignore"): + # GH#3590, modulo as ints + p = pd.DataFrame({"first": [3, 4, 5, 8], "second": [0, 0, 0, 3]}) + result = p["first"] % p["second"] + expected = Series(p["first"].values % p["second"].values, dtype="float64") + expected.iloc[0:3] = np.nan + tm.assert_series_equal(result, expected) + + result = p["first"] % 0 + expected = Series(np.nan, index=p.index, name="first") + tm.assert_series_equal(result, expected) + + p = p.astype("float64") + result = p["first"] % p["second"] + expected = Series(p["first"].values % p["second"].values) + tm.assert_series_equal(result, expected) + + p = p.astype("float64") + result = p["first"] % p["second"] + result2 = p["second"] % p["first"] + assert not result.equals(result2) + + def test_modulo_zero_int(self): + # GH#9144 + with np.errstate(all="ignore"): + s = Series([0, 1]) + + result = s % 0 + expected = Series([np.nan, np.nan]) + tm.assert_series_equal(result, expected) + + result = 0 % s + expected = Series([np.nan, 0.0]) + tm.assert_series_equal(result, expected) + + +class TestAdditionSubtraction: + # __add__, __sub__, __radd__, __rsub__, __iadd__, __isub__ + # for non-timestamp/timedelta/period dtypes + + @pytest.mark.parametrize( + "first, second, expected", + [ + ( + Series([1, 2, 3], index=list("ABC"), name="x"), + Series([2, 2, 2], index=list("ABD"), name="x"), + Series([3.0, 4.0, np.nan, np.nan], index=list("ABCD"), name="x"), + ), + ( + Series([1, 2, 3], index=list("ABC"), name="x"), + Series([2, 2, 2, 2], index=list("ABCD"), name="x"), + Series([3, 4, 5, np.nan], index=list("ABCD"), name="x"), + ), + ], + ) + def test_add_series(self, first, second, expected): + # GH#1134 + tm.assert_series_equal(first + second, expected) + tm.assert_series_equal(second + first, expected) + + @pytest.mark.parametrize( + "first, second, expected", + [ + ( + pd.DataFrame({"x": [1, 2, 3]}, index=list("ABC")), + pd.DataFrame({"x": [2, 2, 2]}, index=list("ABD")), + pd.DataFrame({"x": [3.0, 4.0, np.nan, np.nan]}, index=list("ABCD")), + ), + ( + pd.DataFrame({"x": [1, 2, 3]}, index=list("ABC")), + pd.DataFrame({"x": [2, 2, 2, 2]}, index=list("ABCD")), + pd.DataFrame({"x": [3, 4, 5, np.nan]}, index=list("ABCD")), + ), + ], + ) + def test_add_frames(self, first, second, expected): + # GH#1134 + tm.assert_frame_equal(first + second, expected) + tm.assert_frame_equal(second + first, expected) + + # TODO: This came from series.test.test_operators, needs cleanup + def test_series_frame_radd_bug(self, fixed_now_ts): + # GH#353 + vals = Series([str(i) for i in range(5)]) + result = "foo_" + vals + expected = vals.map(lambda x: "foo_" + x) + tm.assert_series_equal(result, expected) + + frame = pd.DataFrame({"vals": vals}) + result = "foo_" + frame + expected = pd.DataFrame({"vals": vals.map(lambda x: "foo_" + x)}) + 
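+ # Editor's note (illustrative): "foo_" + frame only works because
+ # str.__add__ returns NotImplemented, so Python falls back to the
+ # pandas __radd__, which applies the op elementwise:
+ #     >>> "foo_" + Series(["x"])
+ #     0    foo_x
+ #     dtype: object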
tm.assert_frame_equal(result, expected) + + ts = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + + # really raise this time + fix_now = fixed_now_ts.to_pydatetime() + msg = "|".join( + [ + "unsupported operand type", + # wrong error message, see https://github.com/numpy/numpy/issues/18832 + "Concatenation operation", + ] + ) + with pytest.raises(TypeError, match=msg): + fix_now + ts + + with pytest.raises(TypeError, match=msg): + ts + fix_now + + # TODO: This came from series.test.test_operators, needs cleanup + def test_datetime64_with_index(self): + # arithmetic integer ops with an index + ser = Series(np.random.default_rng(2).standard_normal(5)) + expected = ser - ser.index.to_series() + result = ser - ser.index + tm.assert_series_equal(result, expected) + + # GH#4629 + # arithmetic datetime64 ops with an index + ser = Series( + date_range("20130101", periods=5), + index=date_range("20130101", periods=5), + ) + expected = ser - ser.index.to_series() + result = ser - ser.index + tm.assert_series_equal(result, expected) + + msg = "cannot subtract PeriodArray from DatetimeArray" + with pytest.raises(TypeError, match=msg): + # GH#18850 + result = ser - ser.index.to_period() + + df = pd.DataFrame( + np.random.default_rng(2).standard_normal((5, 2)), + index=date_range("20130101", periods=5), + ) + df["date"] = pd.Timestamp("20130102") + df["expected"] = df["date"] - df.index.to_series() + df["result"] = df["date"] - df.index + tm.assert_series_equal(df["result"], df["expected"], check_names=False) + + # TODO: taken from tests.frame.test_operators, needs cleanup + def test_frame_operators(self, float_frame): + frame = float_frame + + garbage = np.random.default_rng(2).random(4) + colSeries = Series(garbage, index=np.array(frame.columns)) + + idSum = frame + frame + seriesSum = frame + colSeries + + for col, series in idSum.items(): + for idx, val in series.items(): + origVal = frame[col][idx] * 2 + if not np.isnan(val): + assert val == origVal + else: + assert np.isnan(origVal) + + for col, series in seriesSum.items(): + for idx, val in series.items(): + origVal = frame[col][idx] + colSeries[col] + if not np.isnan(val): + assert val == origVal + else: + assert np.isnan(origVal) + + def test_frame_operators_col_align(self, float_frame): + frame2 = pd.DataFrame(float_frame, columns=["D", "C", "B", "A"]) + added = frame2 + frame2 + expected = frame2 * 2 + tm.assert_frame_equal(added, expected) + + def test_frame_operators_none_to_nan(self): + df = pd.DataFrame({"a": ["a", None, "b"]}) + tm.assert_frame_equal(df + df, pd.DataFrame({"a": ["aa", np.nan, "bb"]})) + + @pytest.mark.parametrize("dtype", ("float", "int64")) + def test_frame_operators_empty_like(self, dtype): + # Test for issue #10181 + frames = [ + pd.DataFrame(dtype=dtype), + pd.DataFrame(columns=["A"], dtype=dtype), + pd.DataFrame(index=[0], dtype=dtype), + ] + for df in frames: + assert (df + df).equals(df) + tm.assert_frame_equal(df + df, df) + + @pytest.mark.parametrize( + "func", + [lambda x: x * 2, lambda x: x[::2], lambda x: 5], + ids=["multiply", "slice", "constant"], + ) + def test_series_operators_arithmetic(self, all_arithmetic_functions, func): + op = all_arithmetic_functions + series = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + other = func(series) + compare_op(series, other, op) + + @pytest.mark.parametrize( + "func", [lambda x: x + 1, lambda x: 5], ids=["add", "constant"] + ) + def 
test_series_operators_compare(self, comparison_op, func): + op = comparison_op + series = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + other = func(series) + compare_op(series, other, op) + + @pytest.mark.parametrize( + "func", + [lambda x: x * 2, lambda x: x[::2], lambda x: 5], + ids=["multiply", "slice", "constant"], + ) + def test_divmod(self, func): + series = Series( + np.arange(10, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + other = func(series) + results = divmod(series, other) + if isinstance(other, abc.Iterable) and len(series) != len(other): + # if the lengths don't match, this is the test where we use + # `series[::2]`. Pad every other value in `other_np` with nan. + other_np = [] + for n in other: + other_np.append(n) + other_np.append(np.nan) + else: + other_np = other + other_np = np.asarray(other_np) + with np.errstate(all="ignore"): + expecteds = divmod(series.values, other_np) + + for result, expected in zip(results, expecteds): + # check the values, name, and index separately + tm.assert_almost_equal(np.asarray(result), expected) + + assert result.name == series.name + tm.assert_index_equal(result.index, series.index._with_freq(None)) + + def test_series_divmod_zero(self): + # Check that divmod uses pandas convention for division by zero, + # which does not match numpy. + # pandas convention has + # 1/0 == np.inf + # -1/0 == -np.inf + # 1/-0.0 == -np.inf + # -1/-0.0 == np.inf + tser = Series( + np.arange(1, 11, dtype=np.float64), + index=date_range("2020-01-01", periods=10), + name="ts", + ) + other = tser * 0 + + result = divmod(tser, other) + exp1 = Series([np.inf] * len(tser), index=tser.index, name="ts") + exp2 = Series([np.nan] * len(tser), index=tser.index, name="ts") + tm.assert_series_equal(result[0], exp1) + tm.assert_series_equal(result[1], exp2) + + +class TestUFuncCompat: + # TODO: add more dtypes + @pytest.mark.parametrize("holder", [Index, RangeIndex, Series]) + @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64]) + def test_ufunc_compat(self, holder, dtype): + box = Series if holder is Series else Index + + if holder is RangeIndex: + if dtype != np.int64: + pytest.skip(f"dtype {dtype} not relevant for RangeIndex") + idx = RangeIndex(0, 5, name="foo") + else: + idx = holder(np.arange(5, dtype=dtype), name="foo") + result = np.sin(idx) + expected = box(np.sin(np.arange(5, dtype=dtype)), name="foo") + tm.assert_equal(result, expected) + + # TODO: add more dtypes + @pytest.mark.parametrize("holder", [Index, Series]) + @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64]) + def test_ufunc_coercions(self, holder, dtype): + idx = holder([1, 2, 3, 4, 5], dtype=dtype, name="x") + box = Series if holder is Series else Index + + result = np.sqrt(idx) + assert result.dtype == "f8" and isinstance(result, box) + exp = Index(np.sqrt(np.array([1, 2, 3, 4, 5], dtype=np.float64)), name="x") + exp = tm.box_expected(exp, box) + tm.assert_equal(result, exp) + + result = np.divide(idx, 2.0) + assert result.dtype == "f8" and isinstance(result, box) + exp = Index([0.5, 1.0, 1.5, 2.0, 2.5], dtype=np.float64, name="x") + exp = tm.box_expected(exp, box) + tm.assert_equal(result, exp) + + # _evaluate_numeric_binop + result = idx + 2.0 + assert result.dtype == "f8" and isinstance(result, box) + exp = Index([3.0, 4.0, 5.0, 6.0, 7.0], dtype=np.float64, name="x") + exp = tm.box_expected(exp, box) + tm.assert_equal(result, exp) + + result = idx - 
2.0 + assert result.dtype == "f8" and isinstance(result, box) + exp = Index([-1.0, 0.0, 1.0, 2.0, 3.0], dtype=np.float64, name="x") + exp = tm.box_expected(exp, box) + tm.assert_equal(result, exp) + + result = idx * 1.0 + assert result.dtype == "f8" and isinstance(result, box) + exp = Index([1.0, 2.0, 3.0, 4.0, 5.0], dtype=np.float64, name="x") + exp = tm.box_expected(exp, box) + tm.assert_equal(result, exp) + + result = idx / 2.0 + assert result.dtype == "f8" and isinstance(result, box) + exp = Index([0.5, 1.0, 1.5, 2.0, 2.5], dtype=np.float64, name="x") + exp = tm.box_expected(exp, box) + tm.assert_equal(result, exp) + + # TODO: add more dtypes + @pytest.mark.parametrize("holder", [Index, Series]) + @pytest.mark.parametrize("dtype", [np.int64, np.uint64, np.float64]) + def test_ufunc_multiple_return_values(self, holder, dtype): + obj = holder([1, 2, 3], dtype=dtype, name="x") + box = Series if holder is Series else Index + + result = np.modf(obj) + assert isinstance(result, tuple) + exp1 = Index([0.0, 0.0, 0.0], dtype=np.float64, name="x") + exp2 = Index([1.0, 2.0, 3.0], dtype=np.float64, name="x") + tm.assert_equal(result[0], tm.box_expected(exp1, box)) + tm.assert_equal(result[1], tm.box_expected(exp2, box)) + + def test_ufunc_at(self): + s = Series([0, 1, 2], index=[1, 2, 3], name="x") + np.add.at(s, [0, 2], 10) + expected = Series([10, 1, 12], index=[1, 2, 3], name="x") + tm.assert_series_equal(s, expected) + + +class TestObjectDtypeEquivalence: + # Tests that arithmetic operations match operations executed elementwise + + @pytest.mark.parametrize("dtype", [None, object]) + def test_numarr_with_dtype_add_nan(self, dtype, box_with_array): + box = box_with_array + ser = Series([1, 2, 3], dtype=dtype) + expected = Series([np.nan, np.nan, np.nan], dtype=dtype) + + ser = tm.box_expected(ser, box) + expected = tm.box_expected(expected, box) + + result = np.nan + ser + tm.assert_equal(result, expected) + + result = ser + np.nan + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("dtype", [None, object]) + def test_numarr_with_dtype_add_int(self, dtype, box_with_array): + box = box_with_array + ser = Series([1, 2, 3], dtype=dtype) + expected = Series([2, 3, 4], dtype=dtype) + + ser = tm.box_expected(ser, box) + expected = tm.box_expected(expected, box) + + result = 1 + ser + tm.assert_equal(result, expected) + + result = ser + 1 + tm.assert_equal(result, expected) + + # TODO: moved from tests.series.test_operators; needs cleanup + @pytest.mark.parametrize( + "op", + [operator.add, operator.sub, operator.mul, operator.truediv, operator.floordiv], + ) + def test_operators_reverse_object(self, op): + # GH#56 + arr = Series( + np.random.default_rng(2).standard_normal(10), + index=np.arange(10), + dtype=object, + ) + + result = op(1.0, arr) + expected = op(1.0, arr.astype(float)) + tm.assert_series_equal(result.astype(float), expected) + + +class TestNumericArithmeticUnsorted: + # Tests in this class have been moved from type-specific test modules + # but not yet sorted, parametrized, and de-duplicated + @pytest.mark.parametrize( + "op", + [ + operator.add, + operator.sub, + operator.mul, + operator.floordiv, + operator.truediv, + ], + ) + @pytest.mark.parametrize( + "idx1", + [ + RangeIndex(0, 10, 1), + RangeIndex(0, 20, 2), + RangeIndex(-10, 10, 2), + RangeIndex(5, -5, -1), + ], + ) + @pytest.mark.parametrize( + "idx2", + [ + RangeIndex(0, 10, 1), + RangeIndex(0, 20, 2), + RangeIndex(-10, 10, 2), + RangeIndex(5, -5, -1), + ], + ) + def test_binops_index(self, op, idx1, idx2): + 
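+ # Editor's note (illustrative): the check below is that RangeIndex
+ # binops match the same op on a materialized Index; mixed-step results
+ # generally cannot stay lazy, e.g. (assuming a recent pandas repr):
+ #     >>> RangeIndex(0, 4) + RangeIndex(0, 8, 2)
+ #     Index([0, 3, 6, 9], dtype='int64')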
idx1 = idx1._rename("foo") + idx2 = idx2._rename("bar") + result = op(idx1, idx2) + expected = op(Index(idx1.to_numpy()), Index(idx2.to_numpy())) + tm.assert_index_equal(result, expected, exact="equiv") + + @pytest.mark.parametrize( + "op", + [ + operator.add, + operator.sub, + operator.mul, + operator.floordiv, + operator.truediv, + ], + ) + @pytest.mark.parametrize( + "idx", + [ + RangeIndex(0, 10, 1), + RangeIndex(0, 20, 2), + RangeIndex(-10, 10, 2), + RangeIndex(5, -5, -1), + ], + ) + @pytest.mark.parametrize("scalar", [-1, 1, 2]) + def test_binops_index_scalar(self, op, idx, scalar): + result = op(idx, scalar) + expected = op(Index(idx.to_numpy()), scalar) + tm.assert_index_equal(result, expected, exact="equiv") + + @pytest.mark.parametrize("idx1", [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)]) + @pytest.mark.parametrize("idx2", [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)]) + def test_binops_index_pow(self, idx1, idx2): + # numpy does not allow powers of negative integers so test separately + # https://github.com/numpy/numpy/pull/8127 + idx1 = idx1._rename("foo") + idx2 = idx2._rename("bar") + result = pow(idx1, idx2) + expected = pow(Index(idx1.to_numpy()), Index(idx2.to_numpy())) + tm.assert_index_equal(result, expected, exact="equiv") + + @pytest.mark.parametrize("idx", [RangeIndex(0, 10, 1), RangeIndex(0, 20, 2)]) + @pytest.mark.parametrize("scalar", [1, 2]) + def test_binops_index_scalar_pow(self, idx, scalar): + # numpy does not allow powers of negative integers so test separately + # https://github.com/numpy/numpy/pull/8127 + result = pow(idx, scalar) + expected = pow(Index(idx.to_numpy()), scalar) + tm.assert_index_equal(result, expected, exact="equiv") + + # TODO: divmod? + @pytest.mark.parametrize( + "op", + [ + operator.add, + operator.sub, + operator.mul, + operator.floordiv, + operator.truediv, + operator.pow, + operator.mod, + ], + ) + def test_arithmetic_with_frame_or_series(self, op): + # check that we return NotImplemented when operating with Series + # or DataFrame + index = RangeIndex(5) + other = Series(np.random.default_rng(2).standard_normal(5)) + + expected = op(Series(index), other) + result = op(index, other) + tm.assert_series_equal(result, expected) + + other = pd.DataFrame(np.random.default_rng(2).standard_normal((2, 5))) + expected = op(pd.DataFrame([index, index]), other) + result = op(index, other) + tm.assert_frame_equal(result, expected) + + def test_numeric_compat2(self): + # validate that we are handling the RangeIndex overrides to numeric ops + # and returning RangeIndex where possible + + idx = RangeIndex(0, 10, 2) + + result = idx * 2 + expected = RangeIndex(0, 20, 4) + tm.assert_index_equal(result, expected, exact=True) + + result = idx + 2 + expected = RangeIndex(2, 12, 2) + tm.assert_index_equal(result, expected, exact=True) + + result = idx - 2 + expected = RangeIndex(-2, 8, 2) + tm.assert_index_equal(result, expected, exact=True) + + result = idx / 2 + expected = RangeIndex(0, 5, 1).astype("float64") + tm.assert_index_equal(result, expected, exact=True) + + result = idx / 4 + expected = RangeIndex(0, 10, 2) / 4 + tm.assert_index_equal(result, expected, exact=True) + + result = idx // 1 + expected = idx + tm.assert_index_equal(result, expected, exact=True) + + # __mul__ + result = idx * idx + expected = Index(idx.values * idx.values) + tm.assert_index_equal(result, expected, exact=True) + + # __pow__ + idx = RangeIndex(0, 1000, 2) + result = idx**2 + expected = Index(idx._values) ** 2 + tm.assert_index_equal(Index(result.values), expected, 
exact=True) + + @pytest.mark.parametrize( + "idx, div, expected", + [ + # TODO: add more dtypes + (RangeIndex(0, 1000, 2), 2, RangeIndex(0, 500, 1)), + (RangeIndex(-99, -201, -3), -3, RangeIndex(33, 67, 1)), + ( + RangeIndex(0, 1000, 1), + 2, + Index(RangeIndex(0, 1000, 1)._values) // 2, + ), + ( + RangeIndex(0, 100, 1), + 2.0, + Index(RangeIndex(0, 100, 1)._values) // 2.0, + ), + (RangeIndex(0), 50, RangeIndex(0)), + (RangeIndex(2, 4, 2), 3, RangeIndex(0, 1, 1)), + (RangeIndex(-5, -10, -6), 4, RangeIndex(-2, -1, 1)), + (RangeIndex(-100, -200, 3), 2, RangeIndex(0)), + ], + ) + def test_numeric_compat2_floordiv(self, idx, div, expected): + # __floordiv__ + tm.assert_index_equal(idx // div, expected, exact=True) + + @pytest.mark.parametrize("dtype", [np.int64, np.float64]) + @pytest.mark.parametrize("delta", [1, 0, -1]) + def test_addsub_arithmetic(self, dtype, delta): + # GH#8142 + delta = dtype(delta) + index = Index([10, 11, 12], dtype=dtype) + result = index + delta + expected = Index(index.values + delta, dtype=dtype) + tm.assert_index_equal(result, expected) + + # this subtraction used to fail + result = index - delta + expected = Index(index.values - delta, dtype=dtype) + tm.assert_index_equal(result, expected) + + tm.assert_index_equal(index + index, 2 * index) + tm.assert_index_equal(index - index, 0 * index) + assert not (index - index).empty + + def test_pow_nan_with_zero(self, box_with_array): + left = Index([np.nan, np.nan, np.nan]) + right = Index([0, 0, 0]) + expected = Index([1.0, 1.0, 1.0]) + + left = tm.box_expected(left, box_with_array) + right = tm.box_expected(right, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = left**right + tm.assert_equal(result, expected) + + +def test_fill_value_inf_masking(): + # GH #27464 make sure we mask 0/1 with Inf and not NaN + df = pd.DataFrame({"A": [0, 1, 2], "B": [1.1, None, 1.1]}) + + other = pd.DataFrame({"A": [1.1, 1.2, 1.3]}, index=[0, 2, 3]) + + result = df.rfloordiv(other, fill_value=1) + + expected = pd.DataFrame( + {"A": [np.inf, 1.0, 0.0, 1.0], "B": [0.0, np.nan, 0.0, np.nan]} + ) + tm.assert_frame_equal(result, expected) + + +def test_dataframe_div_silenced(): + # GH#26793 + pdf1 = pd.DataFrame( + { + "A": np.arange(10), + "B": [np.nan, 1, 2, 3, 4] * 2, + "C": [np.nan] * 10, + "D": np.arange(10), + }, + index=list("abcdefghij"), + columns=list("ABCD"), + ) + pdf2 = pd.DataFrame( + np.random.default_rng(2).standard_normal((10, 4)), + index=list("abcdefghjk"), + columns=list("ABCX"), + ) + with tm.assert_produces_warning(None): + pdf1.div(pdf2, fill_value=0) + + +@pytest.mark.parametrize( + "data, expected_data", + [([0, 1, 2], [0, 2, 4])], +) +def test_integer_array_add_list_like( + box_pandas_1d_array, box_1d_array, data, expected_data +): + # GH22606 Verify operators with IntegerArray and list-likes + arr = array(data, dtype="Int64") + container = box_pandas_1d_array(arr) + left = container + box_1d_array(data) + right = box_1d_array(data) + container + + if Series in [box_1d_array, box_pandas_1d_array]: + cls = Series + elif Index in [box_1d_array, box_pandas_1d_array]: + cls = Index + else: + cls = array + + expected = cls(expected_data, dtype="Int64") + + tm.assert_equal(left, expected) + tm.assert_equal(right, expected) + + +def test_sub_multiindex_swapped_levels(): + # GH 9952 + df = pd.DataFrame( + {"a": np.random.default_rng(2).standard_normal(6)}, + index=pd.MultiIndex.from_product( + [["a", "b"], [0, 1, 2]], names=["levA", "levB"] + ), + ) + df2 = df.copy() + df2.index = 
df2.index.swaplevel(0, 1) + result = df - df2 + expected = pd.DataFrame([0.0] * 6, columns=["a"], index=df.index) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("power", [1, 2, 5]) +@pytest.mark.parametrize("string_size", [0, 1, 2, 5]) +def test_empty_str_comparison(power, string_size): + # GH 37348 + a = np.array(range(10**power)) + right = pd.DataFrame(a, dtype=np.int64) + left = " " * string_size + + result = right == left + expected = pd.DataFrame(np.zeros(right.shape, dtype=bool)) + tm.assert_frame_equal(result, expected) + + +def test_series_add_sub_with_UInt64(): + # GH 22023 + series1 = Series([1, 2, 3]) + series2 = Series([2, 1, 3], dtype="UInt64") + + result = series1 + series2 + expected = Series([3, 3, 6], dtype="Float64") + tm.assert_series_equal(result, expected) + + result = series1 - series2 + expected = Series([-1, 1, 0], dtype="Float64") + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_object.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_object.py new file mode 100644 index 0000000000000000000000000000000000000000..4ffd76722286ab0e6729334216652f4613d9769f --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_object.py @@ -0,0 +1,420 @@ +# Arithmetic tests for DataFrame/Series/Index/Array classes that should +# behave identically. +# Specifically for object dtype +import datetime +from decimal import Decimal +import operator + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + Series, + Timestamp, + option_context, +) +import pandas._testing as tm +from pandas.core import ops + +# ------------------------------------------------------------------ +# Comparisons + + +class TestObjectComparisons: + def test_comparison_object_numeric_nas(self, comparison_op): + ser = Series(np.random.default_rng(2).standard_normal(10), dtype=object) + shifted = ser.shift(2) + + func = comparison_op + + result = func(ser, shifted) + expected = func(ser.astype(float), shifted.astype(float)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] + ) + def test_object_comparisons(self, infer_string): + with option_context("future.infer_string", infer_string): + ser = Series(["a", "b", np.nan, "c", "a"]) + + result = ser == "a" + expected = Series([True, False, False, False, True]) + tm.assert_series_equal(result, expected) + + result = ser < "a" + expected = Series([False, False, False, False, False]) + tm.assert_series_equal(result, expected) + + result = ser != "a" + expected = -(ser == "a") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("dtype", [None, object]) + def test_more_na_comparisons(self, dtype): + left = Series(["a", np.nan, "c"], dtype=dtype) + right = Series(["a", np.nan, "d"], dtype=dtype) + + result = left == right + expected = Series([True, False, False]) + tm.assert_series_equal(result, expected) + + result = left != right + expected = Series([False, True, True]) + tm.assert_series_equal(result, expected) + + result = left == np.nan + expected = Series([False, False, False]) + tm.assert_series_equal(result, expected) + + result = left != np.nan + expected = Series([True, True, True]) + tm.assert_series_equal(result, expected) + + +# 
------------------------------------------------------------------ +# Arithmetic + + +class TestArithmetic: + def test_add_period_to_array_of_offset(self): + # GH#50162 + per = pd.Period("2012-1-1", freq="D") + pi = pd.period_range("2012-1-1", periods=10, freq="D") + idx = per - pi + + expected = pd.Index([x + per for x in idx], dtype=object) + result = idx + per + tm.assert_index_equal(result, expected) + + result = per + idx + tm.assert_index_equal(result, expected) + + # TODO: parametrize + def test_pow_ops_object(self): + # GH#22922 + # pow is weird with masking & 1, so testing here + a = Series([1, np.nan, 1, np.nan], dtype=object) + b = Series([1, np.nan, np.nan, 1], dtype=object) + result = a**b + expected = Series(a.values**b.values, dtype=object) + tm.assert_series_equal(result, expected) + + result = b**a + expected = Series(b.values**a.values, dtype=object) + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + @pytest.mark.parametrize("other", ["category", "Int64"]) + def test_add_extension_scalar(self, other, box_with_array, op): + # GH#22378 + # Check that scalars satisfying is_extension_array_dtype(obj) + # do not incorrectly try to dispatch to an ExtensionArray operation + + arr = Series(["a", "b", "c"]) + expected = Series([op(x, other) for x in arr]) + + arr = tm.box_expected(arr, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = op(arr, other) + tm.assert_equal(result, expected) + + def test_objarr_add_str(self, box_with_array): + ser = Series(["x", np.nan, "x"]) + expected = Series(["xa", np.nan, "xa"]) + + ser = tm.box_expected(ser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = ser + "a" + tm.assert_equal(result, expected) + + def test_objarr_radd_str(self, box_with_array): + ser = Series(["x", np.nan, "x"]) + expected = Series(["ax", np.nan, "ax"]) + + ser = tm.box_expected(ser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = "a" + ser + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "data", + [ + [1, 2, 3], + [1.1, 2.2, 3.3], + [Timestamp("2011-01-01"), Timestamp("2011-01-02"), pd.NaT], + ["x", "y", 1], + ], + ) + @pytest.mark.parametrize("dtype", [None, object]) + def test_objarr_radd_str_invalid(self, dtype, data, box_with_array): + ser = Series(data, dtype=dtype) + + ser = tm.box_expected(ser, box_with_array) + msg = "|".join( + [ + "can only concatenate str", + "did not contain a loop with signature matching types", + "unsupported operand type", + "must be str", + ] + ) + with pytest.raises(TypeError, match=msg): + "foo_" + ser + + @pytest.mark.parametrize("op", [operator.add, ops.radd, operator.sub, ops.rsub]) + def test_objarr_add_invalid(self, op, box_with_array): + # invalid ops + box = box_with_array + + obj_ser = Series(list("abc"), dtype=object, name="objects") + + obj_ser = tm.box_expected(obj_ser, box) + msg = "|".join( + [ + "can only concatenate str", + "unsupported operand type", + "must be str", + "has no kernel", + ] + ) + with pytest.raises(Exception, match=msg): + op(obj_ser, 1) + with pytest.raises(Exception, match=msg): + op(obj_ser, np.array(1, dtype=np.int64)) + + # TODO: Moved from tests.series.test_operators; needs cleanup + def test_operators_na_handling(self): + ser = Series(["foo", "bar", "baz", np.nan]) + result = "prefix_" + ser + expected = Series(["prefix_foo", "prefix_bar", "prefix_baz", np.nan]) + tm.assert_series_equal(result, expected) + + result = ser + 
"_suffix" + expected = Series(["foo_suffix", "bar_suffix", "baz_suffix", np.nan]) + tm.assert_series_equal(result, expected) + + # TODO: parametrize over box + @pytest.mark.parametrize("dtype", [None, object]) + def test_series_with_dtype_radd_timedelta(self, dtype): + # note this test is _not_ aimed at timedelta64-dtyped Series + # as of 2.0 we retain object dtype when ser.dtype == object + ser = Series( + [pd.Timedelta("1 days"), pd.Timedelta("2 days"), pd.Timedelta("3 days")], + dtype=dtype, + ) + expected = Series( + [pd.Timedelta("4 days"), pd.Timedelta("5 days"), pd.Timedelta("6 days")], + dtype=dtype, + ) + + result = pd.Timedelta("3 days") + ser + tm.assert_series_equal(result, expected) + + result = ser + pd.Timedelta("3 days") + tm.assert_series_equal(result, expected) + + # TODO: cleanup & parametrize over box + def test_mixed_timezone_series_ops_object(self): + # GH#13043 + ser = Series( + [ + Timestamp("2015-01-01", tz="US/Eastern"), + Timestamp("2015-01-01", tz="Asia/Tokyo"), + ], + name="xxx", + ) + assert ser.dtype == object + + exp = Series( + [ + Timestamp("2015-01-02", tz="US/Eastern"), + Timestamp("2015-01-02", tz="Asia/Tokyo"), + ], + name="xxx", + ) + tm.assert_series_equal(ser + pd.Timedelta("1 days"), exp) + tm.assert_series_equal(pd.Timedelta("1 days") + ser, exp) + + # object series & object series + ser2 = Series( + [ + Timestamp("2015-01-03", tz="US/Eastern"), + Timestamp("2015-01-05", tz="Asia/Tokyo"), + ], + name="xxx", + ) + assert ser2.dtype == object + exp = Series( + [pd.Timedelta("2 days"), pd.Timedelta("4 days")], name="xxx", dtype=object + ) + tm.assert_series_equal(ser2 - ser, exp) + tm.assert_series_equal(ser - ser2, -exp) + + ser = Series( + [pd.Timedelta("01:00:00"), pd.Timedelta("02:00:00")], + name="xxx", + dtype=object, + ) + assert ser.dtype == object + + exp = Series( + [pd.Timedelta("01:30:00"), pd.Timedelta("02:30:00")], + name="xxx", + dtype=object, + ) + tm.assert_series_equal(ser + pd.Timedelta("00:30:00"), exp) + tm.assert_series_equal(pd.Timedelta("00:30:00") + ser, exp) + + # TODO: cleanup & parametrize over box + def test_iadd_preserves_name(self): + # GH#17067, GH#19723 __iadd__ and __isub__ should preserve index name + ser = Series([1, 2, 3]) + ser.index.name = "foo" + + ser.index += 1 + assert ser.index.name == "foo" + + ser.index -= 1 + assert ser.index.name == "foo" + + def test_add_string(self): + # from bug report + index = pd.Index(["a", "b", "c"]) + index2 = index + "foo" + + assert "a" not in index2 + assert "afoo" in index2 + + def test_iadd_string(self): + index = pd.Index(["a", "b", "c"]) + # doesn't fail test unless there is a check before `+=` + assert "a" in index + + index += "_x" + assert "a_x" in index + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="add doesn't work") + def test_add(self): + index = pd.Index([str(i) for i in range(10)]) + expected = pd.Index(index.values * 2) + tm.assert_index_equal(index + index, expected) + tm.assert_index_equal(index + index.tolist(), expected) + tm.assert_index_equal(index.tolist() + index, expected) + + # test add and radd + index = pd.Index(list("abc")) + expected = pd.Index(["a1", "b1", "c1"]) + tm.assert_index_equal(index + "1", expected) + expected = pd.Index(["1a", "1b", "1c"]) + tm.assert_index_equal("1" + index, expected) + + def test_sub_fail(self, using_infer_string): + index = pd.Index([str(i) for i in range(10)]) + + if using_infer_string: + import pyarrow as pa + + err = pa.lib.ArrowNotImplementedError + msg = "has no kernel" + else: + err = TypeError + 
msg = "unsupported operand type|Cannot broadcast" + with pytest.raises(err, match=msg): + index - "a" + with pytest.raises(err, match=msg): + index - index + with pytest.raises(err, match=msg): + index - index.tolist() + with pytest.raises(err, match=msg): + index.tolist() - index + + def test_sub_object(self): + # GH#19369 + index = pd.Index([Decimal(1), Decimal(2)]) + expected = pd.Index([Decimal(0), Decimal(1)]) + + result = index - Decimal(1) + tm.assert_index_equal(result, expected) + + result = index - pd.Index([Decimal(1), Decimal(1)]) + tm.assert_index_equal(result, expected) + + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + index - "foo" + + with pytest.raises(TypeError, match=msg): + index - np.array([2, "foo"], dtype=object) + + def test_rsub_object(self, fixed_now_ts): + # GH#19369 + index = pd.Index([Decimal(1), Decimal(2)]) + expected = pd.Index([Decimal(1), Decimal(0)]) + + result = Decimal(2) - index + tm.assert_index_equal(result, expected) + + result = np.array([Decimal(2), Decimal(2)]) - index + tm.assert_index_equal(result, expected) + + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + "foo" - index + + with pytest.raises(TypeError, match=msg): + np.array([True, fixed_now_ts]) - index + + +class MyIndex(pd.Index): + # Simple index subclass that tracks ops calls. + + _calls: int + + @classmethod + def _simple_new(cls, values, name=None, dtype=None): + result = object.__new__(cls) + result._data = values + result._name = name + result._calls = 0 + result._reset_identity() + + return result + + def __add__(self, other): + self._calls += 1 + return self._simple_new(self._data) + + def __radd__(self, other): + return self.__add__(other) + + +@pytest.mark.parametrize( + "other", + [ + [datetime.timedelta(1), datetime.timedelta(2)], + [datetime.datetime(2000, 1, 1), datetime.datetime(2000, 1, 2)], + [pd.Period("2000"), pd.Period("2001")], + ["a", "b"], + ], + ids=["timedelta", "datetime", "period", "object"], +) +def test_index_ops_defer_to_unknown_subclasses(other): + # https://github.com/pandas-dev/pandas/issues/31109 + values = np.array( + [datetime.date(2000, 1, 1), datetime.date(2000, 1, 2)], dtype=object + ) + a = MyIndex._simple_new(values) + other = pd.Index(other) + result = other + a + assert isinstance(result, MyIndex) + assert a._calls == 1 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_period.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_period.py new file mode 100644 index 0000000000000000000000000000000000000000..5535fe8ff928d10b994bd6556229e0163a358ab0 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_period.py @@ -0,0 +1,1675 @@ +# Arithmetic tests for DataFrame/Series/Index/Array classes that should +# behave identically. 
+# Specifically for Period dtype +import operator + +import numpy as np +import pytest + +from pandas._libs.tslibs import ( + IncompatibleFrequency, + Period, + Timestamp, + to_offset, +) +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import ( + PeriodIndex, + Series, + Timedelta, + TimedeltaIndex, + period_range, +) +import pandas._testing as tm +from pandas.core import ops +from pandas.core.arrays import TimedeltaArray +from pandas.tests.arithmetic.common import ( + assert_invalid_addsub_type, + assert_invalid_comparison, + get_upcast_box, +) + +_common_mismatch = [ + pd.offsets.YearBegin(2), + pd.offsets.MonthBegin(1), + pd.offsets.Minute(), +] + + +@pytest.fixture( + params=[ + Timedelta(minutes=30).to_pytimedelta(), + np.timedelta64(30, "s"), + Timedelta(seconds=30), + ] + + _common_mismatch +) +def not_hourly(request): + """ + Several timedelta-like and DateOffset instances that are _not_ + compatible with Hourly frequencies. + """ + return request.param + + +@pytest.fixture( + params=[ + np.timedelta64(365, "D"), + Timedelta(days=365).to_pytimedelta(), + Timedelta(days=365), + ] + + _common_mismatch +) +def mismatched_freq(request): + """ + Several timedelta-like and DateOffset instances that are _not_ + compatible with Monthly or Annual frequencies. + """ + return request.param + + +# ------------------------------------------------------------------ +# Comparisons + + +class TestPeriodArrayLikeComparisons: + # Comparison tests for PeriodDtype vectors fully parametrized over + # DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison + # tests will eventually end up here. + + @pytest.mark.parametrize("other", ["2017", Period("2017", freq="D")]) + def test_eq_scalar(self, other, box_with_array): + idx = PeriodIndex(["2017", "2017", "2018"], freq="D") + idx = tm.box_expected(idx, box_with_array) + xbox = get_upcast_box(idx, other, True) + + expected = np.array([True, True, False]) + expected = tm.box_expected(expected, xbox) + + result = idx == other + + tm.assert_equal(result, expected) + + def test_compare_zerodim(self, box_with_array): + # GH#26689 make sure we unbox zero-dimensional arrays + + pi = period_range("2000", periods=4) + other = np.array(pi.to_numpy()[0]) + + pi = tm.box_expected(pi, box_with_array) + xbox = get_upcast_box(pi, other, True) + + result = pi <= other + expected = np.array([True, False, False, False]) + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "scalar", + [ + "foo", + Timestamp("2021-01-01"), + Timedelta(days=4), + 9, + 9.5, + 2000, # specifically don't consider 2000 to match Period("2000", "D") + False, + None, + ], + ) + def test_compare_invalid_scalar(self, box_with_array, scalar): + # GH#28980 + # comparison with scalar that cannot be interpreted as a Period + pi = period_range("2000", periods=4) + parr = tm.box_expected(pi, box_with_array) + assert_invalid_comparison(parr, scalar, box_with_array) + + @pytest.mark.parametrize( + "other", + [ + pd.date_range("2000", periods=4).array, + pd.timedelta_range("1D", periods=4).array, + np.arange(4), + np.arange(4).astype(np.float64), + list(range(4)), + # match Period semantics by not treating integers as Periods + [2000, 2001, 2002, 2003], + np.arange(2000, 2004), + np.arange(2000, 2004).astype(object), + pd.Index([2000, 2001, 2002, 2003]), + ], + ) + def test_compare_invalid_listlike(self, box_with_array, other): + pi = period_range("2000", periods=4) + parr = tm.box_expected(pi, box_with_array) 
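+ # Editor's note (illustrative): assert_invalid_comparison checks that
+ # "==" is elementwise-False and "!=" elementwise-True for mismatched
+ # types, while ordering comparisons raise TypeError, e.g.:
+ #     >>> period_range("2000", periods=2) == np.arange(2)
+ #     array([False, False])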
+ assert_invalid_comparison(parr, other, box_with_array) + + @pytest.mark.parametrize("other_box", [list, np.array, lambda x: x.astype(object)]) + def test_compare_object_dtype(self, box_with_array, other_box): + pi = period_range("2000", periods=5) + parr = tm.box_expected(pi, box_with_array) + + other = other_box(pi) + xbox = get_upcast_box(parr, other, True) + + expected = np.array([True, True, True, True, True]) + expected = tm.box_expected(expected, xbox) + + result = parr == other + tm.assert_equal(result, expected) + result = parr <= other + tm.assert_equal(result, expected) + result = parr >= other + tm.assert_equal(result, expected) + + result = parr != other + tm.assert_equal(result, ~expected) + result = parr < other + tm.assert_equal(result, ~expected) + result = parr > other + tm.assert_equal(result, ~expected) + + other = other_box(pi[::-1]) + + expected = np.array([False, False, True, False, False]) + expected = tm.box_expected(expected, xbox) + result = parr == other + tm.assert_equal(result, expected) + + expected = np.array([True, True, True, False, False]) + expected = tm.box_expected(expected, xbox) + result = parr <= other + tm.assert_equal(result, expected) + + expected = np.array([False, False, True, True, True]) + expected = tm.box_expected(expected, xbox) + result = parr >= other + tm.assert_equal(result, expected) + + expected = np.array([True, True, False, True, True]) + expected = tm.box_expected(expected, xbox) + result = parr != other + tm.assert_equal(result, expected) + + expected = np.array([True, True, False, False, False]) + expected = tm.box_expected(expected, xbox) + result = parr < other + tm.assert_equal(result, expected) + + expected = np.array([False, False, False, True, True]) + expected = tm.box_expected(expected, xbox) + result = parr > other + tm.assert_equal(result, expected) + + +class TestPeriodIndexComparisons: + # TODO: parameterize over boxes + + def test_pi_cmp_period(self): + idx = period_range("2007-01", periods=20, freq="M") + per = idx[10] + + result = idx < per + exp = idx.values < idx.values[10] + tm.assert_numpy_array_equal(result, exp) + + # Tests Period.__richcmp__ against ndarray[object, ndim=2] + result = idx.values.reshape(10, 2) < per + tm.assert_numpy_array_equal(result, exp.reshape(10, 2)) + + # Tests Period.__richcmp__ against ndarray[object, ndim=0] + result = idx < np.array(per) + tm.assert_numpy_array_equal(result, exp) + + # TODO: moved from test_datetime64; de-duplicate with version below + def test_parr_cmp_period_scalar2(self, box_with_array): + pi = period_range("2000-01-01", periods=10, freq="D") + + val = pi[3] + expected = [x > val for x in pi] + + ser = tm.box_expected(pi, box_with_array) + xbox = get_upcast_box(ser, val, True) + + expected = tm.box_expected(expected, xbox) + result = ser > val + tm.assert_equal(result, expected) + + val = pi[5] + result = ser > val + expected = [x > val for x in pi] + expected = tm.box_expected(expected, xbox) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_parr_cmp_period_scalar(self, freq, box_with_array): + # GH#13200 + base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) + base = tm.box_expected(base, box_with_array) + per = Period("2011-02", freq=freq) + xbox = get_upcast_box(base, per, True) + + exp = np.array([False, True, False, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base == per, exp) + tm.assert_equal(per == base, exp) + + exp = np.array([True, False, True, True]) + exp 
= tm.box_expected(exp, xbox) + tm.assert_equal(base != per, exp) + tm.assert_equal(per != base, exp) + + exp = np.array([False, False, True, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base > per, exp) + tm.assert_equal(per < base, exp) + + exp = np.array([True, False, False, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base < per, exp) + tm.assert_equal(per > base, exp) + + exp = np.array([False, True, True, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base >= per, exp) + tm.assert_equal(per <= base, exp) + + exp = np.array([True, True, False, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base <= per, exp) + tm.assert_equal(per >= base, exp) + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_parr_cmp_pi(self, freq, box_with_array): + # GH#13200 + base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) + base = tm.box_expected(base, box_with_array) + + # TODO: could also box idx? + idx = PeriodIndex(["2011-02", "2011-01", "2011-03", "2011-05"], freq=freq) + + xbox = get_upcast_box(base, idx, True) + + exp = np.array([False, False, True, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base == idx, exp) + + exp = np.array([True, True, False, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base != idx, exp) + + exp = np.array([False, True, False, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base > idx, exp) + + exp = np.array([True, False, False, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base < idx, exp) + + exp = np.array([False, True, True, False]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base >= idx, exp) + + exp = np.array([True, False, True, True]) + exp = tm.box_expected(exp, xbox) + tm.assert_equal(base <= idx, exp) + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_parr_cmp_pi_mismatched_freq(self, freq, box_with_array): + # GH#13200 + # different base freq + base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq) + base = tm.box_expected(base, box_with_array) + + msg = rf"Invalid comparison between dtype=period\[{freq}\] and Period" + with pytest.raises(TypeError, match=msg): + base <= Period("2011", freq="Y") + + with pytest.raises(TypeError, match=msg): + Period("2011", freq="Y") >= base + + # TODO: Could parametrize over boxes for idx? 
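+ # Editor's note (illustrative): for the tm.to_array / pd.array boxes
+ # below, the reversed comparison appears to be handled by the other
+ # PeriodIndex operand, so the raised message names PeriodArray rather
+ # than Period; idx_msg picks the matching pattern per box.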
+ idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="Y") + rev_msg = r"Invalid comparison between dtype=period\[Y-DEC\] and PeriodArray" + idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg + with pytest.raises(TypeError, match=idx_msg): + base <= idx + + # Different frequency + msg = rf"Invalid comparison between dtype=period\[{freq}\] and Period" + with pytest.raises(TypeError, match=msg): + base <= Period("2011", freq="4M") + + with pytest.raises(TypeError, match=msg): + Period("2011", freq="4M") >= base + + idx = PeriodIndex(["2011", "2012", "2013", "2014"], freq="4M") + rev_msg = r"Invalid comparison between dtype=period\[4M\] and PeriodArray" + idx_msg = rev_msg if box_with_array in [tm.to_array, pd.array] else msg + with pytest.raises(TypeError, match=idx_msg): + base <= idx + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_pi_cmp_nat(self, freq): + idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq) + per = idx1[1] + + result = idx1 > per + exp = np.array([False, False, False, True]) + tm.assert_numpy_array_equal(result, exp) + result = per < idx1 + tm.assert_numpy_array_equal(result, exp) + + result = idx1 == pd.NaT + exp = np.array([False, False, False, False]) + tm.assert_numpy_array_equal(result, exp) + result = pd.NaT == idx1 + tm.assert_numpy_array_equal(result, exp) + + result = idx1 != pd.NaT + exp = np.array([True, True, True, True]) + tm.assert_numpy_array_equal(result, exp) + result = pd.NaT != idx1 + tm.assert_numpy_array_equal(result, exp) + + idx2 = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq=freq) + result = idx1 < idx2 + exp = np.array([True, False, False, False]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 == idx2 + exp = np.array([False, False, False, False]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 != idx2 + exp = np.array([True, True, True, True]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 == idx1 + exp = np.array([True, True, False, True]) + tm.assert_numpy_array_equal(result, exp) + + result = idx1 != idx1 + exp = np.array([False, False, True, False]) + tm.assert_numpy_array_equal(result, exp) + + @pytest.mark.parametrize("freq", ["M", "2M", "3M"]) + def test_pi_cmp_nat_mismatched_freq_raises(self, freq): + idx1 = PeriodIndex(["2011-01", "2011-02", "NaT", "2011-05"], freq=freq) + + diff = PeriodIndex(["2011-02", "2011-01", "2011-04", "NaT"], freq="4M") + msg = rf"Invalid comparison between dtype=period\[{freq}\] and PeriodArray" + with pytest.raises(TypeError, match=msg): + idx1 > diff + + result = idx1 == diff + expected = np.array([False, False, False, False], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + # TODO: De-duplicate with test_pi_cmp_nat + @pytest.mark.parametrize("dtype", [object, None]) + def test_comp_nat(self, dtype): + left = PeriodIndex([Period("2011-01-01"), pd.NaT, Period("2011-01-03")]) + right = PeriodIndex([pd.NaT, pd.NaT, Period("2011-01-03")]) + + if dtype is not None: + left = left.astype(dtype) + right = right.astype(dtype) + + result = left == right + expected = np.array([False, False, True]) + tm.assert_numpy_array_equal(result, expected) + + result = left != right + expected = np.array([True, True, False]) + tm.assert_numpy_array_equal(result, expected) + + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(left == pd.NaT, expected) + tm.assert_numpy_array_equal(pd.NaT == right, expected) + + expected = np.array([True, True, True]) + 
+
+
+class TestPeriodSeriesComparisons:
+    def test_cmp_series_period_series_mixed_freq(self):
+        # GH#13200
+        base = Series(
+            [
+                Period("2011", freq="Y"),
+                Period("2011-02", freq="M"),
+                Period("2013", freq="Y"),
+                Period("2011-04", freq="M"),
+            ]
+        )
+
+        ser = Series(
+            [
+                Period("2012", freq="Y"),
+                Period("2011-01", freq="M"),
+                Period("2013", freq="Y"),
+                Period("2011-05", freq="M"),
+            ]
+        )
+
+        exp = Series([False, False, True, False])
+        tm.assert_series_equal(base == ser, exp)
+
+        exp = Series([True, True, False, True])
+        tm.assert_series_equal(base != ser, exp)
+
+        exp = Series([False, True, False, False])
+        tm.assert_series_equal(base > ser, exp)
+
+        exp = Series([True, False, False, True])
+        tm.assert_series_equal(base < ser, exp)
+
+        exp = Series([False, True, True, False])
+        tm.assert_series_equal(base >= ser, exp)
+
+        exp = Series([True, False, True, True])
+        tm.assert_series_equal(base <= ser, exp)
+
+
+class TestPeriodIndexSeriesComparisonConsistency:
+    """Test PeriodIndex and Period Series Ops consistency"""
+
+    # TODO: needs parametrization+de-duplication
+
+    def _check(self, values, func, expected):
+        # Test PeriodIndex and Period Series Ops consistency
+
+        idx = PeriodIndex(values)
+        result = func(idx)
+
+        # check that we don't pass an unwanted type to tm.assert_equal
+        assert isinstance(expected, (pd.Index, np.ndarray))
+        tm.assert_equal(result, expected)
+
+        s = Series(values)
+        result = func(s)
+
+        exp = Series(expected, name=values.name)
+        tm.assert_series_equal(result, exp)
+
+    def test_pi_comp_period(self):
+        idx = PeriodIndex(
+            ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
+        )
+        per = idx[2]
+
+        f = lambda x: x == per
+        exp = np.array([False, False, True, False], dtype=np.bool_)
+        self._check(idx, f, exp)
+        f = lambda x: per == x
+        self._check(idx, f, exp)
+
+        f = lambda x: x != per
+        exp = np.array([True, True, False, True], dtype=np.bool_)
+        self._check(idx, f, exp)
+        f = lambda x: per != x
+        self._check(idx, f, exp)
+
+        f = lambda x: per >= x
+        exp = np.array([True, True, True, False], dtype=np.bool_)
+        self._check(idx, f, exp)
+
+        f = lambda x: x > per
+        exp = np.array([False, False, False, True], dtype=np.bool_)
+        self._check(idx, f, exp)
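+
+    # The _check helper above asserts that an operation gives the same answer
+    # whether the values are held in a PeriodIndex or in a Series.  A minimal
+    # sketch of that invariant (illustrative only):
+    #
+    #     >>> idx = pd.PeriodIndex(["2011-01", "2011-02"], freq="M")
+    #     >>> f = lambda x: x == idx[1]
+    #     >>> list(f(idx)) == list(f(pd.Series(idx)))
+    #     True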
+
+    def test_pi_comp_period_nat(self):
+        idx = PeriodIndex(
+            ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx"
+        )
+        per = idx[2]
+
+        f = lambda x: x == per
+        exp = np.array([False, False, True, False], dtype=np.bool_)
+        self._check(idx, f, exp)
+        f = lambda x: per == x
+        self._check(idx, f, exp)
+
+        f = lambda x: x == pd.NaT
+        exp = np.array([False, False, False, False], dtype=np.bool_)
+        self._check(idx, f, exp)
+        f = lambda x: pd.NaT == x
+        self._check(idx, f, exp)
+
+        f = lambda x: x != per
+        exp = np.array([True, True, False, True], dtype=np.bool_)
+        self._check(idx, f, exp)
+        f = lambda x: per != x
+        self._check(idx, f, exp)
+
+        f = lambda x: x != pd.NaT
+        exp = np.array([True, True, True, True], dtype=np.bool_)
+        self._check(idx, f, exp)
+        f = lambda x: pd.NaT != x
+        self._check(idx, f, exp)
+
+        f = lambda x: per >= x
+        exp = np.array([True, False, True, False], dtype=np.bool_)
+        self._check(idx, f, exp)
+
+        f = lambda x: x < per
+        exp = np.array([True, False, False, False], dtype=np.bool_)
+        self._check(idx, f, exp)
+
+        f = lambda x: x > pd.NaT
+        exp = np.array([False, False, False, False], dtype=np.bool_)
+        self._check(idx, f, exp)
+
+        f = lambda x: pd.NaT >= x
+        exp = np.array([False, False, False, False], dtype=np.bool_)
+        self._check(idx, f, exp)
+
+
+# ------------------------------------------------------------------
+# Arithmetic
+
+
+class TestPeriodFrameArithmetic:
+    def test_ops_frame_period(self):
+        # GH#13043
+        df = pd.DataFrame(
+            {
+                "A": [Period("2015-01", freq="M"), Period("2015-02", freq="M")],
+                "B": [Period("2014-01", freq="M"), Period("2014-02", freq="M")],
+            }
+        )
+        assert df["A"].dtype == "Period[M]"
+        assert df["B"].dtype == "Period[M]"
+
+        p = Period("2015-03", freq="M")
+        off = p.freq
+        # dtype will be object because of original dtype
+        exp = pd.DataFrame(
+            {
+                "A": np.array([2 * off, 1 * off], dtype=object),
+                "B": np.array([14 * off, 13 * off], dtype=object),
+            }
+        )
+        tm.assert_frame_equal(p - df, exp)
+        tm.assert_frame_equal(df - p, -1 * exp)
+
+        df2 = pd.DataFrame(
+            {
+                "A": [Period("2015-05", freq="M"), Period("2015-06", freq="M")],
+                "B": [Period("2015-05", freq="M"), Period("2015-06", freq="M")],
+            }
+        )
+        assert df2["A"].dtype == "Period[M]"
+        assert df2["B"].dtype == "Period[M]"
+
+        exp = pd.DataFrame(
+            {
+                "A": np.array([4 * off, 4 * off], dtype=object),
+                "B": np.array([16 * off, 16 * off], dtype=object),
+            }
+        )
+        tm.assert_frame_equal(df2 - df, exp)
+        tm.assert_frame_equal(df - df2, -1 * exp)
+
+
+class TestPeriodIndexArithmetic:
+    # ---------------------------------------------------------------
+    # __add__/__sub__ with PeriodIndex
+    # PeriodIndex + other is defined for integers and timedelta-like others
+    # PeriodIndex - other is defined for integers, timedelta-like others,
+    # and PeriodIndex (with matching freq)
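+
+    # Sketch of the rules stated above (illustrative, not part of the
+    # original suite): adding an integer shifts by multiples of the freq,
+    # and Period - Period yields a DateOffset multiple.
+    #
+    #     >>> pi = pd.period_range("2000-01", periods=2, freq="M")
+    #     >>> (pi + 1).tolist()
+    #     [Period('2000-02', 'M'), Period('2000-03', 'M')]
+    #     >>> pi[1] - pi[0]
+    #     <MonthEnd>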
+
+    def test_parr_add_iadd_parr_raises(self, box_with_array):
+        rng = period_range("1/1/2000", freq="D", periods=5)
+        other = period_range("1/6/2000", freq="D", periods=5)
+        # TODO: parametrize over boxes for other?
+
+        rng = tm.box_expected(rng, box_with_array)
+        # An earlier implementation of PeriodIndex addition performed
+        # a set operation (union).  This has since been changed to
+        # raise a TypeError.  See GH#14164 and GH#13077 for historical
+        # reference.
+        msg = r"unsupported operand type\(s\) for \+: .* and .*"
+        with pytest.raises(TypeError, match=msg):
+            rng + other
+
+        with pytest.raises(TypeError, match=msg):
+            rng += other
+
+    def test_pi_sub_isub_pi(self):
+        # GH#20049
+        # For historical reference see GH#14164, GH#13077.
+        # PeriodIndex subtraction originally performed set difference,
+        # then changed to raise TypeError before being implemented in GH#20049
+        rng = period_range("1/1/2000", freq="D", periods=5)
+        other = period_range("1/6/2000", freq="D", periods=5)
+
+        off = rng.freq
+        expected = pd.Index([-5 * off] * 5)
+        result = rng - other
+        tm.assert_index_equal(result, expected)
+
+        rng -= other
+        tm.assert_index_equal(rng, expected)
+
+    def test_pi_sub_pi_with_nat(self):
+        rng = period_range("1/1/2000", freq="D", periods=5)
+        other = rng[1:].insert(0, pd.NaT)
+        assert other[1:].equals(rng[1:])
+
+        result = rng - other
+        off = rng.freq
+        expected = pd.Index([pd.NaT, 0 * off, 0 * off, 0 * off, 0 * off])
+        tm.assert_index_equal(result, expected)
+
+    def test_parr_sub_pi_mismatched_freq(self, box_with_array, box_with_array2):
+        rng = period_range("1/1/2000", freq="D", periods=5)
+        other = period_range("1/6/2000", freq="h", periods=5)
+
+        rng = tm.box_expected(rng, box_with_array)
+        other = tm.box_expected(other, box_with_array2)
+        msg = r"Input has different freq=[hD] from PeriodArray\(freq=[Dh]\)"
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng - other
+
+    @pytest.mark.parametrize("n", [1, 2, 3, 4])
+    def test_sub_n_gt_1_ticks(self, tick_classes, n):
+        # GH 23878
+        p1_d = "19910905"
+        p2_d = "19920406"
+        p1 = PeriodIndex([p1_d], freq=tick_classes(n))
+        p2 = PeriodIndex([p2_d], freq=tick_classes(n))
+
+        expected = PeriodIndex([p2_d], freq=p2.freq.base) - PeriodIndex(
+            [p1_d], freq=p1.freq.base
+        )
+
+        tm.assert_index_equal((p2 - p1), expected)
+
+    @pytest.mark.parametrize("n", [1, 2, 3, 4])
+    @pytest.mark.parametrize(
+        "offset, kwd_name",
+        [
+            (pd.offsets.YearEnd, "month"),
+            (pd.offsets.QuarterEnd, "startingMonth"),
+            (pd.offsets.MonthEnd, None),
+            (pd.offsets.Week, "weekday"),
+        ],
+    )
+    def test_sub_n_gt_1_offsets(self, offset, kwd_name, n):
+        # GH 23878
+        kwds = {kwd_name: 3} if kwd_name is not None else {}
+        p1_d = "19910905"
+        p2_d = "19920406"
+        freq = offset(n, normalize=False, **kwds)
+        p1 = PeriodIndex([p1_d], freq=freq)
+        p2 = PeriodIndex([p2_d], freq=freq)
+
+        result = p2 - p1
+        expected = PeriodIndex([p2_d], freq=freq.base) - PeriodIndex(
+            [p1_d], freq=freq.base
+        )
+
+        tm.assert_index_equal(result, expected)
+
+    # -------------------------------------------------------------
+    # Invalid Operations
+
+    @pytest.mark.parametrize(
+        "other",
+        [
+            # datetime scalars
+            Timestamp("2016-01-01"),
+            Timestamp("2016-01-01").to_pydatetime(),
+            Timestamp("2016-01-01").to_datetime64(),
+            # datetime-like arrays
+            pd.date_range("2016-01-01", periods=3, freq="h"),
+            pd.date_range("2016-01-01", periods=3, tz="Europe/Brussels"),
+            pd.date_range("2016-01-01", periods=3, freq="s")._data,
+            pd.date_range("2016-01-01", periods=3, tz="Asia/Tokyo")._data,
+            # Miscellaneous invalid types
+            3.14,
+            np.array([2.0, 3.0, 4.0]),
+        ],
+    )
+    def test_parr_add_sub_invalid(self, other, box_with_array):
+        # GH#23215
+        rng = period_range("1/1/2000", freq="D", periods=3)
+        rng = tm.box_expected(rng, box_with_array)
+
+        msg = "|".join(
+            [
+                r"(?:cannot add PeriodArray and .*)",
+                r"(?:cannot subtract .* from (?:a\s)?.*)",
+                r"(?:unsupported operand type\(s\) for \+: .* and .*)",
+                r"unsupported operand type\(s\) for [+-]: .* and .*",
+            ]
+        )
+        assert_invalid_addsub_type(rng, other, msg)
+        with pytest.raises(TypeError, match=msg):
+            rng + other
+        with pytest.raises(TypeError, match=msg):
+            other + rng
+        with pytest.raises(TypeError, match=msg):
+            rng - other
+        with pytest.raises(TypeError, match=msg):
+            other - rng
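+
+    # Illustrative sketch of the invalid-op behavior exercised above (not
+    # part of the original file): Period and Timestamp cannot be added.
+    #
+    #     >>> pi = pd.period_range("2000-01-01", periods=3, freq="D")
+    #     >>> pi + pd.Timestamp("2016-01-01")   # raises
+    #     TypeError: cannot add PeriodArray and Timestamp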
+
+    # -----------------------------------------------------------------
+    # __add__/__sub__ with ndarray[datetime64] and ndarray[timedelta64]
+
+    def test_pi_add_sub_td64_array_non_tick_raises(self):
+        rng = period_range("1/1/2000", freq="Q", periods=3)
+        tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
+        tdarr = tdi.values
+
+        msg = r"Cannot add or subtract timedelta64\[ns\] dtype from period\[Q-DEC\]"
+        with pytest.raises(TypeError, match=msg):
+            rng + tdarr
+        with pytest.raises(TypeError, match=msg):
+            tdarr + rng
+
+        with pytest.raises(TypeError, match=msg):
+            rng - tdarr
+        msg = r"cannot subtract PeriodArray from TimedeltaArray"
+        with pytest.raises(TypeError, match=msg):
+            tdarr - rng
+
+    def test_pi_add_sub_td64_array_tick(self):
+        # PeriodIndex + Timedelta-like is allowed only with
+        # tick-like frequencies
+        rng = period_range("1/1/2000", freq="90D", periods=3)
+        tdi = TimedeltaIndex(["-1 Day", "-1 Day", "-1 Day"])
+        tdarr = tdi.values
+
+        expected = period_range("12/31/1999", freq="90D", periods=3)
+        result = rng + tdi
+        tm.assert_index_equal(result, expected)
+        result = rng + tdarr
+        tm.assert_index_equal(result, expected)
+        result = tdi + rng
+        tm.assert_index_equal(result, expected)
+        result = tdarr + rng
+        tm.assert_index_equal(result, expected)
+
+        expected = period_range("1/2/2000", freq="90D", periods=3)
+
+        result = rng - tdi
+        tm.assert_index_equal(result, expected)
+        result = rng - tdarr
+        tm.assert_index_equal(result, expected)
+
+        msg = r"cannot subtract .* from .*"
+        with pytest.raises(TypeError, match=msg):
+            tdarr - rng
+
+        with pytest.raises(TypeError, match=msg):
+            tdi - rng
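+
+    # Sketch of the tick/non-tick distinction above (illustrative only): a
+    # Timedelta can be added to a Period only when the Period's freq is
+    # tick-like (a fixed duration), and only in integer multiples of it.
+    #
+    #     >>> per = pd.Period("2000-01-01", freq="D")
+    #     >>> per + pd.Timedelta(days=2)
+    #     Period('2000-01-03', 'D')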
+
+    @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "h"])
+    @pytest.mark.parametrize("tdi_freq", [None, "h"])
+    def test_parr_sub_td64array(self, box_with_array, tdi_freq, pi_freq):
+        box = box_with_array
+        xbox = box if box not in [pd.array, tm.to_array] else pd.Index
+
+        tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq)
+        dti = Timestamp("2018-03-07 17:16:40") + tdi
+        pi = dti.to_period(pi_freq)
+
+        # TODO: parametrize over box for pi?
+        td64obj = tm.box_expected(tdi, box)
+
+        if pi_freq == "h":
+            result = pi - td64obj
+            expected = (pi.to_timestamp("s") - tdi).to_period(pi_freq)
+            expected = tm.box_expected(expected, xbox)
+            tm.assert_equal(result, expected)
+
+            # Subtract from scalar
+            result = pi[0] - td64obj
+            expected = (pi[0].to_timestamp("s") - tdi).to_period(pi_freq)
+            expected = tm.box_expected(expected, box)
+            tm.assert_equal(result, expected)
+
+        elif pi_freq == "D":
+            # Tick, but non-compatible
+            msg = (
+                "Cannot add/subtract timedelta-like from PeriodArray that is "
+                "not an integer multiple of the PeriodArray's freq."
+            )
+            with pytest.raises(IncompatibleFrequency, match=msg):
+                pi - td64obj
+
+            with pytest.raises(IncompatibleFrequency, match=msg):
+                pi[0] - td64obj
+
+        else:
+            # With non-Tick freq, we could not add timedelta64 array regardless
+            # of what its resolution is
+            msg = "Cannot add or subtract timedelta64"
+            with pytest.raises(TypeError, match=msg):
+                pi - td64obj
+            with pytest.raises(TypeError, match=msg):
+                pi[0] - td64obj
+
+    # -----------------------------------------------------------------
+    # operations with array/Index of DateOffset objects
+
+    @pytest.mark.parametrize("box", [np.array, pd.Index])
+    def test_pi_add_offset_array(self, box):
+        # GH#18849
+        pi = PeriodIndex([Period("2015Q1"), Period("2016Q2")])
+        offs = box(
+            [
+                pd.offsets.QuarterEnd(n=1, startingMonth=12),
+                pd.offsets.QuarterEnd(n=-2, startingMonth=12),
+            ]
+        )
+        expected = PeriodIndex([Period("2015Q2"), Period("2015Q4")]).astype(object)
+
+        with tm.assert_produces_warning(PerformanceWarning):
+            res = pi + offs
+        tm.assert_index_equal(res, expected)
+
+        with tm.assert_produces_warning(PerformanceWarning):
+            res2 = offs + pi
+        tm.assert_index_equal(res2, expected)
+
+        unanchored = np.array([pd.offsets.Hour(n=1), pd.offsets.Minute(n=-2)])
+        # addition/subtraction ops with incompatible offsets should issue
+        # a PerformanceWarning and _then_ raise an IncompatibleFrequency error.
+        msg = r"Input cannot be converted to Period\(freq=Q-DEC\)"
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            with tm.assert_produces_warning(PerformanceWarning):
+                pi + unanchored
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            with tm.assert_produces_warning(PerformanceWarning):
+                unanchored + pi
+
+    @pytest.mark.parametrize("box", [np.array, pd.Index])
+    def test_pi_sub_offset_array(self, box):
+        # GH#18824
+        pi = PeriodIndex([Period("2015Q1"), Period("2016Q2")])
+        other = box(
+            [
+                pd.offsets.QuarterEnd(n=1, startingMonth=12),
+                pd.offsets.QuarterEnd(n=-2, startingMonth=12),
+            ]
+        )
+
+        expected = PeriodIndex([pi[n] - other[n] for n in range(len(pi))])
+        expected = expected.astype(object)
+
+        with tm.assert_produces_warning(PerformanceWarning):
+            res = pi - other
+        tm.assert_index_equal(res, expected)
+
+        anchored = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
+
+        # addition/subtraction ops with anchored offsets should issue
+        # a PerformanceWarning and _then_ raise an IncompatibleFrequency error.
+        msg = r"Input has different freq=-1M from Period\(freq=Q-DEC\)"
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            with tm.assert_produces_warning(PerformanceWarning):
+                pi - anchored
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            with tm.assert_produces_warning(PerformanceWarning):
+                anchored - pi
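+
+    # Sketch (illustrative, not from the original file): an object-dtype box
+    # of offsets falls back to an elementwise loop, hence the
+    # PerformanceWarning asserted above.
+    #
+    #     >>> pi = pd.PeriodIndex([pd.Period("2015Q1"), pd.Period("2016Q2")])
+    #     >>> offs = np.array([pd.offsets.QuarterEnd(startingMonth=12)] * 2)
+    #     >>> pi + offs  # doctest: +SKIP  (emits PerformanceWarning)
+    #     Index([2015Q2, 2016Q3], dtype='object')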
+ """ + rng = period_range("2000-01-01 09:00", freq="h", periods=10) + result = rng - one + expected = period_range("2000-01-01 08:00", freq="h", periods=10) + tm.assert_index_equal(result, expected) + rng -= one + tm.assert_index_equal(rng, expected) + + @pytest.mark.parametrize("five", [5, np.array(5, dtype=np.int64)]) + def test_pi_sub_intlike(self, five): + rng = period_range("2007-01", periods=50) + + result = rng - five + exp = rng + (-five) + tm.assert_index_equal(result, exp) + + def test_pi_add_sub_int_array_freqn_gt1(self): + # GH#47209 test adding array of ints when freq.n > 1 matches + # scalar behavior + pi = period_range("2016-01-01", periods=10, freq="2D") + arr = np.arange(10) + result = pi + arr + expected = pd.Index([x + y for x, y in zip(pi, arr)]) + tm.assert_index_equal(result, expected) + + result = pi - arr + expected = pd.Index([x - y for x, y in zip(pi, arr)]) + tm.assert_index_equal(result, expected) + + def test_pi_sub_isub_offset(self): + # offset + # DateOffset + rng = period_range("2014", "2024", freq="Y") + result = rng - pd.offsets.YearEnd(5) + expected = period_range("2009", "2019", freq="Y") + tm.assert_index_equal(result, expected) + rng -= pd.offsets.YearEnd(5) + tm.assert_index_equal(rng, expected) + + rng = period_range("2014-01", "2016-12", freq="M") + result = rng - pd.offsets.MonthEnd(5) + expected = period_range("2013-08", "2016-07", freq="M") + tm.assert_index_equal(result, expected) + + rng -= pd.offsets.MonthEnd(5) + tm.assert_index_equal(rng, expected) + + @pytest.mark.parametrize("transpose", [True, False]) + def test_pi_add_offset_n_gt1(self, box_with_array, transpose): + # GH#23215 + # add offset to PeriodIndex with freq.n > 1 + + per = Period("2016-01", freq="2M") + pi = PeriodIndex([per]) + + expected = PeriodIndex(["2016-03"], freq="2M") + + pi = tm.box_expected(pi, box_with_array, transpose=transpose) + expected = tm.box_expected(expected, box_with_array, transpose=transpose) + + result = pi + per.freq + tm.assert_equal(result, expected) + + result = per.freq + pi + tm.assert_equal(result, expected) + + def test_pi_add_offset_n_gt1_not_divisible(self, box_with_array): + # GH#23215 + # PeriodIndex with freq.n > 1 add offset with offset.n % freq.n != 0 + pi = PeriodIndex(["2016-01"], freq="2M") + expected = PeriodIndex(["2016-04"], freq="2M") + + pi = tm.box_expected(pi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = pi + to_offset("3ME") + tm.assert_equal(result, expected) + + result = to_offset("3ME") + pi + tm.assert_equal(result, expected) + + # --------------------------------------------------------------- + # __add__/__sub__ with integer arrays + + @pytest.mark.parametrize("int_holder", [np.array, pd.Index]) + @pytest.mark.parametrize("op", [operator.add, ops.radd]) + def test_pi_add_intarray(self, int_holder, op): + # GH#19959 + pi = PeriodIndex([Period("2015Q1"), Period("NaT")]) + other = int_holder([4, -1]) + + result = op(pi, other) + expected = PeriodIndex([Period("2016Q1"), Period("NaT")]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("int_holder", [np.array, pd.Index]) + def test_pi_sub_intarray(self, int_holder): + # GH#19959 + pi = PeriodIndex([Period("2015Q1"), Period("NaT")]) + other = int_holder([4, -1]) + + result = pi - other + expected = PeriodIndex([Period("2014Q1"), Period("NaT")]) + tm.assert_index_equal(result, expected) + + msg = r"bad operand type for unary -: 'PeriodArray'" + with pytest.raises(TypeError, match=msg): + other - pi + + # 
+
+    # ---------------------------------------------------------------
+    # Timedelta-like (timedelta, timedelta64, Timedelta, Tick)
+    # TODO: Some of these are misnomers because of non-Tick DateOffsets
+
+    def test_parr_add_timedeltalike_minute_gt1(self, three_days, box_with_array):
+        # GH#23031 adding a time-delta-like offset to a PeriodArray that has
+        # a tick-like frequency with n != 1 (freq="2D" here).  A more general
+        # case is tested below in test_pi_add_timedeltalike_tick_gt1, but
+        # here we write out the expected result more explicitly.
+        other = three_days
+        rng = period_range("2014-05-01", periods=3, freq="2D")
+        rng = tm.box_expected(rng, box_with_array)
+
+        expected = PeriodIndex(["2014-05-04", "2014-05-06", "2014-05-08"], freq="2D")
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = rng + other
+        tm.assert_equal(result, expected)
+
+        result = other + rng
+        tm.assert_equal(result, expected)
+
+        # subtraction
+        expected = PeriodIndex(["2014-04-28", "2014-04-30", "2014-05-02"], freq="2D")
+        expected = tm.box_expected(expected, box_with_array)
+        result = rng - other
+        tm.assert_equal(result, expected)
+
+        msg = "|".join(
+            [
+                r"bad operand type for unary -: 'PeriodArray'",
+                r"cannot subtract PeriodArray from timedelta64\[[hD]\]",
+            ]
+        )
+        with pytest.raises(TypeError, match=msg):
+            other - rng
+
+    @pytest.mark.parametrize("freqstr", ["5ns", "5us", "5ms", "5s", "5min", "5h", "5d"])
+    def test_parr_add_timedeltalike_tick_gt1(self, three_days, freqstr, box_with_array):
+        # GH#23031 adding a time-delta-like offset to a PeriodArray that has
+        # tick-like frequency with n != 1
+        other = three_days
+        rng = period_range("2014-05-01", periods=6, freq=freqstr)
+        first = rng[0]
+        rng = tm.box_expected(rng, box_with_array)
+
+        expected = period_range(first + other, periods=6, freq=freqstr)
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = rng + other
+        tm.assert_equal(result, expected)
+
+        result = other + rng
+        tm.assert_equal(result, expected)
+
+        # subtraction
+        expected = period_range(first - other, periods=6, freq=freqstr)
+        expected = tm.box_expected(expected, box_with_array)
+        result = rng - other
+        tm.assert_equal(result, expected)
+        msg = "|".join(
+            [
+                r"bad operand type for unary -: 'PeriodArray'",
+                r"cannot subtract PeriodArray from timedelta64\[[hD]\]",
+            ]
+        )
+        with pytest.raises(TypeError, match=msg):
+            other - rng
+
+    def test_pi_add_iadd_timedeltalike_daily(self, three_days):
+        # Tick
+        other = three_days
+        rng = period_range("2014-05-01", "2014-05-15", freq="D")
+        expected = period_range("2014-05-04", "2014-05-18", freq="D")
+
+        result = rng + other
+        tm.assert_index_equal(result, expected)
+
+        rng += other
+        tm.assert_index_equal(rng, expected)
+
+    def test_pi_sub_isub_timedeltalike_daily(self, three_days):
+        # Tick-like 3 Days
+        other = three_days
+        rng = period_range("2014-05-01", "2014-05-15", freq="D")
+        expected = period_range("2014-04-28", "2014-05-12", freq="D")
+
+        result = rng - other
+        tm.assert_index_equal(result, expected)
+
+        rng -= other
+        tm.assert_index_equal(rng, expected)
+
+    def test_parr_add_sub_timedeltalike_freq_mismatch_daily(
+        self, not_daily, box_with_array
+    ):
+        other = not_daily
+        rng = period_range("2014-05-01", "2014-05-15", freq="D")
+        rng = tm.box_expected(rng, box_with_array)
+
+        msg = "|".join(
+            [
+                # non-timedelta-like DateOffset
+                "Input has different freq(=.+)? from Period.*?\\(freq=D\\)",
+                # timedelta/td64/Timedelta but not a multiple of 24H
+                "Cannot add/subtract timedelta-like from PeriodArray that is "
+                "not an integer multiple of the PeriodArray's freq.",
+            ]
+        )
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng + other
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng += other
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng - other
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng -= other
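+
+    # Sketch of the mismatch rule above (illustrative only): a daily Period
+    # accepts only whole-day timedeltas.
+    #
+    #     >>> per = pd.Period("2014-05-01", freq="D")
+    #     >>> per + pd.Timedelta(hours=23)   # raises IncompatibleFrequency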
+
+    def test_pi_add_iadd_timedeltalike_hourly(self, two_hours):
+        other = two_hours
+        rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="h")
+        expected = period_range("2014-01-01 12:00", "2014-01-05 12:00", freq="h")
+
+        result = rng + other
+        tm.assert_index_equal(result, expected)
+
+        rng += other
+        tm.assert_index_equal(rng, expected)
+
+    def test_parr_add_timedeltalike_mismatched_freq_hourly(
+        self, not_hourly, box_with_array
+    ):
+        other = not_hourly
+        rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="h")
+        rng = tm.box_expected(rng, box_with_array)
+        msg = "|".join(
+            [
+                # non-timedelta-like DateOffset
+                "Input has different freq(=.+)? from Period.*?\\(freq=h\\)",
+                # timedelta/td64/Timedelta but not a multiple of 1h
+                "Cannot add/subtract timedelta-like from PeriodArray that is "
+                "not an integer multiple of the PeriodArray's freq.",
+            ]
+        )
+
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng + other
+
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng += other
+
+    def test_pi_sub_isub_timedeltalike_hourly(self, two_hours):
+        other = two_hours
+        rng = period_range("2014-01-01 10:00", "2014-01-05 10:00", freq="h")
+        expected = period_range("2014-01-01 08:00", "2014-01-05 08:00", freq="h")
+
+        result = rng - other
+        tm.assert_index_equal(result, expected)
+
+        rng -= other
+        tm.assert_index_equal(rng, expected)
+
+    def test_add_iadd_timedeltalike_annual(self):
+        # offset
+        # DateOffset
+        rng = period_range("2014", "2024", freq="Y")
+        result = rng + pd.offsets.YearEnd(5)
+        expected = period_range("2019", "2029", freq="Y")
+        tm.assert_index_equal(result, expected)
+        rng += pd.offsets.YearEnd(5)
+        tm.assert_index_equal(rng, expected)
+
+    def test_pi_add_sub_timedeltalike_freq_mismatch_annual(self, mismatched_freq):
+        other = mismatched_freq
+        rng = period_range("2014", "2024", freq="Y")
+        msg = "Input has different freq(=.+)? from Period.*?\\(freq=Y-DEC\\)"
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng + other
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng += other
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng - other
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng -= other
+
+    def test_pi_add_iadd_timedeltalike_M(self):
+        rng = period_range("2014-01", "2016-12", freq="M")
+        expected = period_range("2014-06", "2017-05", freq="M")
+
+        result = rng + pd.offsets.MonthEnd(5)
+        tm.assert_index_equal(result, expected)
+
+        rng += pd.offsets.MonthEnd(5)
+        tm.assert_index_equal(rng, expected)
+
+    def test_pi_add_sub_timedeltalike_freq_mismatch_monthly(self, mismatched_freq):
+        other = mismatched_freq
+        rng = period_range("2014-01", "2016-12", freq="M")
+        msg = "Input has different freq(=.+)? from Period.*?\\(freq=M\\)"
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng + other
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng += other
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng - other
+        with pytest.raises(IncompatibleFrequency, match=msg):
+            rng -= other
+
+    @pytest.mark.parametrize("transpose", [True, False])
+    def test_parr_add_sub_td64_nat(self, box_with_array, transpose):
+        # GH#23320 special handling for timedelta64("NaT")
+        pi = period_range("1994-04-01", periods=9, freq="19D")
+        other = np.timedelta64("NaT")
+        expected = PeriodIndex(["NaT"] * 9, freq="19D")
+
+        obj = tm.box_expected(pi, box_with_array, transpose=transpose)
+        expected = tm.box_expected(expected, box_with_array, transpose=transpose)
+
+        result = obj + other
+        tm.assert_equal(result, expected)
+        result = other + obj
+        tm.assert_equal(result, expected)
+        result = obj - other
+        tm.assert_equal(result, expected)
+        msg = r"cannot subtract .* from .*"
+        with pytest.raises(TypeError, match=msg):
+            other - obj
+
+    @pytest.mark.parametrize(
+        "other",
+        [
+            np.array(["NaT"] * 9, dtype="m8[ns]"),
+            TimedeltaArray._from_sequence(["NaT"] * 9, dtype="m8[ns]"),
+        ],
+    )
+    def test_parr_add_sub_tdt64_nat_array(self, box_with_array, other):
+        pi = period_range("1994-04-01", periods=9, freq="19D")
+        expected = PeriodIndex(["NaT"] * 9, freq="19D")
+
+        obj = tm.box_expected(pi, box_with_array)
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = obj + other
+        tm.assert_equal(result, expected)
+        result = other + obj
+        tm.assert_equal(result, expected)
+        result = obj - other
+        tm.assert_equal(result, expected)
+        msg = r"cannot subtract .* from .*"
+        with pytest.raises(TypeError, match=msg):
+            other - obj
+
+        # some but not *all* NaT
+        other = other.copy()
+        other[0] = np.timedelta64(0, "ns")
+        expected = PeriodIndex([pi[0]] + ["NaT"] * 8, freq="19D")
+        expected = tm.box_expected(expected, box_with_array)
+
+        result = obj + other
+        tm.assert_equal(result, expected)
+        result = other + obj
+        tm.assert_equal(result, expected)
+        result = obj - other
+        tm.assert_equal(result, expected)
+        with pytest.raises(TypeError, match=msg):
+            other - obj
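+
+    # Sketch of the NaT propagation above (illustrative only):
+    #
+    #     >>> per = pd.Period("1994-04-01", freq="19D")
+    #     >>> per + np.timedelta64("NaT")
+    #     NaT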
DatetimeArray" + with pytest.raises(TypeError, match=msg): + arr + Series([ts]) + with pytest.raises(TypeError, match=msg): + Series([ts]) + arr + with pytest.raises(TypeError, match=msg): + arr + pd.Index([ts]) + with pytest.raises(TypeError, match=msg): + pd.Index([ts]) + arr + + if box_with_array is pd.DataFrame: + msg = "cannot add PeriodArray and DatetimeArray" + else: + msg = r"unsupported operand type\(s\) for \+: 'Period' and 'DatetimeArray" + with pytest.raises(TypeError, match=msg): + arr + pd.DataFrame([ts]) + if box_with_array is pd.DataFrame: + msg = "cannot add PeriodArray and DatetimeArray" + else: + msg = r"unsupported operand type\(s\) for \+: 'DatetimeArray' and 'Period'" + with pytest.raises(TypeError, match=msg): + pd.DataFrame([ts]) + arr + + +class TestPeriodSeriesArithmetic: + def test_parr_add_timedeltalike_scalar(self, three_days, box_with_array): + # GH#13043 + ser = Series( + [Period("2015-01-01", freq="D"), Period("2015-01-02", freq="D")], + name="xxx", + ) + assert ser.dtype == "Period[D]" + + expected = Series( + [Period("2015-01-04", freq="D"), Period("2015-01-05", freq="D")], + name="xxx", + ) + + obj = tm.box_expected(ser, box_with_array) + if box_with_array is pd.DataFrame: + assert (obj.dtypes == "Period[D]").all() + + expected = tm.box_expected(expected, box_with_array) + + result = obj + three_days + tm.assert_equal(result, expected) + + result = three_days + obj + tm.assert_equal(result, expected) + + def test_ops_series_period(self): + # GH#13043 + ser = Series( + [Period("2015-01-01", freq="D"), Period("2015-01-02", freq="D")], + name="xxx", + ) + assert ser.dtype == "Period[D]" + + per = Period("2015-01-10", freq="D") + off = per.freq + # dtype will be object because of original dtype + expected = Series([9 * off, 8 * off], name="xxx", dtype=object) + tm.assert_series_equal(per - ser, expected) + tm.assert_series_equal(ser - per, -1 * expected) + + s2 = Series( + [Period("2015-01-05", freq="D"), Period("2015-01-04", freq="D")], + name="xxx", + ) + assert s2.dtype == "Period[D]" + + expected = Series([4 * off, 2 * off], name="xxx", dtype=object) + tm.assert_series_equal(s2 - ser, expected) + tm.assert_series_equal(ser - s2, -1 * expected) + + +class TestPeriodIndexSeriesMethods: + """Test PeriodIndex and Period Series Ops consistency""" + + def _check(self, values, func, expected): + idx = PeriodIndex(values) + result = func(idx) + tm.assert_equal(result, expected) + + ser = Series(values) + result = func(ser) + + exp = Series(expected, name=values.name) + tm.assert_series_equal(result, exp) + + def test_pi_ops(self): + idx = PeriodIndex( + ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx" + ) + + expected = PeriodIndex( + ["2011-03", "2011-04", "2011-05", "2011-06"], freq="M", name="idx" + ) + + self._check(idx, lambda x: x + 2, expected) + self._check(idx, lambda x: 2 + x, expected) + + self._check(idx + 2, lambda x: x - 2, idx) + + result = idx - Period("2011-01", freq="M") + off = idx.freq + exp = pd.Index([0 * off, 1 * off, 2 * off, 3 * off], name="idx") + tm.assert_index_equal(result, exp) + + result = Period("2011-01", freq="M") - idx + exp = pd.Index([0 * off, -1 * off, -2 * off, -3 * off], name="idx") + tm.assert_index_equal(result, exp) + + @pytest.mark.parametrize("ng", ["str", 1.5]) + @pytest.mark.parametrize( + "func", + [ + lambda obj, ng: obj + ng, + lambda obj, ng: ng + obj, + lambda obj, ng: obj - ng, + lambda obj, ng: ng - obj, + lambda obj, ng: np.add(obj, ng), + lambda obj, ng: np.add(ng, obj), + lambda obj, ng: 
+
+    @pytest.mark.parametrize("ng", ["str", 1.5])
+    @pytest.mark.parametrize(
+        "func",
+        [
+            lambda obj, ng: obj + ng,
+            lambda obj, ng: ng + obj,
+            lambda obj, ng: obj - ng,
+            lambda obj, ng: ng - obj,
+            lambda obj, ng: np.add(obj, ng),
+            lambda obj, ng: np.add(ng, obj),
+            lambda obj, ng: np.subtract(obj, ng),
+            lambda obj, ng: np.subtract(ng, obj),
+        ],
+    )
+    def test_parr_ops_errors(self, ng, func, box_with_array):
+        idx = PeriodIndex(
+            ["2011-01", "2011-02", "2011-03", "2011-04"], freq="M", name="idx"
+        )
+        obj = tm.box_expected(idx, box_with_array)
+        msg = "|".join(
+            [
+                r"unsupported operand type\(s\)",
+                "can only concatenate",
+                r"must be str",
+                "object to str implicitly",
+            ]
+        )
+
+        with pytest.raises(TypeError, match=msg):
+            func(obj, ng)
+
+    def test_pi_ops_nat(self):
+        idx = PeriodIndex(
+            ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
+        )
+        expected = PeriodIndex(
+            ["2011-03", "2011-04", "NaT", "2011-06"], freq="M", name="idx"
+        )
+
+        self._check(idx, lambda x: x + 2, expected)
+        self._check(idx, lambda x: 2 + x, expected)
+        self._check(idx, lambda x: np.add(x, 2), expected)
+
+        self._check(idx + 2, lambda x: x - 2, idx)
+        self._check(idx + 2, lambda x: np.subtract(x, 2), idx)
+
+        # freq with mult
+        idx = PeriodIndex(
+            ["2011-01", "2011-02", "NaT", "2011-04"], freq="2M", name="idx"
+        )
+        expected = PeriodIndex(
+            ["2011-07", "2011-08", "NaT", "2011-10"], freq="2M", name="idx"
+        )
+
+        self._check(idx, lambda x: x + 3, expected)
+        self._check(idx, lambda x: 3 + x, expected)
+        self._check(idx, lambda x: np.add(x, 3), expected)
+
+        self._check(idx + 3, lambda x: x - 3, idx)
+        self._check(idx + 3, lambda x: np.subtract(x, 3), idx)
+
+    def test_pi_ops_array_int(self):
+        idx = PeriodIndex(
+            ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx"
+        )
+        f = lambda x: x + np.array([1, 2, 3, 4])
+        exp = PeriodIndex(
+            ["2011-02", "2011-04", "NaT", "2011-08"], freq="M", name="idx"
+        )
+        self._check(idx, f, exp)
+
+        f = lambda x: np.add(x, np.array([4, -1, 1, 2]))
+        exp = PeriodIndex(
+            ["2011-05", "2011-01", "NaT", "2011-06"], freq="M", name="idx"
+        )
+        self._check(idx, f, exp)
+
+        f = lambda x: x - np.array([1, 2, 3, 4])
+        exp = PeriodIndex(
+            ["2010-12", "2010-12", "NaT", "2010-12"], freq="M", name="idx"
+        )
+        self._check(idx, f, exp)
+
+        f = lambda x: np.subtract(x, np.array([3, 2, 3, -2]))
+        exp = PeriodIndex(
+            ["2010-10", "2010-12", "NaT", "2011-06"], freq="M", name="idx"
+        )
+        self._check(idx, f, exp)
+
+    def test_pi_ops_offset(self):
+        idx = PeriodIndex(
+            ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],
+            freq="D",
+            name="idx",
+        )
+        f = lambda x: x + pd.offsets.Day()
+        exp = PeriodIndex(
+            ["2011-01-02", "2011-02-02", "2011-03-02", "2011-04-02"],
+            freq="D",
+            name="idx",
+        )
+        self._check(idx, f, exp)
+
+        f = lambda x: x + pd.offsets.Day(2)
+        exp = PeriodIndex(
+            ["2011-01-03", "2011-02-03", "2011-03-03", "2011-04-03"],
+            freq="D",
+            name="idx",
+        )
+        self._check(idx, f, exp)
+
+        f = lambda x: x - pd.offsets.Day(2)
+        exp = PeriodIndex(
+            ["2010-12-30", "2011-01-30", "2011-02-27", "2011-03-30"],
+            freq="D",
+            name="idx",
+        )
+        self._check(idx, f, exp)
+
+    def test_pi_offset_errors(self):
+        idx = PeriodIndex(
+            ["2011-01-01", "2011-02-01", "2011-03-01", "2011-04-01"],
+            freq="D",
+            name="idx",
+        )
+        ser = Series(idx)
+
+        msg = (
+            "Cannot add/subtract timedelta-like from PeriodArray that is not "
+            "an integer multiple of the PeriodArray's freq"
+        )
+        for obj in [idx, ser]:
+            with pytest.raises(IncompatibleFrequency, match=msg):
+                obj + pd.offsets.Hour(2)
+
+            with pytest.raises(IncompatibleFrequency, match=msg):
+                pd.offsets.Hour(2) + obj
+
+            with pytest.raises(IncompatibleFrequency, match=msg):
+                obj - pd.offsets.Hour(2)
"2011-04"], freq="M", name="idx" + ) + + result = idx - Period("2012-01", freq="M") + off = idx.freq + exp = pd.Index([-12 * off, -11 * off, -10 * off, -9 * off], name="idx") + tm.assert_index_equal(result, exp) + + result = np.subtract(idx, Period("2012-01", freq="M")) + tm.assert_index_equal(result, exp) + + result = Period("2012-01", freq="M") - idx + exp = pd.Index([12 * off, 11 * off, 10 * off, 9 * off], name="idx") + tm.assert_index_equal(result, exp) + + result = np.subtract(Period("2012-01", freq="M"), idx) + tm.assert_index_equal(result, exp) + + exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx") + result = idx - Period("NaT", freq="M") + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + result = Period("NaT", freq="M") - idx + tm.assert_index_equal(result, exp) + assert result.freq == exp.freq + + def test_pi_sub_pdnat(self): + # GH#13071, GH#19389 + idx = PeriodIndex( + ["2011-01", "2011-02", "NaT", "2011-04"], freq="M", name="idx" + ) + exp = TimedeltaIndex([pd.NaT] * 4, name="idx") + tm.assert_index_equal(pd.NaT - idx, exp) + tm.assert_index_equal(idx - pd.NaT, exp) + + def test_pi_sub_period_nat(self): + # GH#13071 + idx = PeriodIndex( + ["2011-01", "NaT", "2011-03", "2011-04"], freq="M", name="idx" + ) + + result = idx - Period("2012-01", freq="M") + off = idx.freq + exp = pd.Index([-12 * off, pd.NaT, -10 * off, -9 * off], name="idx") + tm.assert_index_equal(result, exp) + + result = Period("2012-01", freq="M") - idx + exp = pd.Index([12 * off, pd.NaT, 10 * off, 9 * off], name="idx") + tm.assert_index_equal(result, exp) + + exp = TimedeltaIndex([np.nan, np.nan, np.nan, np.nan], name="idx") + tm.assert_index_equal(idx - Period("NaT", freq="M"), exp) + tm.assert_index_equal(Period("NaT", freq="M") - idx, exp) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_timedelta64.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_timedelta64.py new file mode 100644 index 0000000000000000000000000000000000000000..007d1e670e1e0bcb8317192b88aa9eb800fdfa3a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/arithmetic/test_timedelta64.py @@ -0,0 +1,2173 @@ +# Arithmetic tests for DataFrame/Series/Index/Array classes that should +# behave identically. +from datetime import ( + datetime, + timedelta, +) + +import numpy as np +import pytest + +from pandas.errors import ( + OutOfBoundsDatetime, + PerformanceWarning, +) + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + NaT, + Series, + Timedelta, + TimedeltaIndex, + Timestamp, + offsets, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays import NumpyExtensionArray +from pandas.tests.arithmetic.common import ( + assert_invalid_addsub_type, + assert_invalid_comparison, + get_upcast_box, +) + + +def assert_dtype(obj, expected_dtype): + """ + Helper to check the dtype for a Series, Index, or single-column DataFrame. + """ + dtype = tm.get_dtype(obj) + + assert dtype == expected_dtype + + +def get_expected_name(box, names): + if box is DataFrame: + # Since we are operating with a DataFrame and a non-DataFrame, + # the non-DataFrame is cast to Series and its name ignored. 
+        exname = names[0]
+    elif box in [tm.to_array, pd.array]:
+        exname = names[1]
+    else:
+        exname = names[2]
+    return exname
+
+
+# ------------------------------------------------------------------
+# Timedelta64[ns] dtype Comparisons
+
+
+class TestTimedelta64ArrayLikeComparisons:
+    # Comparison tests for timedelta64[ns] vectors fully parametrized over
+    # DataFrame/Series/TimedeltaIndex/TimedeltaArray.  Ideally all comparison
+    # tests will eventually end up here.
+
+    def test_compare_timedelta64_zerodim(self, box_with_array):
+        # GH#26689 should unbox when comparing with zerodim array
+        box = box_with_array
+        xbox = box_with_array if box_with_array not in [Index, pd.array] else np.ndarray
+
+        tdi = timedelta_range("2h", periods=4)
+        other = np.array(tdi.to_numpy()[0])
+
+        tdi = tm.box_expected(tdi, box)
+        res = tdi <= other
+        expected = np.array([True, False, False, False])
+        expected = tm.box_expected(expected, xbox)
+        tm.assert_equal(res, expected)
+
+    @pytest.mark.parametrize(
+        "td_scalar",
+        [
+            timedelta(days=1),
+            Timedelta(days=1),
+            Timedelta(days=1).to_timedelta64(),
+            offsets.Hour(24),
+        ],
+    )
+    def test_compare_timedeltalike_scalar(self, box_with_array, td_scalar):
+        # regression test for GH#5963
+        box = box_with_array
+        xbox = box if box not in [Index, pd.array] else np.ndarray
+
+        ser = Series([timedelta(days=1), timedelta(days=2)])
+        ser = tm.box_expected(ser, box)
+        actual = ser > td_scalar
+        expected = Series([False, True])
+        expected = tm.box_expected(expected, xbox)
+        tm.assert_equal(actual, expected)
+
+    @pytest.mark.parametrize(
+        "invalid",
+        [
+            345600000000000,
+            "a",
+            Timestamp("2021-01-01"),
+            Timestamp("2021-01-01").now("UTC"),
+            Timestamp("2021-01-01").now().to_datetime64(),
+            Timestamp("2021-01-01").now().to_pydatetime(),
+            Timestamp("2021-01-01").date(),
+            np.array(4),  # zero-dim mismatched dtype
+        ],
+    )
+    def test_td64_comparisons_invalid(self, box_with_array, invalid):
+        # GH#13624 for str
+        box = box_with_array
+
+        rng = timedelta_range("1 days", periods=10)
+        obj = tm.box_expected(rng, box)
+
+        assert_invalid_comparison(obj, invalid, box)
+
+    @pytest.mark.parametrize(
+        "other",
+        [
+            list(range(10)),
+            np.arange(10),
+            np.arange(10).astype(np.float32),
+            np.arange(10).astype(object),
+            pd.date_range("1970-01-01", periods=10, tz="UTC").array,
+            np.array(pd.date_range("1970-01-01", periods=10)),
+            list(pd.date_range("1970-01-01", periods=10)),
+            pd.date_range("1970-01-01", periods=10).astype(object),
+            pd.period_range("1971-01-01", freq="D", periods=10).array,
+            pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
+        ],
+    )
+    def test_td64arr_cmp_arraylike_invalid(self, other, box_with_array):
+        # We don't parametrize this over box_with_array because listlike
+        # other plays poorly with assert_invalid_comparison reversed checks
+
+        rng = timedelta_range("1 days", periods=10)._data
+        rng = tm.box_expected(rng, box_with_array)
+        assert_invalid_comparison(rng, other, box_with_array)
+
+    def test_td64arr_cmp_mixed_invalid(self):
+        rng = timedelta_range("1 days", periods=5)._data
+        other = np.array([0, 1, 2, rng[3], Timestamp("2021-01-01")])
+
+        result = rng == other
+        expected = np.array([False, False, False, True, False])
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = rng != other
+        tm.assert_numpy_array_equal(result, ~expected)
+
+        msg = "Invalid comparison between|Cannot compare type|not supported between"
+        with pytest.raises(TypeError, match=msg):
+            rng < other
+        with pytest.raises(TypeError, match=msg):
+            rng > other
+        with pytest.raises(TypeError, match=msg):
+            rng <= other
+        with pytest.raises(TypeError, match=msg):
+            rng >= other
+
+
+class TestTimedelta64ArrayComparisons:
+    # TODO: All of these need to be parametrized over box
+
+    @pytest.mark.parametrize("dtype", [None, object])
+    def test_comp_nat(self, dtype):
+        left = TimedeltaIndex([Timedelta("1 days"), NaT, Timedelta("3 days")])
+        right = TimedeltaIndex([NaT, NaT, Timedelta("3 days")])
+
+        lhs, rhs = left, right
+        if dtype is object:
+            lhs, rhs = left.astype(object), right.astype(object)
+
+        result = rhs == lhs
+        expected = np.array([False, False, True])
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = rhs != lhs
+        expected = np.array([True, True, False])
+        tm.assert_numpy_array_equal(result, expected)
+
+        expected = np.array([False, False, False])
+        tm.assert_numpy_array_equal(lhs == NaT, expected)
+        tm.assert_numpy_array_equal(NaT == rhs, expected)
+
+        expected = np.array([True, True, True])
+        tm.assert_numpy_array_equal(lhs != NaT, expected)
+        tm.assert_numpy_array_equal(NaT != lhs, expected)
+
+        expected = np.array([False, False, False])
+        tm.assert_numpy_array_equal(lhs < NaT, expected)
+        tm.assert_numpy_array_equal(NaT > lhs, expected)
+
+    @pytest.mark.parametrize(
+        "idx2",
+        [
+            TimedeltaIndex(
+                ["2 day", "2 day", NaT, NaT, "1 day 00:00:02", "5 days 00:00:03"]
+            ),
+            np.array(
+                [
+                    np.timedelta64(2, "D"),
+                    np.timedelta64(2, "D"),
+                    np.timedelta64("nat"),
+                    np.timedelta64("nat"),
+                    np.timedelta64(1, "D") + np.timedelta64(2, "s"),
+                    np.timedelta64(5, "D") + np.timedelta64(3, "s"),
+                ]
+            ),
+        ],
+    )
+    def test_comparisons_nat(self, idx2):
+        idx1 = TimedeltaIndex(
+            [
+                "1 day",
+                NaT,
+                "1 day 00:00:01",
+                NaT,
+                "1 day 00:00:01",
+                "5 day 00:00:03",
+            ]
+        )
+        # Check pd.NaT is handled the same as np.nan
+        result = idx1 < idx2
+        expected = np.array([True, False, False, False, True, False])
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = idx2 > idx1
+        expected = np.array([True, False, False, False, True, False])
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = idx1 <= idx2
+        expected = np.array([True, False, False, False, True, True])
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = idx2 >= idx1
+        expected = np.array([True, False, False, False, True, True])
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = idx1 == idx2
+        expected = np.array([False, False, False, False, False, True])
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = idx1 != idx2
+        expected = np.array([True, True, True, True, True, False])
+        tm.assert_numpy_array_equal(result, expected)
+
+    # TODO: better name
+    def test_comparisons_coverage(self):
+        rng = timedelta_range("1 days", periods=10)
+
+        result = rng < rng[3]
+        expected = np.array([True, True, True] + [False] * 7)
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = rng == list(rng)
+        exp = rng == rng
+        tm.assert_numpy_array_equal(result, exp)
+
+
+# ------------------------------------------------------------------
+# Timedelta64[ns] dtype Arithmetic Operations
+
+
+class TestTimedelta64ArithmeticUnsorted:
+    # Tests moved from type-specific test files but not
+    # yet sorted/parametrized/de-duplicated
name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "4h" + + for result in [idx / 2, np.divide(idx, 2)]: + assert isinstance(result, TimedeltaIndex) + exp = TimedeltaIndex(["1h", "2h", "3h", "4h", "5h"], freq="h", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "h" + + for result in [-idx, np.negative(idx)]: + assert isinstance(result, TimedeltaIndex) + exp = TimedeltaIndex( + ["-2h", "-4h", "-6h", "-8h", "-10h"], freq="-2h", name="x" + ) + tm.assert_index_equal(result, exp) + assert result.freq == "-2h" + + idx = TimedeltaIndex(["-2h", "-1h", "0h", "1h", "2h"], freq="h", name="x") + for result in [abs(idx), np.absolute(idx)]: + assert isinstance(result, TimedeltaIndex) + exp = TimedeltaIndex(["2h", "1h", "0h", "1h", "2h"], freq=None, name="x") + tm.assert_index_equal(result, exp) + assert result.freq is None + + def test_subtraction_ops(self): + # with datetimes/timedelta and tdi/dti + tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo") + dti = pd.date_range("20130101", periods=3, name="bar") + td = Timedelta("1 days") + dt = Timestamp("20130101") + + msg = "cannot subtract a datelike from a TimedeltaArray" + with pytest.raises(TypeError, match=msg): + tdi - dt + with pytest.raises(TypeError, match=msg): + tdi - dti + + msg = r"unsupported operand type\(s\) for -" + with pytest.raises(TypeError, match=msg): + td - dt + + msg = "(bad|unsupported) operand type for unary" + with pytest.raises(TypeError, match=msg): + td - dti + + result = dt - dti + expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"], name="bar") + tm.assert_index_equal(result, expected) + + result = dti - dt + expected = TimedeltaIndex(["0 days", "1 days", "2 days"], name="bar") + tm.assert_index_equal(result, expected) + + result = tdi - td + expected = TimedeltaIndex(["0 days", NaT, "1 days"], name="foo") + tm.assert_index_equal(result, expected) + + result = td - tdi + expected = TimedeltaIndex(["0 days", NaT, "-1 days"], name="foo") + tm.assert_index_equal(result, expected) + + result = dti - td + expected = DatetimeIndex( + ["20121231", "20130101", "20130102"], dtype="M8[ns]", freq="D", name="bar" + ) + tm.assert_index_equal(result, expected) + + result = dt - tdi + expected = DatetimeIndex( + ["20121231", NaT, "20121230"], dtype="M8[ns]", name="foo" + ) + tm.assert_index_equal(result, expected) + + def test_subtraction_ops_with_tz(self, box_with_array): + # check that dt/dti subtraction ops with tz are validated + dti = pd.date_range("20130101", periods=3) + dti = tm.box_expected(dti, box_with_array) + ts = Timestamp("20130101") + dt = ts.to_pydatetime() + dti_tz = pd.date_range("20130101", periods=3).tz_localize("US/Eastern") + dti_tz = tm.box_expected(dti_tz, box_with_array) + ts_tz = Timestamp("20130101").tz_localize("US/Eastern") + ts_tz2 = Timestamp("20130101").tz_localize("CET") + dt_tz = ts_tz.to_pydatetime() + td = Timedelta("1 days") + + def _check(result, expected): + assert result == expected + assert isinstance(result, Timedelta) + + # scalars + result = ts - ts + expected = Timedelta("0 days") + _check(result, expected) + + result = dt_tz - ts_tz + expected = Timedelta("0 days") + _check(result, expected) + + result = ts_tz - dt_tz + expected = Timedelta("0 days") + _check(result, expected) + + # tz mismatches + msg = "Cannot subtract tz-naive and tz-aware datetime-like objects." 
+
+    def test_subtraction_ops(self):
+        # with datetimes/timedelta and tdi/dti
+        tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo")
+        dti = pd.date_range("20130101", periods=3, name="bar")
+        td = Timedelta("1 days")
+        dt = Timestamp("20130101")
+
+        msg = "cannot subtract a datelike from a TimedeltaArray"
+        with pytest.raises(TypeError, match=msg):
+            tdi - dt
+        with pytest.raises(TypeError, match=msg):
+            tdi - dti
+
+        msg = r"unsupported operand type\(s\) for -"
+        with pytest.raises(TypeError, match=msg):
+            td - dt
+
+        msg = "(bad|unsupported) operand type for unary"
+        with pytest.raises(TypeError, match=msg):
+            td - dti
+
+        result = dt - dti
+        expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"], name="bar")
+        tm.assert_index_equal(result, expected)
+
+        result = dti - dt
+        expected = TimedeltaIndex(["0 days", "1 days", "2 days"], name="bar")
+        tm.assert_index_equal(result, expected)
+
+        result = tdi - td
+        expected = TimedeltaIndex(["0 days", NaT, "1 days"], name="foo")
+        tm.assert_index_equal(result, expected)
+
+        result = td - tdi
+        expected = TimedeltaIndex(["0 days", NaT, "-1 days"], name="foo")
+        tm.assert_index_equal(result, expected)
+
+        result = dti - td
+        expected = DatetimeIndex(
+            ["20121231", "20130101", "20130102"], dtype="M8[ns]", freq="D", name="bar"
+        )
+        tm.assert_index_equal(result, expected)
+
+        result = dt - tdi
+        expected = DatetimeIndex(
+            ["20121231", NaT, "20121230"], dtype="M8[ns]", name="foo"
+        )
+        tm.assert_index_equal(result, expected)
+
+    def test_subtraction_ops_with_tz(self, box_with_array):
+        # check that dt/dti subtraction ops with tz are validated
+        dti = pd.date_range("20130101", periods=3)
+        dti = tm.box_expected(dti, box_with_array)
+        ts = Timestamp("20130101")
+        dt = ts.to_pydatetime()
+        dti_tz = pd.date_range("20130101", periods=3).tz_localize("US/Eastern")
+        dti_tz = tm.box_expected(dti_tz, box_with_array)
+        ts_tz = Timestamp("20130101").tz_localize("US/Eastern")
+        ts_tz2 = Timestamp("20130101").tz_localize("CET")
+        dt_tz = ts_tz.to_pydatetime()
+        td = Timedelta("1 days")
+
+        def _check(result, expected):
+            assert result == expected
+            assert isinstance(result, Timedelta)
+
+        # scalars
+        result = ts - ts
+        expected = Timedelta("0 days")
+        _check(result, expected)
+
+        result = dt_tz - ts_tz
+        expected = Timedelta("0 days")
+        _check(result, expected)
+
+        result = ts_tz - dt_tz
+        expected = Timedelta("0 days")
+        _check(result, expected)
+
+        # tz mismatches
+        msg = "Cannot subtract tz-naive and tz-aware datetime-like objects."
+        with pytest.raises(TypeError, match=msg):
+            dt_tz - ts
+        msg = "can't subtract offset-naive and offset-aware datetimes"
+        with pytest.raises(TypeError, match=msg):
+            dt_tz - dt
+        msg = "can't subtract offset-naive and offset-aware datetimes"
+        with pytest.raises(TypeError, match=msg):
+            dt - dt_tz
+        msg = "Cannot subtract tz-naive and tz-aware datetime-like objects."
+        with pytest.raises(TypeError, match=msg):
+            ts - dt_tz
+        with pytest.raises(TypeError, match=msg):
+            ts_tz2 - ts
+        with pytest.raises(TypeError, match=msg):
+            ts_tz2 - dt
+
+        msg = "Cannot subtract tz-naive and tz-aware"
+        # with dti
+        with pytest.raises(TypeError, match=msg):
+            dti - ts_tz
+        with pytest.raises(TypeError, match=msg):
+            dti_tz - ts
+
+        result = dti_tz - dt_tz
+        expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
+        expected = tm.box_expected(expected, box_with_array)
+        tm.assert_equal(result, expected)
+
+        result = dt_tz - dti_tz
+        expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
+        expected = tm.box_expected(expected, box_with_array)
+        tm.assert_equal(result, expected)
+
+        result = dti_tz - ts_tz
+        expected = TimedeltaIndex(["0 days", "1 days", "2 days"])
+        expected = tm.box_expected(expected, box_with_array)
+        tm.assert_equal(result, expected)
+
+        result = ts_tz - dti_tz
+        expected = TimedeltaIndex(["0 days", "-1 days", "-2 days"])
+        expected = tm.box_expected(expected, box_with_array)
+        tm.assert_equal(result, expected)
+
+        result = td - td
+        expected = Timedelta("0 days")
+        _check(result, expected)
+
+        result = dti_tz - td
+        expected = DatetimeIndex(
+            ["20121231", "20130101", "20130102"], tz="US/Eastern"
+        ).as_unit("ns")
+        expected = tm.box_expected(expected, box_with_array)
+        tm.assert_equal(result, expected)
+
+    def test_dti_tdi_numeric_ops(self):
+        # These are normally union/diff set-like ops
+        tdi = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo")
+        dti = pd.date_range("20130101", periods=3, name="bar")
+
+        result = tdi - tdi
+        expected = TimedeltaIndex(["0 days", NaT, "0 days"], name="foo")
+        tm.assert_index_equal(result, expected)
+
+        result = tdi + tdi
+        expected = TimedeltaIndex(["2 days", NaT, "4 days"], name="foo")
+        tm.assert_index_equal(result, expected)
+
+        result = dti - tdi  # name will be reset
+        expected = DatetimeIndex(["20121231", NaT, "20130101"], dtype="M8[ns]")
+        tm.assert_index_equal(result, expected)
dtype=np.int64) + + # this is a union! + # FIXME: don't leave commented-out + # pytest.raises(TypeError, lambda : Index([1,2,3]) + tdi) + + result = tdi + dti # name will be reset + expected = DatetimeIndex(["20130102", NaT, "20130105"], dtype="M8[ns]") + tm.assert_index_equal(result, expected) + + result = dti + tdi # name will be reset + expected = DatetimeIndex(["20130102", NaT, "20130105"], dtype="M8[ns]") + tm.assert_index_equal(result, expected) + + result = dt + td + expected = Timestamp("20130102") + assert result == expected + + result = td + dt + expected = Timestamp("20130102") + assert result == expected + + # TODO: Needs more informative name, probably split up into + # more targeted tests + @pytest.mark.parametrize("freq", ["D", "B"]) + def test_timedelta(self, freq): + index = pd.date_range("1/1/2000", periods=50, freq=freq) + + shifted = index + timedelta(1) + back = shifted + timedelta(-1) + back = back._with_freq("infer") + tm.assert_index_equal(index, back) + + if freq == "D": + expected = pd.tseries.offsets.Day(1) + assert index.freq == expected + assert shifted.freq == expected + assert back.freq == expected + else: # freq == 'B' + assert index.freq == pd.tseries.offsets.BusinessDay(1) + assert shifted.freq is None + assert back.freq == pd.tseries.offsets.BusinessDay(1) + + result = index - timedelta(1) + expected = index + timedelta(-1) + tm.assert_index_equal(result, expected) + + def test_timedelta_tick_arithmetic(self): + # GH#4134, buggy with timedeltas + rng = pd.date_range("2013", "2014") + s = Series(rng) + result1 = rng - offsets.Hour(1) + result2 = DatetimeIndex(s - np.timedelta64(100000000)) + result3 = rng - np.timedelta64(100000000) + result4 = DatetimeIndex(s - offsets.Hour(1)) + + assert result1.freq == rng.freq + result1 = result1._with_freq(None) + tm.assert_index_equal(result1, result4) + + assert result3.freq == rng.freq + result3 = result3._with_freq(None) + tm.assert_index_equal(result2, result3) + + def test_tda_add_sub_index(self): + # Check that TimedeltaArray defers to Index on arithmetic ops + tdi = TimedeltaIndex(["1 days", NaT, "2 days"]) + tda = tdi.array + + dti = pd.date_range("1999-12-31", periods=3, freq="D") + + result = tda + dti + expected = tdi + dti + tm.assert_index_equal(result, expected) + + result = tda + tdi + expected = tdi + tdi + tm.assert_index_equal(result, expected) + + result = tda - tdi + expected = tdi - tdi + tm.assert_index_equal(result, expected) + + def test_tda_add_dt64_object_array(self, box_with_array, tz_naive_fixture): + # Result should be cast back to DatetimeArray + box = box_with_array + + dti = pd.date_range("2016-01-01", periods=3, tz=tz_naive_fixture) + dti = dti._with_freq(None) + tdi = dti - dti + + obj = tm.box_expected(tdi, box) + other = tm.box_expected(dti, box) + + with tm.assert_produces_warning(PerformanceWarning): + result = obj + other.astype(object) + tm.assert_equal(result, other.astype(object)) + + # ------------------------------------------------------------- + # Binary operations TimedeltaIndex and timedelta-like + + def test_tdi_iadd_timedeltalike(self, two_hours, box_with_array): + # only test adding/sub offsets as + is now numeric + rng = timedelta_range("1 days", "10 days") + expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D") + + rng = tm.box_expected(rng, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + orig_rng = rng + rng += two_hours + tm.assert_equal(rng, expected) + if box_with_array is not Index: + # Check that operation is 
actually inplace + tm.assert_equal(orig_rng, expected) + + def test_tdi_isub_timedeltalike(self, two_hours, box_with_array): + # only test adding/sub offsets as - is now numeric + rng = timedelta_range("1 days", "10 days") + expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00") + + rng = tm.box_expected(rng, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + orig_rng = rng + rng -= two_hours + tm.assert_equal(rng, expected) + if box_with_array is not Index: + # Check that operation is actually inplace + tm.assert_equal(orig_rng, expected) + + # ------------------------------------------------------------- + + def test_tdi_ops_attributes(self): + rng = timedelta_range("2 days", periods=5, freq="2D", name="x") + + result = rng + 1 * rng.freq + exp = timedelta_range("4 days", periods=5, freq="2D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "2D" + + result = rng - 2 * rng.freq + exp = timedelta_range("-2 days", periods=5, freq="2D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "2D" + + result = rng * 2 + exp = timedelta_range("4 days", periods=5, freq="4D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "4D" + + result = rng / 2 + exp = timedelta_range("1 days", periods=5, freq="D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "D" + + result = -rng + exp = timedelta_range("-2 days", periods=5, freq="-2D", name="x") + tm.assert_index_equal(result, exp) + assert result.freq == "-2D" + + rng = timedelta_range("-2 days", periods=5, freq="D", name="x") + + result = abs(rng) + exp = TimedeltaIndex( + ["2 days", "1 days", "0 days", "1 days", "2 days"], name="x" + ) + tm.assert_index_equal(result, exp) + assert result.freq is None + + +class TestAddSubNaTMasking: + # TODO: parametrize over boxes + + @pytest.mark.parametrize("str_ts", ["1950-01-01", "1980-01-01"]) + def test_tdarr_add_timestamp_nat_masking(self, box_with_array, str_ts): + # GH#17991 checking for overflow-masking with NaT + tdinat = pd.to_timedelta(["24658 days 11:15:00", "NaT"]) + tdobj = tm.box_expected(tdinat, box_with_array) + + ts = Timestamp(str_ts) + ts_variants = [ + ts, + ts.to_pydatetime(), + ts.to_datetime64().astype("datetime64[ns]"), + ts.to_datetime64().astype("datetime64[D]"), + ] + + for variant in ts_variants: + res = tdobj + variant + if box_with_array is DataFrame: + assert res.iloc[1, 1] is NaT + else: + assert res[1] is NaT + + def test_tdi_add_overflow(self): + # See GH#14068 + # preliminary test scalar analogue of vectorized tests below + # TODO: Make raised error message more informative and test + with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"): + pd.to_timedelta(106580, "D") + Timestamp("2000") + with pytest.raises(OutOfBoundsDatetime, match="10155196800000000000"): + Timestamp("2000") + pd.to_timedelta(106580, "D") + + _NaT = NaT._value + 1 + msg = "Overflow in int64 addition" + with pytest.raises(OverflowError, match=msg): + pd.to_timedelta([106580], "D") + Timestamp("2000") + with pytest.raises(OverflowError, match=msg): + Timestamp("2000") + pd.to_timedelta([106580], "D") + with pytest.raises(OverflowError, match=msg): + pd.to_timedelta([_NaT]) - Timedelta("1 days") + with pytest.raises(OverflowError, match=msg): + pd.to_timedelta(["5 days", _NaT]) - Timedelta("1 days") + with pytest.raises(OverflowError, match=msg): + ( + pd.to_timedelta([_NaT, "5 days", "1 hours"]) + - pd.to_timedelta(["7 seconds", _NaT, "4 hours"]) + ) + + # These should not 
overflow! + exp = TimedeltaIndex([NaT]) + result = pd.to_timedelta([NaT]) - Timedelta("1 days") + tm.assert_index_equal(result, exp) + + exp = TimedeltaIndex(["4 days", NaT]) + result = pd.to_timedelta(["5 days", NaT]) - Timedelta("1 days") + tm.assert_index_equal(result, exp) + + exp = TimedeltaIndex([NaT, NaT, "5 hours"]) + result = pd.to_timedelta([NaT, "5 days", "1 hours"]) + pd.to_timedelta( + ["7 seconds", NaT, "4 hours"] + ) + tm.assert_index_equal(result, exp) + + +class TestTimedeltaArraylikeAddSubOps: + # Tests for timedelta64[ns] __add__, __sub__, __radd__, __rsub__ + + def test_sub_nat_retain_unit(self): + ser = pd.to_timedelta(Series(["00:00:01"])).astype("m8[s]") + + result = ser - NaT + expected = Series([NaT], dtype="m8[s]") + tm.assert_series_equal(result, expected) + + # TODO: moved from tests.indexes.timedeltas.test_arithmetic; needs + # parametrization+de-duplication + def test_timedelta_ops_with_missing_values(self): + # setup + s1 = pd.to_timedelta(Series(["00:00:01"])) + s2 = pd.to_timedelta(Series(["00:00:02"])) + + sn = pd.to_timedelta(Series([NaT], dtype="m8[ns]")) + + df1 = DataFrame(["00:00:01"]).apply(pd.to_timedelta) + df2 = DataFrame(["00:00:02"]).apply(pd.to_timedelta) + + dfn = DataFrame([NaT._value]).apply(pd.to_timedelta) + + scalar1 = pd.to_timedelta("00:00:01") + scalar2 = pd.to_timedelta("00:00:02") + timedelta_NaT = pd.to_timedelta("NaT") + + actual = scalar1 + scalar1 + assert actual == scalar2 + actual = scalar2 - scalar1 + assert actual == scalar1 + + actual = s1 + s1 + tm.assert_series_equal(actual, s2) + actual = s2 - s1 + tm.assert_series_equal(actual, s1) + + actual = s1 + scalar1 + tm.assert_series_equal(actual, s2) + actual = scalar1 + s1 + tm.assert_series_equal(actual, s2) + actual = s2 - scalar1 + tm.assert_series_equal(actual, s1) + actual = -scalar1 + s2 + tm.assert_series_equal(actual, s1) + + actual = s1 + timedelta_NaT + tm.assert_series_equal(actual, sn) + actual = timedelta_NaT + s1 + tm.assert_series_equal(actual, sn) + actual = s1 - timedelta_NaT + tm.assert_series_equal(actual, sn) + actual = -timedelta_NaT + s1 + tm.assert_series_equal(actual, sn) + + msg = "unsupported operand type" + with pytest.raises(TypeError, match=msg): + s1 + np.nan + with pytest.raises(TypeError, match=msg): + np.nan + s1 + with pytest.raises(TypeError, match=msg): + s1 - np.nan + with pytest.raises(TypeError, match=msg): + -np.nan + s1 + + actual = s1 + NaT + tm.assert_series_equal(actual, sn) + actual = s2 - NaT + tm.assert_series_equal(actual, sn) + + actual = s1 + df1 + tm.assert_frame_equal(actual, df2) + actual = s2 - df1 + tm.assert_frame_equal(actual, df1) + actual = df1 + s1 + tm.assert_frame_equal(actual, df2) + actual = df2 - s1 + tm.assert_frame_equal(actual, df1) + + actual = df1 + df1 + tm.assert_frame_equal(actual, df2) + actual = df2 - df1 + tm.assert_frame_equal(actual, df1) + + actual = df1 + scalar1 + tm.assert_frame_equal(actual, df2) + actual = df2 - scalar1 + tm.assert_frame_equal(actual, df1) + + actual = df1 + timedelta_NaT + tm.assert_frame_equal(actual, dfn) + actual = df1 - timedelta_NaT + tm.assert_frame_equal(actual, dfn) + + msg = "cannot subtract a datelike from|unsupported operand type" + with pytest.raises(TypeError, match=msg): + df1 + np.nan + with pytest.raises(TypeError, match=msg): + df1 - np.nan + + actual = df1 + NaT # NaT is datetime, not timedelta + tm.assert_frame_equal(actual, dfn) + actual = df1 - NaT + tm.assert_frame_equal(actual, dfn) + + # TODO: moved from tests.series.test_operators, needs splitting, 
cleanup, + # de-duplication, box-parametrization... + def test_operators_timedelta64(self): + # series ops + v1 = pd.date_range("2012-1-1", periods=3, freq="D") + v2 = pd.date_range("2012-1-2", periods=3, freq="D") + rs = Series(v2) - Series(v1) + xp = Series(1e9 * 3600 * 24, rs.index).astype("int64").astype("timedelta64[ns]") + tm.assert_series_equal(rs, xp) + assert rs.dtype == "timedelta64[ns]" + + df = DataFrame({"A": v1}) + td = Series([timedelta(days=i) for i in range(3)]) + assert td.dtype == "timedelta64[ns]" + + # series on the rhs + result = df["A"] - df["A"].shift() + assert result.dtype == "timedelta64[ns]" + + result = df["A"] + td + assert result.dtype == "M8[ns]" + + # scalar Timestamp on rhs + maxa = df["A"].max() + assert isinstance(maxa, Timestamp) + + resultb = df["A"] - df["A"].max() + assert resultb.dtype == "timedelta64[ns]" + + # timestamp on lhs + result = resultb + df["A"] + values = [Timestamp("20111230"), Timestamp("20120101"), Timestamp("20120103")] + expected = Series(values, dtype="M8[ns]", name="A") + tm.assert_series_equal(result, expected) + + # datetimes on rhs + result = df["A"] - datetime(2001, 1, 1) + expected = Series([timedelta(days=4017 + i) for i in range(3)], name="A") + tm.assert_series_equal(result, expected) + assert result.dtype == "m8[ns]" + + d = datetime(2001, 1, 1, 3, 4) + resulta = df["A"] - d + assert resulta.dtype == "m8[ns]" + + # roundtrip + resultb = resulta + d + tm.assert_series_equal(df["A"], resultb) + + # timedeltas on rhs + td = timedelta(days=1) + resulta = df["A"] + td + resultb = resulta - td + tm.assert_series_equal(resultb, df["A"]) + assert resultb.dtype == "M8[ns]" + + # roundtrip + td = timedelta(minutes=5, seconds=3) + resulta = df["A"] + td + resultb = resulta - td + tm.assert_series_equal(df["A"], resultb) + assert resultb.dtype == "M8[ns]" + + # inplace + value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1)) + rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1)) + assert rs[2] == value + + def test_timedelta64_ops_nat(self): + # GH 11349 + timedelta_series = Series([NaT, Timedelta("1s")]) + nat_series_dtype_timedelta = Series([NaT, NaT], dtype="timedelta64[ns]") + single_nat_dtype_timedelta = Series([NaT], dtype="timedelta64[ns]") + + # subtraction + tm.assert_series_equal(timedelta_series - NaT, nat_series_dtype_timedelta) + tm.assert_series_equal(-NaT + timedelta_series, nat_series_dtype_timedelta) + + tm.assert_series_equal( + timedelta_series - single_nat_dtype_timedelta, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + -single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta + ) + + # addition + tm.assert_series_equal( + nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta + ) + + tm.assert_series_equal( + nat_series_dtype_timedelta + single_nat_dtype_timedelta, + nat_series_dtype_timedelta, + ) + tm.assert_series_equal( + single_nat_dtype_timedelta + nat_series_dtype_timedelta, + nat_series_dtype_timedelta, + ) + + tm.assert_series_equal(timedelta_series + NaT, nat_series_dtype_timedelta) + tm.assert_series_equal(NaT + timedelta_series, nat_series_dtype_timedelta) + + tm.assert_series_equal( + timedelta_series + single_nat_dtype_timedelta, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + single_nat_dtype_timedelta + timedelta_series, nat_series_dtype_timedelta + ) + + tm.assert_series_equal( + nat_series_dtype_timedelta + NaT, nat_series_dtype_timedelta + ) + 
tm.assert_series_equal( + NaT + nat_series_dtype_timedelta, nat_series_dtype_timedelta + ) + + tm.assert_series_equal( + nat_series_dtype_timedelta + single_nat_dtype_timedelta, + nat_series_dtype_timedelta, + ) + tm.assert_series_equal( + single_nat_dtype_timedelta + nat_series_dtype_timedelta, + nat_series_dtype_timedelta, + ) + + # multiplication + tm.assert_series_equal( + nat_series_dtype_timedelta * 1.0, nat_series_dtype_timedelta + ) + tm.assert_series_equal( + 1.0 * nat_series_dtype_timedelta, nat_series_dtype_timedelta + ) + + tm.assert_series_equal(timedelta_series * 1, timedelta_series) + tm.assert_series_equal(1 * timedelta_series, timedelta_series) + + tm.assert_series_equal(timedelta_series * 1.5, Series([NaT, Timedelta("1.5s")])) + tm.assert_series_equal(1.5 * timedelta_series, Series([NaT, Timedelta("1.5s")])) + + tm.assert_series_equal(timedelta_series * np.nan, nat_series_dtype_timedelta) + tm.assert_series_equal(np.nan * timedelta_series, nat_series_dtype_timedelta) + + # division + tm.assert_series_equal(timedelta_series / 2, Series([NaT, Timedelta("0.5s")])) + tm.assert_series_equal(timedelta_series / 2.0, Series([NaT, Timedelta("0.5s")])) + tm.assert_series_equal(timedelta_series / np.nan, nat_series_dtype_timedelta) + + # ------------------------------------------------------------- + # Binary operations td64 arraylike and datetime-like + + @pytest.mark.parametrize("cls", [Timestamp, datetime, np.datetime64]) + def test_td64arr_add_sub_datetimelike_scalar( + self, cls, box_with_array, tz_naive_fixture + ): + # GH#11925, GH#29558, GH#23215 + tz = tz_naive_fixture + + dt_scalar = Timestamp("2012-01-01", tz=tz) + if cls is datetime: + ts = dt_scalar.to_pydatetime() + elif cls is np.datetime64: + if tz_naive_fixture is not None: + pytest.skip(f"{cls} doesn't support {tz_naive_fixture}") + ts = dt_scalar.to_datetime64() + else: + ts = dt_scalar + + tdi = timedelta_range("1 day", periods=3) + expected = pd.date_range("2012-01-02", periods=3, tz=tz) + + tdarr = tm.box_expected(tdi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + tm.assert_equal(ts + tdarr, expected) + tm.assert_equal(tdarr + ts, expected) + + expected2 = pd.date_range("2011-12-31", periods=3, freq="-1D", tz=tz) + expected2 = tm.box_expected(expected2, box_with_array) + + tm.assert_equal(ts - tdarr, expected2) + tm.assert_equal(ts + (-tdarr), expected2) + + msg = "cannot subtract a datelike" + with pytest.raises(TypeError, match=msg): + tdarr - ts + + def test_td64arr_add_datetime64_nat(self, box_with_array): + # GH#23215 + other = np.datetime64("NaT") + + tdi = timedelta_range("1 day", periods=3) + expected = DatetimeIndex(["NaT", "NaT", "NaT"], dtype="M8[ns]") + + tdser = tm.box_expected(tdi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + tm.assert_equal(tdser + other, expected) + tm.assert_equal(other + tdser, expected) + + def test_td64arr_sub_dt64_array(self, box_with_array): + dti = pd.date_range("2016-01-01", periods=3) + tdi = TimedeltaIndex(["-1 Day"] * 3) + dtarr = dti.values + expected = DatetimeIndex(dtarr) - tdi + + tdi = tm.box_expected(tdi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + msg = "cannot subtract a datelike from" + with pytest.raises(TypeError, match=msg): + tdi - dtarr + + # TimedeltaIndex.__rsub__ + result = dtarr - tdi + tm.assert_equal(result, expected) + + def test_td64arr_add_dt64_array(self, box_with_array): + dti = pd.date_range("2016-01-01", periods=3) + tdi = TimedeltaIndex(["-1 Day"] * 3) + 
dtarr = dti.values + expected = DatetimeIndex(dtarr) + tdi + + tdi = tm.box_expected(tdi, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = tdi + dtarr + tm.assert_equal(result, expected) + result = dtarr + tdi + tm.assert_equal(result, expected) + + # ------------------------------------------------------------------ + # Invalid __add__/__sub__ operations + + @pytest.mark.parametrize("pi_freq", ["D", "W", "Q", "h"]) + @pytest.mark.parametrize("tdi_freq", [None, "h"]) + def test_td64arr_sub_periodlike( + self, box_with_array, box_with_array2, tdi_freq, pi_freq + ): + # GH#20049 subtracting PeriodIndex should raise TypeError + tdi = TimedeltaIndex(["1 hours", "2 hours"], freq=tdi_freq) + dti = Timestamp("2018-03-07 17:16:40") + tdi + pi = dti.to_period(pi_freq) + per = pi[0] + + tdi = tm.box_expected(tdi, box_with_array) + pi = tm.box_expected(pi, box_with_array2) + msg = "cannot subtract|unsupported operand type" + with pytest.raises(TypeError, match=msg): + tdi - pi + + # GH#13078 subtraction of Period scalar not supported + with pytest.raises(TypeError, match=msg): + tdi - per + + @pytest.mark.parametrize( + "other", + [ + # GH#12624 for str case + "a", + # GH#19123 + 1, + 1.5, + np.array(2), + ], + ) + def test_td64arr_addsub_numeric_scalar_invalid(self, box_with_array, other): + # vector-like others are tested in test_td64arr_add_sub_numeric_arr_invalid + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + tdarr = tm.box_expected(tdser, box_with_array) + + assert_invalid_addsub_type(tdarr, other) + + @pytest.mark.parametrize( + "vec", + [ + np.array([1, 2, 3]), + Index([1, 2, 3]), + Series([1, 2, 3]), + DataFrame([[1, 2, 3]]), + ], + ids=lambda x: type(x).__name__, + ) + def test_td64arr_addsub_numeric_arr_invalid( + self, box_with_array, vec, any_real_numpy_dtype + ): + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + tdarr = tm.box_expected(tdser, box_with_array) + + vector = vec.astype(any_real_numpy_dtype) + assert_invalid_addsub_type(tdarr, vector) + + def test_td64arr_add_sub_int(self, box_with_array, one): + # Variants of `one` for #19012, deprecated GH#22535 + rng = timedelta_range("1 days 09:00:00", freq="h", periods=10) + tdarr = tm.box_expected(rng, box_with_array) + + msg = "Addition/subtraction of integers" + assert_invalid_addsub_type(tdarr, one, msg) + + # TODO: get inplace ops into assert_invalid_addsub_type + with pytest.raises(TypeError, match=msg): + tdarr += one + with pytest.raises(TypeError, match=msg): + tdarr -= one + + def test_td64arr_add_sub_integer_array(self, box_with_array): + # GH#19959, deprecated GH#22535 + # GH#22696 for DataFrame case, check that we don't dispatch to numpy + # implementation, which treats int64 as m8[ns] + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + rng = timedelta_range("1 days 09:00:00", freq="h", periods=3) + tdarr = tm.box_expected(rng, box) + other = tm.box_expected([4, 3, 2], xbox) + + msg = "Addition/subtraction of integers and integer-arrays" + assert_invalid_addsub_type(tdarr, other, msg) + + def test_td64arr_addsub_integer_array_no_freq(self, box_with_array): + # GH#19959 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + tdi = TimedeltaIndex(["1 Day", "NaT", "3 Hours"]) + tdarr = tm.box_expected(tdi, box) + other = tm.box_expected([14, -1, 16], xbox) + + msg = "Addition/subtraction of integers" + assert_invalid_addsub_type(tdarr, other, msg) + + # 
------------------------------------------------------------------ + # Operations with timedelta-like others + + def test_td64arr_add_sub_td64_array(self, box_with_array): + box = box_with_array + dti = pd.date_range("2016-01-01", periods=3) + tdi = dti - dti.shift(1) + tdarr = tdi.values + + expected = 2 * tdi + tdi = tm.box_expected(tdi, box) + expected = tm.box_expected(expected, box) + + result = tdi + tdarr + tm.assert_equal(result, expected) + result = tdarr + tdi + tm.assert_equal(result, expected) + + expected_sub = 0 * tdi + result = tdi - tdarr + tm.assert_equal(result, expected_sub) + result = tdarr - tdi + tm.assert_equal(result, expected_sub) + + def test_td64arr_add_sub_tdi(self, box_with_array, names): + # GH#17250 make sure result dtype is correct + # GH#19043 make sure names are propagated correctly + box = box_with_array + exname = get_expected_name(box, names) + + tdi = TimedeltaIndex(["0 days", "1 day"], name=names[1]) + tdi = np.array(tdi) if box in [tm.to_array, pd.array] else tdi + ser = Series([Timedelta(hours=3), Timedelta(hours=4)], name=names[0]) + expected = Series([Timedelta(hours=3), Timedelta(days=1, hours=4)], name=exname) + + ser = tm.box_expected(ser, box) + expected = tm.box_expected(expected, box) + + result = tdi + ser + tm.assert_equal(result, expected) + assert_dtype(result, "timedelta64[ns]") + + result = ser + tdi + tm.assert_equal(result, expected) + assert_dtype(result, "timedelta64[ns]") + + expected = Series( + [Timedelta(hours=-3), Timedelta(days=1, hours=-4)], name=exname + ) + expected = tm.box_expected(expected, box) + + result = tdi - ser + tm.assert_equal(result, expected) + assert_dtype(result, "timedelta64[ns]") + + result = ser - tdi + tm.assert_equal(result, -expected) + assert_dtype(result, "timedelta64[ns]") + + @pytest.mark.parametrize("tdnat", [np.timedelta64("NaT"), NaT]) + def test_td64arr_add_sub_td64_nat(self, box_with_array, tdnat): + # GH#18808, GH#23320 special handling for timedelta64("NaT") + box = box_with_array + tdi = TimedeltaIndex([NaT, Timedelta("1s")]) + expected = TimedeltaIndex(["NaT"] * 2) + + obj = tm.box_expected(tdi, box) + expected = tm.box_expected(expected, box) + + result = obj + tdnat + tm.assert_equal(result, expected) + result = tdnat + obj + tm.assert_equal(result, expected) + result = obj - tdnat + tm.assert_equal(result, expected) + result = tdnat - obj + tm.assert_equal(result, expected) + + def test_td64arr_add_timedeltalike(self, two_hours, box_with_array): + # only test adding/sub offsets as + is now numeric + # GH#10699 for Tick cases + box = box_with_array + rng = timedelta_range("1 days", "10 days") + expected = timedelta_range("1 days 02:00:00", "10 days 02:00:00", freq="D") + rng = tm.box_expected(rng, box) + expected = tm.box_expected(expected, box) + + result = rng + two_hours + tm.assert_equal(result, expected) + + result = two_hours + rng + tm.assert_equal(result, expected) + + def test_td64arr_sub_timedeltalike(self, two_hours, box_with_array): + # only test adding/sub offsets as - is now numeric + # GH#10699 for Tick cases + box = box_with_array + rng = timedelta_range("1 days", "10 days") + expected = timedelta_range("0 days 22:00:00", "9 days 22:00:00") + + rng = tm.box_expected(rng, box) + expected = tm.box_expected(expected, box) + + result = rng - two_hours + tm.assert_equal(result, expected) + + result = two_hours - rng + tm.assert_equal(result, -expected) + + # ------------------------------------------------------------------ + # __add__/__sub__ with DateOffsets and arrays of 
DateOffsets + + def test_td64arr_add_sub_offset_index(self, names, box_with_array): + # GH#18849, GH#19744 + box = box_with_array + exname = get_expected_name(box, names) + + tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0]) + other = Index([offsets.Hour(n=1), offsets.Minute(n=-2)], name=names[1]) + other = np.array(other) if box in [tm.to_array, pd.array] else other + + expected = TimedeltaIndex( + [tdi[n] + other[n] for n in range(len(tdi))], freq="infer", name=exname + ) + expected_sub = TimedeltaIndex( + [tdi[n] - other[n] for n in range(len(tdi))], freq="infer", name=exname + ) + + tdi = tm.box_expected(tdi, box) + expected = tm.box_expected(expected, box).astype(object, copy=False) + expected_sub = tm.box_expected(expected_sub, box).astype(object, copy=False) + + with tm.assert_produces_warning(PerformanceWarning): + res = tdi + other + tm.assert_equal(res, expected) + + with tm.assert_produces_warning(PerformanceWarning): + res2 = other + tdi + tm.assert_equal(res2, expected) + + with tm.assert_produces_warning(PerformanceWarning): + res_sub = tdi - other + tm.assert_equal(res_sub, expected_sub) + + def test_td64arr_add_sub_offset_array(self, box_with_array): + # GH#18849, GH#18824 + box = box_with_array + tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"]) + other = np.array([offsets.Hour(n=1), offsets.Minute(n=-2)]) + + expected = TimedeltaIndex( + [tdi[n] + other[n] for n in range(len(tdi))], freq="infer" + ) + expected_sub = TimedeltaIndex( + [tdi[n] - other[n] for n in range(len(tdi))], freq="infer" + ) + + tdi = tm.box_expected(tdi, box) + expected = tm.box_expected(expected, box).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res = tdi + other + tm.assert_equal(res, expected) + + with tm.assert_produces_warning(PerformanceWarning): + res2 = other + tdi + tm.assert_equal(res2, expected) + + expected_sub = tm.box_expected(expected_sub, box_with_array).astype(object) + with tm.assert_produces_warning(PerformanceWarning): + res_sub = tdi - other + tm.assert_equal(res_sub, expected_sub) + + def test_td64arr_with_offset_series(self, names, box_with_array): + # GH#18849 + box = box_with_array + box2 = Series if box in [Index, tm.to_array, pd.array] else box + exname = get_expected_name(box, names) + + tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"], name=names[0]) + other = Series([offsets.Hour(n=1), offsets.Minute(n=-2)], name=names[1]) + + expected_add = Series( + [tdi[n] + other[n] for n in range(len(tdi))], name=exname, dtype=object + ) + obj = tm.box_expected(tdi, box) + expected_add = tm.box_expected(expected_add, box2).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res = obj + other + tm.assert_equal(res, expected_add) + + with tm.assert_produces_warning(PerformanceWarning): + res2 = other + obj + tm.assert_equal(res2, expected_add) + + expected_sub = Series( + [tdi[n] - other[n] for n in range(len(tdi))], name=exname, dtype=object + ) + expected_sub = tm.box_expected(expected_sub, box2).astype(object) + + with tm.assert_produces_warning(PerformanceWarning): + res3 = obj - other + tm.assert_equal(res3, expected_sub) + + @pytest.mark.parametrize("obox", [np.array, Index, Series]) + def test_td64arr_addsub_anchored_offset_arraylike(self, obox, box_with_array): + # GH#18824 + tdi = TimedeltaIndex(["1 days 00:00:00", "3 days 04:00:00"]) + tdi = tm.box_expected(tdi, box_with_array) + + anchored = obox([offsets.MonthEnd(), offsets.Day(n=2)]) + + # addition/subtraction ops with 
anchored offsets should issue + # a PerformanceWarning and _then_ raise a TypeError. + msg = "has incorrect type|cannot add the type MonthEnd" + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + tdi + anchored + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + anchored + tdi + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + tdi - anchored + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + anchored - tdi + + # ------------------------------------------------------------------ + # Unsorted + + def test_td64arr_add_sub_object_array(self, box_with_array): + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + tdi = timedelta_range("1 day", periods=3, freq="D") + tdarr = tm.box_expected(tdi, box) + + other = np.array([Timedelta(days=1), offsets.Day(2), Timestamp("2000-01-04")]) + + with tm.assert_produces_warning(PerformanceWarning): + result = tdarr + other + + expected = Index( + [Timedelta(days=2), Timedelta(days=4), Timestamp("2000-01-07")] + ) + expected = tm.box_expected(expected, xbox).astype(object) + tm.assert_equal(result, expected) + + msg = "unsupported operand type|cannot subtract a datelike" + with pytest.raises(TypeError, match=msg): + with tm.assert_produces_warning(PerformanceWarning): + tdarr - other + + with tm.assert_produces_warning(PerformanceWarning): + result = other - tdarr + + expected = Index([Timedelta(0), Timedelta(0), Timestamp("2000-01-01")]) + expected = tm.box_expected(expected, xbox).astype(object) + tm.assert_equal(result, expected) + + +class TestTimedeltaArraylikeMulDivOps: + # Tests for timedelta64[ns] + # __mul__, __rmul__, __div__, __rdiv__, __floordiv__, __rfloordiv__ + + # ------------------------------------------------------------------ + # Multiplication + # organized with scalar others first, then array-like + + def test_td64arr_mul_int(self, box_with_array): + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + + result = idx * 1 + tm.assert_equal(result, idx) + + result = 1 * idx + tm.assert_equal(result, idx) + + def test_td64arr_mul_tdlike_scalar_raises(self, two_hours, box_with_array): + rng = timedelta_range("1 days", "10 days", name="foo") + rng = tm.box_expected(rng, box_with_array) + msg = "argument must be an integer|cannot use operands with types dtype" + with pytest.raises(TypeError, match=msg): + rng * two_hours + + def test_tdi_mul_int_array_zerodim(self, box_with_array): + rng5 = np.arange(5, dtype="int64") + idx = TimedeltaIndex(rng5) + expected = TimedeltaIndex(rng5 * 5) + + idx = tm.box_expected(idx, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = idx * np.array(5, dtype="int64") + tm.assert_equal(result, expected) + + def test_tdi_mul_int_array(self, box_with_array): + rng5 = np.arange(5, dtype="int64") + idx = TimedeltaIndex(rng5) + expected = TimedeltaIndex(rng5**2) + + idx = tm.box_expected(idx, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = idx * rng5 + tm.assert_equal(result, expected) + + def test_tdi_mul_int_series(self, box_with_array): + box = box_with_array + xbox = Series if box in [Index, tm.to_array, pd.array] else box + + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + expected = TimedeltaIndex(np.arange(5, dtype="int64") ** 2) + + idx = tm.box_expected(idx, box) + expected = 
tm.box_expected(expected, xbox) + + result = idx * Series(np.arange(5, dtype="int64")) + tm.assert_equal(result, expected) + + def test_tdi_mul_float_series(self, box_with_array): + box = box_with_array + xbox = Series if box in [Index, tm.to_array, pd.array] else box + + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box) + + rng5f = np.arange(5, dtype="float64") + expected = TimedeltaIndex(rng5f * (rng5f + 1.0)) + expected = tm.box_expected(expected, xbox) + + result = idx * Series(rng5f + 1.0) + tm.assert_equal(result, expected) + + # TODO: Put Series/DataFrame in others? + @pytest.mark.parametrize( + "other", + [ + np.arange(1, 11), + Index(np.arange(1, 11), np.int64), + Index(range(1, 11), np.uint64), + Index(range(1, 11), np.float64), + pd.RangeIndex(1, 11), + ], + ids=lambda x: type(x).__name__, + ) + def test_tdi_rmul_arraylike(self, other, box_with_array): + box = box_with_array + + tdi = TimedeltaIndex(["1 Day"] * 10) + expected = timedelta_range("1 days", "10 days")._with_freq(None) + + tdi = tm.box_expected(tdi, box) + xbox = get_upcast_box(tdi, other) + + expected = tm.box_expected(expected, xbox) + + result = other * tdi + tm.assert_equal(result, expected) + commute = tdi * other + tm.assert_equal(commute, expected) + + # ------------------------------------------------------------------ + # __div__, __rdiv__ + + def test_td64arr_div_nat_invalid(self, box_with_array): + # don't allow division by NaT (maybe could in the future) + rng = timedelta_range("1 days", "10 days", name="foo") + rng = tm.box_expected(rng, box_with_array) + + with pytest.raises(TypeError, match="unsupported operand type"): + rng / NaT + with pytest.raises(TypeError, match="Cannot divide NaTType by"): + NaT / rng + + dt64nat = np.datetime64("NaT", "ns") + msg = "|".join( + [ + # 'divide' on npdev as of 2021-12-18 + "ufunc '(true_divide|divide)' cannot use operands", + "cannot perform __r?truediv__", + "Cannot divide datetime64 by TimedeltaArray", + ] + ) + with pytest.raises(TypeError, match=msg): + rng / dt64nat + with pytest.raises(TypeError, match=msg): + dt64nat / rng + + def test_td64arr_div_td64nat(self, box_with_array): + # GH#23829 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + rng = timedelta_range("1 days", "10 days") + rng = tm.box_expected(rng, box) + + other = np.timedelta64("NaT") + + expected = np.array([np.nan] * 10) + expected = tm.box_expected(expected, xbox) + + result = rng / other + tm.assert_equal(result, expected) + + result = other / rng + tm.assert_equal(result, expected) + + def test_td64arr_div_int(self, box_with_array): + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + + result = idx / 1 + tm.assert_equal(result, idx) + + with pytest.raises(TypeError, match="Cannot divide"): + # GH#23829 + 1 / idx + + def test_td64arr_div_tdlike_scalar(self, two_hours, box_with_array): + # GH#20088, GH#22163 ensure DataFrame returns correct dtype + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + rng = timedelta_range("1 days", "10 days", name="foo") + expected = Index((np.arange(10) + 1) * 12, dtype=np.float64, name="foo") + + rng = tm.box_expected(rng, box) + expected = tm.box_expected(expected, xbox) + + result = rng / two_hours + tm.assert_equal(result, expected) + + result = two_hours / rng + expected = 1 / expected + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("m", [1, 3, 10]) + @pytest.mark.parametrize("unit", ["D", "h", "m", "s", "ms", 
"us", "ns"]) + def test_td64arr_div_td64_scalar(self, m, unit, box_with_array): + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + ser = Series([Timedelta(days=59)] * 3) + ser[2] = np.nan + flat = ser + ser = tm.box_expected(ser, box) + + # op + expected = Series([x / np.timedelta64(m, unit) for x in flat]) + expected = tm.box_expected(expected, xbox) + result = ser / np.timedelta64(m, unit) + tm.assert_equal(result, expected) + + # reverse op + expected = Series([Timedelta(np.timedelta64(m, unit)) / x for x in flat]) + expected = tm.box_expected(expected, xbox) + result = np.timedelta64(m, unit) / ser + tm.assert_equal(result, expected) + + def test_td64arr_div_tdlike_scalar_with_nat(self, two_hours, box_with_array): + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + rng = TimedeltaIndex(["1 days", NaT, "2 days"], name="foo") + expected = Index([12, np.nan, 24], dtype=np.float64, name="foo") + + rng = tm.box_expected(rng, box) + expected = tm.box_expected(expected, xbox) + + result = rng / two_hours + tm.assert_equal(result, expected) + + result = two_hours / rng + expected = 1 / expected + tm.assert_equal(result, expected) + + def test_td64arr_div_td64_ndarray(self, box_with_array): + # GH#22631 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + rng = TimedeltaIndex(["1 days", NaT, "2 days"]) + expected = Index([12, np.nan, 24], dtype=np.float64) + + rng = tm.box_expected(rng, box) + expected = tm.box_expected(expected, xbox) + + other = np.array([2, 4, 2], dtype="m8[h]") + result = rng / other + tm.assert_equal(result, expected) + + result = rng / tm.box_expected(other, box) + tm.assert_equal(result, expected) + + result = rng / other.astype(object) + tm.assert_equal(result, expected.astype(object)) + + result = rng / list(other) + tm.assert_equal(result, expected) + + # reversed op + expected = 1 / expected + result = other / rng + tm.assert_equal(result, expected) + + result = tm.box_expected(other, box) / rng + tm.assert_equal(result, expected) + + result = other.astype(object) / rng + tm.assert_equal(result, expected) + + result = list(other) / rng + tm.assert_equal(result, expected) + + def test_tdarr_div_length_mismatch(self, box_with_array): + rng = TimedeltaIndex(["1 days", NaT, "2 days"]) + mismatched = [1, 2, 3, 4] + + rng = tm.box_expected(rng, box_with_array) + msg = "Cannot divide vectors|Unable to coerce to Series" + for obj in [mismatched, mismatched[:2]]: + # one shorter, one longer + for other in [obj, np.array(obj), Index(obj)]: + with pytest.raises(ValueError, match=msg): + rng / other + with pytest.raises(ValueError, match=msg): + other / rng + + def test_td64_div_object_mixed_result(self, box_with_array): + # Case where having a NaT in the result instead of timedelta64("NaT") + # would be misleading + orig = timedelta_range("1 Day", periods=3).insert(1, NaT) + tdi = tm.box_expected(orig, box_with_array, transpose=False) + + other = np.array([orig[0], 1.5, 2.0, orig[2]], dtype=object) + other = tm.box_expected(other, box_with_array, transpose=False) + + res = tdi / other + + expected = Index([1.0, np.timedelta64("NaT", "ns"), orig[0], 1.5], dtype=object) + expected = tm.box_expected(expected, box_with_array, transpose=False) + if isinstance(expected, NumpyExtensionArray): + expected = expected.to_numpy() + tm.assert_equal(res, expected) + if box_with_array is DataFrame: + # We have a np.timedelta64(NaT), not pd.NaT + assert isinstance(res.iloc[1, 0], np.timedelta64) + + res = tdi // other + + 
expected = Index([1, np.timedelta64("NaT", "ns"), orig[0], 1], dtype=object) + expected = tm.box_expected(expected, box_with_array, transpose=False) + if isinstance(expected, NumpyExtensionArray): + expected = expected.to_numpy() + tm.assert_equal(res, expected) + if box_with_array is DataFrame: + # We have a np.timedelta64(NaT), not pd.NaT + assert isinstance(res.iloc[1, 0], np.timedelta64) + + # ------------------------------------------------------------------ + # __floordiv__, __rfloordiv__ + + def test_td64arr_floordiv_td64arr_with_nat( + self, box_with_array, using_array_manager + ): + # GH#35529 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + + left = Series([1000, 222330, 30], dtype="timedelta64[ns]") + right = Series([1000, 222330, None], dtype="timedelta64[ns]") + + left = tm.box_expected(left, box) + right = tm.box_expected(right, box) + + expected = np.array([1.0, 1.0, np.nan], dtype=np.float64) + expected = tm.box_expected(expected, xbox) + if box is DataFrame and using_array_manager: + # INFO(ArrayManager) floordiv returns integer, and ArrayManager + # performs ops column-wise and thus preserves int64 dtype for + # columns without missing values + expected[[0, 1]] = expected[[0, 1]].astype("int64") + + with tm.maybe_produces_warning( + RuntimeWarning, box is pd.array, check_stacklevel=False + ): + result = left // right + + tm.assert_equal(result, expected) + + # case that goes through __rfloordiv__ with arraylike + with tm.maybe_produces_warning( + RuntimeWarning, box is pd.array, check_stacklevel=False + ): + result = np.asarray(left) // right + tm.assert_equal(result, expected) + + @pytest.mark.filterwarnings("ignore:invalid value encountered:RuntimeWarning") + def test_td64arr_floordiv_tdscalar(self, box_with_array, scalar_td): + # GH#18831, GH#19125 + box = box_with_array + xbox = np.ndarray if box is pd.array else box + td = Timedelta("5m3s") # i.e. 
(scalar_td - 1sec) / 2 + + td1 = Series([td, td, NaT], dtype="m8[ns]") + td1 = tm.box_expected(td1, box, transpose=False) + + expected = Series([0, 0, np.nan]) + expected = tm.box_expected(expected, xbox, transpose=False) + + result = td1 // scalar_td + tm.assert_equal(result, expected) + + # Reversed op + expected = Series([2, 2, np.nan]) + expected = tm.box_expected(expected, xbox, transpose=False) + + result = scalar_td // td1 + tm.assert_equal(result, expected) + + # same thing but let's be explicit about calling __rfloordiv__ + result = td1.__rfloordiv__(scalar_td) + tm.assert_equal(result, expected) + + def test_td64arr_floordiv_int(self, box_with_array): + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + result = idx // 1 + tm.assert_equal(result, idx) + + pattern = "floor_divide cannot use operands|Cannot divide int by Timedelta*" + with pytest.raises(TypeError, match=pattern): + 1 // idx + + # ------------------------------------------------------------------ + # mod, divmod + # TODO: operations with timedelta-like arrays, numeric arrays, + # reversed ops + + def test_td64arr_mod_tdscalar(self, box_with_array, three_days): + tdi = timedelta_range("1 Day", "9 days") + tdarr = tm.box_expected(tdi, box_with_array) + + expected = TimedeltaIndex(["1 Day", "2 Days", "0 Days"] * 3) + expected = tm.box_expected(expected, box_with_array) + + result = tdarr % three_days + tm.assert_equal(result, expected) + + warn = None + if box_with_array is DataFrame and isinstance(three_days, pd.DateOffset): + warn = PerformanceWarning + # TODO: making expected be object here is a result of DataFrame.__divmod__ + # being defined in a naive way that does not dispatch to the underlying + # array's __divmod__ + expected = expected.astype(object) + + with tm.assert_produces_warning(warn): + result = divmod(tdarr, three_days) + + tm.assert_equal(result[1], expected) + tm.assert_equal(result[0], tdarr // three_days) + + def test_td64arr_mod_int(self, box_with_array): + tdi = timedelta_range("1 ns", "10 ns", periods=10) + tdarr = tm.box_expected(tdi, box_with_array) + + expected = TimedeltaIndex(["1 ns", "0 ns"] * 5) + expected = tm.box_expected(expected, box_with_array) + + result = tdarr % 2 + tm.assert_equal(result, expected) + + msg = "Cannot divide int by" + with pytest.raises(TypeError, match=msg): + 2 % tdarr + + result = divmod(tdarr, 2) + tm.assert_equal(result[1], expected) + tm.assert_equal(result[0], tdarr // 2) + + def test_td64arr_rmod_tdscalar(self, box_with_array, three_days): + tdi = timedelta_range("1 Day", "9 days") + tdarr = tm.box_expected(tdi, box_with_array) + + expected = ["0 Days", "1 Day", "0 Days"] + ["3 Days"] * 6 + expected = TimedeltaIndex(expected) + expected = tm.box_expected(expected, box_with_array) + + result = three_days % tdarr + tm.assert_equal(result, expected) + + result = divmod(three_days, tdarr) + tm.assert_equal(result[1], expected) + tm.assert_equal(result[0], three_days // tdarr) + + # ------------------------------------------------------------------ + # Operations with invalid others + + def test_td64arr_mul_tdscalar_invalid(self, box_with_array, scalar_td): + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + td1 = tm.box_expected(td1, box_with_array) + + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + pattern = "operate|unsupported|cannot|not supported" + with pytest.raises(TypeError, match=pattern): + td1 * scalar_td + 
with pytest.raises(TypeError, match=pattern): + scalar_td * td1 + + def test_td64arr_mul_too_short_raises(self, box_with_array): + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + msg = "|".join( + [ + "cannot use operands with types dtype", + "Cannot multiply with unequal lengths", + "Unable to coerce to Series", + ] + ) + with pytest.raises(TypeError, match=msg): + # length check before dtype check + idx * idx[:3] + with pytest.raises(ValueError, match=msg): + idx * np.array([1, 2]) + + def test_td64arr_mul_td64arr_raises(self, box_with_array): + idx = TimedeltaIndex(np.arange(5, dtype="int64")) + idx = tm.box_expected(idx, box_with_array) + msg = "cannot use operands with types dtype" + with pytest.raises(TypeError, match=msg): + idx * idx + + # ------------------------------------------------------------------ + # Operations with numeric others + + def test_td64arr_mul_numeric_scalar(self, box_with_array, one): + # GH#4521 + # divide/multiply by integers + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + expected = Series(["-59 Days", "-59 Days", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = tdser * (-one) + tm.assert_equal(result, expected) + result = (-one) * tdser + tm.assert_equal(result, expected) + + expected = Series(["118 Days", "118 Days", "NaT"], dtype="timedelta64[ns]") + expected = tm.box_expected(expected, box_with_array) + + result = tdser * (2 * one) + tm.assert_equal(result, expected) + result = (2 * one) * tdser + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)]) + def test_td64arr_div_numeric_scalar(self, box_with_array, two): + # GH#4521 + # divide/multiply by integers + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = tdser / two + tm.assert_equal(result, expected) + + with pytest.raises(TypeError, match="Cannot divide"): + two / tdser + + @pytest.mark.parametrize("two", [2, 2.0, np.array(2), np.array(2.0)]) + def test_td64arr_floordiv_numeric_scalar(self, box_with_array, two): + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + expected = Series(["29.5D", "29.5D", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + expected = tm.box_expected(expected, box_with_array) + + result = tdser // two + tm.assert_equal(result, expected) + + with pytest.raises(TypeError, match="Cannot divide"): + two // tdser + + @pytest.mark.parametrize( + "vector", + [np.array([20, 30, 40]), Index([20, 30, 40]), Series([20, 30, 40])], + ids=lambda x: type(x).__name__, + ) + def test_td64arr_rmul_numeric_array( + self, + box_with_array, + vector, + any_real_numpy_dtype, + ): + # GH#4521 + # divide/multiply by integers + + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + vector = vector.astype(any_real_numpy_dtype) + + expected = Series(["1180 Days", "1770 Days", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + xbox = get_upcast_box(tdser, vector) + + expected = tm.box_expected(expected, xbox) + + result = tdser * vector + tm.assert_equal(result, expected) + + result = vector * tdser + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "vector", + 
[np.array([20, 30, 40]), Index([20, 30, 40]), Series([20, 30, 40])], + ids=lambda x: type(x).__name__, + ) + def test_td64arr_div_numeric_array( + self, box_with_array, vector, any_real_numpy_dtype + ): + # GH#4521 + # divide/multiply by integers + + tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]") + vector = vector.astype(any_real_numpy_dtype) + + expected = Series(["2.95D", "1D 23h 12m", "NaT"], dtype="timedelta64[ns]") + + tdser = tm.box_expected(tdser, box_with_array) + xbox = get_upcast_box(tdser, vector) + expected = tm.box_expected(expected, xbox) + + result = tdser / vector + tm.assert_equal(result, expected) + + pattern = "|".join( + [ + "true_divide'? cannot use operands", + "cannot perform __div__", + "cannot perform __truediv__", + "unsupported operand", + "Cannot divide", + "ufunc 'divide' cannot use operands with types", + ] + ) + with pytest.raises(TypeError, match=pattern): + vector / tdser + + result = tdser / vector.astype(object) + if box_with_array is DataFrame: + expected = [tdser.iloc[0, n] / vector[n] for n in range(len(vector))] + expected = tm.box_expected(expected, xbox).astype(object) + # We specifically expect timedelta64("NaT") here, not pd.NA + msg = "The 'downcast' keyword in fillna" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected[2] = expected[2].fillna( + np.timedelta64("NaT", "ns"), downcast=False + ) + else: + expected = [tdser[n] / vector[n] for n in range(len(tdser))] + expected = [ + x if x is not NaT else np.timedelta64("NaT", "ns") for x in expected + ] + if xbox is tm.to_array: + expected = tm.to_array(expected).astype(object) + else: + expected = xbox(expected, dtype=object) + + tm.assert_equal(result, expected) + + with pytest.raises(TypeError, match=pattern): + vector.astype(object) / tdser + + def test_td64arr_mul_int_series(self, box_with_array, names): + # GH#19042 test for correct name attachment + box = box_with_array + exname = get_expected_name(box, names) + + tdi = TimedeltaIndex( + ["0days", "1day", "2days", "3days", "4days"], name=names[0] + ) + # TODO: Should we be parametrizing over types for `ser` too? + ser = Series([0, 1, 2, 3, 4], dtype=np.int64, name=names[1]) + + expected = Series( + ["0days", "1day", "4days", "9days", "16days"], + dtype="timedelta64[ns]", + name=exname, + ) + + tdi = tm.box_expected(tdi, box) + xbox = get_upcast_box(tdi, ser) + + expected = tm.box_expected(expected, xbox) + + result = ser * tdi + tm.assert_equal(result, expected) + + result = tdi * ser + tm.assert_equal(result, expected) + + # TODO: Should we be parametrizing over types for `ser` too? 
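+ # A minimal sketch of the division semantics exercised above
+ # (illustrative only; values match what these tests assert): dividing
+ # timedelta64 data by a numeric vector keeps the timedelta64 dtype,
+ # while the reverse division is undefined and raises TypeError.
+ #
+ # >>> tdser = Series(["59 Days", "59 Days", "NaT"], dtype="m8[ns]")
+ # >>> tdser / np.array([20, 30, 40])
+ # 0   2 days 22:48:00
+ # 1   1 days 23:12:00
+ # 2               NaT
+ # dtype: timedelta64[ns]
+ # >>> np.array([20, 30, 40]) / tdser  # raises TypeError ("Cannot divide ...")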
+ def test_float_series_rdiv_td64arr(self, box_with_array, names): + # GH#19042 test for correct name attachment + box = box_with_array + tdi = TimedeltaIndex( + ["0days", "1day", "2days", "3days", "4days"], name=names[0] + ) + ser = Series([1.5, 3, 4.5, 6, 7.5], dtype=np.float64, name=names[1]) + + xname = names[2] if box not in [tm.to_array, pd.array] else names[1] + expected = Series( + [tdi[n] / ser[n] for n in range(len(ser))], + dtype="timedelta64[ns]", + name=xname, + ) + + tdi = tm.box_expected(tdi, box) + xbox = get_upcast_box(tdi, ser) + expected = tm.box_expected(expected, xbox) + + result = ser.__rtruediv__(tdi) + if box is DataFrame: + assert result is NotImplemented + else: + tm.assert_equal(result, expected) + + def test_td64arr_all_nat_div_object_dtype_numeric(self, box_with_array): + # GH#39750 make sure we infer the result as td64 + tdi = TimedeltaIndex([NaT, NaT]) + + left = tm.box_expected(tdi, box_with_array) + right = np.array([2, 2.0], dtype=object) + + tdnat = np.timedelta64("NaT", "ns") + expected = Index([tdnat] * 2, dtype=object) + if box_with_array is not Index: + expected = tm.box_expected(expected, box_with_array).astype(object) + if box_with_array in [Series, DataFrame]: + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = expected.fillna(tdnat, downcast=False) # GH#18463 + + result = left / right + tm.assert_equal(result, expected) + + result = left // right + tm.assert_equal(result, expected) + + +class TestTimedelta64ArrayLikeArithmetic: + # Arithmetic tests for timedelta64[ns] vectors fully parametrized over + # DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all arithmetic + # tests will eventually end up here. + + def test_td64arr_pow_invalid(self, scalar_td, box_with_array): + td1 = Series([timedelta(minutes=5, seconds=3)] * 3) + td1.iloc[2] = np.nan + + td1 = tm.box_expected(td1, box_with_array) + + # check that we are getting a TypeError + # with 'operate' (from core/ops.py) for the ops that are not + # defined + pattern = "operate|unsupported|cannot|not supported" + with pytest.raises(TypeError, match=pattern): + scalar_td**td1 + + with pytest.raises(TypeError, match=pattern): + td1**scalar_td + + +def test_add_timestamp_to_timedelta(): + # GH: 35897 + timestamp = Timestamp("2021-01-01") + result = timestamp + timedelta_range("0s", "1s", periods=31) + expected = DatetimeIndex( + [ + timestamp + + ( + pd.to_timedelta("0.033333333s") * i + + pd.to_timedelta("0.000000001s") * divmod(i, 3)[0] + ) + for i in range(31) + ] + ) + tm.assert_index_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..38e4f7a15ee3586bee2735f223e1aca159b8fc34 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odf.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0d5faf376a7a13dced03a7762cb23369cd5e4c9 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odf.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odswriter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odswriter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91a0cb2cae1b5edc1fae1e9973bf97576fb5515b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_odswriter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_openpyxl.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_openpyxl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92d94943a2c21ea6bfbe3eb6034b867e815f1c74 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_openpyxl.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_readers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_readers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..460c6786164afdc39ecba1c8631fd6141243c211 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_readers.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_style.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_style.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..654b19ce137bbf0de22b92ced489f571e64802bb Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_style.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_writers.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_writers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e8ffbbd642ebd555aa9b2348a1abaa20bf93720 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_writers.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlrd.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlrd.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d4ef07f2f80dac953125ead2ad59d48c14f52b2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlrd.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlsxwriter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlsxwriter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..378bedb0b5aa2414e4224429ec9f3de190a341ef Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/__pycache__/test_xlsxwriter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_odf.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_odf.py new file mode 100644 index 0000000000000000000000000000000000000000..b5bb9b27258d86cda6e44aeae17a4cdba4157a43 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_odf.py @@ -0,0 +1,77 @@ +import functools + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows + +import pandas as pd +import pandas._testing as tm + +pytest.importorskip("odf") + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + +@pytest.fixture(autouse=True) +def cd_and_set_engine(monkeypatch, datapath): + func = functools.partial(pd.read_excel, engine="odf") + monkeypatch.setattr(pd, "read_excel", func) + monkeypatch.chdir(datapath("io", "data", "excel")) + + +def test_read_invalid_types_raises(): + # the invalid_value_type.ods file required manual editing + # of the included content.xml file + with pytest.raises(ValueError, match="Unrecognized type awesome_new_type"): + pd.read_excel("invalid_value_type.ods") + + +def test_read_writer_table(): + # Also test reading tables from a text OpenDocument file + # (.odt) + index = pd.Index(["Row 1", "Row 2", "Row 3"], name="Header") + expected = pd.DataFrame( + [[1, np.nan, 7], [2, np.nan, 8], [3, np.nan, 9]], + index=index, + columns=["Column 1", "Unnamed: 2", "Column 3"], + ) + + result = pd.read_excel("writertable.odt", sheet_name="Table1", index_col=0) + + tm.assert_frame_equal(result, expected) + + +def test_read_newlines_between_xml_elements_table(): + # GH#45598 + expected = pd.DataFrame( + [[1.0, 4.0, 7], [np.nan, np.nan, 8], [3.0, 6.0, 9]], + columns=["Column 1", "Column 2", "Column 3"], + ) + + result = pd.read_excel("test_newlines.ods") + + tm.assert_frame_equal(result, expected) + + +def test_read_unempty_cells(): + expected = pd.DataFrame( + [1, np.nan, 3, np.nan, 5], + columns=["Column 1"], + ) + + result = pd.read_excel("test_unempty_cells.ods") + + tm.assert_frame_equal(result, expected) + + +def test_read_cell_annotation(): + expected = pd.DataFrame( + ["test", np.nan, "test 3"], + columns=["Column 1"], + ) + + result = pd.read_excel("test_cell_annotation.ods") + + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_odswriter.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_odswriter.py new file mode 100644 index 0000000000000000000000000000000000000000..1c728ad801bc139c1ca1cd2e902884a5a2c91ffc --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_odswriter.py @@ -0,0 +1,106 @@ +from datetime import ( + date, + datetime, +) +import re + +import pytest + +from pandas.compat import is_platform_windows + +import pandas as pd +import pandas._testing as tm + +from pandas.io.excel import ExcelWriter + +odf = pytest.importorskip("odf") + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + +@pytest.fixture +def ext(): + return ".ods" + + +def test_write_append_mode_raises(ext): + msg = "Append mode is not supported with odf!"
+ + with tm.ensure_clean(ext) as f: + with pytest.raises(ValueError, match=msg): + ExcelWriter(f, engine="odf", mode="a") + + +@pytest.mark.parametrize("engine_kwargs", [None, {"kwarg": 1}]) +def test_engine_kwargs(ext, engine_kwargs): + # GH 42286 + # GH 43445 + # test for error: OpenDocumentSpreadsheet does not accept any arguments + with tm.ensure_clean(ext) as f: + if engine_kwargs is not None: + error = re.escape( + "OpenDocumentSpreadsheet() got an unexpected keyword argument 'kwarg'" + ) + with pytest.raises( + TypeError, + match=error, + ): + ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs) + else: + with ExcelWriter(f, engine="odf", engine_kwargs=engine_kwargs) as _: + pass + + +def test_book_and_sheets_consistent(ext): + # GH#45687 - Ensure sheets is updated if user modifies book + with tm.ensure_clean(ext) as f: + with ExcelWriter(f) as writer: + assert writer.sheets == {} + table = odf.table.Table(name="test_name") + writer.book.spreadsheet.addElement(table) + assert writer.sheets == {"test_name": table} + + +@pytest.mark.parametrize( + ["value", "cell_value_type", "cell_value_attribute", "cell_value"], + argvalues=[ + (True, "boolean", "boolean-value", "true"), + ("test string", "string", "string-value", "test string"), + (1, "float", "value", "1"), + (1.5, "float", "value", "1.5"), + ( + datetime(2010, 10, 10, 10, 10, 10), + "date", + "date-value", + "2010-10-10T10:10:10", + ), + (date(2010, 10, 10), "date", "date-value", "2010-10-10"), + ], +) +def test_cell_value_type(ext, value, cell_value_type, cell_value_attribute, cell_value): + # GH#54994 ODS: cell attributes should follow specification + # http://docs.oasis-open.org/office/v1.2/os/OpenDocument-v1.2-os-part1.html#refTable13 + from odf.namespaces import OFFICENS + from odf.table import ( + TableCell, + TableRow, + ) + + table_cell_name = TableCell().qname + + with tm.ensure_clean(ext) as f: + pd.DataFrame([[value]]).to_excel(f, header=False, index=False) + + with pd.ExcelFile(f) as wb: + sheet = wb._reader.get_sheet_by_index(0) + sheet_rows = sheet.getElementsByType(TableRow) + sheet_cells = [ + x + for x in sheet_rows[0].childNodes + if hasattr(x, "qname") and x.qname == table_cell_name + ] + + cell = sheet_cells[0] + assert cell.attributes.get((OFFICENS, "value-type")) == cell_value_type + assert cell.attributes.get((OFFICENS, cell_value_attribute)) == cell_value diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_openpyxl.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_openpyxl.py new file mode 100644 index 0000000000000000000000000000000000000000..e53b5830ec6a4b315165f4896aed27bdaadfbda6 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_openpyxl.py @@ -0,0 +1,432 @@ +import contextlib +from pathlib import Path +import re + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows + +import pandas as pd +from pandas import DataFrame +import pandas._testing as tm + +from pandas.io.excel import ( + ExcelWriter, + _OpenpyxlWriter, +) +from pandas.io.excel._openpyxl import OpenpyxlReader + +openpyxl = pytest.importorskip("openpyxl") + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + +@pytest.fixture +def ext(): + return ".xlsx" + + +def test_to_excel_styleconverter(): + from openpyxl import styles + + hstyle = { + "font": {"color": "00FF0000", "bold": True}, + "borders": {"top": "thin", "right": "thin", "bottom": "thin", "left": "thin"}, + "alignment": {"horizontal": "center", "vertical": "top"}, + "fill": {"patternType": "solid", "fgColor": {"rgb": "006666FF", "tint": 0.3}}, + "number_format": {"format_code": "0.00"}, + "protection": {"locked": True, "hidden": False}, + } + + font_color = styles.Color("00FF0000") + font = styles.Font(bold=True, color=font_color) + side = styles.Side(style=styles.borders.BORDER_THIN) + border = styles.Border(top=side, right=side, bottom=side, left=side) + alignment = styles.Alignment(horizontal="center", vertical="top") + fill_color = styles.Color(rgb="006666FF", tint=0.3) + fill = styles.PatternFill(patternType="solid", fgColor=fill_color) + + number_format = "0.00" + + protection = styles.Protection(locked=True, hidden=False) + + kw = _OpenpyxlWriter._convert_to_style_kwargs(hstyle) + assert kw["font"] == font + assert kw["border"] == border + assert kw["alignment"] == alignment + assert kw["fill"] == fill + assert kw["number_format"] == number_format + assert kw["protection"] == protection + + +def test_write_cells_merge_styled(ext): + from pandas.io.formats.excel import ExcelCell + + sheet_name = "merge_styled" + + sty_b1 = {"font": {"color": "00FF0000"}} + sty_a2 = {"font": {"color": "0000FF00"}} + + initial_cells = [ + ExcelCell(col=1, row=0, val=42, style=sty_b1), + ExcelCell(col=0, row=1, val=99, style=sty_a2), + ] + + sty_merged = {"font": {"color": "000000FF", "bold": True}} + sty_kwargs = _OpenpyxlWriter._convert_to_style_kwargs(sty_merged) + openpyxl_sty_merged = sty_kwargs["font"] + merge_cells = [ + ExcelCell( + col=0, row=0, val="pandas", mergestart=1, mergeend=1, style=sty_merged + ) + ] + + with tm.ensure_clean(ext) as path: + with _OpenpyxlWriter(path) as writer: + writer._write_cells(initial_cells, sheet_name=sheet_name) + writer._write_cells(merge_cells, sheet_name=sheet_name) + + wks = writer.sheets[sheet_name] + xcell_b1 = wks["B1"] + xcell_a2 = wks["A2"] + assert xcell_b1.font == openpyxl_sty_merged + assert xcell_a2.font == openpyxl_sty_merged + + +@pytest.mark.parametrize("iso_dates", [True, False]) +def test_engine_kwargs_write(ext, iso_dates): + # GH 42286 GH 43445 + engine_kwargs = {"iso_dates": iso_dates} + with tm.ensure_clean(ext) as f: + with ExcelWriter(f, engine="openpyxl", engine_kwargs=engine_kwargs) as writer: + assert writer.book.iso_dates == iso_dates + # ExcelWriter won't allow us to close without writing something + DataFrame().to_excel(writer) + + +def test_engine_kwargs_append_invalid(ext): + # GH 43445 + # test whether an invalid engine kwarg actually raises + with tm.ensure_clean(ext) as f: + DataFrame(["hello", "world"]).to_excel(f) + with pytest.raises( + TypeError, + match=re.escape( + "load_workbook() got an unexpected keyword argument 'apple_banana'" + ), + ): + with ExcelWriter( + f, engine="openpyxl", mode="a", engine_kwargs={"apple_banana": "fruit"} + ) as writer: + # ExcelWriter needs us to write something to close properly + DataFrame(["good"]).to_excel(writer, sheet_name="Sheet2") + + +@pytest.mark.parametrize("data_only, expected", [(True, 0), (False, "=1+1")]) +def test_engine_kwargs_append_data_only(ext, data_only, expected): + # GH 43445 + # tests whether the data_only engine_kwarg actually works well for + # openpyxl's load_workbook + with tm.ensure_clean(ext) as f: + DataFrame(["=1+1"]).to_excel(f) + with ExcelWriter( + f, engine="openpyxl", mode="a", engine_kwargs={"data_only": data_only} + ) as writer: + assert writer.sheets["Sheet1"]["B2"].value == expected + # ExcelWriter needs us to write something to close properly
+ DataFrame().to_excel(writer, sheet_name="Sheet2") + + # ensure that data_only also works for reading + # and that formulas/values roundtrip + assert ( + pd.read_excel( + f, + sheet_name="Sheet1", + engine="openpyxl", + engine_kwargs={"data_only": data_only}, + ).iloc[0, 1] + == expected + ) + + +@pytest.mark.parametrize("kwarg_name", ["read_only", "data_only"]) +@pytest.mark.parametrize("kwarg_value", [True, False]) +def test_engine_kwargs_append_reader(datapath, ext, kwarg_name, kwarg_value): + # GH 55027 + # test that `read_only` and `data_only` can be passed to + # `openpyxl.reader.excel.load_workbook` via `engine_kwargs` + filename = datapath("io", "data", "excel", "test1" + ext) + with contextlib.closing( + OpenpyxlReader(filename, engine_kwargs={kwarg_name: kwarg_value}) + ) as reader: + assert getattr(reader.book, kwarg_name) == kwarg_value + + +@pytest.mark.parametrize( + "mode,expected", [("w", ["baz"]), ("a", ["foo", "bar", "baz"])] +) +def test_write_append_mode(ext, mode, expected): + df = DataFrame([1], columns=["baz"]) + + with tm.ensure_clean(ext) as f: + wb = openpyxl.Workbook() + wb.worksheets[0].title = "foo" + wb.worksheets[0]["A1"].value = "foo" + wb.create_sheet("bar") + wb.worksheets[1]["A1"].value = "bar" + wb.save(f) + + with ExcelWriter(f, engine="openpyxl", mode=mode) as writer: + df.to_excel(writer, sheet_name="baz", index=False) + + with contextlib.closing(openpyxl.load_workbook(f)) as wb2: + result = [sheet.title for sheet in wb2.worksheets] + assert result == expected + + for index, cell_value in enumerate(expected): + assert wb2.worksheets[index]["A1"].value == cell_value + + +@pytest.mark.parametrize( + "if_sheet_exists,num_sheets,expected", + [ + ("new", 2, ["apple", "banana"]), + ("replace", 1, ["pear"]), + ("overlay", 1, ["pear", "banana"]), + ], +) +def test_if_sheet_exists_append_modes(ext, if_sheet_exists, num_sheets, expected): + # GH 40230 + df1 = DataFrame({"fruit": ["apple", "banana"]}) + df2 = DataFrame({"fruit": ["pear"]}) + + with tm.ensure_clean(ext) as f: + df1.to_excel(f, engine="openpyxl", sheet_name="foo", index=False) + with ExcelWriter( + f, engine="openpyxl", mode="a", if_sheet_exists=if_sheet_exists + ) as writer: + df2.to_excel(writer, sheet_name="foo", index=False) + + with contextlib.closing(openpyxl.load_workbook(f)) as wb: + assert len(wb.sheetnames) == num_sheets + assert wb.sheetnames[0] == "foo" + result = pd.read_excel(wb, "foo", engine="openpyxl") + assert list(result["fruit"]) == expected + if len(wb.sheetnames) == 2: + result = pd.read_excel(wb, wb.sheetnames[1], engine="openpyxl") + tm.assert_frame_equal(result, df2) + + +@pytest.mark.parametrize( + "startrow, startcol, greeting, goodbye", + [ + (0, 0, ["poop", "world"], ["goodbye", "people"]), + (0, 1, ["hello", "world"], ["poop", "people"]), + (1, 0, ["hello", "poop"], ["goodbye", "people"]), + (1, 1, ["hello", "world"], ["goodbye", "poop"]), + ], +) +def test_append_overlay_startrow_startcol(ext, startrow, startcol, greeting, goodbye): + df1 = DataFrame({"greeting": ["hello", "world"], "goodbye": ["goodbye", "people"]}) + df2 = DataFrame(["poop"]) + + with tm.ensure_clean(ext) as f: + df1.to_excel(f, engine="openpyxl", sheet_name="poo", index=False) + with ExcelWriter( + f, engine="openpyxl", mode="a", if_sheet_exists="overlay" + ) as writer: + # use startrow+1 because we don't have a header + df2.to_excel( + writer, + index=False, + header=False, + startrow=startrow + 1, + startcol=startcol, + sheet_name="poo", + ) + + result = pd.read_excel(f, sheet_name="poo", 
engine="openpyxl") + expected = DataFrame({"greeting": greeting, "goodbye": goodbye}) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "if_sheet_exists,msg", + [ + ( + "invalid", + "'invalid' is not valid for if_sheet_exists. Valid options " + "are 'error', 'new', 'replace' and 'overlay'.", + ), + ( + "error", + "Sheet 'foo' already exists and if_sheet_exists is set to 'error'.", + ), + ( + None, + "Sheet 'foo' already exists and if_sheet_exists is set to 'error'.", + ), + ], +) +def test_if_sheet_exists_raises(ext, if_sheet_exists, msg): + # GH 40230 + df = DataFrame({"fruit": ["pear"]}) + with tm.ensure_clean(ext) as f: + with pytest.raises(ValueError, match=re.escape(msg)): + df.to_excel(f, sheet_name="foo", engine="openpyxl") + with ExcelWriter( + f, engine="openpyxl", mode="a", if_sheet_exists=if_sheet_exists + ) as writer: + df.to_excel(writer, sheet_name="foo") + + +def test_to_excel_with_openpyxl_engine(ext): + # GH 29854 + with tm.ensure_clean(ext) as filename: + df1 = DataFrame({"A": np.linspace(1, 10, 10)}) + df2 = DataFrame({"B": np.linspace(1, 20, 10)}) + df = pd.concat([df1, df2], axis=1) + styled = df.style.map( + lambda val: f"color: {'red' if val < 0 else 'black'}" + ).highlight_max() + + styled.to_excel(filename, engine="openpyxl") + + +@pytest.mark.parametrize("read_only", [True, False]) +def test_read_workbook(datapath, ext, read_only): + # GH 39528 + filename = datapath("io", "data", "excel", "test1" + ext) + with contextlib.closing( + openpyxl.load_workbook(filename, read_only=read_only) + ) as wb: + result = pd.read_excel(wb, engine="openpyxl") + expected = pd.read_excel(filename) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "header, expected_data", + [ + ( + 0, + { + "Title": [np.nan, "A", 1, 2, 3], + "Unnamed: 1": [np.nan, "B", 4, 5, 6], + "Unnamed: 2": [np.nan, "C", 7, 8, 9], + }, + ), + (2, {"A": [1, 2, 3], "B": [4, 5, 6], "C": [7, 8, 9]}), + ], +) +@pytest.mark.parametrize( + "filename", ["dimension_missing", "dimension_small", "dimension_large"] +) +# When read_only is None, use read_excel instead of a workbook +@pytest.mark.parametrize("read_only", [True, False, None]) +def test_read_with_bad_dimension( + datapath, ext, header, expected_data, filename, read_only +): + # GH 38956, 39001 - no/incorrect dimension information + path = datapath("io", "data", "excel", f"{filename}{ext}") + if read_only is None: + result = pd.read_excel(path, header=header) + else: + with contextlib.closing( + openpyxl.load_workbook(path, read_only=read_only) + ) as wb: + result = pd.read_excel(wb, engine="openpyxl", header=header) + expected = DataFrame(expected_data) + tm.assert_frame_equal(result, expected) + + +def test_append_mode_file(ext): + # GH 39576 + df = DataFrame() + + with tm.ensure_clean(ext) as f: + df.to_excel(f, engine="openpyxl") + + with ExcelWriter( + f, mode="a", engine="openpyxl", if_sheet_exists="new" + ) as writer: + df.to_excel(writer) + + # make sure that zip files are not concatenated by making sure that + # "docProps/app.xml" only occurs twice in the file + data = Path(f).read_bytes() + first = data.find(b"docProps/app.xml") + second = data.find(b"docProps/app.xml", first + 1) + third = data.find(b"docProps/app.xml", second + 1) + assert second != -1 and third == -1 + + +# When read_only is None, use read_excel instead of a workbook +@pytest.mark.parametrize("read_only", [True, False, None]) +def test_read_with_empty_trailing_rows(datapath, ext, read_only): + # GH 39181 + path = datapath("io", 
"data", "excel", f"empty_trailing_rows{ext}") + if read_only is None: + result = pd.read_excel(path) + else: + with contextlib.closing( + openpyxl.load_workbook(path, read_only=read_only) + ) as wb: + result = pd.read_excel(wb, engine="openpyxl") + expected = DataFrame( + { + "Title": [np.nan, "A", 1, 2, 3], + "Unnamed: 1": [np.nan, "B", 4, 5, 6], + "Unnamed: 2": [np.nan, "C", 7, 8, 9], + } + ) + tm.assert_frame_equal(result, expected) + + +# When read_only is None, use read_excel instead of a workbook +@pytest.mark.parametrize("read_only", [True, False, None]) +def test_read_empty_with_blank_row(datapath, ext, read_only): + # GH 39547 - empty excel file with a row that has no data + path = datapath("io", "data", "excel", f"empty_with_blank_row{ext}") + if read_only is None: + result = pd.read_excel(path) + else: + with contextlib.closing( + openpyxl.load_workbook(path, read_only=read_only) + ) as wb: + result = pd.read_excel(wb, engine="openpyxl") + expected = DataFrame() + tm.assert_frame_equal(result, expected) + + +def test_book_and_sheets_consistent(ext): + # GH#45687 - Ensure sheets is updated if user modifies book + with tm.ensure_clean(ext) as f: + with ExcelWriter(f, engine="openpyxl") as writer: + assert writer.sheets == {} + sheet = writer.book.create_sheet("test_name", 0) + assert writer.sheets == {"test_name": sheet} + + +def test_ints_spelled_with_decimals(datapath, ext): + # GH 46988 - openpyxl returns this sheet with floats + path = datapath("io", "data", "excel", f"ints_spelled_with_decimals{ext}") + result = pd.read_excel(path) + expected = DataFrame(range(2, 12), columns=[1]) + tm.assert_frame_equal(result, expected) + + +def test_read_multiindex_header_no_index_names(datapath, ext): + # GH#47487 + path = datapath("io", "data", "excel", f"multiindex_no_index_names{ext}") + result = pd.read_excel(path, index_col=[0, 1, 2], header=[0, 1, 2]) + expected = DataFrame( + [[np.nan, "x", "x", "x"], ["x", np.nan, np.nan, np.nan]], + columns=pd.MultiIndex.from_tuples( + [("X", "Y", "A1"), ("X", "Y", "A2"), ("XX", "YY", "B1"), ("XX", "YY", "B2")] + ), + index=pd.MultiIndex.from_tuples([("A", "AA", "AAA"), ("A", "BB", "BBB")]), + ) + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_readers.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_readers.py new file mode 100644 index 0000000000000000000000000000000000000000..8da8535952dcf98481716a2b00863dbfe6354af5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_readers.py @@ -0,0 +1,1751 @@ +from __future__ import annotations + +from datetime import ( + datetime, + time, +) +from functools import partial +from io import BytesIO +import os +from pathlib import Path +import platform +import re +from urllib.error import URLError +from zipfile import BadZipFile + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas.compat import is_platform_windows +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + Series, + read_csv, +) +import pandas._testing as tm +from pandas.core.arrays import ( + ArrowStringArray, + StringArray, +) + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + +read_ext_params = [".xls", ".xlsx", ".xlsm", ".xlsb", ".ods"] +engine_params = [ + # Add any engines to test here + # When defusedxml is installed it triggers deprecation warnings for + # xlrd and 
openpyxl, so catch those here + pytest.param( + "xlrd", + marks=[ + td.skip_if_no("xlrd"), + ], + ), + pytest.param( + "openpyxl", + marks=[ + td.skip_if_no("openpyxl"), + ], + ), + pytest.param( + None, + marks=[ + td.skip_if_no("xlrd"), + ], + ), + pytest.param("pyxlsb", marks=td.skip_if_no("pyxlsb")), + pytest.param("odf", marks=td.skip_if_no("odf")), + pytest.param("calamine", marks=td.skip_if_no("python_calamine")), +] + + +def _is_valid_engine_ext_pair(engine, read_ext: str) -> bool: + """ + Filter out invalid (engine, ext) pairs instead of skipping, as that + produces 500+ pytest.skips. + """ + engine = engine.values[0] + if engine == "openpyxl" and read_ext == ".xls": + return False + if engine == "odf" and read_ext != ".ods": + return False + if read_ext == ".ods" and engine not in {"odf", "calamine"}: + return False + if engine == "pyxlsb" and read_ext != ".xlsb": + return False + if read_ext == ".xlsb" and engine not in {"pyxlsb", "calamine"}: + return False + if engine == "xlrd" and read_ext != ".xls": + return False + return True + + +def _transfer_marks(engine, read_ext): + """ + engine gives us a pytest.param object with some marks, read_ext is just + a string. We need to generate a new pytest.param inheriting the marks. + """ + values = engine.values + (read_ext,) + new_param = pytest.param(values, marks=engine.marks) + return new_param + + +@pytest.fixture( + params=[ + _transfer_marks(eng, ext) + for eng in engine_params + for ext in read_ext_params + if _is_valid_engine_ext_pair(eng, ext) + ], + ids=str, +) +def engine_and_read_ext(request): + """ + Fixture for Excel reader engine and read_ext, only including valid pairs. + """ + return request.param + + +@pytest.fixture +def engine(engine_and_read_ext): + engine, read_ext = engine_and_read_ext + return engine + + +@pytest.fixture +def read_ext(engine_and_read_ext): + engine, read_ext = engine_and_read_ext + return read_ext + + +@pytest.fixture +def df_ref(datapath): + """ + Obtain the reference data from read_csv with the Python engine. + """ + filepath = datapath("io", "data", "csv", "test1.csv") + df_ref = read_csv(filepath, index_col=0, parse_dates=True, engine="python") + return df_ref + + +def get_exp_unit(read_ext: str, engine: str | None) -> str: + return "ns" + + +def adjust_expected(expected: DataFrame, read_ext: str, engine: str) -> None: + expected.index.name = None + unit = get_exp_unit(read_ext, engine) + # error: "Index" has no attribute "as_unit" + expected.index = expected.index.as_unit(unit) # type: ignore[attr-defined] + + +def xfail_datetimes_with_pyxlsb(engine, request): + if engine == "pyxlsb": + request.applymarker( + pytest.mark.xfail( + reason="Sheets containing datetimes not supported by pyxlsb" + ) + ) + + +class TestReaders: + @pytest.fixture(autouse=True) + def cd_and_set_engine(self, engine, datapath, monkeypatch): + """ + Change directory and set engine for read_excel calls. 
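+ Monkeypatching pd.read_excel pins every call in this class to the parametrized engine, so individual tests do not need to repeat engine= at each call site.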
+ """ + func = partial(pd.read_excel, engine=engine) + monkeypatch.chdir(datapath("io", "data", "excel")) + monkeypatch.setattr(pd, "read_excel", func) + + def test_engine_used(self, read_ext, engine, monkeypatch): + # GH 38884 + def parser(self, *args, **kwargs): + return self.engine + + monkeypatch.setattr(pd.ExcelFile, "parse", parser) + + expected_defaults = { + "xlsx": "openpyxl", + "xlsm": "openpyxl", + "xlsb": "pyxlsb", + "xls": "xlrd", + "ods": "odf", + } + + with open("test1" + read_ext, "rb") as f: + result = pd.read_excel(f) + + if engine is not None: + expected = engine + else: + expected = expected_defaults[read_ext[1:]] + assert result == expected + + def test_engine_kwargs(self, read_ext, engine): + # GH#52214 + expected_defaults = { + "xlsx": {"foo": "abcd"}, + "xlsm": {"foo": 123}, + "xlsb": {"foo": "True"}, + "xls": {"foo": True}, + "ods": {"foo": "abcd"}, + } + + if engine in {"xlrd", "pyxlsb"}: + msg = re.escape(r"open_workbook() got an unexpected keyword argument 'foo'") + elif engine == "odf": + msg = re.escape(r"load() got an unexpected keyword argument 'foo'") + else: + msg = re.escape(r"load_workbook() got an unexpected keyword argument 'foo'") + + if engine is not None: + with pytest.raises(TypeError, match=msg): + pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet1", + index_col=0, + engine_kwargs=expected_defaults[read_ext[1:]], + ) + + def test_usecols_int(self, read_ext): + # usecols as int + msg = "Passing an integer for `usecols`" + with pytest.raises(ValueError, match=msg): + pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=3 + ) + + # usecols as int + with pytest.raises(ValueError, match=msg): + pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols=3, + ) + + def test_usecols_list(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref[["B", "C"]] + adjust_expected(expected, read_ext, engine) + + df1 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=[0, 2, 3] + ) + df2 = pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols=[0, 2, 3], + ) + + # TODO add index to xls file + tm.assert_frame_equal(df1, expected) + tm.assert_frame_equal(df2, expected) + + def test_usecols_str(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref[["A", "B", "C"]] + adjust_expected(expected, read_ext, engine) + + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A:D" + ) + df3 = pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols="A:D", + ) + + # TODO add index to xls; read xls ignores index name?
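+ # usecols "A:D" counts spreadsheet columns: column A supplies the index (index_col=0), so B:D line up with df_ref's data columns A:C.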
+ tm.assert_frame_equal(df2, expected) + tm.assert_frame_equal(df3, expected) + + expected = df_ref[["B", "C"]] + adjust_expected(expected, read_ext, engine) + + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C,D" + ) + df3 = pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols="A,C,D", + ) + # TODO add index to xls file + tm.assert_frame_equal(df2, expected) + tm.assert_frame_equal(df3, expected) + + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,C:D" + ) + df3 = pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet2", + skiprows=[1], + index_col=0, + usecols="A,C:D", + ) + tm.assert_frame_equal(df2, expected) + tm.assert_frame_equal(df3, expected) + + @pytest.mark.parametrize( + "usecols", [[0, 1, 3], [0, 3, 1], [1, 0, 3], [1, 3, 0], [3, 0, 1], [3, 1, 0]] + ) + def test_usecols_diff_positional_int_columns_order( + self, request, engine, read_ext, usecols, df_ref + ): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref[["A", "C"]] + adjust_expected(expected, read_ext, engine) + + result = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols=usecols + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("usecols", [["B", "D"], ["D", "B"]]) + def test_usecols_diff_positional_str_columns_order(self, read_ext, usecols, df_ref): + expected = df_ref[["B", "D"]] + expected.index = range(len(expected)) + + result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols=usecols) + tm.assert_frame_equal(result, expected) + + def test_read_excel_without_slicing(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref + adjust_expected(expected, read_ext, engine) + + result = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0) + tm.assert_frame_equal(result, expected) + + def test_usecols_excel_range_str(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref[["C", "D"]] + adjust_expected(expected, read_ext, engine) + + result = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, usecols="A,D:E" + ) + tm.assert_frame_equal(result, expected) + + def test_usecols_excel_range_str_invalid(self, read_ext): + msg = "Invalid column name: E1" + + with pytest.raises(ValueError, match=msg): + pd.read_excel("test1" + read_ext, sheet_name="Sheet1", usecols="D:E1") + + def test_index_col_label_error(self, read_ext): + msg = "list indices must be integers.*, not str" + + with pytest.raises(TypeError, match=msg): + pd.read_excel( + "test1" + read_ext, + sheet_name="Sheet1", + index_col=["A"], + usecols=["A", "C"], + ) + + def test_index_col_str(self, read_ext): + # see gh-52716 + result = pd.read_excel("test1" + read_ext, sheet_name="Sheet3", index_col="A") + expected = DataFrame( + columns=["B", "C", "D", "E", "F"], index=Index([], name="A") + ) + tm.assert_frame_equal(result, expected) + + def test_index_col_empty(self, read_ext): + # see gh-9208 + result = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet3", index_col=["A", "B", "C"] + ) + expected = DataFrame( + columns=["D", "E", "F"], + index=MultiIndex(levels=[[]] * 3, codes=[[]] * 3, names=["A", "B", "C"]), + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("index_col", [None, 2]) + def test_index_col_with_unnamed(self, read_ext, index_col): + # see gh-18792 + result = pd.read_excel( + 
"test1" + read_ext, sheet_name="Sheet4", index_col=index_col + ) + expected = DataFrame( + [["i1", "a", "x"], ["i2", "b", "y"]], columns=["Unnamed: 0", "col1", "col2"] + ) + if index_col: + expected = expected.set_index(expected.columns[index_col]) + + tm.assert_frame_equal(result, expected) + + def test_usecols_pass_non_existent_column(self, read_ext): + msg = ( + "Usecols do not match columns, " + "columns expected but not found: " + r"\['E'\]" + ) + + with pytest.raises(ValueError, match=msg): + pd.read_excel("test1" + read_ext, usecols=["E"]) + + def test_usecols_wrong_type(self, read_ext): + msg = ( + "'usecols' must either be list-like of " + "all strings, all unicode, all integers or a callable." + ) + + with pytest.raises(ValueError, match=msg): + pd.read_excel("test1" + read_ext, usecols=["E1", 0]) + + def test_excel_stop_iterator(self, read_ext): + parsed = pd.read_excel("test2" + read_ext, sheet_name="Sheet1") + expected = DataFrame([["aaaa", "bbbbb"]], columns=["Test", "Test1"]) + tm.assert_frame_equal(parsed, expected) + + def test_excel_cell_error_na(self, request, engine, read_ext): + xfail_datetimes_with_pyxlsb(engine, request) + + # https://github.com/tafia/calamine/issues/355 + if engine == "calamine" and read_ext == ".ods": + request.applymarker( + pytest.mark.xfail(reason="Calamine can't extract error from ods files") + ) + + parsed = pd.read_excel("test3" + read_ext, sheet_name="Sheet1") + expected = DataFrame([[np.nan]], columns=["Test"]) + tm.assert_frame_equal(parsed, expected) + + def test_excel_table(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref + adjust_expected(expected, read_ext, engine) + + df1 = pd.read_excel("test1" + read_ext, sheet_name="Sheet1", index_col=0) + df2 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet2", skiprows=[1], index_col=0 + ) + # TODO add index to file + tm.assert_frame_equal(df1, expected) + tm.assert_frame_equal(df2, expected) + + df3 = pd.read_excel( + "test1" + read_ext, sheet_name="Sheet1", index_col=0, skipfooter=1 + ) + tm.assert_frame_equal(df3, df1.iloc[:-1]) + + def test_reader_special_dtypes(self, request, engine, read_ext): + xfail_datetimes_with_pyxlsb(engine, request) + + unit = get_exp_unit(read_ext, engine) + expected = DataFrame.from_dict( + { + "IntCol": [1, 2, -3, 4, 0], + "FloatCol": [1.25, 2.25, 1.83, 1.92, 0.0000000005], + "BoolCol": [True, False, True, True, False], + "StrCol": [1, 2, 3, 4, 5], + "Str2Col": ["a", 3, "c", "d", "e"], + "DateCol": Index( + [ + datetime(2013, 10, 30), + datetime(2013, 10, 31), + datetime(1905, 1, 1), + datetime(2013, 12, 14), + datetime(2015, 3, 14), + ], + dtype=f"M8[{unit}]", + ), + }, + ) + basename = "test_types" + + # should read in correctly and infer types + actual = pd.read_excel(basename + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, expected) + + # if not coercing number, then int comes in as float + float_expected = expected.copy() + float_expected.loc[float_expected.index[1], "Str2Col"] = 3.0 + actual = pd.read_excel(basename + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, float_expected) + + # check setting Index (assuming xls and xlsx are the same here) + for icol, name in enumerate(expected.columns): + actual = pd.read_excel( + basename + read_ext, sheet_name="Sheet1", index_col=icol + ) + exp = expected.set_index(name) + tm.assert_frame_equal(actual, exp) + + expected["StrCol"] = expected["StrCol"].apply(str) + actual = pd.read_excel( + basename + read_ext, 
sheet_name="Sheet1", converters={"StrCol": str} + ) + tm.assert_frame_equal(actual, expected) + + # GH8212 - support for converters and missing values + def test_reader_converters(self, read_ext): + basename = "test_converters" + + expected = DataFrame.from_dict( + { + "IntCol": [1, 2, -3, -1000, 0], + "FloatCol": [12.5, np.nan, 18.3, 19.2, 0.000000005], + "BoolCol": ["Found", "Found", "Found", "Not found", "Found"], + "StrCol": ["1", np.nan, "3", "4", "5"], + } + ) + + converters = { + "IntCol": lambda x: int(x) if x != "" else -1000, + "FloatCol": lambda x: 10 * x if x else np.nan, + 2: lambda x: "Found" if x != "" else "Not found", + 3: lambda x: str(x) if x else "", + } + + # should read in correctly and set types of single cells (not array + # dtypes) + actual = pd.read_excel( + basename + read_ext, sheet_name="Sheet1", converters=converters + ) + tm.assert_frame_equal(actual, expected) + + def test_reader_dtype(self, read_ext): + # GH 8212 + basename = "testdtype" + actual = pd.read_excel(basename + read_ext) + + expected = DataFrame( + { + "a": [1, 2, 3, 4], + "b": [2.5, 3.5, 4.5, 5.5], + "c": [1, 2, 3, 4], + "d": [1.0, 2.0, np.nan, 4.0], + } + ) + + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel( + basename + read_ext, dtype={"a": "float64", "b": "float32", "c": str} + ) + + expected["a"] = expected["a"].astype("float64") + expected["b"] = expected["b"].astype("float32") + expected["c"] = Series(["001", "002", "003", "004"], dtype=object) + tm.assert_frame_equal(actual, expected) + + msg = "Unable to convert column d to type int64" + with pytest.raises(ValueError, match=msg): + pd.read_excel(basename + read_ext, dtype={"d": "int64"}) + + @pytest.mark.parametrize( + "dtype,expected", + [ + ( + None, + DataFrame( + { + "a": [1, 2, 3, 4], + "b": [2.5, 3.5, 4.5, 5.5], + "c": [1, 2, 3, 4], + "d": [1.0, 2.0, np.nan, 4.0], + } + ), + ), + ( + {"a": "float64", "b": "float32", "c": str, "d": str}, + DataFrame( + { + "a": Series([1, 2, 3, 4], dtype="float64"), + "b": Series([2.5, 3.5, 4.5, 5.5], dtype="float32"), + "c": Series(["001", "002", "003", "004"], dtype=object), + "d": Series(["1", "2", np.nan, "4"], dtype=object), + } + ), + ), + ], + ) + def test_reader_dtype_str(self, read_ext, dtype, expected): + # see gh-20377 + basename = "testdtype" + + actual = pd.read_excel(basename + read_ext, dtype=dtype) + tm.assert_frame_equal(actual, expected) + + def test_dtype_backend(self, read_ext, dtype_backend, engine): + # GH#36712 + if read_ext in (".xlsb", ".xls"): + pytest.skip(f"No engine for filetype: '{read_ext}'") + + df = DataFrame( + { + "a": Series([1, 3], dtype="Int64"), + "b": Series([2.5, 4.5], dtype="Float64"), + "c": Series([True, False], dtype="boolean"), + "d": Series(["a", "b"], dtype="string"), + "e": Series([pd.NA, 6], dtype="Int64"), + "f": Series([pd.NA, 7.5], dtype="Float64"), + "g": Series([pd.NA, True], dtype="boolean"), + "h": Series([pd.NA, "a"], dtype="string"), + "i": Series([pd.Timestamp("2019-12-31")] * 2), + "j": Series([pd.NA, pd.NA], dtype="Int64"), + } + ) + with tm.ensure_clean(read_ext) as file_path: + df.to_excel(file_path, sheet_name="test", index=False) + result = pd.read_excel( + file_path, sheet_name="test", dtype_backend=dtype_backend + ) + if dtype_backend == "pyarrow": + import pyarrow as pa + + from pandas.arrays import ArrowExtensionArray + + expected = DataFrame( + { + col: ArrowExtensionArray(pa.array(df[col], from_pandas=True)) + for col in df.columns + } + ) + # pyarrow by default infers timestamp resolution as us, not ns + 
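# so the test casts the ns-based expected column down to "us" before comparing +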
expected["i"] = ArrowExtensionArray( + expected["i"].array._pa_array.cast(pa.timestamp(unit="us")) + ) + # pyarrow supports a null type, so don't have to default to Int64 + expected["j"] = ArrowExtensionArray(pa.array([None, None])) + else: + expected = df + unit = get_exp_unit(read_ext, engine) + expected["i"] = expected["i"].astype(f"M8[{unit}]") + + tm.assert_frame_equal(result, expected) + + def test_dtype_backend_and_dtype(self, read_ext): + # GH#36712 + if read_ext in (".xlsb", ".xls"): + pytest.skip(f"No engine for filetype: '{read_ext}'") + + df = DataFrame({"a": [np.nan, 1.0], "b": [2.5, np.nan]}) + with tm.ensure_clean(read_ext) as file_path: + df.to_excel(file_path, sheet_name="test", index=False) + result = pd.read_excel( + file_path, + sheet_name="test", + dtype_backend="numpy_nullable", + dtype="float64", + ) + tm.assert_frame_equal(result, df) + + @pytest.mark.xfail( + using_pyarrow_string_dtype(), reason="infer_string takes precedence" + ) + def test_dtype_backend_string(self, read_ext, string_storage): + # GH#36712 + if read_ext in (".xlsb", ".xls"): + pytest.skip(f"No engine for filetype: '{read_ext}'") + + pa = pytest.importorskip("pyarrow") + + with pd.option_context("mode.string_storage", string_storage): + df = DataFrame( + { + "a": np.array(["a", "b"], dtype=np.object_), + "b": np.array(["x", pd.NA], dtype=np.object_), + } + ) + with tm.ensure_clean(read_ext) as file_path: + df.to_excel(file_path, sheet_name="test", index=False) + result = pd.read_excel( + file_path, sheet_name="test", dtype_backend="numpy_nullable" + ) + + if string_storage == "python": + expected = DataFrame( + { + "a": StringArray(np.array(["a", "b"], dtype=np.object_)), + "b": StringArray(np.array(["x", pd.NA], dtype=np.object_)), + } + ) + else: + expected = DataFrame( + { + "a": ArrowStringArray(pa.array(["a", "b"])), + "b": ArrowStringArray(pa.array(["x", None])), + } + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtypes, exp_value", [({}, 1), ({"a.1": "int64"}, 1)]) + def test_dtype_mangle_dup_cols(self, read_ext, dtypes, exp_value): + # GH#35211 + basename = "df_mangle_dup_col_dtypes" + dtype_dict = {"a": object, **dtypes} + dtype_dict_copy = dtype_dict.copy() + # GH#42462 + result = pd.read_excel(basename + read_ext, dtype=dtype_dict) + expected = DataFrame( + { + "a": Series([1], dtype=object), + "a.1": Series([exp_value], dtype=object if not dtypes else None), + } + ) + assert dtype_dict == dtype_dict_copy, "dtype dict changed" + tm.assert_frame_equal(result, expected) + + def test_reader_spaces(self, read_ext): + # see gh-32207 + basename = "test_spaces" + + actual = pd.read_excel(basename + read_ext) + expected = DataFrame( + { + "testcol": [ + "this is great", + "4 spaces", + "1 trailing ", + " 1 leading", + "2 spaces multiple times", + ] + } + ) + tm.assert_frame_equal(actual, expected) + + # gh-36122, gh-35802 + @pytest.mark.parametrize( + "basename,expected", + [ + ("gh-35802", DataFrame({"COLUMN": ["Test (1)"]})), + ("gh-36122", DataFrame(columns=["got 2nd sa"])), + ], + ) + def test_read_excel_ods_nested_xml(self, engine, read_ext, basename, expected): + # see gh-35802 + if engine != "odf": + pytest.skip(f"Skipped for engine: {engine}") + + actual = pd.read_excel(basename + read_ext) + tm.assert_frame_equal(actual, expected) + + def test_reading_all_sheets(self, read_ext): + # Test reading all sheet names by setting sheet_name to None, + # Ensure a dict is returned. 
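+ # sheet_name=None yields a {sheet_name: DataFrame} mapping.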
+ # See PR #9450 + basename = "test_multisheet" + dfs = pd.read_excel(basename + read_ext, sheet_name=None) + # ensure this is not alphabetical to test order preservation + expected_keys = ["Charlie", "Alpha", "Beta"] + tm.assert_contains_all(expected_keys, dfs.keys()) + # Issue 9930 + # Ensure sheet order is preserved + assert expected_keys == list(dfs.keys()) + + def test_reading_multiple_specific_sheets(self, read_ext): + # Test reading specific sheet names by specifying a mixed list + # of integers and strings, and confirm that duplicated sheet + # references (positions/names) are removed properly. + # Ensure a dict is returned + # See PR #9450 + basename = "test_multisheet" + # Explicitly request duplicates. Only the set should be returned. + expected_keys = [2, "Charlie", "Charlie"] + dfs = pd.read_excel(basename + read_ext, sheet_name=expected_keys) + expected_keys = list(set(expected_keys)) + tm.assert_contains_all(expected_keys, dfs.keys()) + assert len(expected_keys) == len(dfs.keys()) + + def test_reading_all_sheets_with_blank(self, read_ext): + # Test reading all sheet names by setting sheet_name to None, + # In the case where some sheets are blank. + # Issue #11711 + basename = "blank_with_header" + dfs = pd.read_excel(basename + read_ext, sheet_name=None) + expected_keys = ["Sheet1", "Sheet2", "Sheet3"] + tm.assert_contains_all(expected_keys, dfs.keys()) + + # GH6403 + def test_read_excel_blank(self, read_ext): + actual = pd.read_excel("blank" + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, DataFrame()) + + def test_read_excel_blank_with_header(self, read_ext): + expected = DataFrame(columns=["col_1", "col_2"]) + actual = pd.read_excel("blank_with_header" + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, expected) + + def test_exception_message_includes_sheet_name(self, read_ext): + # GH 48706 + with pytest.raises(ValueError, match=r" \(sheet: Sheet1\)$"): + pd.read_excel("blank_with_header" + read_ext, header=[1], sheet_name=None) + with pytest.raises(ZeroDivisionError, match=r" \(sheet: Sheet1\)$"): + pd.read_excel("test1" + read_ext, usecols=lambda x: 1 / 0, sheet_name=None) + + @pytest.mark.filterwarnings("ignore:Cell A4 is marked:UserWarning:openpyxl") + def test_date_conversion_overflow(self, request, engine, read_ext): + # GH 10001 : pandas.ExcelFile ignore parse_dates=False + xfail_datetimes_with_pyxlsb(engine, request) + + expected = DataFrame( + [ + [pd.Timestamp("2016-03-12"), "Marc Johnson"], + [pd.Timestamp("2016-03-16"), "Jack Black"], + [1e20, "Timothy Brown"], + ], + columns=["DateColWithBigInt", "StringCol"], + ) + + if engine == "openpyxl": + request.applymarker( + pytest.mark.xfail(reason="Maybe not supported by openpyxl") + ) + + if engine is None and read_ext in (".xlsx", ".xlsm"): + # GH 35029 + request.applymarker( + pytest.mark.xfail(reason="Defaults to openpyxl, maybe not supported") + ) + + result = pd.read_excel("testdateoverflow" + read_ext) + tm.assert_frame_equal(result, expected) + + def test_sheet_name(self, request, read_ext, engine, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + filename = "test1" + sheet_name = "Sheet1" + + expected = df_ref + adjust_expected(expected, read_ext, engine) + + df1 = pd.read_excel( + filename + read_ext, sheet_name=sheet_name, index_col=0 + ) # doc + df2 = pd.read_excel(filename + read_ext, index_col=0, sheet_name=sheet_name) + + tm.assert_frame_equal(df1, expected) + tm.assert_frame_equal(df2, expected) + + def test_excel_read_buffer(self, read_ext): + pth = "test1" 
+ read_ext + expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0) + with open(pth, "rb") as f: + actual = pd.read_excel(f, sheet_name="Sheet1", index_col=0) + tm.assert_frame_equal(expected, actual) + + def test_bad_engine_raises(self): + bad_engine = "foo" + with pytest.raises(ValueError, match="Unknown engine: foo"): + pd.read_excel("", engine=bad_engine) + + @pytest.mark.parametrize( + "sheet_name", + [3, [0, 3], [3, 0], "Sheet4", ["Sheet1", "Sheet4"], ["Sheet4", "Sheet1"]], + ) + def test_bad_sheetname_raises(self, read_ext, sheet_name): + # GH 39250 + msg = "Worksheet index 3 is invalid|Worksheet named 'Sheet4' not found" + with pytest.raises(ValueError, match=msg): + pd.read_excel("blank" + read_ext, sheet_name=sheet_name) + + def test_missing_file_raises(self, read_ext): + bad_file = f"foo{read_ext}" + # CI tests with other languages, translates to "No such file or directory" + match = "|".join( + [ + "(No such file or directory", + "没有那个文件或目录", + "File o directory non esistente)", + ] + ) + with pytest.raises(FileNotFoundError, match=match): + pd.read_excel(bad_file) + + def test_corrupt_bytes_raises(self, engine): + bad_stream = b"foo" + if engine is None: + error = ValueError + msg = ( + "Excel file format cannot be determined, you must " + "specify an engine manually." + ) + elif engine == "xlrd": + from xlrd import XLRDError + + error = XLRDError + msg = ( + "Unsupported format, or corrupt file: Expected BOF " + "record; found b'foo'" + ) + elif engine == "calamine": + from python_calamine import CalamineError + + error = CalamineError + msg = "Cannot detect file format" + else: + error = BadZipFile + msg = "File is not a zip file" + with pytest.raises(error, match=msg): + pd.read_excel(BytesIO(bad_stream)) + + @pytest.mark.network + @pytest.mark.single_cpu + def test_read_from_http_url(self, httpserver, read_ext): + with open("test1" + read_ext, "rb") as f: + httpserver.serve_content(content=f.read()) + url_table = pd.read_excel(httpserver.url) + local_table = pd.read_excel("test1" + read_ext) + tm.assert_frame_equal(url_table, local_table) + + @td.skip_if_not_us_locale + @pytest.mark.single_cpu + def test_read_from_s3_url(self, read_ext, s3_public_bucket, s3so): + # Bucket created in tests/io/conftest.py + with open("test1" + read_ext, "rb") as f: + s3_public_bucket.put_object(Key="test1" + read_ext, Body=f) + + url = f"s3://{s3_public_bucket.name}/test1" + read_ext + + url_table = pd.read_excel(url, storage_options=s3so) + local_table = pd.read_excel("test1" + read_ext) + tm.assert_frame_equal(url_table, local_table) + + @pytest.mark.single_cpu + def test_read_from_s3_object(self, read_ext, s3_public_bucket, s3so): + # GH 38788 + # Bucket created in tests/io/conftest.py + with open("test1" + read_ext, "rb") as f: + s3_public_bucket.put_object(Key="test1" + read_ext, Body=f) + + import s3fs + + s3 = s3fs.S3FileSystem(**s3so) + + with s3.open(f"s3://{s3_public_bucket.name}/test1" + read_ext) as f: + url_table = pd.read_excel(f) + + local_table = pd.read_excel("test1" + read_ext) + tm.assert_frame_equal(url_table, local_table) + + @pytest.mark.slow + def test_read_from_file_url(self, read_ext, datapath): + # FILE + localtable = os.path.join(datapath("io", "data", "excel"), "test1" + read_ext) + local_table = pd.read_excel(localtable) + + try: + url_table = pd.read_excel("file://localhost/" + localtable) + except URLError: + # fails on some systems + platform_info = " ".join(platform.uname()).strip() + pytest.skip(f"failing on {platform_info}") + + 
tm.assert_frame_equal(url_table, local_table) + + def test_read_from_pathlib_path(self, read_ext): + # GH12655 + str_path = "test1" + read_ext + expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0) + + path_obj = Path("test1" + read_ext) + actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0) + + tm.assert_frame_equal(expected, actual) + + @td.skip_if_no("py.path") + def test_read_from_py_localpath(self, read_ext): + # GH12655 + from py.path import local as LocalPath + + str_path = os.path.join("test1" + read_ext) + expected = pd.read_excel(str_path, sheet_name="Sheet1", index_col=0) + + path_obj = LocalPath().join("test1" + read_ext) + actual = pd.read_excel(path_obj, sheet_name="Sheet1", index_col=0) + + tm.assert_frame_equal(expected, actual) + + def test_close_from_py_localpath(self, read_ext): + # GH31467 + str_path = os.path.join("test1" + read_ext) + with open(str_path, "rb") as f: + x = pd.read_excel(f, sheet_name="Sheet1", index_col=0) + del x + # should not throw an exception because the passed file was closed + f.read() + + def test_reader_seconds(self, request, engine, read_ext): + xfail_datetimes_with_pyxlsb(engine, request) + + # GH 55045 + if engine == "calamine" and read_ext == ".ods": + request.applymarker( + pytest.mark.xfail( + reason="ODS file contains bad datetime (seconds as text)" + ) + ) + + # Test reading times with and without milliseconds. GH5945. + expected = DataFrame.from_dict( + { + "Time": [ + time(1, 2, 3), + time(2, 45, 56, 100000), + time(4, 29, 49, 200000), + time(6, 13, 42, 300000), + time(7, 57, 35, 400000), + time(9, 41, 28, 500000), + time(11, 25, 21, 600000), + time(13, 9, 14, 700000), + time(14, 53, 7, 800000), + time(16, 37, 0, 900000), + time(18, 20, 54), + ] + } + ) + + actual = pd.read_excel("times_1900" + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel("times_1904" + read_ext, sheet_name="Sheet1") + tm.assert_frame_equal(actual, expected) + + def test_read_excel_multiindex(self, request, engine, read_ext): + # see gh-4679 + xfail_datetimes_with_pyxlsb(engine, request) + + unit = get_exp_unit(read_ext, engine) + + mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]]) + mi_file = "testmultiindex" + read_ext + + # "mi_column" sheet + expected = DataFrame( + [ + [1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=mi, + ) + expected[mi[2]] = expected[mi[2]].astype(f"M8[{unit}]") + + actual = pd.read_excel( + mi_file, sheet_name="mi_column", header=[0, 1], index_col=0 + ) + tm.assert_frame_equal(actual, expected) + + # "mi_index" sheet + expected.index = mi + expected.columns = ["a", "b", "c", "d"] + + actual = pd.read_excel(mi_file, sheet_name="mi_index", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected) + + # "both" sheet + expected.columns = mi + + actual = pd.read_excel( + mi_file, sheet_name="both", index_col=[0, 1], header=[0, 1] + ) + tm.assert_frame_equal(actual, expected) + + # "mi_index_name" sheet + expected.columns = ["a", "b", "c", "d"] + expected.index = mi.set_names(["ilvl1", "ilvl2"]) + + actual = pd.read_excel(mi_file, sheet_name="mi_index_name", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected) + + # "mi_column_name" sheet + expected.index = list(range(4)) + expected.columns = mi.set_names(["c1", "c2"]) + actual = pd.read_excel( + mi_file, sheet_name="mi_column_name", header=[0, 1], 
index_col=0 + ) + tm.assert_frame_equal(actual, expected) + + # see gh-11317 + # "name_with_int" sheet + expected.columns = mi.set_levels([1, 2], level=1).set_names(["c1", "c2"]) + + actual = pd.read_excel( + mi_file, sheet_name="name_with_int", index_col=0, header=[0, 1] + ) + tm.assert_frame_equal(actual, expected) + + # "both_name" sheet + expected.columns = mi.set_names(["c1", "c2"]) + expected.index = mi.set_names(["ilvl1", "ilvl2"]) + + actual = pd.read_excel( + mi_file, sheet_name="both_name", index_col=[0, 1], header=[0, 1] + ) + tm.assert_frame_equal(actual, expected) + + # "both_skiprows" sheet + actual = pd.read_excel( + mi_file, + sheet_name="both_name_skiprows", + index_col=[0, 1], + header=[0, 1], + skiprows=2, + ) + tm.assert_frame_equal(actual, expected) + + @pytest.mark.parametrize( + "sheet_name,idx_lvl2", + [ + ("both_name_blank_after_mi_name", [np.nan, "b", "a", "b"]), + ("both_name_multiple_blanks", [np.nan] * 4), + ], + ) + def test_read_excel_multiindex_blank_after_name( + self, request, engine, read_ext, sheet_name, idx_lvl2 + ): + # GH34673 + xfail_datetimes_with_pyxlsb(engine, request) + + mi_file = "testmultiindex" + read_ext + mi = MultiIndex.from_product([["foo", "bar"], ["a", "b"]], names=["c1", "c2"]) + + unit = get_exp_unit(read_ext, engine) + + expected = DataFrame( + [ + [1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=mi, + index=MultiIndex.from_arrays( + (["foo", "foo", "bar", "bar"], idx_lvl2), + names=["ilvl1", "ilvl2"], + ), + ) + expected[mi[2]] = expected[mi[2]].astype(f"M8[{unit}]") + result = pd.read_excel( + mi_file, + sheet_name=sheet_name, + index_col=[0, 1], + header=[0, 1], + ) + tm.assert_frame_equal(result, expected) + + def test_read_excel_multiindex_header_only(self, read_ext): + # see gh-11733. + # + # Don't try to parse a header name if there isn't one. + mi_file = "testmultiindex" + read_ext + result = pd.read_excel(mi_file, sheet_name="index_col_none", header=[0, 1]) + + exp_columns = MultiIndex.from_product([("A", "B"), ("key", "val")]) + expected = DataFrame([[1, 2, 3, 4]] * 2, columns=exp_columns) + tm.assert_frame_equal(result, expected) + + def test_excel_old_index_format(self, read_ext): + # see gh-4679 + filename = "test_index_name_pre17" + read_ext + + # We detect headers to determine if index names exist, so + # that "index" name in the "names" version of the data will + # now be interpreted as rows that include null data. 
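+ # Hence the leading all-NaN row in the expected data below.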
+ data = np.array( + [ + [np.nan, np.nan, np.nan, np.nan, np.nan], + ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"], + ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"], + ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"], + ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"], + ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"], + ], + dtype=object, + ) + columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"] + mi = MultiIndex( + levels=[ + ["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], + ["R1", "R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"], + ], + codes=[[0, 1, 2, 3, 4, 5], [0, 1, 2, 3, 4, 5]], + names=[None, None], + ) + si = Index( + ["R0", "R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None + ) + + expected = DataFrame(data, index=si, columns=columns) + + actual = pd.read_excel(filename, sheet_name="single_names", index_col=0) + tm.assert_frame_equal(actual, expected) + + expected.index = mi + + actual = pd.read_excel(filename, sheet_name="multi_names", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected) + + # The analogous versions of the "names" version data + # where there are explicitly no names for the indices. + data = np.array( + [ + ["R0C0", "R0C1", "R0C2", "R0C3", "R0C4"], + ["R1C0", "R1C1", "R1C2", "R1C3", "R1C4"], + ["R2C0", "R2C1", "R2C2", "R2C3", "R2C4"], + ["R3C0", "R3C1", "R3C2", "R3C3", "R3C4"], + ["R4C0", "R4C1", "R4C2", "R4C3", "R4C4"], + ] + ) + columns = ["C_l0_g0", "C_l0_g1", "C_l0_g2", "C_l0_g3", "C_l0_g4"] + mi = MultiIndex( + levels=[ + ["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], + ["R_l1_g0", "R_l1_g1", "R_l1_g2", "R_l1_g3", "R_l1_g4"], + ], + codes=[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4]], + names=[None, None], + ) + si = Index(["R_l0_g0", "R_l0_g1", "R_l0_g2", "R_l0_g3", "R_l0_g4"], name=None) + + expected = DataFrame(data, index=si, columns=columns) + + actual = pd.read_excel(filename, sheet_name="single_no_names", index_col=0) + tm.assert_frame_equal(actual, expected) + + expected.index = mi + + actual = pd.read_excel(filename, sheet_name="multi_no_names", index_col=[0, 1]) + tm.assert_frame_equal(actual, expected) + + def test_read_excel_bool_header_arg(self, read_ext): + # GH 6114 + msg = "Passing a bool to header is invalid" + for arg in [True, False]: + with pytest.raises(TypeError, match=msg): + pd.read_excel("test1" + read_ext, header=arg) + + def test_read_excel_skiprows(self, request, engine, read_ext): + # GH 4903 + xfail_datetimes_with_pyxlsb(engine, request) + + unit = get_exp_unit(read_ext, engine) + + actual = pd.read_excel( + "testskiprows" + read_ext, sheet_name="skiprows_list", skiprows=[0, 2] + ) + expected = DataFrame( + [ + [1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=["a", "b", "c", "d"], + ) + expected["c"] = expected["c"].astype(f"M8[{unit}]") + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=np.array([0, 2]), + ) + tm.assert_frame_equal(actual, expected) + + # GH36435 + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=lambda x: x in [0, 2], + ) + tm.assert_frame_equal(actual, expected) + + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=3, + names=["a", "b", "c", "d"], + ) + expected = DataFrame( + [ + # [1, 2.5, pd.Timestamp("2015-01-01"), True], + [2, 3.5, pd.Timestamp("2015-01-02"), 
False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=["a", "b", "c", "d"], + ) + expected["c"] = expected["c"].astype(f"M8[{unit}]") + tm.assert_frame_equal(actual, expected) + + def test_read_excel_skiprows_callable_not_in(self, request, engine, read_ext): + # GH 4903 + xfail_datetimes_with_pyxlsb(engine, request) + unit = get_exp_unit(read_ext, engine) + + actual = pd.read_excel( + "testskiprows" + read_ext, + sheet_name="skiprows_list", + skiprows=lambda x: x not in [1, 3, 5], + ) + expected = DataFrame( + [ + [1, 2.5, pd.Timestamp("2015-01-01"), True], + # [2, 3.5, pd.Timestamp("2015-01-02"), False], + [3, 4.5, pd.Timestamp("2015-01-03"), False], + # [4, 5.5, pd.Timestamp("2015-01-04"), True], + ], + columns=["a", "b", "c", "d"], + ) + expected["c"] = expected["c"].astype(f"M8[{unit}]") + tm.assert_frame_equal(actual, expected) + + def test_read_excel_nrows(self, read_ext): + # GH 16645 + num_rows_to_pull = 5 + actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull) + expected = pd.read_excel("test1" + read_ext) + expected = expected[:num_rows_to_pull] + tm.assert_frame_equal(actual, expected) + + def test_read_excel_nrows_greater_than_nrows_in_file(self, read_ext): + # GH 16645 + expected = pd.read_excel("test1" + read_ext) + num_records_in_file = len(expected) + num_rows_to_pull = num_records_in_file + 10 + actual = pd.read_excel("test1" + read_ext, nrows=num_rows_to_pull) + tm.assert_frame_equal(actual, expected) + + def test_read_excel_nrows_non_integer_parameter(self, read_ext): + # GH 16645 + msg = "'nrows' must be an integer >=0" + with pytest.raises(ValueError, match=msg): + pd.read_excel("test1" + read_ext, nrows="5") + + @pytest.mark.parametrize( + "filename,sheet_name,header,index_col,skiprows", + [ + ("testmultiindex", "mi_column", [0, 1], 0, None), + ("testmultiindex", "mi_index", None, [0, 1], None), + ("testmultiindex", "both", [0, 1], [0, 1], None), + ("testmultiindex", "mi_column_name", [0, 1], 0, None), + ("testskiprows", "skiprows_list", None, None, [0, 2]), + ("testskiprows", "skiprows_list", None, None, lambda x: x in (0, 2)), + ], + ) + def test_read_excel_nrows_params( + self, read_ext, filename, sheet_name, header, index_col, skiprows + ): + """ + For various parameters, we should get the same result whether we + limit the rows during load (nrows=3) or after (df.iloc[:3]). 
+ """ + # GH 46894 + expected = pd.read_excel( + filename + read_ext, + sheet_name=sheet_name, + header=header, + index_col=index_col, + skiprows=skiprows, + ).iloc[:3] + actual = pd.read_excel( + filename + read_ext, + sheet_name=sheet_name, + header=header, + index_col=index_col, + skiprows=skiprows, + nrows=3, + ) + tm.assert_frame_equal(actual, expected) + + def test_deprecated_kwargs(self, read_ext): + with pytest.raises(TypeError, match="but 3 positional arguments"): + pd.read_excel("test1" + read_ext, "Sheet1", 0) + + def test_no_header_with_list_index_col(self, read_ext): + # GH 31783 + file_name = "testmultiindex" + read_ext + data = [("B", "B"), ("key", "val"), (3, 4), (3, 4)] + idx = MultiIndex.from_tuples( + [("A", "A"), ("key", "val"), (1, 2), (1, 2)], names=(0, 1) + ) + expected = DataFrame(data, index=idx, columns=(2, 3)) + result = pd.read_excel( + file_name, sheet_name="index_col_none", index_col=[0, 1], header=None + ) + tm.assert_frame_equal(expected, result) + + def test_one_col_noskip_blank_line(self, read_ext): + # GH 39808 + file_name = "one_col_blank_line" + read_ext + data = [0.5, np.nan, 1, 2] + expected = DataFrame(data, columns=["numbers"]) + result = pd.read_excel(file_name) + tm.assert_frame_equal(result, expected) + + def test_multiheader_two_blank_lines(self, read_ext): + # GH 40442 + file_name = "testmultiindex" + read_ext + columns = MultiIndex.from_tuples([("a", "A"), ("b", "B")]) + data = [[np.nan, np.nan], [np.nan, np.nan], [1, 3], [2, 4]] + expected = DataFrame(data, columns=columns) + result = pd.read_excel( + file_name, sheet_name="mi_column_empty_rows", header=[0, 1] + ) + tm.assert_frame_equal(result, expected) + + def test_trailing_blanks(self, read_ext): + """ + Sheets can contain blank cells with no data. 
Some of our readers + were including those cells, creating many empty rows and columns + """ + file_name = "trailing_blanks" + read_ext + result = pd.read_excel(file_name) + assert result.shape == (3, 3) + + def test_ignore_chartsheets_by_str(self, request, engine, read_ext): + # GH 41448 + if read_ext == ".ods": + pytest.skip("chartsheets do not exist in the ODF format") + if engine == "pyxlsb": + request.applymarker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pytest.raises(ValueError, match="Worksheet named 'Chart1' not found"): + pd.read_excel("chartsheet" + read_ext, sheet_name="Chart1") + + def test_ignore_chartsheets_by_int(self, request, engine, read_ext): + # GH 41448 + if read_ext == ".ods": + pytest.skip("chartsheets do not exist in the ODF format") + if engine == "pyxlsb": + request.applymarker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pytest.raises( + ValueError, match="Worksheet index 1 is invalid, 1 worksheets found" + ): + pd.read_excel("chartsheet" + read_ext, sheet_name=1) + + def test_euro_decimal_format(self, read_ext): + # copied from read_csv + result = pd.read_excel("test_decimal" + read_ext, decimal=",", skiprows=1) + expected = DataFrame( + [ + [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819], + [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872], + [3, 878.158, 108013.434, "GHI", "rez", 2.735694704], + ], + columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"], + ) + tm.assert_frame_equal(result, expected) + + +class TestExcelFileRead: + def test_deprecate_bytes_input(self, engine, read_ext): + # GH 53830 + msg = ( + "Passing bytes to 'read_excel' is deprecated and " + "will be removed in a future version. To read from a " + "byte string, wrap it in a `BytesIO` object." + ) + + with tm.assert_produces_warning( + FutureWarning, match=msg, raise_on_extra_warnings=False + ): + with open("test1" + read_ext, "rb") as f: + pd.read_excel(f.read(), engine=engine) + + @pytest.fixture(autouse=True) + def cd_and_set_engine(self, engine, datapath, monkeypatch): + """ + Change directory and set engine for ExcelFile objects. 
+ """ + func = partial(pd.ExcelFile, engine=engine) + monkeypatch.chdir(datapath("io", "data", "excel")) + monkeypatch.setattr(pd, "ExcelFile", func) + + def test_engine_used(self, read_ext, engine): + expected_defaults = { + "xlsx": "openpyxl", + "xlsm": "openpyxl", + "xlsb": "pyxlsb", + "xls": "xlrd", + "ods": "odf", + } + + with pd.ExcelFile("test1" + read_ext) as excel: + result = excel.engine + + if engine is not None: + expected = engine + else: + expected = expected_defaults[read_ext[1:]] + assert result == expected + + def test_excel_passes_na(self, read_ext): + with pd.ExcelFile("test4" + read_ext) as excel: + parsed = pd.read_excel( + excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"] + ) + expected = DataFrame( + [["NA"], [1], ["NA"], [np.nan], ["rabbit"]], columns=["Test"] + ) + tm.assert_frame_equal(parsed, expected) + + with pd.ExcelFile("test4" + read_ext) as excel: + parsed = pd.read_excel( + excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"] + ) + expected = DataFrame( + [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"] + ) + tm.assert_frame_equal(parsed, expected) + + # 13967 + with pd.ExcelFile("test5" + read_ext) as excel: + parsed = pd.read_excel( + excel, sheet_name="Sheet1", keep_default_na=False, na_values=["apple"] + ) + expected = DataFrame( + [["1.#QNAN"], [1], ["nan"], [np.nan], ["rabbit"]], columns=["Test"] + ) + tm.assert_frame_equal(parsed, expected) + + with pd.ExcelFile("test5" + read_ext) as excel: + parsed = pd.read_excel( + excel, sheet_name="Sheet1", keep_default_na=True, na_values=["apple"] + ) + expected = DataFrame( + [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]], columns=["Test"] + ) + tm.assert_frame_equal(parsed, expected) + + @pytest.mark.parametrize("na_filter", [None, True, False]) + def test_excel_passes_na_filter(self, read_ext, na_filter): + # gh-25453 + kwargs = {} + + if na_filter is not None: + kwargs["na_filter"] = na_filter + + with pd.ExcelFile("test5" + read_ext) as excel: + parsed = pd.read_excel( + excel, + sheet_name="Sheet1", + keep_default_na=True, + na_values=["apple"], + **kwargs, + ) + + if na_filter is False: + expected = [["1.#QNAN"], [1], ["nan"], ["apple"], ["rabbit"]] + else: + expected = [[np.nan], [1], [np.nan], [np.nan], ["rabbit"]] + + expected = DataFrame(expected, columns=["Test"]) + tm.assert_frame_equal(parsed, expected) + + def test_excel_table_sheet_by_index(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref + adjust_expected(expected, read_ext, engine) + + with pd.ExcelFile("test1" + read_ext) as excel: + df1 = pd.read_excel(excel, sheet_name=0, index_col=0) + df2 = pd.read_excel(excel, sheet_name=1, skiprows=[1], index_col=0) + tm.assert_frame_equal(df1, expected) + tm.assert_frame_equal(df2, expected) + + with pd.ExcelFile("test1" + read_ext) as excel: + df1 = excel.parse(0, index_col=0) + df2 = excel.parse(1, skiprows=[1], index_col=0) + tm.assert_frame_equal(df1, expected) + tm.assert_frame_equal(df2, expected) + + with pd.ExcelFile("test1" + read_ext) as excel: + df3 = pd.read_excel(excel, sheet_name=0, index_col=0, skipfooter=1) + tm.assert_frame_equal(df3, df1.iloc[:-1]) + + with pd.ExcelFile("test1" + read_ext) as excel: + df3 = excel.parse(0, index_col=0, skipfooter=1) + + tm.assert_frame_equal(df3, df1.iloc[:-1]) + + def test_sheet_name(self, request, engine, read_ext, df_ref): + xfail_datetimes_with_pyxlsb(engine, request) + + expected = df_ref + adjust_expected(expected, read_ext, 
engine) + + filename = "test1" + sheet_name = "Sheet1" + + with pd.ExcelFile(filename + read_ext) as excel: + df1_parse = excel.parse(sheet_name=sheet_name, index_col=0) # doc + + with pd.ExcelFile(filename + read_ext) as excel: + df2_parse = excel.parse(index_col=0, sheet_name=sheet_name) + + tm.assert_frame_equal(df1_parse, expected) + tm.assert_frame_equal(df2_parse, expected) + + @pytest.mark.parametrize( + "sheet_name", + [3, [0, 3], [3, 0], "Sheet4", ["Sheet1", "Sheet4"], ["Sheet4", "Sheet1"]], + ) + def test_bad_sheetname_raises(self, read_ext, sheet_name): + # GH 39250 + msg = "Worksheet index 3 is invalid|Worksheet named 'Sheet4' not found" + with pytest.raises(ValueError, match=msg): + with pd.ExcelFile("blank" + read_ext) as excel: + excel.parse(sheet_name=sheet_name) + + def test_excel_read_buffer(self, engine, read_ext): + pth = "test1" + read_ext + expected = pd.read_excel(pth, sheet_name="Sheet1", index_col=0, engine=engine) + + with open(pth, "rb") as f: + with pd.ExcelFile(f) as xls: + actual = pd.read_excel(xls, sheet_name="Sheet1", index_col=0) + + tm.assert_frame_equal(expected, actual) + + def test_reader_closes_file(self, engine, read_ext): + with open("test1" + read_ext, "rb") as f: + with pd.ExcelFile(f) as xlsx: + # parses okay + pd.read_excel(xlsx, sheet_name="Sheet1", index_col=0, engine=engine) + + assert f.closed + + def test_conflicting_excel_engines(self, read_ext): + # GH 26566 + msg = "Engine should not be specified when passing an ExcelFile" + + with pd.ExcelFile("test1" + read_ext) as xl: + with pytest.raises(ValueError, match=msg): + pd.read_excel(xl, engine="foo") + + def test_excel_read_binary(self, engine, read_ext): + # GH 15914 + expected = pd.read_excel("test1" + read_ext, engine=engine) + + with open("test1" + read_ext, "rb") as f: + data = f.read() + + actual = pd.read_excel(BytesIO(data), engine=engine) + tm.assert_frame_equal(expected, actual) + + def test_excel_read_binary_via_read_excel(self, read_ext, engine): + # GH 38424 + with open("test1" + read_ext, "rb") as f: + result = pd.read_excel(f, engine=engine) + expected = pd.read_excel("test1" + read_ext, engine=engine) + tm.assert_frame_equal(result, expected) + + def test_read_excel_header_index_out_of_range(self, engine): + # GH#43143 + with open("df_header_oob.xlsx", "rb") as f: + with pytest.raises(ValueError, match="exceeds maximum"): + pd.read_excel(f, header=[0, 1]) + + @pytest.mark.parametrize("filename", ["df_empty.xlsx", "df_equals.xlsx"]) + def test_header_with_index_col(self, filename): + # GH 33476 + idx = Index(["Z"], name="I2") + cols = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"]) + expected = DataFrame([[1, 3]], index=idx, columns=cols, dtype="int64") + result = pd.read_excel( + filename, sheet_name="Sheet1", index_col=0, header=[0, 1] + ) + tm.assert_frame_equal(expected, result) + + def test_read_datetime_multiindex(self, request, engine, read_ext): + # GH 34748 + xfail_datetimes_with_pyxlsb(engine, request) + + f = "test_datetime_mi" + read_ext + with pd.ExcelFile(f) as excel: + actual = pd.read_excel(excel, header=[0, 1], index_col=0, engine=engine) + + unit = get_exp_unit(read_ext, engine) + dti = pd.DatetimeIndex(["2020-02-29", "2020-03-01"], dtype=f"M8[{unit}]") + expected_column_index = MultiIndex.from_arrays( + [dti[:1], dti[1:]], + names=[ + dti[0].to_pydatetime(), + dti[1].to_pydatetime(), + ], + ) + expected = DataFrame([], index=[], columns=expected_column_index) + + tm.assert_frame_equal(expected, actual) + + def 
test_engine_invalid_option(self, read_ext): + # read_ext includes the '.', hence the weird formatting + with pytest.raises(ValueError, match="Value must be one of *"): + with pd.option_context(f"io.excel{read_ext}.reader", "abc"): + pass + + def test_ignore_chartsheets(self, request, engine, read_ext): + # GH 41448 + if read_ext == ".ods": + pytest.skip("chartsheets do not exist in the ODF format") + if engine == "pyxlsb": + request.applymarker( + pytest.mark.xfail( + reason="pyxlsb can't distinguish chartsheets from worksheets" + ) + ) + with pd.ExcelFile("chartsheet" + read_ext) as excel: + assert excel.sheet_names == ["Sheet1"] + + def test_corrupt_files_closed(self, engine, read_ext): + # GH41778 + errors = (BadZipFile,) + if engine is None: + pytest.skip(f"Invalid test for engine={engine}") + elif engine == "xlrd": + import xlrd + + errors = (BadZipFile, xlrd.biffh.XLRDError) + elif engine == "calamine": + from python_calamine import CalamineError + + errors = (CalamineError,) + + with tm.ensure_clean(f"corrupt{read_ext}") as file: + Path(file).write_text("corrupt", encoding="utf-8") + with tm.assert_produces_warning(False): + try: + pd.ExcelFile(file, engine=engine) + except errors: + pass diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_style.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_style.py new file mode 100644 index 0000000000000000000000000000000000000000..89615172688d7b56fbb070dbcd4750365d7d612d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_style.py @@ -0,0 +1,298 @@ +import contextlib +import time + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows +import pandas.util._test_decorators as td + +from pandas import ( + DataFrame, + read_excel, +) +import pandas._testing as tm + +from pandas.io.excel import ExcelWriter +from pandas.io.formats.excel import ExcelFormatter + +pytest.importorskip("jinja2") +# jinja2 is currently required for Styler.__init__(). Technically Styler.to_excel +# could compute styles and render to Excel without jinja2, since there is no +# 'template' file, but this needs the import error to be delayed until render time.
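# [Editor's note] Illustrative sketch, not part of the diff: the style tests
# below all follow one pattern -- apply CSS through the Styler, write with
# Styler.to_excel, then re-open the file with openpyxl and inspect the cell
# attributes. A minimal self-contained version (the helper and file names
# are placeholders; assumes openpyxl and jinja2 are installed):
def _sketch_styler_roundtrip(path="styled.xlsx"):
    import openpyxl
    import pandas as pd

    df = pd.DataFrame([[1.0]])
    # Styler.map applies the CSS string to every data cell.
    df.style.map(lambda v: "font-weight: bold;").to_excel(path)
    wb = openpyxl.load_workbook(path)
    # openpyxl cells are 1-indexed; (2, 2) is the first data cell,
    # after the default header row and index column.
    assert wb.active.cell(2, 2).font.b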
+ +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + +def assert_equal_cell_styles(cell1, cell2): + # TODO: should find a better way to check equality + assert cell1.alignment.__dict__ == cell2.alignment.__dict__ + assert cell1.border.__dict__ == cell2.border.__dict__ + assert cell1.fill.__dict__ == cell2.fill.__dict__ + assert cell1.font.__dict__ == cell2.font.__dict__ + assert cell1.number_format == cell2.number_format + assert cell1.protection.__dict__ == cell2.protection.__dict__ + + +@pytest.mark.parametrize( + "engine", + ["xlsxwriter", "openpyxl"], +) +def test_styler_to_excel_unstyled(engine): + # compare DataFrame.to_excel and Styler.to_excel when no styles applied + pytest.importorskip(engine) + df = DataFrame(np.random.default_rng(2).standard_normal((2, 2))) + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine=engine) as writer: + df.to_excel(writer, sheet_name="dataframe") + df.style.to_excel(writer, sheet_name="unstyled") + + openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + for col1, col2 in zip(wb["dataframe"].columns, wb["unstyled"].columns): + assert len(col1) == len(col2) + for cell1, cell2 in zip(col1, col2): + assert cell1.value == cell2.value + assert_equal_cell_styles(cell1, cell2) + + +shared_style_params = [ + ( + "background-color: #111222", + ["fill", "fgColor", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ( + "color: #111222", + ["font", "color", "value"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("font-family: Arial;", ["font", "name"], "arial"), + ("font-weight: bold;", ["font", "b"], True), + ("font-style: italic;", ["font", "i"], True), + ("text-decoration: underline;", ["font", "u"], "single"), + ("number-format: $??,???.00;", ["number_format"], "$??,???.00"), + ("text-align: left;", ["alignment", "horizontal"], "left"), + ( + "vertical-align: bottom;", + ["alignment", "vertical"], + {"xlsxwriter": None, "openpyxl": "bottom"}, # xlsxwriter Fails + ), + ("vertical-align: middle;", ["alignment", "vertical"], "center"), + # Border widths + ("border-left: 2pt solid red", ["border", "left", "style"], "medium"), + ("border-left: 1pt dotted red", ["border", "left", "style"], "dotted"), + ("border-left: 2pt dotted red", ["border", "left", "style"], "mediumDashDotDot"), + ("border-left: 1pt dashed red", ["border", "left", "style"], "dashed"), + ("border-left: 2pt dashed red", ["border", "left", "style"], "mediumDashed"), + ("border-left: 1pt solid red", ["border", "left", "style"], "thin"), + ("border-left: 3pt solid red", ["border", "left", "style"], "thick"), + # Border expansion + ( + "border-left: 2pt solid #111222", + ["border", "left", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("border: 1pt solid red", ["border", "top", "style"], "thin"), + ( + "border: 1pt solid #111222", + ["border", "top", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("border: 1pt solid red", ["border", "right", "style"], "thin"), + ( + "border: 1pt solid #111222", + ["border", "right", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("border: 1pt solid red", ["border", "bottom", "style"], "thin"), + ( + "border: 1pt solid #111222", + ["border", "bottom", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + ("border: 1pt solid red", ["border", "left", "style"], "thin"), + ( + "border: 1pt solid 
#111222", + ["border", "left", "color", "rgb"], + {"xlsxwriter": "FF111222", "openpyxl": "00111222"}, + ), + # Border styles + ( + "border-left-style: hair; border-left-color: black", + ["border", "left", "style"], + "hair", + ), +] + + +@pytest.mark.parametrize( + "engine", + ["xlsxwriter", "openpyxl"], +) +@pytest.mark.parametrize("css, attrs, expected", shared_style_params) +def test_styler_to_excel_basic(engine, css, attrs, expected): + pytest.importorskip(engine) + df = DataFrame(np.random.default_rng(2).standard_normal((1, 1))) + styler = df.style.map(lambda x: css) + + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine=engine) as writer: + df.to_excel(writer, sheet_name="dataframe") + styler.to_excel(writer, sheet_name="styled") + + openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + # test unstyled data cell does not have expected styles + # test styled cell has expected styles + u_cell, s_cell = wb["dataframe"].cell(2, 2), wb["styled"].cell(2, 2) + for attr in attrs: + u_cell, s_cell = getattr(u_cell, attr, None), getattr(s_cell, attr) + + if isinstance(expected, dict): + assert u_cell is None or u_cell != expected[engine] + assert s_cell == expected[engine] + else: + assert u_cell is None or u_cell != expected + assert s_cell == expected + + +@pytest.mark.parametrize( + "engine", + ["xlsxwriter", "openpyxl"], +) +@pytest.mark.parametrize("css, attrs, expected", shared_style_params) +def test_styler_to_excel_basic_indexes(engine, css, attrs, expected): + pytest.importorskip(engine) + df = DataFrame(np.random.default_rng(2).standard_normal((1, 1))) + + styler = df.style + styler.map_index(lambda x: css, axis=0) + styler.map_index(lambda x: css, axis=1) + + null_styler = df.style + null_styler.map(lambda x: "null: css;") + null_styler.map_index(lambda x: "null: css;", axis=0) + null_styler.map_index(lambda x: "null: css;", axis=1) + + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine=engine) as writer: + null_styler.to_excel(writer, sheet_name="null_styled") + styler.to_excel(writer, sheet_name="styled") + + openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + # test null styled index cells does not have expected styles + # test styled cell has expected styles + ui_cell, si_cell = wb["null_styled"].cell(2, 1), wb["styled"].cell(2, 1) + uc_cell, sc_cell = wb["null_styled"].cell(1, 2), wb["styled"].cell(1, 2) + for attr in attrs: + ui_cell, si_cell = getattr(ui_cell, attr, None), getattr(si_cell, attr) + uc_cell, sc_cell = getattr(uc_cell, attr, None), getattr(sc_cell, attr) + + if isinstance(expected, dict): + assert ui_cell is None or ui_cell != expected[engine] + assert si_cell == expected[engine] + assert uc_cell is None or uc_cell != expected[engine] + assert sc_cell == expected[engine] + else: + assert ui_cell is None or ui_cell != expected + assert si_cell == expected + assert uc_cell is None or uc_cell != expected + assert sc_cell == expected + + +# From https://openpyxl.readthedocs.io/en/stable/api/openpyxl.styles.borders.html +# Note: Leaving behavior of "width"-type styles undefined; user should use border-width +# instead +excel_border_styles = [ + # "thin", + "dashed", + "mediumDashDot", + "dashDotDot", + "hair", + "dotted", + "mediumDashDotDot", + # "medium", + "double", + "dashDot", + "slantDashDot", + # "thick", + "mediumDashed", +] + + 
+@pytest.mark.parametrize( + "engine", + ["xlsxwriter", "openpyxl"], +) +@pytest.mark.parametrize("border_style", excel_border_styles) +def test_styler_to_excel_border_style(engine, border_style): + css = f"border-left: {border_style} black thin" + attrs = ["border", "left", "style"] + expected = border_style + + pytest.importorskip(engine) + df = DataFrame(np.random.default_rng(2).standard_normal((1, 1))) + styler = df.style.map(lambda x: css) + + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine=engine) as writer: + df.to_excel(writer, sheet_name="dataframe") + styler.to_excel(writer, sheet_name="styled") + + openpyxl = pytest.importorskip("openpyxl") # test loading only with openpyxl + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + # test unstyled data cell does not have expected styles + # test styled cell has expected styles + u_cell, s_cell = wb["dataframe"].cell(2, 2), wb["styled"].cell(2, 2) + for attr in attrs: + u_cell, s_cell = getattr(u_cell, attr, None), getattr(s_cell, attr) + + if isinstance(expected, dict): + assert u_cell is None or u_cell != expected[engine] + assert s_cell == expected[engine] + else: + assert u_cell is None or u_cell != expected + assert s_cell == expected + + +def test_styler_custom_converter(): + openpyxl = pytest.importorskip("openpyxl") + + def custom_converter(css): + return {"font": {"color": {"rgb": "111222"}}} + + df = DataFrame(np.random.default_rng(2).standard_normal((1, 1))) + styler = df.style.map(lambda x: "color: #888999") + with tm.ensure_clean(".xlsx") as path: + with ExcelWriter(path, engine="openpyxl") as writer: + ExcelFormatter(styler, style_converter=custom_converter).write( + writer, sheet_name="custom" + ) + + with contextlib.closing(openpyxl.load_workbook(path)) as wb: + assert wb["custom"].cell(2, 2).font.color.value == "00111222" + + +@pytest.mark.single_cpu +@td.skip_if_not_us_locale +def test_styler_to_s3(s3_public_bucket, s3so): + # GH#46381 + + mock_bucket_name, target_file = s3_public_bucket.name, "test.xlsx" + df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]}) + styler = df.style.set_sticky(axis="index") + styler.to_excel(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so) + timeout = 5 + while True: + if target_file in (obj.key for obj in s3_public_bucket.objects.all()): + break + time.sleep(0.1) + timeout -= 0.1 + assert timeout > 0, "Timed out waiting for file to appear on moto" + result = read_excel( + f"s3://{mock_bucket_name}/{target_file}", index_col=0, storage_options=s3so + ) + tm.assert_frame_equal(result, df) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_writers.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_writers.py new file mode 100644 index 0000000000000000000000000000000000000000..292eab2d881526e6816d85f1fcd38aaa35255243 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_writers.py @@ -0,0 +1,1511 @@ +from datetime import ( + date, + datetime, + timedelta, +) +from functools import partial +from io import BytesIO +import os +import re + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows +from pandas.compat._constants import PY310 +from pandas.compat._optional import import_optional_dependency +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + DataFrame, + Index, + MultiIndex, + date_range, + option_context, +) +import pandas._testing as tm + +from pandas.io.excel import ( + ExcelFile, + 
ExcelWriter, + _OpenpyxlWriter, + _XlsxWriter, + register_writer, +) +from pandas.io.excel._util import _writers + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + +def get_exp_unit(path: str) -> str: + return "ns" + + +@pytest.fixture +def frame(float_frame): + """ + Returns the first ten items in fixture "float_frame". + """ + return float_frame[:10] + + +@pytest.fixture(params=[True, False]) +def merge_cells(request): + return request.param + + +@pytest.fixture +def path(ext): + """ + Fixture to open file for use in each test case. + """ + with tm.ensure_clean(ext) as file_path: + yield file_path + + +@pytest.fixture +def set_engine(engine, ext): + """ + Fixture to set engine for use in each test case. + + Rather than requiring `engine=...` to be provided explicitly as an + argument in each test, this fixture sets a global option to dictate + which engine should be used to write Excel files. After executing + the test it rolls back said change to the global option. + """ + option_name = f"io.excel.{ext.strip('.')}.writer" + with option_context(option_name, engine): + yield + + +@pytest.mark.parametrize( + "ext", + [ + pytest.param(".xlsx", marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")]), + pytest.param(".xlsm", marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")]), + pytest.param( + ".xlsx", marks=[td.skip_if_no("xlsxwriter"), td.skip_if_no("xlrd")] + ), + pytest.param(".ods", marks=td.skip_if_no("odf")), + ], +) +class TestRoundTrip: + @pytest.mark.parametrize( + "header,expected", + [(None, DataFrame([np.nan] * 4)), (0, DataFrame({"Unnamed: 0": [np.nan] * 3}))], + ) + def test_read_one_empty_col_no_header(self, ext, header, expected): + # xref gh-12292 + filename = "no_header" + df = DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]]) + + with tm.ensure_clean(ext) as path: + df.to_excel(path, sheet_name=filename, index=False, header=False) + result = pd.read_excel( + path, sheet_name=filename, usecols=[0], header=header + ) + + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "header,expected", + [(None, DataFrame([0] + [np.nan] * 4)), (0, DataFrame([np.nan] * 4))], + ) + def test_read_one_empty_col_with_header(self, ext, header, expected): + filename = "with_header" + df = DataFrame([["", 1, 100], ["", 2, 200], ["", 3, 300], ["", 4, 400]]) + + with tm.ensure_clean(ext) as path: + df.to_excel(path, sheet_name="with_header", index=False, header=True) + result = pd.read_excel( + path, sheet_name=filename, usecols=[0], header=header + ) + + tm.assert_frame_equal(result, expected) + + def test_set_column_names_in_parameter(self, ext): + # GH 12870 : pass down column names associated with + # keyword argument names + refdf = DataFrame([[1, "foo"], [2, "bar"], [3, "baz"]], columns=["a", "b"]) + + with tm.ensure_clean(ext) as pth: + with ExcelWriter(pth) as writer: + refdf.to_excel( + writer, sheet_name="Data_no_head", header=False, index=False + ) + refdf.to_excel(writer, sheet_name="Data_with_head", index=False) + + refdf.columns = ["A", "B"] + + with ExcelFile(pth) as reader: + xlsdf_no_head = pd.read_excel( + reader, sheet_name="Data_no_head", header=None, names=["A", "B"] + ) + xlsdf_with_head = pd.read_excel( + reader, + sheet_name="Data_with_head", + index_col=None, + names=["A", "B"], + ) + + tm.assert_frame_equal(xlsdf_no_head, refdf) + tm.assert_frame_equal(xlsdf_with_head, refdf) + + def test_creating_and_reading_multiple_sheets(self, ext): + # see gh-9450 + # + # Test reading multiple sheets, from a runtime 
+ # created Excel file with multiple sheets. + def tdf(col_sheet_name): + d, i = [11, 22, 33], [1, 2, 3] + return DataFrame(d, i, columns=[col_sheet_name]) + + sheets = ["AAA", "BBB", "CCC"] + + dfs = [tdf(s) for s in sheets] + dfs = dict(zip(sheets, dfs)) + + with tm.ensure_clean(ext) as pth: + with ExcelWriter(pth) as ew: + for sheetname, df in dfs.items(): + df.to_excel(ew, sheet_name=sheetname) + + dfs_returned = pd.read_excel(pth, sheet_name=sheets, index_col=0) + + for s in sheets: + tm.assert_frame_equal(dfs[s], dfs_returned[s]) + + def test_read_excel_multiindex_empty_level(self, ext): + # see gh-12453 + with tm.ensure_clean(ext) as path: + df = DataFrame( + { + ("One", "x"): {0: 1}, + ("Two", "X"): {0: 3}, + ("Two", "Y"): {0: 7}, + ("Zero", ""): {0: 0}, + } + ) + + expected = DataFrame( + { + ("One", "x"): {0: 1}, + ("Two", "X"): {0: 3}, + ("Two", "Y"): {0: 7}, + ("Zero", "Unnamed: 4_level_1"): {0: 0}, + } + ) + + df.to_excel(path) + actual = pd.read_excel(path, header=[0, 1], index_col=0) + tm.assert_frame_equal(actual, expected) + + df = DataFrame( + { + ("Beg", ""): {0: 0}, + ("Middle", "x"): {0: 1}, + ("Tail", "X"): {0: 3}, + ("Tail", "Y"): {0: 7}, + } + ) + + expected = DataFrame( + { + ("Beg", "Unnamed: 1_level_1"): {0: 0}, + ("Middle", "x"): {0: 1}, + ("Tail", "X"): {0: 3}, + ("Tail", "Y"): {0: 7}, + } + ) + + df.to_excel(path) + actual = pd.read_excel(path, header=[0, 1], index_col=0) + tm.assert_frame_equal(actual, expected) + + @pytest.mark.parametrize("c_idx_names", ["a", None]) + @pytest.mark.parametrize("r_idx_names", ["b", None]) + @pytest.mark.parametrize("c_idx_levels", [1, 3]) + @pytest.mark.parametrize("r_idx_levels", [1, 3]) + def test_excel_multindex_roundtrip( + self, ext, c_idx_names, r_idx_names, c_idx_levels, r_idx_levels, request + ): + # see gh-4679 + with tm.ensure_clean(ext) as pth: + # The empty-name case is currently read back in as + # unnamed levels, not Nones.
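# [Editor's note] In other words: when a multi-level row index is written
# without names, read_excel gives the levels placeholder names rather than
# None, so the roundtrips below only compare index names when names were
# supplied or the row index has a single level (see check_names just below).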
+ check_names = bool(r_idx_names) or r_idx_levels <= 1 + + if c_idx_levels == 1: + columns = Index(list("abcde")) + else: + columns = MultiIndex.from_arrays( + [range(5) for _ in range(c_idx_levels)], + names=[f"{c_idx_names}-{i}" for i in range(c_idx_levels)], + ) + if r_idx_levels == 1: + index = Index(list("ghijk")) + else: + index = MultiIndex.from_arrays( + [range(5) for _ in range(r_idx_levels)], + names=[f"{r_idx_names}-{i}" for i in range(r_idx_levels)], + ) + df = DataFrame( + 1.1 * np.ones((5, 5)), + columns=columns, + index=index, + ) + df.to_excel(pth) + + act = pd.read_excel( + pth, + index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels)), + ) + tm.assert_frame_equal(df, act, check_names=check_names) + + df.iloc[0, :] = np.nan + df.to_excel(pth) + + act = pd.read_excel( + pth, + index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels)), + ) + tm.assert_frame_equal(df, act, check_names=check_names) + + df.iloc[-1, :] = np.nan + df.to_excel(pth) + act = pd.read_excel( + pth, + index_col=list(range(r_idx_levels)), + header=list(range(c_idx_levels)), + ) + tm.assert_frame_equal(df, act, check_names=check_names) + + def test_read_excel_parse_dates(self, ext): + # see gh-11544, gh-12051 + df = DataFrame( + {"col": [1, 2, 3], "date_strings": date_range("2012-01-01", periods=3)} + ) + df2 = df.copy() + df2["date_strings"] = df2["date_strings"].dt.strftime("%m/%d/%Y") + + with tm.ensure_clean(ext) as pth: + df2.to_excel(pth) + + res = pd.read_excel(pth, index_col=0) + tm.assert_frame_equal(df2, res) + + res = pd.read_excel(pth, parse_dates=["date_strings"], index_col=0) + tm.assert_frame_equal(df, res) + + date_parser = lambda x: datetime.strptime(x, "%m/%d/%Y") + with tm.assert_produces_warning( + FutureWarning, + match="use 'date_format' instead", + raise_on_extra_warnings=False, + ): + res = pd.read_excel( + pth, + parse_dates=["date_strings"], + date_parser=date_parser, + index_col=0, + ) + tm.assert_frame_equal(df, res) + res = pd.read_excel( + pth, parse_dates=["date_strings"], date_format="%m/%d/%Y", index_col=0 + ) + tm.assert_frame_equal(df, res) + + def test_multiindex_interval_datetimes(self, ext): + # GH 30986 + midx = MultiIndex.from_arrays( + [ + range(4), + pd.interval_range( + start=pd.Timestamp("2020-01-01"), periods=4, freq="6ME" + ), + ] + ) + df = DataFrame(range(4), index=midx) + with tm.ensure_clean(ext) as pth: + df.to_excel(pth) + result = pd.read_excel(pth, index_col=[0, 1]) + expected = DataFrame( + range(4), + MultiIndex.from_arrays( + [ + range(4), + [ + "(2020-01-31 00:00:00, 2020-07-31 00:00:00]", + "(2020-07-31 00:00:00, 2021-01-31 00:00:00]", + "(2021-01-31 00:00:00, 2021-07-31 00:00:00]", + "(2021-07-31 00:00:00, 2022-01-31 00:00:00]", + ], + ] + ), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "engine,ext", + [ + pytest.param( + "openpyxl", + ".xlsx", + marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")], + ), + pytest.param( + "openpyxl", + ".xlsm", + marks=[td.skip_if_no("openpyxl"), td.skip_if_no("xlrd")], + ), + pytest.param( + "xlsxwriter", + ".xlsx", + marks=[td.skip_if_no("xlsxwriter"), td.skip_if_no("xlrd")], + ), + pytest.param("odf", ".ods", marks=td.skip_if_no("odf")), + ], +) +@pytest.mark.usefixtures("set_engine") +class TestExcelWriter: + def test_excel_sheet_size(self, path): + # GH 26080 + breaking_row_count = 2**20 + 1 + breaking_col_count = 2**14 + 1 + # purposely using two arrays to prevent memory issues while testing + row_arr = np.zeros(shape=(breaking_row_count, 
1)) + col_arr = np.zeros(shape=(1, breaking_col_count)) + row_df = DataFrame(row_arr) + col_df = DataFrame(col_arr) + + msg = "sheet is too large" + with pytest.raises(ValueError, match=msg): + row_df.to_excel(path) + + with pytest.raises(ValueError, match=msg): + col_df.to_excel(path) + + def test_excel_sheet_by_name_raise(self, path): + gt = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + gt.to_excel(path) + + with ExcelFile(path) as xl: + df = pd.read_excel(xl, sheet_name=0, index_col=0) + + tm.assert_frame_equal(gt, df) + + msg = "Worksheet named '0' not found" + with pytest.raises(ValueError, match=msg): + pd.read_excel(xl, "0") + + def test_excel_writer_context_manager(self, frame, path): + with ExcelWriter(path) as writer: + frame.to_excel(writer, sheet_name="Data1") + frame2 = frame.copy() + frame2.columns = frame.columns[::-1] + frame2.to_excel(writer, sheet_name="Data2") + + with ExcelFile(path) as reader: + found_df = pd.read_excel(reader, sheet_name="Data1", index_col=0) + found_df2 = pd.read_excel(reader, sheet_name="Data2", index_col=0) + + tm.assert_frame_equal(found_df, frame) + tm.assert_frame_equal(found_df2, frame2) + + def test_roundtrip(self, frame, path): + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + + frame.to_excel(path, sheet_name="test1") + frame.to_excel(path, sheet_name="test1", columns=["A", "B"]) + frame.to_excel(path, sheet_name="test1", header=False) + frame.to_excel(path, sheet_name="test1", index=False) + + # test roundtrip + frame.to_excel(path, sheet_name="test1") + recons = pd.read_excel(path, sheet_name="test1", index_col=0) + tm.assert_frame_equal(frame, recons) + + frame.to_excel(path, sheet_name="test1", index=False) + recons = pd.read_excel(path, sheet_name="test1", index_col=None) + recons.index = frame.index + tm.assert_frame_equal(frame, recons) + + frame.to_excel(path, sheet_name="test1", na_rep="NA") + recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["NA"]) + tm.assert_frame_equal(frame, recons) + + # GH 3611 + frame.to_excel(path, sheet_name="test1", na_rep="88") + recons = pd.read_excel(path, sheet_name="test1", index_col=0, na_values=["88"]) + tm.assert_frame_equal(frame, recons) + + frame.to_excel(path, sheet_name="test1", na_rep="88") + recons = pd.read_excel( + path, sheet_name="test1", index_col=0, na_values=[88, 88.0] + ) + tm.assert_frame_equal(frame, recons) + + # GH 6573 + frame.to_excel(path, sheet_name="Sheet1") + recons = pd.read_excel(path, index_col=0) + tm.assert_frame_equal(frame, recons) + + frame.to_excel(path, sheet_name="0") + recons = pd.read_excel(path, index_col=0) + tm.assert_frame_equal(frame, recons) + + # GH 8825 Pandas Series should provide to_excel method + s = frame["A"] + s.to_excel(path) + recons = pd.read_excel(path, index_col=0) + tm.assert_frame_equal(s.to_frame(), recons) + + def test_mixed(self, frame, path): + mixed_frame = frame.copy() + mixed_frame["foo"] = "bar" + + mixed_frame.to_excel(path, sheet_name="test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(mixed_frame, recons) + + def test_ts_frame(self, path): + unit = get_exp_unit(path) + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD")), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + + # freq doesn't round-trip + index = pd.DatetimeIndex(np.asarray(df.index), freq=None) + df.index = index + + expected = df[:] + expected.index = 
expected.index.as_unit(unit) + + df.to_excel(path, sheet_name="test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(expected, recons) + + def test_basics_with_nan(self, frame, path): + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + frame.to_excel(path, sheet_name="test1") + frame.to_excel(path, sheet_name="test1", columns=["A", "B"]) + frame.to_excel(path, sheet_name="test1", header=False) + frame.to_excel(path, sheet_name="test1", index=False) + + @pytest.mark.parametrize("np_type", [np.int8, np.int16, np.int32, np.int64]) + def test_int_types(self, np_type, path): + # Test np.int values read come back as int + # (rather than float which is Excel's format). + df = DataFrame( + np.random.default_rng(2).integers(-10, 10, size=(10, 2)), dtype=np_type + ) + df.to_excel(path, sheet_name="test1") + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + + int_frame = df.astype(np.int64) + tm.assert_frame_equal(int_frame, recons) + + recons2 = pd.read_excel(path, sheet_name="test1", index_col=0) + tm.assert_frame_equal(int_frame, recons2) + + @pytest.mark.parametrize("np_type", [np.float16, np.float32, np.float64]) + def test_float_types(self, np_type, path): + # Test np.float values read come back as float. + df = DataFrame(np.random.default_rng(2).random(10), dtype=np_type) + df.to_excel(path, sheet_name="test1") + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np_type + ) + + tm.assert_frame_equal(df, recons) + + def test_bool_types(self, path): + # Test np.bool_ values read come back as float. + df = DataFrame([1, 0, True, False], dtype=np.bool_) + df.to_excel(path, sheet_name="test1") + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np.bool_ + ) + + tm.assert_frame_equal(df, recons) + + def test_inf_roundtrip(self, path): + df = DataFrame([(1, np.inf), (2, 3), (5, -np.inf)]) + df.to_excel(path, sheet_name="test1") + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + + tm.assert_frame_equal(df, recons) + + def test_sheets(self, frame, path): + # freq doesn't round-trip + unit = get_exp_unit(path) + tsframe = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD")), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None) + tsframe.index = index + + expected = tsframe[:] + expected.index = expected.index.as_unit(unit) + + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + + frame.to_excel(path, sheet_name="test1") + frame.to_excel(path, sheet_name="test1", columns=["A", "B"]) + frame.to_excel(path, sheet_name="test1", header=False) + frame.to_excel(path, sheet_name="test1", index=False) + + # Test writing to separate sheets + with ExcelWriter(path) as writer: + frame.to_excel(writer, sheet_name="test1") + tsframe.to_excel(writer, sheet_name="test2") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(frame, recons) + recons = pd.read_excel(reader, sheet_name="test2", index_col=0) + tm.assert_frame_equal(expected, recons) + assert 2 == len(reader.sheet_names) + assert "test1" == reader.sheet_names[0] + assert "test2" == reader.sheet_names[1] + + def test_colaliases(self, 
frame, path): + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + + frame.to_excel(path, sheet_name="test1") + frame.to_excel(path, sheet_name="test1", columns=["A", "B"]) + frame.to_excel(path, sheet_name="test1", header=False) + frame.to_excel(path, sheet_name="test1", index=False) + + # column aliases + col_aliases = Index(["AA", "X", "Y", "Z"]) + frame.to_excel(path, sheet_name="test1", header=col_aliases) + with ExcelFile(path) as reader: + rs = pd.read_excel(reader, sheet_name="test1", index_col=0) + xp = frame.copy() + xp.columns = col_aliases + tm.assert_frame_equal(xp, rs) + + def test_roundtrip_indexlabels(self, merge_cells, frame, path): + frame = frame.copy() + frame.iloc[:5, frame.columns.get_loc("A")] = np.nan + + frame.to_excel(path, sheet_name="test1") + frame.to_excel(path, sheet_name="test1", columns=["A", "B"]) + frame.to_excel(path, sheet_name="test1", header=False) + frame.to_excel(path, sheet_name="test1", index=False) + + # test index_label + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0 + df.to_excel( + path, sheet_name="test1", index_label=["test"], merge_cells=merge_cells + ) + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np.int64 + ) + df.index.names = ["test"] + assert df.index.names == recons.index.names + + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0 + df.to_excel( + path, + sheet_name="test1", + index_label=["test", "dummy", "dummy2"], + merge_cells=merge_cells, + ) + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np.int64 + ) + df.index.names = ["test"] + assert df.index.names == recons.index.names + + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) >= 0 + df.to_excel( + path, sheet_name="test1", index_label="test", merge_cells=merge_cells + ) + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0).astype( + np.int64 + ) + df.index.names = ["test"] + tm.assert_frame_equal(df, recons.astype(bool)) + + frame.to_excel( + path, + sheet_name="test1", + columns=["A", "B", "C", "D"], + index=False, + merge_cells=merge_cells, + ) + # take 'A' and 'B' as indexes (same row as cols 'C', 'D') + df = frame.copy() + df = df.set_index(["A", "B"]) + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1]) + tm.assert_frame_equal(df, recons) + + def test_excel_roundtrip_indexname(self, merge_cells, path): + df = DataFrame(np.random.default_rng(2).standard_normal((10, 4))) + df.index.name = "foo" + + df.to_excel(path, merge_cells=merge_cells) + + with ExcelFile(path) as xf: + result = pd.read_excel(xf, sheet_name=xf.sheet_names[0], index_col=0) + + tm.assert_frame_equal(result, df) + assert result.index.name == "foo" + + def test_excel_roundtrip_datetime(self, merge_cells, path): + # datetime.date, not sure what to test here exactly + unit = get_exp_unit(path) + + # freq does not round-trip + tsframe = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD")), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + index = pd.DatetimeIndex(np.asarray(tsframe.index), freq=None) + tsframe.index = index + + tsf = tsframe.copy() + + tsf.index = [x.date() for x in tsframe.index] + tsf.to_excel(path, sheet_name="test1", merge_cells=merge_cells) + + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", 
index_col=0) + + expected = tsframe[:] + expected.index = expected.index.as_unit(unit) + tm.assert_frame_equal(expected, recons) + + def test_excel_date_datetime_format(self, ext, path): + # see gh-4133 + # + # Excel output format strings + unit = get_exp_unit(path) + + df = DataFrame( + [ + [date(2014, 1, 31), date(1999, 9, 24)], + [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)], + ], + index=["DATE", "DATETIME"], + columns=["X", "Y"], + ) + df_expected = DataFrame( + [ + [datetime(2014, 1, 31), datetime(1999, 9, 24)], + [datetime(1998, 5, 26, 23, 33, 4), datetime(2014, 2, 28, 13, 5, 13)], + ], + index=["DATE", "DATETIME"], + columns=["X", "Y"], + ) + df_expected = df_expected.astype(f"M8[{unit}]") + + with tm.ensure_clean(ext) as filename2: + with ExcelWriter(path) as writer1: + df.to_excel(writer1, sheet_name="test1") + + with ExcelWriter( + filename2, + date_format="DD.MM.YYYY", + datetime_format="DD.MM.YYYY HH-MM-SS", + ) as writer2: + df.to_excel(writer2, sheet_name="test1") + + with ExcelFile(path) as reader1: + rs1 = pd.read_excel(reader1, sheet_name="test1", index_col=0) + + with ExcelFile(filename2) as reader2: + rs2 = pd.read_excel(reader2, sheet_name="test1", index_col=0) + + tm.assert_frame_equal(rs1, rs2) + + # Since the reader returns a datetime object for dates, + # we need to use df_expected to check the result. + tm.assert_frame_equal(rs2, df_expected) + + def test_to_excel_interval_no_labels(self, path, using_infer_string): + # see gh-19242 + # + # Test writing Interval without labels. + df = DataFrame( + np.random.default_rng(2).integers(-10, 10, size=(20, 1)), dtype=np.int64 + ) + expected = df.copy() + + df["new"] = pd.cut(df[0], 10) + expected["new"] = pd.cut(expected[0], 10).astype( + str if not using_infer_string else "string[pyarrow_numpy]" + ) + + df.to_excel(path, sheet_name="test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(expected, recons) + + def test_to_excel_interval_labels(self, path): + # see gh-19242 + # + # Test writing Interval with labels. + df = DataFrame( + np.random.default_rng(2).integers(-10, 10, size=(20, 1)), dtype=np.int64 + ) + expected = df.copy() + intervals = pd.cut( + df[0], 10, labels=["A", "B", "C", "D", "E", "F", "G", "H", "I", "J"] + ) + df["new"] = intervals + expected["new"] = pd.Series(list(intervals)) + + df.to_excel(path, sheet_name="test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(expected, recons) + + def test_to_excel_timedelta(self, path): + # see gh-19242, gh-9155 + # + # Test writing timedelta to xls. 
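# [Editor's note] Excel has no native duration type; timedeltas are written
# as floating-point days, which is why the expected column below is computed
# as total_seconds() / 86400 (seconds per day).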
+ df = DataFrame( + np.random.default_rng(2).integers(-10, 10, size=(20, 1)), + columns=["A"], + dtype=np.int64, + ) + expected = df.copy() + + df["new"] = df["A"].apply(lambda x: timedelta(seconds=x)) + expected["new"] = expected["A"].apply( + lambda x: timedelta(seconds=x).total_seconds() / 86400 + ) + + df.to_excel(path, sheet_name="test1") + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=0) + tm.assert_frame_equal(expected, recons) + + def test_to_excel_periodindex(self, path): + # xp has a PeriodIndex + df = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD")), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + xp = df.resample("ME").mean().to_period("M") + + xp.to_excel(path, sheet_name="sht1") + + with ExcelFile(path) as reader: + rs = pd.read_excel(reader, sheet_name="sht1", index_col=0) + tm.assert_frame_equal(xp, rs.to_period("M")) + + def test_to_excel_multiindex(self, merge_cells, frame, path): + arrays = np.arange(len(frame.index) * 2, dtype=np.int64).reshape(2, -1) + new_index = MultiIndex.from_arrays(arrays, names=["first", "second"]) + frame.index = new_index + + frame.to_excel(path, sheet_name="test1", header=False) + frame.to_excel(path, sheet_name="test1", columns=["A", "B"]) + + # round trip + frame.to_excel(path, sheet_name="test1", merge_cells=merge_cells) + with ExcelFile(path) as reader: + df = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1]) + tm.assert_frame_equal(frame, df) + + # GH13511 + def test_to_excel_multiindex_nan_label(self, merge_cells, path): + df = DataFrame( + { + "A": [None, 2, 3], + "B": [10, 20, 30], + "C": np.random.default_rng(2).random(3), + } + ) + df = df.set_index(["A", "B"]) + + df.to_excel(path, merge_cells=merge_cells) + df1 = pd.read_excel(path, index_col=[0, 1]) + tm.assert_frame_equal(df, df1) + + # Test for Issue 11328. 
If column indices are integers, make + # sure they are handled correctly for either setting of + # merge_cells + def test_to_excel_multiindex_cols(self, merge_cells, frame, path): + arrays = np.arange(len(frame.index) * 2, dtype=np.int64).reshape(2, -1) + new_index = MultiIndex.from_arrays(arrays, names=["first", "second"]) + frame.index = new_index + + new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2), (50, 1), (50, 2)]) + frame.columns = new_cols_index + header = [0, 1] + if not merge_cells: + header = 0 + + # round trip + frame.to_excel(path, sheet_name="test1", merge_cells=merge_cells) + with ExcelFile(path) as reader: + df = pd.read_excel( + reader, sheet_name="test1", header=header, index_col=[0, 1] + ) + if not merge_cells: + fm = frame.columns._format_multi(sparsify=False, include_names=False) + frame.columns = [".".join(map(str, q)) for q in zip(*fm)] + tm.assert_frame_equal(frame, df) + + def test_to_excel_multiindex_dates(self, merge_cells, path): + # try multiindex with dates + unit = get_exp_unit(path) + tsframe = DataFrame( + np.random.default_rng(2).standard_normal((5, 4)), + columns=Index(list("ABCD")), + index=date_range("2000-01-01", periods=5, freq="B"), + ) + tsframe.index = MultiIndex.from_arrays( + [ + tsframe.index.as_unit(unit), + np.arange(len(tsframe.index), dtype=np.int64), + ], + names=["time", "foo"], + ) + + tsframe.to_excel(path, sheet_name="test1", merge_cells=merge_cells) + with ExcelFile(path) as reader: + recons = pd.read_excel(reader, sheet_name="test1", index_col=[0, 1]) + + tm.assert_frame_equal(tsframe, recons) + assert recons.index.names == ("time", "foo") + + def test_to_excel_multiindex_no_write_index(self, path): + # Test writing and re-reading a MI without the index. GH 5616. + + # Initial non-MI frame. + frame1 = DataFrame({"a": [10, 20], "b": [30, 40], "c": [50, 60]}) + + # Add a MI. + frame2 = frame1.copy() + multi_index = MultiIndex.from_tuples([(70, 80), (90, 100)]) + frame2.index = multi_index + + # Write out to Excel without the index. + frame2.to_excel(path, sheet_name="test1", index=False) + + # Read it back in. + with ExcelFile(path) as reader: + frame3 = pd.read_excel(reader, sheet_name="test1") + + # Test that it is the same as the initial frame. + tm.assert_frame_equal(frame1, frame3) + + def test_to_excel_empty_multiindex(self, path): + # GH 19543. + expected = DataFrame([], columns=[0, 1, 2]) + + df = DataFrame([], index=MultiIndex.from_tuples([], names=[0, 1]), columns=[2]) + df.to_excel(path, sheet_name="test1") + + with ExcelFile(path) as reader: + result = pd.read_excel(reader, sheet_name="test1") + tm.assert_frame_equal( + result, expected, check_index_type=False, check_dtype=False + ) + + def test_to_excel_float_format(self, path): + df = DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + df.to_excel(path, sheet_name="test1", float_format="%.2f") + + with ExcelFile(path) as reader: + result = pd.read_excel(reader, sheet_name="test1", index_col=0) + + expected = DataFrame( + [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + tm.assert_frame_equal(result, expected) + + def test_to_excel_output_encoding(self, ext): + # Avoid mixed inferred_type. + df = DataFrame( + [["\u0192", "\u0193", "\u0194"], ["\u0195", "\u0196", "\u0197"]], + index=["A\u0192", "B"], + columns=["X\u0193", "Y", "Z"], + ) + + with tm.ensure_clean("__tmp_to_excel_float_format__." 
+ ext) as filename: + df.to_excel(filename, sheet_name="TestSheet") + result = pd.read_excel(filename, sheet_name="TestSheet", index_col=0) + tm.assert_frame_equal(result, df) + + def test_to_excel_unicode_filename(self, ext): + with tm.ensure_clean("\u0192u." + ext) as filename: + try: + with open(filename, "wb"): + pass + except UnicodeEncodeError: + pytest.skip("No unicode file names on this system") + + df = DataFrame( + [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + df.to_excel(filename, sheet_name="test1", float_format="%.2f") + + with ExcelFile(filename) as reader: + result = pd.read_excel(reader, sheet_name="test1", index_col=0) + + expected = DataFrame( + [[0.12, 0.23, 0.57], [12.32, 123123.20, 321321.20]], + index=["A", "B"], + columns=["X", "Y", "Z"], + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("use_headers", [True, False]) + @pytest.mark.parametrize("r_idx_nlevels", [1, 2, 3]) + @pytest.mark.parametrize("c_idx_nlevels", [1, 2, 3]) + def test_excel_010_hemstring( + self, merge_cells, c_idx_nlevels, r_idx_nlevels, use_headers, path + ): + def roundtrip(data, header=True, parser_hdr=0, index=True): + data.to_excel(path, header=header, merge_cells=merge_cells, index=index) + + with ExcelFile(path) as xf: + return pd.read_excel( + xf, sheet_name=xf.sheet_names[0], header=parser_hdr + ) + + # Basic test. + parser_header = 0 if use_headers else None + res = roundtrip(DataFrame([0]), use_headers, parser_header) + + assert res.shape == (1, 2) + assert res.iloc[0, 0] is not np.nan + + # More complex tests with multi-index. + nrows = 5 + ncols = 3 + + # ensure limited functionality in 0.10 + # override of gh-2370 until sorted out in 0.11 + + if c_idx_nlevels == 1: + columns = Index([f"a-{i}" for i in range(ncols)], dtype=object) + else: + columns = MultiIndex.from_arrays( + [range(ncols) for _ in range(c_idx_nlevels)], + names=[f"i-{i}" for i in range(c_idx_nlevels)], + ) + if r_idx_nlevels == 1: + index = Index([f"b-{i}" for i in range(nrows)], dtype=object) + else: + index = MultiIndex.from_arrays( + [range(nrows) for _ in range(r_idx_nlevels)], + names=[f"j-{i}" for i in range(r_idx_nlevels)], + ) + + df = DataFrame( + np.ones((nrows, ncols)), + columns=columns, + index=index, + ) + + # This if will be removed once multi-column Excel writing + # is implemented. For now fixing gh-9794. + if c_idx_nlevels > 1: + msg = ( + "Writing to Excel with MultiIndex columns and no index " + "\\('index'=False\\) is not yet implemented." + ) + with pytest.raises(NotImplementedError, match=msg): + roundtrip(df, use_headers, index=False) + else: + res = roundtrip(df, use_headers) + + if use_headers: + assert res.shape == (nrows, ncols + r_idx_nlevels) + else: + # First row taken as columns. + assert res.shape == (nrows - 1, ncols + r_idx_nlevels) + + # No NaNs. + for r in range(len(res.index)): + for c in range(len(res.columns)): + assert res.iloc[r, c] is not np.nan + + def test_duplicated_columns(self, path): + # see gh-5235 + df = DataFrame([[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B"]) + df.to_excel(path, sheet_name="test1") + expected = DataFrame( + [[1, 2, 3], [1, 2, 3], [1, 2, 3]], columns=["A", "B", "B.1"] + ) + + # By default, we mangle. 
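# [Editor's note] "Mangle" here means read_excel deduplicates repeated column
# labels by suffixing them, so the ["A", "B", "B"] header written above is
# read back as ["A", "B", "B.1"], matching `expected`.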
+ result = pd.read_excel(path, sheet_name="test1", index_col=0) + tm.assert_frame_equal(result, expected) + + # see gh-11007, gh-10970 + df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A", "B"]) + df.to_excel(path, sheet_name="test1") + + result = pd.read_excel(path, sheet_name="test1", index_col=0) + expected = DataFrame( + [[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "A.1", "B.1"] + ) + tm.assert_frame_equal(result, expected) + + # see gh-10982 + df.to_excel(path, sheet_name="test1", index=False, header=False) + result = pd.read_excel(path, sheet_name="test1", header=None) + + expected = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]]) + tm.assert_frame_equal(result, expected) + + def test_swapped_columns(self, path): + # Test for issue #5427. + write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]}) + write_frame.to_excel(path, sheet_name="test1", columns=["B", "A"]) + + read_frame = pd.read_excel(path, sheet_name="test1", header=0) + + tm.assert_series_equal(write_frame["A"], read_frame["A"]) + tm.assert_series_equal(write_frame["B"], read_frame["B"]) + + def test_invalid_columns(self, path): + # see gh-10982 + write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2]}) + + with pytest.raises(KeyError, match="Not all names specified"): + write_frame.to_excel(path, sheet_name="test1", columns=["B", "C"]) + + with pytest.raises( + KeyError, match="'passes columns are not ALL present dataframe'" + ): + write_frame.to_excel(path, sheet_name="test1", columns=["C", "D"]) + + @pytest.mark.parametrize( + "to_excel_index,read_excel_index_col", + [ + (True, 0), # Include index in write to file + (False, None), # Dont include index in write to file + ], + ) + def test_write_subset_columns(self, path, to_excel_index, read_excel_index_col): + # GH 31677 + write_frame = DataFrame({"A": [1, 1, 1], "B": [2, 2, 2], "C": [3, 3, 3]}) + write_frame.to_excel( + path, sheet_name="col_subset_bug", columns=["A", "B"], index=to_excel_index + ) + + expected = write_frame[["A", "B"]] + read_frame = pd.read_excel( + path, sheet_name="col_subset_bug", index_col=read_excel_index_col + ) + + tm.assert_frame_equal(expected, read_frame) + + def test_comment_arg(self, path): + # see gh-18735 + # + # Test the comment argument functionality to pd.read_excel. + + # Create file to read in. + df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]}) + df.to_excel(path, sheet_name="test_c") + + # Read file without comment arg. + result1 = pd.read_excel(path, sheet_name="test_c", index_col=0) + + result1.iloc[1, 0] = None + result1.iloc[1, 1] = None + result1.iloc[2, 1] = None + + result2 = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0) + tm.assert_frame_equal(result1, result2) + + def test_comment_default(self, path): + # Re issue #18735 + # Test the comment argument default to pd.read_excel + + # Create file to read in + df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]}) + df.to_excel(path, sheet_name="test_c") + + # Read file with default and explicit comment=None + result1 = pd.read_excel(path, sheet_name="test_c") + result2 = pd.read_excel(path, sheet_name="test_c", comment=None) + tm.assert_frame_equal(result1, result2) + + def test_comment_used(self, path): + # see gh-18735 + # + # Test the comment argument is working as expected when used. + + # Create file to read in. 
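+        # With comment="#", read_excel truncates each cell from the comment character
+        # onward, so cells consisting entirely of "#..." come back as NaN.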
+ df = DataFrame({"A": ["one", "#one", "one"], "B": ["two", "two", "#two"]}) + df.to_excel(path, sheet_name="test_c") + + # Test read_frame_comment against manually produced expected output. + expected = DataFrame({"A": ["one", None, "one"], "B": ["two", None, None]}) + result = pd.read_excel(path, sheet_name="test_c", comment="#", index_col=0) + tm.assert_frame_equal(result, expected) + + def test_comment_empty_line(self, path): + # Re issue #18735 + # Test that pd.read_excel ignores commented lines at the end of file + + df = DataFrame({"a": ["1", "#2"], "b": ["2", "3"]}) + df.to_excel(path, index=False) + + # Test that all-comment lines at EoF are ignored + expected = DataFrame({"a": [1], "b": [2]}) + result = pd.read_excel(path, comment="#") + tm.assert_frame_equal(result, expected) + + def test_datetimes(self, path): + # Test writing and reading datetimes. For issue #9139. (xref #9185) + unit = get_exp_unit(path) + datetimes = [ + datetime(2013, 1, 13, 1, 2, 3), + datetime(2013, 1, 13, 2, 45, 56), + datetime(2013, 1, 13, 4, 29, 49), + datetime(2013, 1, 13, 6, 13, 42), + datetime(2013, 1, 13, 7, 57, 35), + datetime(2013, 1, 13, 9, 41, 28), + datetime(2013, 1, 13, 11, 25, 21), + datetime(2013, 1, 13, 13, 9, 14), + datetime(2013, 1, 13, 14, 53, 7), + datetime(2013, 1, 13, 16, 37, 0), + datetime(2013, 1, 13, 18, 20, 52), + ] + + write_frame = DataFrame({"A": datetimes}) + write_frame.to_excel(path, sheet_name="Sheet1") + read_frame = pd.read_excel(path, sheet_name="Sheet1", header=0) + + expected = write_frame.astype(f"M8[{unit}]") + tm.assert_series_equal(expected["A"], read_frame["A"]) + + def test_bytes_io(self, engine): + # see gh-7074 + with BytesIO() as bio: + df = DataFrame(np.random.default_rng(2).standard_normal((10, 2))) + + # Pass engine explicitly, as there is no file path to infer from. + with ExcelWriter(bio, engine=engine) as writer: + df.to_excel(writer) + + bio.seek(0) + reread_df = pd.read_excel(bio, index_col=0) + tm.assert_frame_equal(df, reread_df) + + def test_engine_kwargs(self, engine, path): + # GH#52368 + df = DataFrame([{"A": 1, "B": 2}, {"A": 3, "B": 4}]) + + msgs = { + "odf": r"OpenDocumentSpreadsheet() got an unexpected keyword " + r"argument 'foo'", + "openpyxl": r"__init__() got an unexpected keyword argument 'foo'", + "xlsxwriter": r"__init__() got an unexpected keyword argument 'foo'", + } + + if PY310: + msgs[ + "openpyxl" + ] = "Workbook.__init__() got an unexpected keyword argument 'foo'" + msgs[ + "xlsxwriter" + ] = "Workbook.__init__() got an unexpected keyword argument 'foo'" + + # Handle change in error message for openpyxl (write and append mode) + if engine == "openpyxl" and not os.path.exists(path): + msgs[ + "openpyxl" + ] = r"load_workbook() got an unexpected keyword argument 'foo'" + + with pytest.raises(TypeError, match=re.escape(msgs[engine])): + df.to_excel( + path, + engine=engine, + engine_kwargs={"foo": "bar"}, + ) + + def test_write_lists_dict(self, path): + # see gh-8188. 
+ df = DataFrame( + { + "mixed": ["a", ["b", "c"], {"d": "e", "f": 2}], + "numeric": [1, 2, 3.0], + "str": ["apple", "banana", "cherry"], + } + ) + df.to_excel(path, sheet_name="Sheet1") + read = pd.read_excel(path, sheet_name="Sheet1", header=0, index_col=0) + + expected = df.copy() + expected.mixed = expected.mixed.apply(str) + expected.numeric = expected.numeric.astype("int64") + + tm.assert_frame_equal(read, expected) + + def test_render_as_column_name(self, path): + # see gh-34331 + df = DataFrame({"render": [1, 2], "data": [3, 4]}) + df.to_excel(path, sheet_name="Sheet1") + read = pd.read_excel(path, "Sheet1", index_col=0) + expected = df + tm.assert_frame_equal(read, expected) + + def test_true_and_false_value_options(self, path): + # see gh-13347 + df = DataFrame([["foo", "bar"]], columns=["col1", "col2"], dtype=object) + with option_context("future.no_silent_downcasting", True): + expected = df.replace({"foo": True, "bar": False}).astype("bool") + + df.to_excel(path) + read_frame = pd.read_excel( + path, true_values=["foo"], false_values=["bar"], index_col=0 + ) + tm.assert_frame_equal(read_frame, expected) + + def test_freeze_panes(self, path): + # see gh-15160 + expected = DataFrame([[1, 2], [3, 4]], columns=["col1", "col2"]) + expected.to_excel(path, sheet_name="Sheet1", freeze_panes=(1, 1)) + + result = pd.read_excel(path, index_col=0) + tm.assert_frame_equal(result, expected) + + def test_path_path_lib(self, engine, ext): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD")), + index=Index([f"i-{i}" for i in range(30)], dtype=object), + ) + writer = partial(df.to_excel, engine=engine) + + reader = partial(pd.read_excel, index_col=0) + result = tm.round_trip_pathlib(writer, reader, path=f"foo{ext}") + tm.assert_frame_equal(result, df) + + def test_path_local_path(self, engine, ext): + df = DataFrame( + 1.1 * np.arange(120).reshape((30, 4)), + columns=Index(list("ABCD")), + index=Index([f"i-{i}" for i in range(30)]), + ) + writer = partial(df.to_excel, engine=engine) + + reader = partial(pd.read_excel, index_col=0) + result = tm.round_trip_localpath(writer, reader, path=f"foo{ext}") + tm.assert_frame_equal(result, df) + + def test_merged_cell_custom_objects(self, path): + # see GH-27006 + mi = MultiIndex.from_tuples( + [ + (pd.Period("2018"), pd.Period("2018Q1")), + (pd.Period("2018"), pd.Period("2018Q2")), + ] + ) + expected = DataFrame(np.ones((2, 2), dtype="int64"), columns=mi) + expected.to_excel(path) + result = pd.read_excel(path, header=[0, 1], index_col=0) + # need to convert PeriodIndexes to standard Indexes for assert equal + expected.columns = expected.columns.set_levels( + [[str(i) for i in mi.levels[0]], [str(i) for i in mi.levels[1]]], + level=[0, 1], + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("dtype", [None, object]) + def test_raise_when_saving_timezones(self, dtype, tz_aware_fixture, path): + # GH 27008, GH 7056 + tz = tz_aware_fixture + data = pd.Timestamp("2019", tz=tz) + df = DataFrame([data], dtype=dtype) + with pytest.raises(ValueError, match="Excel does not support"): + df.to_excel(path) + + data = data.to_pydatetime() + df = DataFrame([data], dtype=dtype) + with pytest.raises(ValueError, match="Excel does not support"): + df.to_excel(path) + + def test_excel_duplicate_columns_with_names(self, path): + # GH#39695 + df = DataFrame({"A": [0, 1], "B": [10, 11]}) + df.to_excel(path, columns=["A", "B", "A"], index=False) + + result = pd.read_excel(path) + expected = DataFrame([[0, 10, 0], [1, 
11, 1]], columns=["A", "B", "A.1"]) + tm.assert_frame_equal(result, expected) + + def test_if_sheet_exists_raises(self, ext): + # GH 40230 + msg = "if_sheet_exists is only valid in append mode (mode='a')" + + with tm.ensure_clean(ext) as f: + with pytest.raises(ValueError, match=re.escape(msg)): + ExcelWriter(f, if_sheet_exists="replace") + + def test_excel_writer_empty_frame(self, engine, ext): + # GH#45793 + with tm.ensure_clean(ext) as path: + with ExcelWriter(path, engine=engine) as writer: + DataFrame().to_excel(writer) + result = pd.read_excel(path) + expected = DataFrame() + tm.assert_frame_equal(result, expected) + + def test_to_excel_empty_frame(self, engine, ext): + # GH#45793 + with tm.ensure_clean(ext) as path: + DataFrame().to_excel(path, engine=engine) + result = pd.read_excel(path) + expected = DataFrame() + tm.assert_frame_equal(result, expected) + + +class TestExcelWriterEngineTests: + @pytest.mark.parametrize( + "klass,ext", + [ + pytest.param(_XlsxWriter, ".xlsx", marks=td.skip_if_no("xlsxwriter")), + pytest.param(_OpenpyxlWriter, ".xlsx", marks=td.skip_if_no("openpyxl")), + ], + ) + def test_ExcelWriter_dispatch(self, klass, ext): + with tm.ensure_clean(ext) as path: + with ExcelWriter(path) as writer: + if ext == ".xlsx" and bool( + import_optional_dependency("xlsxwriter", errors="ignore") + ): + # xlsxwriter has preference over openpyxl if both installed + assert isinstance(writer, _XlsxWriter) + else: + assert isinstance(writer, klass) + + def test_ExcelWriter_dispatch_raises(self): + with pytest.raises(ValueError, match="No engine"): + ExcelWriter("nothing") + + def test_register_writer(self): + class DummyClass(ExcelWriter): + called_save = False + called_write_cells = False + called_sheets = False + _supported_extensions = ("xlsx", "xls") + _engine = "dummy" + + def book(self): + pass + + def _save(self): + type(self).called_save = True + + def _write_cells(self, *args, **kwargs): + type(self).called_write_cells = True + + @property + def sheets(self): + type(self).called_sheets = True + + @classmethod + def assert_called_and_reset(cls): + assert cls.called_save + assert cls.called_write_cells + assert not cls.called_sheets + cls.called_save = False + cls.called_write_cells = False + + register_writer(DummyClass) + + with option_context("io.excel.xlsx.writer", "dummy"): + path = "something.xlsx" + with tm.ensure_clean(path) as filepath: + with ExcelWriter(filepath) as writer: + assert isinstance(writer, DummyClass) + df = DataFrame( + ["a"], + columns=Index(["b"], name="foo"), + index=Index(["c"], name="bar"), + ) + df.to_excel(filepath) + DummyClass.assert_called_and_reset() + + with tm.ensure_clean("something.xls") as filepath: + df.to_excel(filepath, engine="dummy") + DummyClass.assert_called_and_reset() + + +@td.skip_if_no("xlrd") +@td.skip_if_no("openpyxl") +class TestFSPath: + def test_excelfile_fspath(self): + with tm.ensure_clean("foo.xlsx") as path: + df = DataFrame({"A": [1, 2]}) + df.to_excel(path) + with ExcelFile(path) as xl: + result = os.fspath(xl) + assert result == path + + def test_excelwriter_fspath(self): + with tm.ensure_clean("foo.xlsx") as path: + with ExcelWriter(path) as writer: + assert os.fspath(writer) == str(path) + + def test_to_excel_pos_args_deprecation(self): + # GH-54229 + df = DataFrame({"a": [1, 2, 3]}) + msg = ( + r"Starting with pandas version 3.0 all arguments of to_excel except " + r"for the argument 'excel_writer' will be keyword-only." 
+ ) + with tm.assert_produces_warning(FutureWarning, match=msg): + buf = BytesIO() + writer = ExcelWriter(buf) + df.to_excel(writer, "Sheet_name_1") + + +@pytest.mark.parametrize("klass", _writers.values()) +def test_subclass_attr(klass): + # testing that subclasses of ExcelWriter don't have public attributes (issue 49602) + attrs_base = {name for name in dir(ExcelWriter) if not name.startswith("_")} + attrs_klass = {name for name in dir(klass) if not name.startswith("_")} + assert not attrs_base.symmetric_difference(attrs_klass) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlrd.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlrd.py new file mode 100644 index 0000000000000000000000000000000000000000..066393d91eeadcdc08873f4ffeedda0f689337fe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlrd.py @@ -0,0 +1,76 @@ +import io + +import numpy as np +import pytest + +from pandas.compat import is_platform_windows + +import pandas as pd +import pandas._testing as tm + +from pandas.io.excel import ExcelFile +from pandas.io.excel._base import inspect_excel_format + +xlrd = pytest.importorskip("xlrd") + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + +@pytest.fixture(params=[".xls"]) +def read_ext_xlrd(request): + """ + Valid extensions for reading Excel files with xlrd. + + Similar to read_ext, but excludes .ods, .xlsb, and for xlrd>2 .xlsx, .xlsm + """ + return request.param + + +def test_read_xlrd_book(read_ext_xlrd, datapath): + engine = "xlrd" + sheet_name = "Sheet1" + pth = datapath("io", "data", "excel", "test1.xls") + with xlrd.open_workbook(pth) as book: + with ExcelFile(book, engine=engine) as xl: + result = pd.read_excel(xl, sheet_name=sheet_name, index_col=0) + + expected = pd.read_excel( + book, sheet_name=sheet_name, engine=engine, index_col=0 + ) + tm.assert_frame_equal(result, expected) + + +def test_read_xlsx_fails(datapath): + # GH 29375 + from xlrd.biffh import XLRDError + + path = datapath("io", "data", "excel", "test1.xlsx") + with pytest.raises(XLRDError, match="Excel xlsx file; not supported"): + pd.read_excel(path, engine="xlrd") + + +def test_nan_in_xls(datapath): + # GH 54564 + path = datapath("io", "data", "excel", "test6.xls") + + expected = pd.DataFrame({0: np.r_[0, 2].astype("int64"), 1: np.r_[1, np.nan]}) + + result = pd.read_excel(path, header=None) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "file_header", + [ + b"\x09\x00\x04\x00\x07\x00\x10\x00", + b"\x09\x02\x06\x00\x00\x00\x10\x00", + b"\x09\x04\x06\x00\x00\x00\x10\x00", + b"\xd0\xcf\x11\xe0\xa1\xb1\x1a\xe1", + ], +) +def test_read_old_xls_files(file_header): + # GH 41226 + f = io.BytesIO(file_header) + assert inspect_excel_format(f) == "xls" diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlsxwriter.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlsxwriter.py new file mode 100644 index 0000000000000000000000000000000000000000..529367761fc025e3e5d02bea85741c82f64c97ca --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/excel/test_xlsxwriter.py @@ -0,0 +1,86 @@ +import contextlib + +import pytest + +from pandas.compat import is_platform_windows + +from pandas import DataFrame +import pandas._testing as tm + +from pandas.io.excel import ExcelWriter + +xlsxwriter = pytest.importorskip("xlsxwriter") + +if is_platform_windows(): + pytestmark = pytest.mark.single_cpu + + 
+@pytest.fixture +def ext(): + return ".xlsx" + + +def test_column_format(ext): + # Test that column formats are applied to cells. Test for issue #9167. + # Applicable to xlsxwriter only. + openpyxl = pytest.importorskip("openpyxl") + + with tm.ensure_clean(ext) as path: + frame = DataFrame({"A": [123456, 123456], "B": [123456, 123456]}) + + with ExcelWriter(path) as writer: + frame.to_excel(writer) + + # Add a number format to col B and ensure it is applied to cells. + num_format = "#,##0" + write_workbook = writer.book + write_worksheet = write_workbook.worksheets()[0] + col_format = write_workbook.add_format({"num_format": num_format}) + write_worksheet.set_column("B:B", None, col_format) + + with contextlib.closing(openpyxl.load_workbook(path)) as read_workbook: + try: + read_worksheet = read_workbook["Sheet1"] + except TypeError: + # compat + read_worksheet = read_workbook.get_sheet_by_name(name="Sheet1") + + # Get the number format from the cell. + try: + cell = read_worksheet["B2"] + except TypeError: + # compat + cell = read_worksheet.cell("B2") + + try: + read_num_format = cell.number_format + except AttributeError: + read_num_format = cell.style.number_format._format_code + + assert read_num_format == num_format + + +def test_write_append_mode_raises(ext): + msg = "Append mode is not supported with xlsxwriter!" + + with tm.ensure_clean(ext) as f: + with pytest.raises(ValueError, match=msg): + ExcelWriter(f, engine="xlsxwriter", mode="a") + + +@pytest.mark.parametrize("nan_inf_to_errors", [True, False]) +def test_engine_kwargs(ext, nan_inf_to_errors): + # GH 42286 + engine_kwargs = {"options": {"nan_inf_to_errors": nan_inf_to_errors}} + with tm.ensure_clean(ext) as f: + with ExcelWriter(f, engine="xlsxwriter", engine_kwargs=engine_kwargs) as writer: + assert writer.book.nan_inf_to_errors == nan_inf_to_errors + + +def test_book_and_sheets_consistent(ext): + # GH#45687 - Ensure sheets is updated if user modifies book + with tm.ensure_clean(ext) as f: + with ExcelWriter(f, engine="xlsxwriter") as writer: + assert writer.sheets == {} + sheet = writer.book.add_worksheet("test_name") + assert writer.sheets == {"test_name": sheet} diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_console.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_console.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9476e3d354e9e355bdbed3fff952c28c5f76cc7 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_console.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_eng_formatting.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_eng_formatting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..419e05359d9b73368756e547a1923d43a5e59974 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_eng_formatting.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_markdown.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_markdown.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..653732ce2ed4e233b09816dfa2884495aee388ac Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/__pycache__/test_to_markdown.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f351bbfa9c7173d73f0f75f50a319d39680d10c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_bar.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_bar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5add7ec15a84c6f8ab584088cd252e25d0adf514 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_bar.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_exceptions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fb587b57e4877f140bc6135375cd829759aa958 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_exceptions.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_format.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_format.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..19af2770c813d4b680790355bf00695fa9a56cfd Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_format.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_highlight.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_highlight.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..27b86fbc63f00a624adcf88159f2d5e78f2fbfd1 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_highlight.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_html.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_html.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3fa7eb9b578f2b0096ce6cc32d0f18514c0e5419 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_html.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_matplotlib.cpython-310.pyc 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_matplotlib.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c987214a6c892fbcf52457e469e15ebc32ea58f6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_matplotlib.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_non_unique.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_non_unique.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ec32cab0aa630f5599407a881205a6ecc734e3f Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_non_unique.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_style.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_style.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..51a89b8c86d2641f14f8c7f20709eef6a3cfc294 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_style.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_latex.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_latex.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5c4c635fca19f4dc7af412b30a5521171a2704ad Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_latex.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_string.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_string.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..302c47bd9e7a75fc8b089560f978dd4d1369ea45 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_to_string.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_tooltip.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_tooltip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16e3fd3337e4bc25e314a21e268c39562a803c77 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/__pycache__/test_tooltip.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_format.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_format.py new file mode 100644 index 0000000000000000000000000000000000000000..1c84816ead140b95f14df8dbeccc83b317ac239a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_format.py @@ -0,0 +1,562 @@ +import numpy as np +import pytest + +from pandas import ( + NA, + DataFrame, + IndexSlice, + MultiIndex, + NaT, + Timestamp, + option_context, +) + +pytest.importorskip("jinja2") +from pandas.io.formats.style import Styler +from 
pandas.io.formats.style_render import _str_escape + + +@pytest.fixture +def df(): + return DataFrame( + data=[[0, -0.609], [1, -1.228]], + columns=["A", "B"], + index=["x", "y"], + ) + + +@pytest.fixture +def styler(df): + return Styler(df, uuid_len=0) + + +@pytest.fixture +def df_multi(): + return DataFrame( + data=np.arange(16).reshape(4, 4), + columns=MultiIndex.from_product([["A", "B"], ["a", "b"]]), + index=MultiIndex.from_product([["X", "Y"], ["x", "y"]]), + ) + + +@pytest.fixture +def styler_multi(df_multi): + return Styler(df_multi, uuid_len=0) + + +def test_display_format(styler): + ctx = styler.format("{:0.1f}")._translate(True, True) + assert all(["display_value" in c for c in row] for row in ctx["body"]) + assert all([len(c["display_value"]) <= 3 for c in row[1:]] for row in ctx["body"]) + assert len(ctx["body"][0][1]["display_value"].lstrip("-")) <= 3 + + +@pytest.mark.parametrize("index", [True, False]) +@pytest.mark.parametrize("columns", [True, False]) +def test_display_format_index(styler, index, columns): + exp_index = ["x", "y"] + if index: + styler.format_index(lambda v: v.upper(), axis=0) # test callable + exp_index = ["X", "Y"] + + exp_columns = ["A", "B"] + if columns: + styler.format_index("*{}*", axis=1) # test string + exp_columns = ["*A*", "*B*"] + + ctx = styler._translate(True, True) + + for r, row in enumerate(ctx["body"]): + assert row[0]["display_value"] == exp_index[r] + + for c, col in enumerate(ctx["head"][1:]): + assert col["display_value"] == exp_columns[c] + + +def test_format_dict(styler): + ctx = styler.format({"A": "{:0.1f}", "B": "{0:.2%}"})._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "0.0" + assert ctx["body"][0][2]["display_value"] == "-60.90%" + + +def test_format_index_dict(styler): + ctx = styler.format_index({0: lambda v: v.upper()})._translate(True, True) + for i, val in enumerate(["X", "Y"]): + assert ctx["body"][i][0]["display_value"] == val + + +def test_format_string(styler): + ctx = styler.format("{:.2f}")._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "0.00" + assert ctx["body"][0][2]["display_value"] == "-0.61" + assert ctx["body"][1][1]["display_value"] == "1.00" + assert ctx["body"][1][2]["display_value"] == "-1.23" + + +def test_format_callable(styler): + ctx = styler.format(lambda v: "neg" if v < 0 else "pos")._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "pos" + assert ctx["body"][0][2]["display_value"] == "neg" + assert ctx["body"][1][1]["display_value"] == "pos" + assert ctx["body"][1][2]["display_value"] == "neg" + + +def test_format_with_na_rep(): + # GH 21527 28358 + df = DataFrame([[None, None], [1.1, 1.2]], columns=["A", "B"]) + + ctx = df.style.format(None, na_rep="-")._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "-" + assert ctx["body"][0][2]["display_value"] == "-" + + ctx = df.style.format("{:.2%}", na_rep="-")._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "-" + assert ctx["body"][0][2]["display_value"] == "-" + assert ctx["body"][1][1]["display_value"] == "110.00%" + assert ctx["body"][1][2]["display_value"] == "120.00%" + + ctx = df.style.format("{:.2%}", na_rep="-", subset=["B"])._translate(True, True) + assert ctx["body"][0][2]["display_value"] == "-" + assert ctx["body"][1][2]["display_value"] == "120.00%" + + +def test_format_index_with_na_rep(): + df = DataFrame([[1, 2, 3, 4, 5]], columns=["A", None, np.nan, NaT, NA]) + ctx = df.style.format_index(None, na_rep="--", axis=1)._translate(True, 
True) + assert ctx["head"][0][1]["display_value"] == "A" + for i in [2, 3, 4, 5]: + assert ctx["head"][0][i]["display_value"] == "--" + + +def test_format_non_numeric_na(): + # GH 21527 28358 + df = DataFrame( + { + "object": [None, np.nan, "foo"], + "datetime": [None, NaT, Timestamp("20120101")], + } + ) + ctx = df.style.format(None, na_rep="-")._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "-" + assert ctx["body"][0][2]["display_value"] == "-" + assert ctx["body"][1][1]["display_value"] == "-" + assert ctx["body"][1][2]["display_value"] == "-" + + +@pytest.mark.parametrize( + "func, attr, kwargs", + [ + ("format", "_display_funcs", {}), + ("format_index", "_display_funcs_index", {"axis": 0}), + ("format_index", "_display_funcs_columns", {"axis": 1}), + ], +) +def test_format_clear(styler, func, attr, kwargs): + assert (0, 0) not in getattr(styler, attr) # using default + getattr(styler, func)("{:.2f}", **kwargs) + assert (0, 0) in getattr(styler, attr) # formatter is specified + getattr(styler, func)(**kwargs) + assert (0, 0) not in getattr(styler, attr) # formatter cleared to default + + +@pytest.mark.parametrize( + "escape, exp", + [ + ("html", "<>&"%$#_{}~^\\~ ^ \\ "), + ( + "latex", + '<>\\&"\\%\\$\\#\\_\\{\\}\\textasciitilde \\textasciicircum ' + "\\textbackslash \\textasciitilde \\space \\textasciicircum \\space " + "\\textbackslash \\space ", + ), + ], +) +def test_format_escape_html(escape, exp): + chars = '<>&"%$#_{}~^\\~ ^ \\ ' + df = DataFrame([[chars]]) + + s = Styler(df, uuid_len=0).format("&{0}&", escape=None) + expected = f'&{chars}&' + assert expected in s.to_html() + + # only the value should be escaped before passing to the formatter + s = Styler(df, uuid_len=0).format("&{0}&", escape=escape) + expected = f'&{exp}&' + assert expected in s.to_html() + + # also test format_index() + styler = Styler(DataFrame(columns=[chars]), uuid_len=0) + styler.format_index("&{0}&", escape=None, axis=1) + assert styler._translate(True, True)["head"][0][1]["display_value"] == f"&{chars}&" + styler.format_index("&{0}&", escape=escape, axis=1) + assert styler._translate(True, True)["head"][0][1]["display_value"] == f"&{exp}&" + + +@pytest.mark.parametrize( + "chars, expected", + [ + ( + r"$ \$&%#_{}~^\ $ &%#_{}~^\ $", + "".join( + [ + r"$ \$&%#_{}~^\ $ ", + r"\&\%\#\_\{\}\textasciitilde \textasciicircum ", + r"\textbackslash \space \$", + ] + ), + ), + ( + r"\( &%#_{}~^\ \) &%#_{}~^\ \(", + "".join( + [ + r"\( &%#_{}~^\ \) ", + r"\&\%\#\_\{\}\textasciitilde \textasciicircum ", + r"\textbackslash \space \textbackslash (", + ] + ), + ), + ( + r"$\&%#_{}^\$", + r"\$\textbackslash \&\%\#\_\{\}\textasciicircum \textbackslash \$", + ), + ( + r"$ \frac{1}{2} $ \( \frac{1}{2} \)", + "".join( + [ + r"$ \frac{1}{2} $", + r" \textbackslash ( \textbackslash frac\{1\}\{2\} \textbackslash )", + ] + ), + ), + ], +) +def test_format_escape_latex_math(chars, expected): + # GH 51903 + # latex-math escape works for each DataFrame cell separately. If we have + # a combination of dollar signs and brackets, the dollar sign would apply. 
+ df = DataFrame([[chars]]) + s = df.style.format("{0}", escape="latex-math") + assert s._translate(True, True)["body"][0][1]["display_value"] == expected + + +def test_format_escape_na_rep(): + # tests the na_rep is not escaped + df = DataFrame([['<>&"', None]]) + s = Styler(df, uuid_len=0).format("X&{0}>X", escape="html", na_rep="&") + ex = 'X&<>&">X' + expected2 = '&' + assert ex in s.to_html() + assert expected2 in s.to_html() + + # also test for format_index() + df = DataFrame(columns=['<>&"', None]) + styler = Styler(df, uuid_len=0) + styler.format_index("X&{0}>X", escape="html", na_rep="&", axis=1) + ctx = styler._translate(True, True) + assert ctx["head"][0][1]["display_value"] == "X&<>&">X" + assert ctx["head"][0][2]["display_value"] == "&" + + +def test_format_escape_floats(styler): + # test given formatter for number format is not impacted by escape + s = styler.format("{:.1f}", escape="html") + for expected in [">0.0<", ">1.0<", ">-1.2<", ">-0.6<"]: + assert expected in s.to_html() + # tests precision of floats is not impacted by escape + s = styler.format(precision=1, escape="html") + for expected in [">0<", ">1<", ">-1.2<", ">-0.6<"]: + assert expected in s.to_html() + + +@pytest.mark.parametrize("formatter", [5, True, [2.0]]) +@pytest.mark.parametrize("func", ["format", "format_index"]) +def test_format_raises(styler, formatter, func): + with pytest.raises(TypeError, match="expected str or callable"): + getattr(styler, func)(formatter) + + +@pytest.mark.parametrize( + "precision, expected", + [ + (1, ["1.0", "2.0", "3.2", "4.6"]), + (2, ["1.00", "2.01", "3.21", "4.57"]), + (3, ["1.000", "2.009", "3.212", "4.566"]), + ], +) +def test_format_with_precision(precision, expected): + # Issue #13257 + df = DataFrame([[1.0, 2.0090, 3.2121, 4.566]], columns=[1.0, 2.0090, 3.2121, 4.566]) + styler = Styler(df) + styler.format(precision=precision) + styler.format_index(precision=precision, axis=1) + + ctx = styler._translate(True, True) + for col, exp in enumerate(expected): + assert ctx["body"][0][col + 1]["display_value"] == exp # format test + assert ctx["head"][0][col + 1]["display_value"] == exp # format_index test + + +@pytest.mark.parametrize("axis", [0, 1]) +@pytest.mark.parametrize( + "level, expected", + [ + (0, ["X", "X", "_", "_"]), # level int + ("zero", ["X", "X", "_", "_"]), # level name + (1, ["_", "_", "X", "X"]), # other level int + ("one", ["_", "_", "X", "X"]), # other level name + ([0, 1], ["X", "X", "X", "X"]), # both levels + ([0, "zero"], ["X", "X", "_", "_"]), # level int and name simultaneous + ([0, "one"], ["X", "X", "X", "X"]), # both levels as int and name + (["one", "zero"], ["X", "X", "X", "X"]), # both level names, reversed + ], +) +def test_format_index_level(axis, level, expected): + midx = MultiIndex.from_arrays([["_", "_"], ["_", "_"]], names=["zero", "one"]) + df = DataFrame([[1, 2], [3, 4]]) + if axis == 0: + df.index = midx + else: + df.columns = midx + + styler = df.style.format_index(lambda v: "X", level=level, axis=axis) + ctx = styler._translate(True, True) + + if axis == 0: # compare index + result = [ctx["body"][s][0]["display_value"] for s in range(2)] + result += [ctx["body"][s][1]["display_value"] for s in range(2)] + else: # compare columns + result = [ctx["head"][0][s + 1]["display_value"] for s in range(2)] + result += [ctx["head"][1][s + 1]["display_value"] for s in range(2)] + + assert expected == result + + +def test_format_subset(): + df = DataFrame([[0.1234, 0.1234], [1.1234, 1.1234]], columns=["a", "b"]) + ctx = df.style.format( + 
{"a": "{:0.1f}", "b": "{0:.2%}"}, subset=IndexSlice[0, :] + )._translate(True, True) + expected = "0.1" + raw_11 = "1.123400" + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][1][1]["display_value"] == raw_11 + assert ctx["body"][0][2]["display_value"] == "12.34%" + + ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, :])._translate(True, True) + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][1][1]["display_value"] == raw_11 + + ctx = df.style.format("{:0.1f}", subset=IndexSlice["a"])._translate(True, True) + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][0][2]["display_value"] == "0.123400" + + ctx = df.style.format("{:0.1f}", subset=IndexSlice[0, "a"])._translate(True, True) + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][1][1]["display_value"] == raw_11 + + ctx = df.style.format("{:0.1f}", subset=IndexSlice[[0, 1], ["a"]])._translate( + True, True + ) + assert ctx["body"][0][1]["display_value"] == expected + assert ctx["body"][1][1]["display_value"] == "1.1" + assert ctx["body"][0][2]["display_value"] == "0.123400" + assert ctx["body"][1][2]["display_value"] == raw_11 + + +@pytest.mark.parametrize("formatter", [None, "{:,.1f}"]) +@pytest.mark.parametrize("decimal", [".", "*"]) +@pytest.mark.parametrize("precision", [None, 2]) +@pytest.mark.parametrize("func, col", [("format", 1), ("format_index", 0)]) +def test_format_thousands(formatter, decimal, precision, func, col): + styler = DataFrame([[1000000.123456789]], index=[1000000.123456789]).style + result = getattr(styler, func)( # testing float + thousands="_", formatter=formatter, decimal=decimal, precision=precision + )._translate(True, True) + assert "1_000_000" in result["body"][0][col]["display_value"] + + styler = DataFrame([[1000000]], index=[1000000]).style + result = getattr(styler, func)( # testing int + thousands="_", formatter=formatter, decimal=decimal, precision=precision + )._translate(True, True) + assert "1_000_000" in result["body"][0][col]["display_value"] + + styler = DataFrame([[1 + 1000000.123456789j]], index=[1 + 1000000.123456789j]).style + result = getattr(styler, func)( # testing complex + thousands="_", formatter=formatter, decimal=decimal, precision=precision + )._translate(True, True) + assert "1_000_000" in result["body"][0][col]["display_value"] + + +@pytest.mark.parametrize("formatter", [None, "{:,.4f}"]) +@pytest.mark.parametrize("thousands", [None, ",", "*"]) +@pytest.mark.parametrize("precision", [None, 4]) +@pytest.mark.parametrize("func, col", [("format", 1), ("format_index", 0)]) +def test_format_decimal(formatter, thousands, precision, func, col): + styler = DataFrame([[1000000.123456789]], index=[1000000.123456789]).style + result = getattr(styler, func)( # testing float + decimal="_", formatter=formatter, thousands=thousands, precision=precision + )._translate(True, True) + assert "000_123" in result["body"][0][col]["display_value"] + + styler = DataFrame([[1 + 1000000.123456789j]], index=[1 + 1000000.123456789j]).style + result = getattr(styler, func)( # testing complex + decimal="_", formatter=formatter, thousands=thousands, precision=precision + )._translate(True, True) + assert "000_123" in result["body"][0][col]["display_value"] + + +def test_str_escape_error(): + msg = "`escape` only permitted in {'html', 'latex', 'latex-math'}, got " + with pytest.raises(ValueError, match=msg): + _str_escape("text", "bad_escape") + + with pytest.raises(ValueError, match=msg): + 
_str_escape("text", []) + + _str_escape(2.00, "bad_escape") # OK since dtype is float + + +def test_long_int_formatting(): + df = DataFrame(data=[[1234567890123456789]], columns=["test"]) + styler = df.style + ctx = styler._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "1234567890123456789" + + styler = df.style.format(thousands="_") + ctx = styler._translate(True, True) + assert ctx["body"][0][1]["display_value"] == "1_234_567_890_123_456_789" + + +def test_format_options(): + df = DataFrame({"int": [2000, 1], "float": [1.009, None], "str": ["&<", "&~"]}) + ctx = df.style._translate(True, True) + + # test option: na_rep + assert ctx["body"][1][2]["display_value"] == "nan" + with option_context("styler.format.na_rep", "MISSING"): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][1][2]["display_value"] == "MISSING" + + # test option: decimal and precision + assert ctx["body"][0][2]["display_value"] == "1.009000" + with option_context("styler.format.decimal", "_"): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][0][2]["display_value"] == "1_009000" + with option_context("styler.format.precision", 2): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][0][2]["display_value"] == "1.01" + + # test option: thousands + assert ctx["body"][0][1]["display_value"] == "2000" + with option_context("styler.format.thousands", "_"): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][0][1]["display_value"] == "2_000" + + # test option: escape + assert ctx["body"][0][3]["display_value"] == "&<" + assert ctx["body"][1][3]["display_value"] == "&~" + with option_context("styler.format.escape", "html"): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][0][3]["display_value"] == "&<" + with option_context("styler.format.escape", "latex"): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][1][3]["display_value"] == "\\&\\textasciitilde " + with option_context("styler.format.escape", "latex-math"): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][1][3]["display_value"] == "\\&\\textasciitilde " + + # test option: formatter + with option_context("styler.format.formatter", {"int": "{:,.2f}"}): + ctx_with_op = df.style._translate(True, True) + assert ctx_with_op["body"][0][1]["display_value"] == "2,000.00" + + +def test_precision_zero(df): + styler = Styler(df, precision=0) + ctx = styler._translate(True, True) + assert ctx["body"][0][2]["display_value"] == "-1" + assert ctx["body"][1][2]["display_value"] == "-1" + + +@pytest.mark.parametrize( + "formatter, exp", + [ + (lambda x: f"{x:.3f}", "9.000"), + ("{:.2f}", "9.00"), + ({0: "{:.1f}"}, "9.0"), + (None, "9"), + ], +) +def test_formatter_options_validator(formatter, exp): + df = DataFrame([[9]]) + with option_context("styler.format.formatter", formatter): + assert f" {exp} " in df.style.to_latex() + + +def test_formatter_options_raises(): + msg = "Value must be an instance of" + with pytest.raises(ValueError, match=msg): + with option_context("styler.format.formatter", ["bad", "type"]): + DataFrame().style.to_latex() + + +def test_1level_multiindex(): + # GH 43383 + midx = MultiIndex.from_product([[1, 2]], names=[""]) + df = DataFrame(-1, index=midx, columns=[0, 1]) + ctx = df.style._translate(True, True) + assert ctx["body"][0][0]["display_value"] == "1" + assert ctx["body"][0][0]["is_visible"] is True + assert ctx["body"][1][0]["display_value"] == "2" + 
assert ctx["body"][1][0]["is_visible"] is True + + +def test_boolean_format(): + # gh 46384: booleans do not collapse to integer representation on display + df = DataFrame([[True, False]]) + ctx = df.style._translate(True, True) + assert ctx["body"][0][1]["display_value"] is True + assert ctx["body"][0][2]["display_value"] is False + + +@pytest.mark.parametrize( + "hide, labels", + [ + (False, [1, 2]), + (True, [1, 2, 3, 4]), + ], +) +def test_relabel_raise_length(styler_multi, hide, labels): + if hide: + styler_multi.hide(axis=0, subset=[("X", "x"), ("Y", "y")]) + with pytest.raises(ValueError, match="``labels`` must be of length equal"): + styler_multi.relabel_index(labels=labels) + + +def test_relabel_index(styler_multi): + labels = [(1, 2), (3, 4)] + styler_multi.hide(axis=0, subset=[("X", "x"), ("Y", "y")]) + styler_multi.relabel_index(labels=labels) + ctx = styler_multi._translate(True, True) + assert {"value": "X", "display_value": 1}.items() <= ctx["body"][0][0].items() + assert {"value": "y", "display_value": 2}.items() <= ctx["body"][0][1].items() + assert {"value": "Y", "display_value": 3}.items() <= ctx["body"][1][0].items() + assert {"value": "x", "display_value": 4}.items() <= ctx["body"][1][1].items() + + +def test_relabel_columns(styler_multi): + labels = [(1, 2), (3, 4)] + styler_multi.hide(axis=1, subset=[("A", "a"), ("B", "b")]) + styler_multi.relabel_index(axis=1, labels=labels) + ctx = styler_multi._translate(True, True) + assert {"value": "A", "display_value": 1}.items() <= ctx["head"][0][3].items() + assert {"value": "B", "display_value": 3}.items() <= ctx["head"][0][4].items() + assert {"value": "b", "display_value": 2}.items() <= ctx["head"][1][3].items() + assert {"value": "a", "display_value": 4}.items() <= ctx["head"][1][4].items() + + +def test_relabel_roundtrip(styler): + styler.relabel_index(["{}", "{}"]) + ctx = styler._translate(True, True) + assert {"value": "x", "display_value": "x"}.items() <= ctx["body"][0][0].items() + assert {"value": "y", "display_value": "y"}.items() <= ctx["body"][1][0].items() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_highlight.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_highlight.py new file mode 100644 index 0000000000000000000000000000000000000000..3d59719010ee03cc53373a1c96f5f8c5611d7681 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_highlight.py @@ -0,0 +1,218 @@ +import numpy as np +import pytest + +from pandas import ( + NA, + DataFrame, + IndexSlice, +) + +pytest.importorskip("jinja2") + +from pandas.io.formats.style import Styler + + +@pytest.fixture(params=[(None, "float64"), (NA, "Int64")]) +def df(request): + # GH 45804 + return DataFrame( + {"A": [0, np.nan, 10], "B": [1, request.param[0], 2]}, dtype=request.param[1] + ) + + +@pytest.fixture +def styler(df): + return Styler(df, uuid_len=0) + + +def test_highlight_null(styler): + result = styler.highlight_null()._compute().ctx + expected = { + (1, 0): [("background-color", "red")], + (1, 1): [("background-color", "red")], + } + assert result == expected + + +def test_highlight_null_subset(styler): + # GH 31345 + result = ( + styler.highlight_null(color="red", subset=["A"]) + .highlight_null(color="green", subset=["B"]) + ._compute() + .ctx + ) + expected = { + (1, 0): [("background-color", "red")], + (1, 1): [("background-color", "green")], + } + assert result == expected + + +@pytest.mark.parametrize("f", ["highlight_min", 
"highlight_max"]) +def test_highlight_minmax_basic(df, f): + expected = { + (0, 1): [("background-color", "red")], + # ignores NaN row, + (2, 0): [("background-color", "red")], + } + if f == "highlight_min": + df = -df + result = getattr(df.style, f)(axis=1, color="red")._compute().ctx + assert result == expected + + +@pytest.mark.parametrize("f", ["highlight_min", "highlight_max"]) +@pytest.mark.parametrize( + "kwargs", + [ + {"axis": None, "color": "red"}, # test axis + {"axis": 0, "subset": ["A"], "color": "red"}, # test subset and ignores NaN + {"axis": None, "props": "background-color: red"}, # test props + ], +) +def test_highlight_minmax_ext(df, f, kwargs): + expected = {(2, 0): [("background-color", "red")]} + if f == "highlight_min": + df = -df + result = getattr(df.style, f)(**kwargs)._compute().ctx + assert result == expected + + +@pytest.mark.parametrize("f", ["highlight_min", "highlight_max"]) +@pytest.mark.parametrize("axis", [None, 0, 1]) +def test_highlight_minmax_nulls(f, axis): + # GH 42750 + expected = { + (1, 0): [("background-color", "yellow")], + (1, 1): [("background-color", "yellow")], + } + if axis == 1: + expected.update({(2, 1): [("background-color", "yellow")]}) + + if f == "highlight_max": + df = DataFrame({"a": [NA, 1, None], "b": [np.nan, 1, -1]}) + else: + df = DataFrame({"a": [NA, -1, None], "b": [np.nan, -1, 1]}) + + result = getattr(df.style, f)(axis=axis)._compute().ctx + assert result == expected + + +@pytest.mark.parametrize( + "kwargs", + [ + {"left": 0, "right": 1}, # test basic range + {"left": 0, "right": 1, "props": "background-color: yellow"}, # test props + {"left": -100, "right": 100, "subset": IndexSlice[[0, 1], :]}, # test subset + {"left": 0, "subset": IndexSlice[[0, 1], :]}, # test no right + {"right": 1}, # test no left + {"left": [0, 0, 11], "axis": 0}, # test left as sequence + {"left": DataFrame({"A": [0, 0, 11], "B": [1, 1, 11]}), "axis": None}, # axis + {"left": 0, "right": [0, 1], "axis": 1}, # test sequence right + ], +) +def test_highlight_between(styler, kwargs): + expected = { + (0, 0): [("background-color", "yellow")], + (0, 1): [("background-color", "yellow")], + } + result = styler.highlight_between(**kwargs)._compute().ctx + assert result == expected + + +@pytest.mark.parametrize( + "arg, map, axis", + [ + ("left", [1, 2], 0), # 0 axis has 3 elements not 2 + ("left", [1, 2, 3], 1), # 1 axis has 2 elements not 3 + ("left", np.array([[1, 2], [1, 2]]), None), # df is (2,3) not (2,2) + ("right", [1, 2], 0), # same tests as above for 'right' not 'left' + ("right", [1, 2, 3], 1), # .. + ("right", np.array([[1, 2], [1, 2]]), None), # .. 
+ ], +) +def test_highlight_between_raises(arg, styler, map, axis): + msg = f"supplied '{arg}' is not correct shape" + with pytest.raises(ValueError, match=msg): + styler.highlight_between(**{arg: map, "axis": axis})._compute() + + +def test_highlight_between_raises2(styler): + msg = "values can be 'both', 'left', 'right', or 'neither'" + with pytest.raises(ValueError, match=msg): + styler.highlight_between(inclusive="badstring")._compute() + + with pytest.raises(ValueError, match=msg): + styler.highlight_between(inclusive=1)._compute() + + +@pytest.mark.parametrize( + "inclusive, expected", + [ + ( + "both", + { + (0, 0): [("background-color", "yellow")], + (0, 1): [("background-color", "yellow")], + }, + ), + ("neither", {}), + ("left", {(0, 0): [("background-color", "yellow")]}), + ("right", {(0, 1): [("background-color", "yellow")]}), + ], +) +def test_highlight_between_inclusive(styler, inclusive, expected): + kwargs = {"left": 0, "right": 1, "subset": IndexSlice[[0, 1], :]} + result = styler.highlight_between(**kwargs, inclusive=inclusive)._compute() + assert result.ctx == expected + + +@pytest.mark.parametrize( + "kwargs", + [ + {"q_left": 0.5, "q_right": 1, "axis": 0}, # base case + {"q_left": 0.5, "q_right": 1, "axis": None}, # test axis + {"q_left": 0, "q_right": 1, "subset": IndexSlice[2, :]}, # test subset + {"q_left": 0.5, "axis": 0}, # test no high + {"q_right": 1, "subset": IndexSlice[2, :], "axis": 1}, # test no low + {"q_left": 0.5, "axis": 0, "props": "background-color: yellow"}, # tst prop + ], +) +def test_highlight_quantile(styler, kwargs): + expected = { + (2, 0): [("background-color", "yellow")], + (2, 1): [("background-color", "yellow")], + } + result = styler.highlight_quantile(**kwargs)._compute().ctx + assert result == expected + + +@pytest.mark.parametrize( + "f,kwargs", + [ + ("highlight_min", {"axis": 1, "subset": IndexSlice[1, :]}), + ("highlight_max", {"axis": 0, "subset": [0]}), + ("highlight_quantile", {"axis": None, "q_left": 0.6, "q_right": 0.8}), + ("highlight_between", {"subset": [0]}), + ], +) +@pytest.mark.parametrize( + "df", + [ + DataFrame([[0, 10], [20, 30]], dtype=int), + DataFrame([[0, 10], [20, 30]], dtype=float), + DataFrame([[0, 10], [20, 30]], dtype="datetime64[ns]"), + DataFrame([[0, 10], [20, 30]], dtype=str), + DataFrame([[0, 10], [20, 30]], dtype="timedelta64[ns]"), + ], +) +def test_all_highlight_dtypes(f, kwargs, df): + if f == "highlight_quantile" and isinstance(df.iloc[0, 0], (str)): + return None # quantile incompatible with str + if f == "highlight_between": + kwargs["left"] = df.iloc[1, 0] # set the range low for testing + + expected = {(1, 0): [("background-color", "yellow")]} + result = getattr(df.style, f)(**kwargs)._compute().ctx + assert result == expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_html.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_html.py new file mode 100644 index 0000000000000000000000000000000000000000..1e345eb82ed3c31e7a5e0f89fa574aea84923dd7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_html.py @@ -0,0 +1,1009 @@ +from textwrap import ( + dedent, + indent, +) + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, + option_context, +) + +jinja2 = pytest.importorskip("jinja2") +from pandas.io.formats.style import Styler + + +@pytest.fixture +def env(): + loader = jinja2.PackageLoader("pandas", "io/formats/templates") + env = 
jinja2.Environment(loader=loader, trim_blocks=True)
+    return env
+
+
+@pytest.fixture
+def styler():
+    return Styler(DataFrame([[2.61], [2.69]], index=["a", "b"], columns=["A"]))
+
+
+@pytest.fixture
+def styler_mi():
+    midx = MultiIndex.from_product([["a", "b"], ["c", "d"]])
+    return Styler(DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=midx))
+
+
+@pytest.fixture
+def tpl_style(env):
+    return env.get_template("html_style.tpl")
+
+
+@pytest.fixture
+def tpl_table(env):
+    return env.get_template("html_table.tpl")
+
+
+def test_html_template_extends_options():
+    # make sure if templates are edited tests are updated as are setup fixtures
+    # to understand the dependency
+    with open("pandas/io/formats/templates/html.tpl", encoding="utf-8") as file:
+        result = file.read()
+    assert "{% include html_style_tpl %}" in result
+    assert "{% include html_table_tpl %}" in result
+
+
+def test_exclude_styles(styler):
+    result = styler.to_html(exclude_styles=True, doctype_html=True)
+    expected = dedent(
+        """\
+        <!DOCTYPE html>
+        <html>
+        <head>
+        <meta charset="utf-8">
+        </head>
+        <body>
+        <table>
+          <thead>
+            <tr>
+              <th >&nbsp;</th>
+              <th >A</th>
+            </tr>
+          </thead>
+          <tbody>
+            <tr>
+              <th >a</th>
+              <td >2.610000</td>
+            </tr>
+            <tr>
+              <th >b</th>
+              <td >2.690000</td>
+            </tr>
+          </tbody>
+        </table>
+        </body>
+        </html>
+        """
+    )
+    assert result == expected
+
+
+def test_w3_html_format(styler):
+    styler.set_uuid("").set_table_styles([{"selector": "th", "props": "att2:v2;"}]).map(
+        lambda x: "att1:v1;"
+    ).set_table_attributes('class="my-cls1" style="attr3:v3;"').set_td_classes(
+        DataFrame(["my-cls2"], index=["a"], columns=["A"])
+    ).format(
+        "{:.1f}"
+    ).set_caption(
+        "A comprehensive test"
+    )
+    expected = dedent(
+        """\
+        <style type="text/css">
+        #T_ th {
+          att2: v2;
+        }
+        #T__row0_col0, #T__row1_col0 {
+          att1: v1;
+        }
+        </style>
+        <table id="T_" class="my-cls1" style="attr3:v3;">
+          <caption>A comprehensive test</caption>
+          <thead>
+            <tr>
+              <th class="blank level0" >&nbsp;</th>
+              <th id="T__level0_col0" class="col_heading level0 col0" >A</th>
+            </tr>
+          </thead>
+          <tbody>
+            <tr>
+              <th id="T__level0_row0" class="row_heading level0 row0" >a</th>
+              <td id="T__row0_col0" class="data row0 col0 my-cls2" >2.6</td>
+            </tr>
+            <tr>
+              <th id="T__level0_row1" class="row_heading level0 row1" >b</th>
+              <td id="T__row1_col0" class="data row1 col0" >2.7</td>
+            </tr>
+          </tbody>
+        </table>
+        """
+    )
+    assert expected == styler.to_html()
+
+
+def test_colspan_w3():
+    # GH 36223
+    df = DataFrame(data=[[1, 2]], columns=[["l0", "l0"], ["l1a", "l1b"]])
+    styler = Styler(df, uuid="_", cell_ids=False)
+    assert '<th class="col_heading level0 col0" colspan="2">l0</th>' in styler.to_html()
+
+
+def test_rowspan_w3():
+    # GH 38533
+    df = DataFrame(data=[[1, 2]], index=[["l0", "l0"], ["l1a", "l1b"]])
+    styler = Styler(df, uuid="_", cell_ids=False)
+    assert '<th class="row_heading level0 row0" rowspan="2">l0</th>' in styler.to_html()
+
+
+def test_styles(styler):
+    styler.set_uuid("abc")
+    styler.set_table_styles([{"selector": "td", "props": "color: red;"}])
+    result = styler.to_html(doctype_html=True)
+    expected = dedent(
+        """\
+        <!DOCTYPE html>
+        <html>
+        <head>
+        <meta charset="utf-8">
+        <style type="text/css">
+        #T_abc td {
+          color: red;
+        }
+        </style>
+        </head>
+        <body>
+        <table id="T_abc">
+          <thead>
+            <tr>
+              <th class="blank level0" >&nbsp;</th>
+              <th id="T_abc_level0_col0" class="col_heading level0 col0" >A</th>
+            </tr>
+          </thead>
+          <tbody>
+            <tr>
+              <th id="T_abc_level0_row0" class="row_heading level0 row0" >a</th>
+              <td id="T_abc_row0_col0" class="data row0 col0" >2.610000</td>
+            </tr>
+            <tr>
+              <th id="T_abc_level0_row1" class="row_heading level0 row1" >b</th>
+              <td id="T_abc_row1_col0" class="data row1 col0" >2.690000</td>
+            </tr>
+          </tbody>
+        </table>
+        </body>
+        </html>
+        """
+    )
+    assert result == expected
+
+
+def test_doctype(styler):
+    result = styler.to_html(doctype_html=False)
+    assert "<html>" not in result
+    assert "<head>" not in result
+    assert "<body>" not in result
+    assert "<!DOCTYPE html>" not in result
+
+
+def test_doctype_encoding(styler):
+    with option_context("styler.render.encoding", "ASCII"):
+        result = styler.to_html(doctype_html=True)
+        assert '<meta charset="ASCII">' in result
+        result = styler.to_html(doctype_html=True, encoding="ANSI")
+        assert '<meta charset="ANSI">' in result
+
+
+def test_bold_headers_arg(styler):
+    result = styler.to_html(bold_headers=True)
+    assert "th {\n  font-weight: bold;\n}" in result
+    result = styler.to_html()
+    assert "th {\n  font-weight: bold;\n}" not in result
+
+
+def test_caption_arg(styler):
+    result = styler.to_html(caption="foo bar")
+    assert "<caption>foo bar</caption>" in result
+    result = styler.to_html()
+    assert "<caption>foo bar</caption>" not in result
+
+
+def test_block_names(tpl_style, tpl_table):
+    # catch accidental removal of a block
+    expected_style = {
+        "before_style",
+        "style",
+        "table_styles",
+        "before_cellstyle",
+        "cellstyle",
+    }
+    expected_table = {
+        "before_table",
+        "table",
+        "caption",
+        "thead",
+        "tbody",
+        "after_table",
+        "before_head_rows",
+        "head_tr",
+        "after_head_rows",
+        "before_rows",
+        "tr",
+        "after_rows",
+    }
+    result1 = set(tpl_style.blocks)
+    assert result1 == expected_style
+
+    result2 = set(tpl_table.blocks)
+    assert result2 == expected_table
+
+
+def test_from_custom_template_table(tmpdir):
+    p = tmpdir.mkdir("tpl").join("myhtml_table.tpl")
+    p.write(
+        dedent(
+            """\
+            {% extends "html_table.tpl" %}
+            {% block table %}
{{custom_title}}

+ {{ super() }} + {% endblock table %}""" + ) + ) + result = Styler.from_custom_template(str(tmpdir.join("tpl")), "myhtml_table.tpl") + assert issubclass(result, Styler) + assert result.env is not Styler.env + assert result.template_html_table is not Styler.template_html_table + styler = result(DataFrame({"A": [1, 2]})) + assert "
<h1>My Title</h1>
\n\n\n + {{ super() }} + {% endblock style %}""" + ) + ) + result = Styler.from_custom_template( + str(tmpdir.join("tpl")), html_style="myhtml_style.tpl" + ) + assert issubclass(result, Styler) + assert result.env is not Styler.env + assert result.template_html_style is not Styler.template_html_style + styler = result(DataFrame({"A": [1, 2]})) + assert '\n\nfull cap" in styler.to_html() + + +@pytest.mark.parametrize("index", [False, True]) +@pytest.mark.parametrize("columns", [False, True]) +@pytest.mark.parametrize("index_name", [True, False]) +def test_sticky_basic(styler, index, columns, index_name): + if index_name: + styler.index.name = "some text" + if index: + styler.set_sticky(axis=0) + if columns: + styler.set_sticky(axis=1) + + left_css = ( + "#T_ {0} {{\n position: sticky;\n background-color: inherit;\n" + " left: 0px;\n z-index: {1};\n}}" + ) + top_css = ( + "#T_ {0} {{\n position: sticky;\n background-color: inherit;\n" + " top: {1}px;\n z-index: {2};\n{3}}}" + ) + + res = styler.set_uuid("").to_html() + + # test index stickys over thead and tbody + assert (left_css.format("thead tr th:nth-child(1)", "3 !important") in res) is index + assert (left_css.format("tbody tr th:nth-child(1)", "1") in res) is index + + # test column stickys including if name row + assert ( + top_css.format("thead tr:nth-child(1) th", "0", "2", " height: 25px;\n") in res + ) is (columns and index_name) + assert ( + top_css.format("thead tr:nth-child(2) th", "25", "2", " height: 25px;\n") + in res + ) is (columns and index_name) + assert (top_css.format("thead tr:nth-child(1) th", "0", "2", "") in res) is ( + columns and not index_name + ) + + +@pytest.mark.parametrize("index", [False, True]) +@pytest.mark.parametrize("columns", [False, True]) +def test_sticky_mi(styler_mi, index, columns): + if index: + styler_mi.set_sticky(axis=0) + if columns: + styler_mi.set_sticky(axis=1) + + left_css = ( + "#T_ {0} {{\n position: sticky;\n background-color: inherit;\n" + " left: {1}px;\n min-width: 75px;\n max-width: 75px;\n z-index: {2};\n}}" + ) + top_css = ( + "#T_ {0} {{\n position: sticky;\n background-color: inherit;\n" + " top: {1}px;\n height: 25px;\n z-index: {2};\n}}" + ) + + res = styler_mi.set_uuid("").to_html() + + # test the index stickys for thead and tbody over both levels + assert ( + left_css.format("thead tr th:nth-child(1)", "0", "3 !important") in res + ) is index + assert (left_css.format("tbody tr th.level0", "0", "1") in res) is index + assert ( + left_css.format("thead tr th:nth-child(2)", "75", "3 !important") in res + ) is index + assert (left_css.format("tbody tr th.level1", "75", "1") in res) is index + + # test the column stickys for each level row + assert (top_css.format("thead tr:nth-child(1) th", "0", "2") in res) is columns + assert (top_css.format("thead tr:nth-child(2) th", "25", "2") in res) is columns + + +@pytest.mark.parametrize("index", [False, True]) +@pytest.mark.parametrize("columns", [False, True]) +@pytest.mark.parametrize("levels", [[1], ["one"], "one"]) +def test_sticky_levels(styler_mi, index, columns, levels): + styler_mi.index.names, styler_mi.columns.names = ["zero", "one"], ["zero", "one"] + if index: + styler_mi.set_sticky(axis=0, levels=levels) + if columns: + styler_mi.set_sticky(axis=1, levels=levels) + + left_css = ( + "#T_ {0} {{\n position: sticky;\n background-color: inherit;\n" + " left: {1}px;\n min-width: 75px;\n max-width: 75px;\n z-index: {2};\n}}" + ) + top_css = ( + "#T_ {0} {{\n position: sticky;\n background-color: inherit;\n" + " top: {1}px;\n 
height: 25px;\n z-index: {2};\n}}" + ) + + res = styler_mi.set_uuid("").to_html() + + # test no sticking of level0 + assert "#T_ thead tr th:nth-child(1)" not in res + assert "#T_ tbody tr th.level0" not in res + assert "#T_ thead tr:nth-child(1) th" not in res + + # test sticking level1 + assert ( + left_css.format("thead tr th:nth-child(2)", "0", "3 !important") in res + ) is index + assert (left_css.format("tbody tr th.level1", "0", "1") in res) is index + assert (top_css.format("thead tr:nth-child(2) th", "0", "2") in res) is columns + + +def test_sticky_raises(styler): + with pytest.raises(ValueError, match="No axis named bad for object type DataFrame"): + styler.set_sticky(axis="bad") + + +@pytest.mark.parametrize( + "sparse_index, sparse_columns", + [(True, True), (True, False), (False, True), (False, False)], +) +def test_sparse_options(sparse_index, sparse_columns): + cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")]) + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=ridx, columns=cidx) + styler = df.style + + default_html = styler.to_html() # defaults under pd.options to (True , True) + + with option_context( + "styler.sparse.index", sparse_index, "styler.sparse.columns", sparse_columns + ): + html1 = styler.to_html() + assert (html1 == default_html) is (sparse_index and sparse_columns) + html2 = styler.to_html(sparse_index=sparse_index, sparse_columns=sparse_columns) + assert html1 == html2 + + +@pytest.mark.parametrize("index", [True, False]) +@pytest.mark.parametrize("columns", [True, False]) +def test_map_header_cell_ids(styler, index, columns): + # GH 41893 + func = lambda v: "attr: val;" + styler.uuid, styler.cell_ids = "", False + if index: + styler.map_index(func, axis="index") + if columns: + styler.map_index(func, axis="columns") + + result = styler.to_html() + + # test no data cell ids + assert '2.610000' in result + assert '2.690000' in result + + # test index header ids where needed and css styles + assert ( + 'a' in result + ) is index + assert ( + 'b' in result + ) is index + assert ("#T__level0_row0, #T__level0_row1 {\n attr: val;\n}" in result) is index + + # test column header ids where needed and css styles + assert ( + 'A' in result + ) is columns + assert ("#T__level0_col0 {\n attr: val;\n}" in result) is columns + + +@pytest.mark.parametrize("rows", [True, False]) +@pytest.mark.parametrize("cols", [True, False]) +def test_maximums(styler_mi, rows, cols): + result = styler_mi.to_html( + max_rows=2 if rows else None, + max_columns=2 if cols else None, + ) + + assert ">5" in result # [[0,1], [4,5]] always visible + assert (">8" in result) is not rows # first trimmed vertical element + assert (">2" in result) is not cols # first trimmed horizontal element + + +def test_replaced_css_class_names(): + css = { + "row_heading": "ROWHEAD", + # "col_heading": "COLHEAD", + "index_name": "IDXNAME", + # "col": "COL", + "row": "ROW", + # "col_trim": "COLTRIM", + "row_trim": "ROWTRIM", + "level": "LEVEL", + "data": "DATA", + "blank": "BLANK", + } + midx = MultiIndex.from_product([["a", "b"], ["c", "d"]]) + styler_mi = Styler( + DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=midx), + uuid_len=0, + ).set_table_styles(css_class_names=css) + styler_mi.index.names = ["n1", "n2"] + styler_mi.hide(styler_mi.index[1:], axis=0) + styler_mi.hide(styler_mi.columns[1:], axis=1) + styler_mi.map_index(lambda v: "color: red;", axis=0) + styler_mi.map_index(lambda v: "color: green;", 
axis=1)
+    styler_mi.map(lambda v: "color: blue;")
+    expected = dedent(
+        """\
+        <style type="text/css">
+        #T__LEVEL0_ROW0, #T__LEVEL1_ROW0 {
+          color: red;
+        }
+        #T__LEVEL0_col0, #T__LEVEL1_col0 {
+          color: green;
+        }
+        #T__ROW0_col0 {
+          color: blue;
+        }
+        </style>
+        <table id="T_">
+          <thead>
+            <tr>
+              <th class="BLANK" >&nbsp;</th>
+              <th class="IDXNAME LEVEL0" >n1</th>
+              <th id="T__LEVEL0_col0" class="col_heading LEVEL0 col0" >a</th>
+            </tr>
+            <tr>
+              <th class="BLANK" >&nbsp;</th>
+              <th class="IDXNAME LEVEL1" >n2</th>
+              <th id="T__LEVEL1_col0" class="col_heading LEVEL1 col0" >c</th>
+            </tr>
+            <tr>
+              <th class="IDXNAME LEVEL0" >n1</th>
+              <th class="IDXNAME LEVEL1" >n2</th>
+              <th class="BLANK col0" >&nbsp;</th>
+            </tr>
+          </thead>
+          <tbody>
+            <tr>
+              <th id="T__LEVEL0_ROW0" class="ROWHEAD LEVEL0 ROW0" >a</th>
+              <th id="T__LEVEL1_ROW0" class="ROWHEAD LEVEL1 ROW0" >c</th>
+              <td id="T__ROW0_col0" class="DATA ROW0 col0" >0</td>
+            </tr>
+          </tbody>
+ """ + ) + result = styler_mi.to_html() + assert result == expected + + +def test_include_css_style_rules_only_for_visible_cells(styler_mi): + # GH 43619 + result = ( + styler_mi.set_uuid("") + .map(lambda v: "color: blue;") + .hide(styler_mi.data.columns[1:], axis="columns") + .hide(styler_mi.data.index[1:], axis="index") + .to_html() + ) + expected_styles = dedent( + """\ + + """ + ) + assert expected_styles in result + + +def test_include_css_style_rules_only_for_visible_index_labels(styler_mi): + # GH 43619 + result = ( + styler_mi.set_uuid("") + .map_index(lambda v: "color: blue;", axis="index") + .hide(styler_mi.data.columns, axis="columns") + .hide(styler_mi.data.index[1:], axis="index") + .to_html() + ) + expected_styles = dedent( + """\ + + """ + ) + assert expected_styles in result + + +def test_include_css_style_rules_only_for_visible_column_labels(styler_mi): + # GH 43619 + result = ( + styler_mi.set_uuid("") + .map_index(lambda v: "color: blue;", axis="columns") + .hide(styler_mi.data.columns[1:], axis="columns") + .hide(styler_mi.data.index, axis="index") + .to_html() + ) + expected_styles = dedent( + """\ + + """ + ) + assert expected_styles in result + + +def test_hiding_index_columns_multiindex_alignment(): + # gh 43644 + midx = MultiIndex.from_product( + [["i0", "j0"], ["i1"], ["i2", "j2"]], names=["i-0", "i-1", "i-2"] + ) + cidx = MultiIndex.from_product( + [["c0"], ["c1", "d1"], ["c2", "d2"]], names=["c-0", "c-1", "c-2"] + ) + df = DataFrame(np.arange(16).reshape(4, 4), index=midx, columns=cidx) + styler = Styler(df, uuid_len=0) + styler.hide(level=1, axis=0).hide(level=0, axis=1) + styler.hide([("j0", "i1", "j2")], axis=0) + styler.hide([("c0", "d1", "d2")], axis=1) + result = styler.to_html() + expected = dedent( + """\ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
 c-1c1d1
 c-2c2d2c2
i-0i-2   
i0i2012
j2456
j0i28910
+ """ + ) + assert result == expected + + +def test_hiding_index_columns_multiindex_trimming(): + # gh 44272 + df = DataFrame(np.arange(64).reshape(8, 8)) + df.columns = MultiIndex.from_product([[0, 1, 2, 3], [0, 1]]) + df.index = MultiIndex.from_product([[0, 1, 2, 3], [0, 1]]) + df.index.names, df.columns.names = ["a", "b"], ["c", "d"] + styler = Styler(df, cell_ids=False, uuid_len=0) + styler.hide([(0, 0), (0, 1), (1, 0)], axis=1).hide([(0, 0), (0, 1), (1, 0)], axis=0) + with option_context("styler.render.max_rows", 4, "styler.render.max_columns", 4): + result = styler.to_html() + + expected = dedent( + """\ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
 c123
 d1010...
ab     
1127282930...
2035363738...
143444546...
3051525354...
.....................
+ """ + ) + + assert result == expected + + +@pytest.mark.parametrize("type", ["data", "index"]) +@pytest.mark.parametrize( + "text, exp, found", + [ + ("no link, just text", False, ""), + ("subdomain not www: sub.web.com", False, ""), + ("www subdomain: www.web.com other", True, "www.web.com"), + ("scheme full structure: http://www.web.com", True, "http://www.web.com"), + ("scheme no top-level: http://www.web", True, "http://www.web"), + ("no scheme, no top-level: www.web", False, "www.web"), + ("https scheme: https://www.web.com", True, "https://www.web.com"), + ("ftp scheme: ftp://www.web", True, "ftp://www.web"), + ("ftps scheme: ftps://www.web", True, "ftps://www.web"), + ("subdirectories: www.web.com/directory", True, "www.web.com/directory"), + ("Multiple domains: www.1.2.3.4", True, "www.1.2.3.4"), + ("with port: http://web.com:80", True, "http://web.com:80"), + ( + "full net_loc scheme: http://user:pass@web.com", + True, + "http://user:pass@web.com", + ), + ( + "with valid special chars: http://web.com/,.':;~!@#$*()[]", + True, + "http://web.com/,.':;~!@#$*()[]", + ), + ], +) +def test_rendered_links(type, text, exp, found): + if type == "data": + df = DataFrame([text]) + styler = df.style.format(hyperlinks="html") + else: + df = DataFrame([0], index=[text]) + styler = df.style.format_index(hyperlinks="html") + + rendered = f'{found}' + result = styler.to_html() + assert (rendered in result) is exp + assert (text in result) is not exp # test conversion done when expected and not + + +def test_multiple_rendered_links(): + links = ("www.a.b", "http://a.c", "https://a.d", "ftp://a.e") + # pylint: disable-next=consider-using-f-string + df = DataFrame(["text {} {} text {} {}".format(*links)]) + result = df.style.format(hyperlinks="html").to_html() + href = '{0}' + for link in links: + assert href.format(link) in result + assert href.format("text") not in result + + +def test_concat(styler): + other = styler.data.agg(["mean"]).style + styler.concat(other).set_uuid("X") + result = styler.to_html() + fp = "foot0_" + expected = dedent( + f"""\ + + b + 2.690000 + + + mean + 2.650000 + + + + """ + ) + assert expected in result + + +def test_concat_recursion(styler): + df = styler.data + styler1 = styler + styler2 = Styler(df.agg(["mean"]), precision=3) + styler3 = Styler(df.agg(["mean"]), precision=4) + styler1.concat(styler2.concat(styler3)).set_uuid("X") + result = styler.to_html() + # notice that the second concat (last of the output html), + # there are two `foot_` in the id and class + fp1 = "foot0_" + fp2 = "foot0_foot0_" + expected = dedent( + f"""\ + + b + 2.690000 + + + mean + 2.650 + + + mean + 2.6500 + + + + """ + ) + assert expected in result + + +def test_concat_chain(styler): + df = styler.data + styler1 = styler + styler2 = Styler(df.agg(["mean"]), precision=3) + styler3 = Styler(df.agg(["mean"]), precision=4) + styler1.concat(styler2).concat(styler3).set_uuid("X") + result = styler.to_html() + fp1 = "foot0_" + fp2 = "foot1_" + expected = dedent( + f"""\ + + b + 2.690000 + + + mean + 2.650 + + + mean + 2.6500 + + + + """ + ) + assert expected in result + + +def test_concat_combined(): + def html_lines(foot_prefix: str): + assert foot_prefix.endswith("_") or foot_prefix == "" + fp = foot_prefix + return indent( + dedent( + f"""\ + + a + 2.610000 + + + b + 2.690000 + + """ + ), + prefix=" " * 4, + ) + + df = DataFrame([[2.61], [2.69]], index=["a", "b"], columns=["A"]) + s1 = df.style.highlight_max(color="red") + s2 = df.style.highlight_max(color="green") + s3 = 
df.style.highlight_max(color="blue")
+    s4 = df.style.highlight_max(color="yellow")
+
+    result = s1.concat(s2).concat(s3.concat(s4)).set_uuid("X").to_html()
+    expected_css = dedent(
+        """\
+        <style type="text/css">
+        #T_X_row1_col0 {
+          background-color: red;
+        }
+        #T_X_foot0_row1_col0 {
+          background-color: green;
+        }
+        #T_X_foot1_row1_col0 {
+          background-color: blue;
+        }
+        #T_X_foot1_foot0_row1_col0 {
+          background-color: yellow;
+        }
+        </style>
+        """
+    )
+    expected_table = (
+        dedent(
+            """\
+            <table id="T_X">
+              <thead>
+                <tr>
+                  <th class="blank level0" >&nbsp;</th>
+                  <th id="T_X_level0_col0" class="col_heading level0 col0" >A</th>
+                </tr>
+              </thead>
+              <tbody>
+            """
+        )
+        + html_lines("")
+        + html_lines("foot0_")
+        + html_lines("foot1_")
+        + html_lines("foot1_foot0_")
+        + dedent(
+            """\
+              </tbody>
+            </table>
+            """
+        )
+    )
+    assert expected_css + expected_table == result
+
+
+def test_to_html_na_rep_non_scalar_data(datapath):
+    # GH47103
+    df = DataFrame([{"a": 1, "b": [1, 2, 3], "c": np.nan}])
+    result = df.style.format(na_rep="-").to_html(table_uuid="test")
+    expected = """\
+<style type="text/css">
+</style>
+<table id="T_test">
+  <thead>
+    <tr>
+      <th class="blank level0" >&nbsp;</th>
+      <th id="T_test_level0_col0" class="col_heading level0 col0" >a</th>
+      <th id="T_test_level0_col1" class="col_heading level0 col1" >b</th>
+      <th id="T_test_level0_col2" class="col_heading level0 col2" >c</th>
+    </tr>
+  </thead>
+  <tbody>
+    <tr>
+      <th id="T_test_level0_row0" class="row_heading level0 row0" >0</th>
+      <td id="T_test_row0_col0" class="data row0 col0" >1</td>
+      <td id="T_test_row0_col1" class="data row0 col1" >[1, 2, 3]</td>
+      <td id="T_test_row0_col2" class="data row0 col2" >-</td>
+    </tr>
+  </tbody>
+</table>
+""" + assert result == expected diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_matplotlib.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_matplotlib.py new file mode 100644 index 0000000000000000000000000000000000000000..fb7a77f1ddb27db66a847fc1a1d87d14d95822aa --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_matplotlib.py @@ -0,0 +1,335 @@ +import gc + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + IndexSlice, + Series, +) + +pytest.importorskip("matplotlib") +pytest.importorskip("jinja2") + +import matplotlib as mpl + +from pandas.io.formats.style import Styler + + +@pytest.fixture(autouse=True) +def mpl_cleanup(): + # matplotlib/testing/decorators.py#L24 + # 1) Resets units registry + # 2) Resets rc_context + # 3) Closes all figures + mpl = pytest.importorskip("matplotlib") + mpl_units = pytest.importorskip("matplotlib.units") + plt = pytest.importorskip("matplotlib.pyplot") + orig_units_registry = mpl_units.registry.copy() + with mpl.rc_context(): + mpl.use("template") + yield + mpl_units.registry.clear() + mpl_units.registry.update(orig_units_registry) + plt.close("all") + # https://matplotlib.org/stable/users/prev_whats_new/whats_new_3.6.0.html#garbage-collection-is-no-longer-run-on-figure-close # noqa: E501 + gc.collect(1) + + +@pytest.fixture +def df(): + return DataFrame([[1, 2], [2, 4]], columns=["A", "B"]) + + +@pytest.fixture +def styler(df): + return Styler(df, uuid_len=0) + + +@pytest.fixture +def df_blank(): + return DataFrame([[0, 0], [0, 0]], columns=["A", "B"], index=["X", "Y"]) + + +@pytest.fixture +def styler_blank(df_blank): + return Styler(df_blank, uuid_len=0) + + +@pytest.mark.parametrize("f", ["background_gradient", "text_gradient"]) +def test_function_gradient(styler, f): + for c_map in [None, "YlOrRd"]: + result = getattr(styler, f)(cmap=c_map)._compute().ctx + assert all("#" in x[0][1] for x in result.values()) + assert result[(0, 0)] == result[(0, 1)] + assert result[(1, 0)] == result[(1, 1)] + + +@pytest.mark.parametrize("f", ["background_gradient", "text_gradient"]) +def test_background_gradient_color(styler, f): + result = getattr(styler, f)(subset=IndexSlice[1, "A"])._compute().ctx + if f == "background_gradient": + assert result[(1, 0)] == [("background-color", "#fff7fb"), ("color", "#000000")] + elif f == "text_gradient": + assert result[(1, 0)] == [("color", "#fff7fb")] + + +@pytest.mark.parametrize( + "axis, expected", + [ + (0, ["low", "low", "high", "high"]), + (1, ["low", "high", "low", "high"]), + (None, ["low", "mid", "mid", "high"]), + ], +) +@pytest.mark.parametrize("f", ["background_gradient", "text_gradient"]) +def test_background_gradient_axis(styler, axis, expected, f): + if f == "background_gradient": + colors = { + "low": [("background-color", "#f7fbff"), ("color", "#000000")], + "mid": [("background-color", "#abd0e6"), ("color", "#000000")], + "high": [("background-color", "#08306b"), ("color", "#f1f1f1")], + } + elif f == "text_gradient": + colors = { + "low": [("color", "#f7fbff")], + "mid": [("color", "#abd0e6")], + "high": [("color", "#08306b")], + } + result = getattr(styler, f)(cmap="Blues", axis=axis)._compute().ctx + for i, cell in enumerate([(0, 0), (0, 1), (1, 0), (1, 1)]): + assert result[cell] == colors[expected[i]] + + +@pytest.mark.parametrize( + "cmap, expected", + [ + ( + "PuBu", + { + (4, 5): [("background-color", "#86b0d3"), ("color", "#000000")], + (4, 6): 
[("background-color", "#83afd3"), ("color", "#f1f1f1")], + }, + ), + ( + "YlOrRd", + { + (4, 8): [("background-color", "#fd913e"), ("color", "#000000")], + (4, 9): [("background-color", "#fd8f3d"), ("color", "#f1f1f1")], + }, + ), + ( + None, + { + (7, 0): [("background-color", "#48c16e"), ("color", "#f1f1f1")], + (7, 1): [("background-color", "#4cc26c"), ("color", "#000000")], + }, + ), + ], +) +def test_text_color_threshold(cmap, expected): + # GH 39888 + df = DataFrame(np.arange(100).reshape(10, 10)) + result = df.style.background_gradient(cmap=cmap, axis=None)._compute().ctx + for k in expected.keys(): + assert result[k] == expected[k] + + +def test_background_gradient_vmin_vmax(): + # GH 12145 + df = DataFrame(range(5)) + ctx = df.style.background_gradient(vmin=1, vmax=3)._compute().ctx + assert ctx[(0, 0)] == ctx[(1, 0)] + assert ctx[(4, 0)] == ctx[(3, 0)] + + +def test_background_gradient_int64(): + # GH 28869 + df1 = Series(range(3)).to_frame() + df2 = Series(range(3), dtype="Int64").to_frame() + ctx1 = df1.style.background_gradient()._compute().ctx + ctx2 = df2.style.background_gradient()._compute().ctx + assert ctx2[(0, 0)] == ctx1[(0, 0)] + assert ctx2[(1, 0)] == ctx1[(1, 0)] + assert ctx2[(2, 0)] == ctx1[(2, 0)] + + +@pytest.mark.parametrize( + "axis, gmap, expected", + [ + ( + 0, + [1, 2], + { + (0, 0): [("background-color", "#fff7fb"), ("color", "#000000")], + (1, 0): [("background-color", "#023858"), ("color", "#f1f1f1")], + (0, 1): [("background-color", "#fff7fb"), ("color", "#000000")], + (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")], + }, + ), + ( + 1, + [1, 2], + { + (0, 0): [("background-color", "#fff7fb"), ("color", "#000000")], + (1, 0): [("background-color", "#fff7fb"), ("color", "#000000")], + (0, 1): [("background-color", "#023858"), ("color", "#f1f1f1")], + (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")], + }, + ), + ( + None, + np.array([[2, 1], [1, 2]]), + { + (0, 0): [("background-color", "#023858"), ("color", "#f1f1f1")], + (1, 0): [("background-color", "#fff7fb"), ("color", "#000000")], + (0, 1): [("background-color", "#fff7fb"), ("color", "#000000")], + (1, 1): [("background-color", "#023858"), ("color", "#f1f1f1")], + }, + ), + ], +) +def test_background_gradient_gmap_array(styler_blank, axis, gmap, expected): + # tests when gmap is given as a sequence and converted to ndarray + result = styler_blank.background_gradient(axis=axis, gmap=gmap)._compute().ctx + assert result == expected + + +@pytest.mark.parametrize( + "gmap, axis", [([1, 2, 3], 0), ([1, 2], 1), (np.array([[1, 2], [1, 2]]), None)] +) +def test_background_gradient_gmap_array_raises(gmap, axis): + # test when gmap as converted ndarray is bad shape + df = DataFrame([[0, 0, 0], [0, 0, 0]]) + msg = "supplied 'gmap' is not correct shape" + with pytest.raises(ValueError, match=msg): + df.style.background_gradient(gmap=gmap, axis=axis)._compute() + + +@pytest.mark.parametrize( + "gmap", + [ + DataFrame( # reverse the columns + [[2, 1], [1, 2]], columns=["B", "A"], index=["X", "Y"] + ), + DataFrame( # reverse the index + [[2, 1], [1, 2]], columns=["A", "B"], index=["Y", "X"] + ), + DataFrame( # reverse the index and columns + [[1, 2], [2, 1]], columns=["B", "A"], index=["Y", "X"] + ), + DataFrame( # add unnecessary columns + [[1, 2, 3], [2, 1, 3]], columns=["A", "B", "C"], index=["X", "Y"] + ), + DataFrame( # add unnecessary index + [[1, 2], [2, 1], [3, 3]], columns=["A", "B"], index=["X", "Y", "Z"] + ), + ], +) +@pytest.mark.parametrize( + "subset, exp_gmap", # 
exp_gmap is underlying map DataFrame should conform to + [ + (None, [[1, 2], [2, 1]]), + (["A"], [[1], [2]]), # slice only column "A" in data and gmap + (["B", "A"], [[2, 1], [1, 2]]), # reverse the columns in data + (IndexSlice["X", :], [[1, 2]]), # slice only index "X" in data and gmap + (IndexSlice[["Y", "X"], :], [[2, 1], [1, 2]]), # reverse the index in data + ], +) +def test_background_gradient_gmap_dataframe_align(styler_blank, gmap, subset, exp_gmap): + # test gmap given as DataFrame that it aligns to the data including subset + expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap, subset=subset) + result = styler_blank.background_gradient(axis=None, gmap=gmap, subset=subset) + assert expected._compute().ctx == result._compute().ctx + + +@pytest.mark.parametrize( + "gmap, axis, exp_gmap", + [ + (Series([2, 1], index=["Y", "X"]), 0, [[1, 1], [2, 2]]), # revrse the index + (Series([2, 1], index=["B", "A"]), 1, [[1, 2], [1, 2]]), # revrse the cols + (Series([1, 2, 3], index=["X", "Y", "Z"]), 0, [[1, 1], [2, 2]]), # add idx + (Series([1, 2, 3], index=["A", "B", "C"]), 1, [[1, 2], [1, 2]]), # add col + ], +) +def test_background_gradient_gmap_series_align(styler_blank, gmap, axis, exp_gmap): + # test gmap given as Series that it aligns to the data including subset + expected = styler_blank.background_gradient(axis=None, gmap=exp_gmap)._compute() + result = styler_blank.background_gradient(axis=axis, gmap=gmap)._compute() + assert expected.ctx == result.ctx + + +@pytest.mark.parametrize( + "gmap, axis", + [ + (DataFrame([[1, 2], [2, 1]], columns=["A", "B"], index=["X", "Y"]), 1), + (DataFrame([[1, 2], [2, 1]], columns=["A", "B"], index=["X", "Y"]), 0), + ], +) +def test_background_gradient_gmap_wrong_dataframe(styler_blank, gmap, axis): + # test giving a gmap in DataFrame but with wrong axis + msg = "'gmap' is a DataFrame but underlying data for operations is a Series" + with pytest.raises(ValueError, match=msg): + styler_blank.background_gradient(gmap=gmap, axis=axis)._compute() + + +def test_background_gradient_gmap_wrong_series(styler_blank): + # test giving a gmap in Series form but with wrong axis + msg = "'gmap' is a Series but underlying data for operations is a DataFrame" + gmap = Series([1, 2], index=["X", "Y"]) + with pytest.raises(ValueError, match=msg): + styler_blank.background_gradient(gmap=gmap, axis=None)._compute() + + +def test_background_gradient_nullable_dtypes(): + # GH 50712 + df1 = DataFrame([[1], [0], [np.nan]], dtype=float) + df2 = DataFrame([[1], [0], [None]], dtype="Int64") + + ctx1 = df1.style.background_gradient()._compute().ctx + ctx2 = df2.style.background_gradient()._compute().ctx + assert ctx1 == ctx2 + + +@pytest.mark.parametrize( + "cmap", + ["PuBu", mpl.colormaps["PuBu"]], +) +def test_bar_colormap(cmap): + data = DataFrame([[1, 2], [3, 4]]) + ctx = data.style.bar(cmap=cmap, axis=None)._compute().ctx + pubu_colors = { + (0, 0): "#d0d1e6", + (1, 0): "#056faf", + (0, 1): "#73a9cf", + (1, 1): "#023858", + } + for k, v in pubu_colors.items(): + assert v in ctx[k][1][1] + + +def test_bar_color_raises(df): + msg = "`color` must be string or list or tuple of 2 strings" + with pytest.raises(ValueError, match=msg): + df.style.bar(color={"a", "b"}).to_html() + with pytest.raises(ValueError, match=msg): + df.style.bar(color=["a", "b", "c"]).to_html() + + msg = "`color` and `cmap` cannot both be given" + with pytest.raises(ValueError, match=msg): + df.style.bar(color="something", cmap="something else").to_html() + + +@pytest.mark.parametrize( + 
"plot_method", + ["scatter", "hexbin"], +) +def test_pass_colormap_instance(df, plot_method): + # https://github.com/pandas-dev/pandas/issues/49374 + cmap = mpl.colors.ListedColormap([[1, 1, 1], [0, 0, 0]]) + df["c"] = df.A + df.B + kwargs = {"x": "A", "y": "B", "c": "c", "colormap": cmap} + if plot_method == "hexbin": + kwargs["C"] = kwargs.pop("c") + getattr(df.plot, plot_method)(**kwargs) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_to_latex.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_to_latex.py new file mode 100644 index 0000000000000000000000000000000000000000..7f1443c3ee66be040f668f546682924207cfd31e --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/io/formats/style/test_to_latex.py @@ -0,0 +1,1090 @@ +from textwrap import dedent + +import numpy as np +import pytest + +from pandas import ( + DataFrame, + MultiIndex, + Series, + option_context, +) + +pytest.importorskip("jinja2") +from pandas.io.formats.style import Styler +from pandas.io.formats.style_render import ( + _parse_latex_cell_styles, + _parse_latex_css_conversion, + _parse_latex_header_span, + _parse_latex_table_styles, + _parse_latex_table_wrapping, +) + + +@pytest.fixture +def df(): + return DataFrame( + {"A": [0, 1], "B": [-0.61, -1.22], "C": Series(["ab", "cd"], dtype=object)} + ) + + +@pytest.fixture +def df_ext(): + return DataFrame( + {"A": [0, 1, 2], "B": [-0.61, -1.22, -2.22], "C": ["ab", "cd", "de"]} + ) + + +@pytest.fixture +def styler(df): + return Styler(df, uuid_len=0, precision=2) + + +def test_minimal_latex_tabular(styler): + expected = dedent( + """\ + \\begin{tabular}{lrrl} + & A & B & C \\\\ + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{tabular} + """ + ) + assert styler.to_latex() == expected + + +def test_tabular_hrules(styler): + expected = dedent( + """\ + \\begin{tabular}{lrrl} + \\toprule + & A & B & C \\\\ + \\midrule + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\bottomrule + \\end{tabular} + """ + ) + assert styler.to_latex(hrules=True) == expected + + +def test_tabular_custom_hrules(styler): + styler.set_table_styles( + [ + {"selector": "toprule", "props": ":hline"}, + {"selector": "bottomrule", "props": ":otherline"}, + ] + ) # no midrule + expected = dedent( + """\ + \\begin{tabular}{lrrl} + \\hline + & A & B & C \\\\ + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\otherline + \\end{tabular} + """ + ) + assert styler.to_latex() == expected + + +def test_column_format(styler): + # default setting is already tested in `test_latex_minimal_tabular` + styler.set_table_styles([{"selector": "column_format", "props": ":cccc"}]) + + assert "\\begin{tabular}{rrrr}" in styler.to_latex(column_format="rrrr") + styler.set_table_styles([{"selector": "column_format", "props": ":r|r|cc"}]) + assert "\\begin{tabular}{r|r|cc}" in styler.to_latex() + + +def test_siunitx_cols(styler): + expected = dedent( + """\ + \\begin{tabular}{lSSl} + {} & {A} & {B} & {C} \\\\ + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{tabular} + """ + ) + assert styler.to_latex(siunitx=True) == expected + + +def test_position(styler): + assert "\\begin{table}[h!]" in styler.to_latex(position="h!") + assert "\\end{table}" in styler.to_latex(position="h!") + styler.set_table_styles([{"selector": "position", "props": ":b!"}]) + assert "\\begin{table}[b!]" in styler.to_latex() + assert "\\end{table}" in styler.to_latex() + + +@pytest.mark.parametrize("env", [None, "longtable"]) +def 
test_label(styler, env): + assert "\n\\label{text}" in styler.to_latex(label="text", environment=env) + styler.set_table_styles([{"selector": "label", "props": ":{more §text}"}]) + assert "\n\\label{more :text}" in styler.to_latex(environment=env) + + +def test_position_float_raises(styler): + msg = "`position_float` should be one of 'raggedright', 'raggedleft', 'centering'," + with pytest.raises(ValueError, match=msg): + styler.to_latex(position_float="bad_string") + + msg = "`position_float` cannot be used in 'longtable' `environment`" + with pytest.raises(ValueError, match=msg): + styler.to_latex(position_float="centering", environment="longtable") + + +@pytest.mark.parametrize("label", [(None, ""), ("text", "\\label{text}")]) +@pytest.mark.parametrize("position", [(None, ""), ("h!", "{table}[h!]")]) +@pytest.mark.parametrize("caption", [(None, ""), ("text", "\\caption{text}")]) +@pytest.mark.parametrize("column_format", [(None, ""), ("rcrl", "{tabular}{rcrl}")]) +@pytest.mark.parametrize("position_float", [(None, ""), ("centering", "\\centering")]) +def test_kwargs_combinations( + styler, label, position, caption, column_format, position_float +): + result = styler.to_latex( + label=label[0], + position=position[0], + caption=caption[0], + column_format=column_format[0], + position_float=position_float[0], + ) + assert label[1] in result + assert position[1] in result + assert caption[1] in result + assert column_format[1] in result + assert position_float[1] in result + + +def test_custom_table_styles(styler): + styler.set_table_styles( + [ + {"selector": "mycommand", "props": ":{myoptions}"}, + {"selector": "mycommand2", "props": ":{myoptions2}"}, + ] + ) + expected = dedent( + """\ + \\begin{table} + \\mycommand{myoptions} + \\mycommand2{myoptions2} + """ + ) + assert expected in styler.to_latex() + + +def test_cell_styling(styler): + styler.highlight_max(props="itshape:;Huge:--wrap;") + expected = dedent( + """\ + \\begin{tabular}{lrrl} + & A & B & C \\\\ + 0 & 0 & \\itshape {\\Huge -0.61} & ab \\\\ + 1 & \\itshape {\\Huge 1} & -1.22 & \\itshape {\\Huge cd} \\\\ + \\end{tabular} + """ + ) + assert expected == styler.to_latex() + + +def test_multiindex_columns(df): + cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df.columns = cidx + expected = dedent( + """\ + \\begin{tabular}{lrrl} + & \\multicolumn{2}{r}{A} & B \\\\ + & a & b & c \\\\ + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{tabular} + """ + ) + s = df.style.format(precision=2) + assert expected == s.to_latex() + + # non-sparse + expected = dedent( + """\ + \\begin{tabular}{lrrl} + & A & A & B \\\\ + & a & b & c \\\\ + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{tabular} + """ + ) + s = df.style.format(precision=2) + assert expected == s.to_latex(sparse_columns=False) + + +def test_multiindex_row(df_ext): + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df_ext.index = ridx + expected = dedent( + """\ + \\begin{tabular}{llrrl} + & & A & B & C \\\\ + \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\ + & b & 1 & -1.22 & cd \\\\ + B & c & 2 & -2.22 & de \\\\ + \\end{tabular} + """ + ) + styler = df_ext.style.format(precision=2) + result = styler.to_latex() + assert expected == result + + # non-sparse + expected = dedent( + """\ + \\begin{tabular}{llrrl} + & & A & B & C \\\\ + A & a & 0 & -0.61 & ab \\\\ + A & b & 1 & -1.22 & cd \\\\ + B & c & 2 & -2.22 & de \\\\ + \\end{tabular} + """ + ) + result = styler.to_latex(sparse_index=False) + assert expected == 
result + + +def test_multirow_naive(df_ext): + ridx = MultiIndex.from_tuples([("X", "x"), ("X", "y"), ("Y", "z")]) + df_ext.index = ridx + expected = dedent( + """\ + \\begin{tabular}{llrrl} + & & A & B & C \\\\ + X & x & 0 & -0.61 & ab \\\\ + & y & 1 & -1.22 & cd \\\\ + Y & z & 2 & -2.22 & de \\\\ + \\end{tabular} + """ + ) + styler = df_ext.style.format(precision=2) + result = styler.to_latex(multirow_align="naive") + assert expected == result + + +def test_multiindex_row_and_col(df_ext): + cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")]) + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df_ext.index, df_ext.columns = ridx, cidx + expected = dedent( + """\ + \\begin{tabular}{llrrl} + & & \\multicolumn{2}{l}{Z} & Y \\\\ + & & a & b & c \\\\ + \\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\ + & b & 1 & -1.22 & cd \\\\ + B & c & 2 & -2.22 & de \\\\ + \\end{tabular} + """ + ) + styler = df_ext.style.format(precision=2) + result = styler.to_latex(multirow_align="b", multicol_align="l") + assert result == expected + + # non-sparse + expected = dedent( + """\ + \\begin{tabular}{llrrl} + & & Z & Z & Y \\\\ + & & a & b & c \\\\ + A & a & 0 & -0.61 & ab \\\\ + A & b & 1 & -1.22 & cd \\\\ + B & c & 2 & -2.22 & de \\\\ + \\end{tabular} + """ + ) + result = styler.to_latex(sparse_index=False, sparse_columns=False) + assert result == expected + + +@pytest.mark.parametrize( + "multicol_align, siunitx, header", + [ + ("naive-l", False, " & A & &"), + ("naive-r", False, " & & & A"), + ("naive-l", True, "{} & {A} & {} & {}"), + ("naive-r", True, "{} & {} & {} & {A}"), + ], +) +def test_multicol_naive(df, multicol_align, siunitx, header): + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("A", "c")]) + df.columns = ridx + level1 = " & a & b & c" if not siunitx else "{} & {a} & {b} & {c}" + col_format = "lrrl" if not siunitx else "lSSl" + expected = dedent( + f"""\ + \\begin{{tabular}}{{{col_format}}} + {header} \\\\ + {level1} \\\\ + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{{tabular}} + """ + ) + styler = df.style.format(precision=2) + result = styler.to_latex(multicol_align=multicol_align, siunitx=siunitx) + assert expected == result + + +def test_multi_options(df_ext): + cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")]) + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df_ext.index, df_ext.columns = ridx, cidx + styler = df_ext.style.format(precision=2) + + expected = dedent( + """\ + & & \\multicolumn{2}{r}{Z} & Y \\\\ + & & a & b & c \\\\ + \\multirow[c]{2}{*}{A} & a & 0 & -0.61 & ab \\\\ + """ + ) + result = styler.to_latex() + assert expected in result + + with option_context("styler.latex.multicol_align", "l"): + assert " & & \\multicolumn{2}{l}{Z} & Y \\\\" in styler.to_latex() + + with option_context("styler.latex.multirow_align", "b"): + assert "\\multirow[b]{2}{*}{A} & a & 0 & -0.61 & ab \\\\" in styler.to_latex() + + +def test_multiindex_columns_hidden(): + df = DataFrame([[1, 2, 3, 4]]) + df.columns = MultiIndex.from_tuples([("A", 1), ("A", 2), ("A", 3), ("B", 1)]) + s = df.style + assert "{tabular}{lrrrr}" in s.to_latex() + s.set_table_styles([]) # reset the position command + s.hide([("A", 2)], axis="columns") + assert "{tabular}{lrrr}" in s.to_latex() + + +@pytest.mark.parametrize( + "option, value", + [ + ("styler.sparse.index", True), + ("styler.sparse.index", False), + ("styler.sparse.columns", True), + ("styler.sparse.columns", False), + ], +) +def test_sparse_options(df_ext, 
option, value): + cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")]) + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df_ext.index, df_ext.columns = ridx, cidx + styler = df_ext.style + + latex1 = styler.to_latex() + with option_context(option, value): + latex2 = styler.to_latex() + assert (latex1 == latex2) is value + + +def test_hidden_index(styler): + styler.hide(axis="index") + expected = dedent( + """\ + \\begin{tabular}{rrl} + A & B & C \\\\ + 0 & -0.61 & ab \\\\ + 1 & -1.22 & cd \\\\ + \\end{tabular} + """ + ) + assert styler.to_latex() == expected + + +@pytest.mark.parametrize("environment", ["table", "figure*", None]) +def test_comprehensive(df_ext, environment): + # test as many low level features simultaneously as possible + cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")]) + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df_ext.index, df_ext.columns = ridx, cidx + stlr = df_ext.style + stlr.set_caption("mycap") + stlr.set_table_styles( + [ + {"selector": "label", "props": ":{fig§item}"}, + {"selector": "position", "props": ":h!"}, + {"selector": "position_float", "props": ":centering"}, + {"selector": "column_format", "props": ":rlrlr"}, + {"selector": "toprule", "props": ":toprule"}, + {"selector": "midrule", "props": ":midrule"}, + {"selector": "bottomrule", "props": ":bottomrule"}, + {"selector": "rowcolors", "props": ":{3}{pink}{}"}, # custom command + ] + ) + stlr.highlight_max(axis=0, props="textbf:--rwrap;cellcolor:[rgb]{1,1,0.6}--rwrap") + stlr.highlight_max(axis=None, props="Huge:--wrap;", subset=[("Z", "a"), ("Z", "b")]) + + expected = ( + """\ +\\begin{table}[h!] +\\centering +\\caption{mycap} +\\label{fig:item} +\\rowcolors{3}{pink}{} +\\begin{tabular}{rlrlr} +\\toprule + & & \\multicolumn{2}{r}{Z} & Y \\\\ + & & a & b & c \\\\ +\\midrule +\\multirow[c]{2}{*}{A} & a & 0 & \\textbf{\\cellcolor[rgb]{1,1,0.6}{-0.61}} & ab \\\\ + & b & 1 & -1.22 & cd \\\\ +B & c & \\textbf{\\cellcolor[rgb]{1,1,0.6}{{\\Huge 2}}} & -2.22 & """ + """\ +\\textbf{\\cellcolor[rgb]{1,1,0.6}{de}} \\\\ +\\bottomrule +\\end{tabular} +\\end{table} +""" + ).replace("table", environment if environment else "table") + result = stlr.format(precision=2).to_latex(environment=environment) + assert result == expected + + +def test_environment_option(styler): + with option_context("styler.latex.environment", "bar-env"): + assert "\\begin{bar-env}" in styler.to_latex() + assert "\\begin{foo-env}" in styler.to_latex(environment="foo-env") + + +def test_parse_latex_table_styles(styler): + styler.set_table_styles( + [ + {"selector": "foo", "props": [("attr", "value")]}, + {"selector": "bar", "props": [("attr", "overwritten")]}, + {"selector": "bar", "props": [("attr", "baz"), ("attr2", "ignored")]}, + {"selector": "label", "props": [("", "{fig§item}")]}, + ] + ) + assert _parse_latex_table_styles(styler.table_styles, "bar") == "baz" + + # test '§' replaced by ':' [for CSS compatibility] + assert _parse_latex_table_styles(styler.table_styles, "label") == "{fig:item}" + + +def test_parse_latex_cell_styles_basic(): # test nesting + cell_style = [("itshape", "--rwrap"), ("cellcolor", "[rgb]{0,1,1}--rwrap")] + expected = "\\itshape{\\cellcolor[rgb]{0,1,1}{text}}" + assert _parse_latex_cell_styles(cell_style, "text") == expected + + +@pytest.mark.parametrize( + "wrap_arg, expected", + [ # test wrapping + ("", "\\ "), + ("--wrap", "{\\ }"), + ("--nowrap", "\\ "), + ("--lwrap", "{\\} "), + ("--dwrap", "{\\}{}"), + ("--rwrap", "\\{}"), + 
], +) +def test_parse_latex_cell_styles_braces(wrap_arg, expected): + cell_style = [("", f"{wrap_arg}")] + assert _parse_latex_cell_styles(cell_style, "") == expected + + +def test_parse_latex_header_span(): + cell = {"attributes": 'colspan="3"', "display_value": "text", "cellstyle": []} + expected = "\\multicolumn{3}{Y}{text}" + assert _parse_latex_header_span(cell, "X", "Y") == expected + + cell = {"attributes": 'rowspan="5"', "display_value": "text", "cellstyle": []} + expected = "\\multirow[X]{5}{*}{text}" + assert _parse_latex_header_span(cell, "X", "Y") == expected + + cell = {"display_value": "text", "cellstyle": []} + assert _parse_latex_header_span(cell, "X", "Y") == "text" + + cell = {"display_value": "text", "cellstyle": [("bfseries", "--rwrap")]} + assert _parse_latex_header_span(cell, "X", "Y") == "\\bfseries{text}" + + +def test_parse_latex_table_wrapping(styler): + styler.set_table_styles( + [ + {"selector": "toprule", "props": ":value"}, + {"selector": "bottomrule", "props": ":value"}, + {"selector": "midrule", "props": ":value"}, + {"selector": "column_format", "props": ":value"}, + ] + ) + assert _parse_latex_table_wrapping(styler.table_styles, styler.caption) is False + assert _parse_latex_table_wrapping(styler.table_styles, "some caption") is True + styler.set_table_styles( + [ + {"selector": "not-ignored", "props": ":value"}, + ], + overwrite=False, + ) + assert _parse_latex_table_wrapping(styler.table_styles, None) is True + + +def test_short_caption(styler): + result = styler.to_latex(caption=("full cap", "short cap")) + assert "\\caption[short cap]{full cap}" in result + + +@pytest.mark.parametrize( + "css, expected", + [ + ([("color", "red")], [("color", "{red}")]), # test color and input format types + ( + [("color", "rgb(128, 128, 128 )")], + [("color", "[rgb]{0.502, 0.502, 0.502}")], + ), + ( + [("color", "rgb(128, 50%, 25% )")], + [("color", "[rgb]{0.502, 0.500, 0.250}")], + ), + ( + [("color", "rgba(128,128,128,1)")], + [("color", "[rgb]{0.502, 0.502, 0.502}")], + ), + ([("color", "#FF00FF")], [("color", "[HTML]{FF00FF}")]), + ([("color", "#F0F")], [("color", "[HTML]{FF00FF}")]), + ([("font-weight", "bold")], [("bfseries", "")]), # test font-weight and types + ([("font-weight", "bolder")], [("bfseries", "")]), + ([("font-weight", "normal")], []), + ([("background-color", "red")], [("cellcolor", "{red}--lwrap")]), + ( + [("background-color", "#FF00FF")], # test background-color command and wrap + [("cellcolor", "[HTML]{FF00FF}--lwrap")], + ), + ([("font-style", "italic")], [("itshape", "")]), # test font-style and types + ([("font-style", "oblique")], [("slshape", "")]), + ([("font-style", "normal")], []), + ([("color", "red /*--dwrap*/")], [("color", "{red}--dwrap")]), # css comments + ([("background-color", "red /* --dwrap */")], [("cellcolor", "{red}--dwrap")]), + ], +) +def test_parse_latex_css_conversion(css, expected): + result = _parse_latex_css_conversion(css) + assert result == expected + + +@pytest.mark.parametrize( + "env, inner_env", + [ + (None, "tabular"), + ("table", "tabular"), + ("longtable", "longtable"), + ], +) +@pytest.mark.parametrize( + "convert, exp", [(True, "bfseries"), (False, "font-weightbold")] +) +def test_parse_latex_css_convert_minimal(styler, env, inner_env, convert, exp): + # parameters ensure longtable template is also tested + styler.highlight_max(props="font-weight:bold;") + result = styler.to_latex(convert_css=convert, environment=env) + expected = dedent( + f"""\ + 0 & 0 & \\{exp} -0.61 & ab \\\\ + 1 & \\{exp} 1 & -1.22 & 
\\{exp} cd \\\\ + \\end{{{inner_env}}} + """ + ) + assert expected in result + + +def test_parse_latex_css_conversion_option(): + css = [("command", "option--latex--wrap")] + expected = [("command", "option--wrap")] + result = _parse_latex_css_conversion(css) + assert result == expected + + +def test_styler_object_after_render(styler): + # GH 42320 + pre_render = styler._copy(deepcopy=True) + styler.to_latex( + column_format="rllr", + position="h", + position_float="centering", + hrules=True, + label="my lab", + caption="my cap", + ) + + assert pre_render.table_styles == styler.table_styles + assert pre_render.caption == styler.caption + + +def test_longtable_comprehensive(styler): + result = styler.to_latex( + environment="longtable", hrules=True, label="fig:A", caption=("full", "short") + ) + expected = dedent( + """\ + \\begin{longtable}{lrrl} + \\caption[short]{full} \\label{fig:A} \\\\ + \\toprule + & A & B & C \\\\ + \\midrule + \\endfirsthead + \\caption[]{full} \\\\ + \\toprule + & A & B & C \\\\ + \\midrule + \\endhead + \\midrule + \\multicolumn{4}{r}{Continued on next page} \\\\ + \\midrule + \\endfoot + \\bottomrule + \\endlastfoot + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{longtable} + """ + ) + assert result == expected + + +def test_longtable_minimal(styler): + result = styler.to_latex(environment="longtable") + expected = dedent( + """\ + \\begin{longtable}{lrrl} + & A & B & C \\\\ + \\endfirsthead + & A & B & C \\\\ + \\endhead + \\multicolumn{4}{r}{Continued on next page} \\\\ + \\endfoot + \\endlastfoot + 0 & 0 & -0.61 & ab \\\\ + 1 & 1 & -1.22 & cd \\\\ + \\end{longtable} + """ + ) + assert result == expected + + +@pytest.mark.parametrize( + "sparse, exp, siunitx", + [ + (True, "{} & \\multicolumn{2}{r}{A} & {B}", True), + (False, "{} & {A} & {A} & {B}", True), + (True, " & \\multicolumn{2}{r}{A} & B", False), + (False, " & A & A & B", False), + ], +) +def test_longtable_multiindex_columns(df, sparse, exp, siunitx): + cidx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df.columns = cidx + with_si = "{} & {a} & {b} & {c} \\\\" + without_si = " & a & b & c \\\\" + expected = dedent( + f"""\ + \\begin{{longtable}}{{l{"SS" if siunitx else "rr"}l}} + {exp} \\\\ + {with_si if siunitx else without_si} + \\endfirsthead + {exp} \\\\ + {with_si if siunitx else without_si} + \\endhead + """ + ) + result = df.style.to_latex( + environment="longtable", sparse_columns=sparse, siunitx=siunitx + ) + assert expected in result + + +@pytest.mark.parametrize( + "caption, cap_exp", + [ + ("full", ("{full}", "")), + (("full", "short"), ("{full}", "[short]")), + ], +) +@pytest.mark.parametrize("label, lab_exp", [(None, ""), ("tab:A", " \\label{tab:A}")]) +def test_longtable_caption_label(styler, caption, cap_exp, label, lab_exp): + cap_exp1 = f"\\caption{cap_exp[1]}{cap_exp[0]}" + cap_exp2 = f"\\caption[]{cap_exp[0]}" + + expected = dedent( + f"""\ + {cap_exp1}{lab_exp} \\\\ + & A & B & C \\\\ + \\endfirsthead + {cap_exp2} \\\\ + """ + ) + assert expected in styler.to_latex( + environment="longtable", caption=caption, label=label + ) + + +@pytest.mark.parametrize("index", [True, False]) +@pytest.mark.parametrize( + "columns, siunitx", + [ + (True, True), + (True, False), + (False, False), + ], +) +def test_apply_map_header_render_mi(df_ext, index, columns, siunitx): + cidx = MultiIndex.from_tuples([("Z", "a"), ("Z", "b"), ("Y", "c")]) + ridx = MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "c")]) + df_ext.index, df_ext.columns = ridx, cidx + styler = 
df_ext.style + + func = lambda v: "bfseries: --rwrap" if "A" in v or "Z" in v or "c" in v else None + + if index: + styler.map_index(func, axis="index") + if columns: + styler.map_index(func, axis="columns") + + result = styler.to_latex(siunitx=siunitx) + + expected_index = dedent( + """\ + \\multirow[c]{2}{*}{\\bfseries{A}} & a & 0 & -0.610000 & ab \\\\ + \\bfseries{} & b & 1 & -1.220000 & cd \\\\ + B & \\bfseries{c} & 2 & -2.220000 & de \\\\ + """ + ) + assert (expected_index in result) is index + + exp_cols_si = dedent( + """\ + {} & {} & \\multicolumn{2}{r}{\\bfseries{Z}} & {Y} \\\\ + {} & {} & {a} & {b} & {\\bfseries{c}} \\\\ + """ + ) + exp_cols_no_si = """\ + & & \\multicolumn{2}{r}{\\bfseries{Z}} & Y \\\\ + & & a & b & \\bfseries{c} \\\\ +""" + assert ((exp_cols_si if siunitx else exp_cols_no_si) in result) is columns + + +def test_repr_option(styler): + assert "