diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c7b7efe6334a1b64c73007b14675bd00f5ca8a3
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_aggregation.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_aggregation.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1bfa43c3bafc3e090f3df5a8509f13b21b09c965
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_aggregation.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_algos.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_algos.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..10845542d1e0ce0adbf0a6a2c7f1ec6287009f75
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_algos.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_common.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d1e86c2a26805c9c537d8845b880ac184ddd5cf7
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_common.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_downstream.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_downstream.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26ad24b82c69e6ba856f4b43d3771d3400e62dfe
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_downstream.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_errors.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_errors.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..482b20bfb8e7b385194286f0f147b715ece86c0d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_errors.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_expressions.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_expressions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..74795ef74c2ca3568f8fec085e102d2f9f2f4736
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_expressions.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_flags.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_flags.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6b22d44bfda26e6f59eccd1835564eb1efd06295
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_flags.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_multilevel.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_multilevel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..79c7bca815d463d6ce9093fc8a8a7b378829ed75
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_multilevel.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_nanops.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_nanops.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c36d5bf31a3d2cf65a1093cc263241a7543dc263
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_nanops.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a9228bc26940c0f833d00fe1e7f086efe208fdf8
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_register_accessor.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_register_accessor.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..21fc41209f63a16827b226b0e00ac05a60839b31
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_register_accessor.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_sorting.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_sorting.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a83870a42708f555b63bbd12734bc8a68721f65
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_sorting.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_take.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_take.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b35aed39107599ad9218cb1705492b9729cb3fec
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_take.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dim2.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dim2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..90f700b532a5c29770f510399399972a0b341d82
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dim2.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3de25f82cb502cd22b8ff4aecb7b6606056262f5
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/index.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/index.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..219e1ff35c736bd9726b28ef6740a9f806cef12d
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/index.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/interface.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/interface.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..24307b2c16d5a055ac554aece587a405bf21e595
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/interface.cpython-310.pyc differ
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/setitem.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/setitem.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca19845041e231f141d480ad57f668e4d6fcd5fc
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/setitem.py
@@ -0,0 +1,451 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+
+class BaseSetitemTests:
+    @pytest.fixture(
+        params=[
+            lambda x: x.index,
+            lambda x: list(x.index),
+            lambda x: slice(None),
+            lambda x: slice(0, len(x)),
+            lambda x: range(len(x)),
+            lambda x: list(range(len(x))),
+            lambda x: np.ones(len(x), dtype=bool),
+        ],
+        ids=[
+            "index",
+            "list[index]",
+            "null_slice",
+            "full_slice",
+            "range",
+            "list(range)",
+            "mask",
+        ],
+    )
+    def full_indexer(self, request):
+        """
+        Fixture for an indexer to pass to obj.loc to get/set the full length of the
+        object.
+
+        In some cases, assumes that obj.index is the default RangeIndex.
+        """
+        return request.param
+
+    @pytest.fixture(autouse=True)
+    def skip_if_immutable(self, dtype, request):
+        if dtype._is_immutable:
+            node = request.node
+            if node.name.split("[")[0] == "test_is_immutable":
+                # This fixture is auto-used, but we want to not-skip
+                # test_is_immutable.
+                return
+
+            # When BaseSetitemTests is mixed into ExtensionTests, we only
+            # want this fixture to operate on the tests defined in this
+            # class/file.
+            defined_in = node.function.__qualname__.split(".")[0]
+            if defined_in == "BaseSetitemTests":
+                pytest.skip("__setitem__ test not applicable with immutable dtype")
+
+    def test_is_immutable(self, data):
+        if data.dtype._is_immutable:
+            with pytest.raises(TypeError):
+                data[0] = data[0]
+        else:
+            data[0] = data[1]
+            assert data[0] == data[1]
+
+    def test_setitem_scalar_series(self, data, box_in_series):
+        if box_in_series:
+            data = pd.Series(data)
+        data[0] = data[1]
+        assert data[0] == data[1]
+
+    def test_setitem_sequence(self, data, box_in_series):
+        if box_in_series:
+            data = pd.Series(data)
+        original = data.copy()
+
+        data[[0, 1]] = [data[1], data[0]]
+        assert data[0] == original[1]
+        assert data[1] == original[0]
+
+    def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
+        ser = pd.Series(data)
+        original = ser.copy()
+        value = [data[0]]
+        if as_array:
+            value = data._from_sequence(value, dtype=data.dtype)
+
+        xpr = "cannot set using a {} indexer with a different length"
+        with pytest.raises(ValueError, match=xpr.format("list-like")):
+            ser[[0, 1]] = value
+        # Ensure no modifications made before the exception
+        tm.assert_series_equal(ser, original)
+
+        with pytest.raises(ValueError, match=xpr.format("slice")):
+            ser[slice(3)] = value
+        tm.assert_series_equal(ser, original)
+
+    def test_setitem_empty_indexer(self, data, box_in_series):
+        if box_in_series:
+            data = pd.Series(data)
+        original = data.copy()
+        data[np.array([], dtype=int)] = []
+        tm.assert_equal(data, original)
+
+    def test_setitem_sequence_broadcasts(self, data, box_in_series):
+        if box_in_series:
+            data = pd.Series(data)
+        data[[0, 1]] = data[2]
+        assert data[0] == data[2]
+        assert data[1] == data[2]
+
+    @pytest.mark.parametrize("setter", ["loc", "iloc"])
+    def test_setitem_scalar(self, data, setter):
+        arr = pd.Series(data)
+        setter = getattr(arr, setter)
+        setter[0] = data[1]
+        assert arr[0] == data[1]
+
+    def test_setitem_loc_scalar_mixed(self, data):
+        df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
+        df.loc[0, "B"] = data[1]
+        assert df.loc[0, "B"] == data[1]
+
+    def test_setitem_loc_scalar_single(self, data):
+        df = pd.DataFrame({"B": data})
+        df.loc[10, "B"] = data[1]
+        assert df.loc[10, "B"] == data[1]
+
+    def test_setitem_loc_scalar_multiple_homogoneous(self, data):
+        df = pd.DataFrame({"A": data, "B": data})
+        df.loc[10, "B"] = data[1]
+        assert df.loc[10, "B"] == data[1]
+
+    def test_setitem_iloc_scalar_mixed(self, data):
+        df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
+        df.iloc[0, 1] = data[1]
+        assert df.loc[0, "B"] == data[1]
+
+    def test_setitem_iloc_scalar_single(self, data):
+        df = pd.DataFrame({"B": data})
+        df.iloc[10, 0] = data[1]
+        assert df.loc[10, "B"] == data[1]
+
+    def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
+        df = pd.DataFrame({"A": data, "B": data})
+        df.iloc[10, 1] = data[1]
+        assert df.loc[10, "B"] == data[1]
+
+    @pytest.mark.parametrize(
+        "mask",
+        [
+            np.array([True, True, True, False, False]),
+            pd.array([True, True, True, False, False], dtype="boolean"),
+            pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),
+        ],
+        ids=["numpy-array", "boolean-array", "boolean-array-na"],
+    )
+    def test_setitem_mask(self, data, mask, box_in_series):
+        arr = data[:5].copy()
+        expected = arr.take([0, 0, 0, 3, 4])
+        if box_in_series:
+            arr = pd.Series(arr)
+            expected = pd.Series(expected)
+        arr[mask] = data[0]
+        tm.assert_equal(expected, arr)
+
+    def test_setitem_mask_raises(self, data, box_in_series):
+        # wrong length
+        mask = np.array([True, False])
+
+        if box_in_series:
+            data = pd.Series(data)
+
+        with pytest.raises(IndexError, match="wrong length"):
+            data[mask] = data[0]
+
+        mask = pd.array(mask, dtype="boolean")
+        with pytest.raises(IndexError, match="wrong length"):
+            data[mask] = data[0]
+
+    def test_setitem_mask_boolean_array_with_na(self, data, box_in_series):
+        mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
+        mask[:3] = True
+        mask[3:5] = pd.NA
+
+        if box_in_series:
+            data = pd.Series(data)
+
+        data[mask] = data[0]
+
+        assert (data[:3] == data[0]).all()
+
+    @pytest.mark.parametrize(
+        "idx",
+        [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
+        ids=["list", "integer-array", "numpy-array"],
+    )
+    def test_setitem_integer_array(self, data, idx, box_in_series):
+        arr = data[:5].copy()
+        expected = data.take([0, 0, 0, 3, 4])
+
+        if box_in_series:
+            arr = pd.Series(arr)
+            expected = pd.Series(expected)
+
+        arr[idx] = arr[0]
+        tm.assert_equal(arr, expected)
+
+    @pytest.mark.parametrize(
+        "idx, box_in_series",
+        [
+            ([0, 1, 2, pd.NA], False),
+            pytest.param(
+                [0, 1, 2, pd.NA], True, marks=pytest.mark.xfail(reason="GH-31948")
+            ),
+            (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
+            (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
+        ],
+        ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],
+    )
+    def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):
+        arr = data.copy()
+
+        # TODO(xfail) this raises KeyError about labels not found (it tries label-based)
+        # for list of labels with Series
+        if box_in_series:
+            arr = pd.Series(data, index=[chr(100 + i) for i in range(len(data))])
+
+        msg = "Cannot index with an integer indexer containing NA values"
+        with pytest.raises(ValueError, match=msg):
+            arr[idx] = arr[0]
+
+    @pytest.mark.parametrize("as_callable", [True, False])
+    @pytest.mark.parametrize("setter", ["loc", None])
+    def test_setitem_mask_aligned(self, data, as_callable, setter):
+        ser = pd.Series(data)
+        mask = np.zeros(len(data), dtype=bool)
+        mask[:2] = True
+
+        if as_callable:
+            mask2 = lambda x: mask
+        else:
+            mask2 = mask
+
+        if setter:
+            # loc
+            target = getattr(ser, setter)
+        else:
+            # Series.__setitem__
+            target = ser
+
+        target[mask2] = data[5:7]
+
+        ser[mask2] = data[5:7]
+        assert ser[0] == data[5]
+        assert ser[1] == data[6]
+
+    @pytest.mark.parametrize("setter", ["loc", None])
+    def test_setitem_mask_broadcast(self, data, setter):
+        ser = pd.Series(data)
+        mask = np.zeros(len(data), dtype=bool)
+        mask[:2] = True
+
+        if setter:  # loc
+            target = getattr(ser, setter)
+        else:  # __setitem__
+            target = ser
+
+        target[mask] = data[10]
+        assert ser[0] == data[10]
+        assert ser[1] == data[10]
+
+    def test_setitem_expand_columns(self, data):
+        df = pd.DataFrame({"A": data})
+        result = df.copy()
+        result["B"] = 1
+        expected = pd.DataFrame({"A": data, "B": [1] * len(data)})
+        tm.assert_frame_equal(result, expected)
+
+        result = df.copy()
+        result.loc[:, "B"] = 1
+        tm.assert_frame_equal(result, expected)
+
+        # overwrite with new type
+        result["B"] = data
+        expected = pd.DataFrame({"A": data, "B": data})
+        tm.assert_frame_equal(result, expected)
+
+    def test_setitem_expand_with_extension(self, data):
+        df = pd.DataFrame({"A": [1] * len(data)})
+        result = df.copy()
+        result["B"] = data
+        expected = pd.DataFrame({"A": [1] * len(data), "B": data})
+        tm.assert_frame_equal(result, expected)
+
+        result = df.copy()
+        result.loc[:, "B"] = data
+        tm.assert_frame_equal(result, expected)
+
+    def test_setitem_frame_invalid_length(self, data):
+        df = pd.DataFrame({"A": [1] * len(data)})
+        xpr = (
+            rf"Length of values \({len(data[:5])}\) "
+            rf"does not match length of index \({len(df)}\)"
+        )
+        with pytest.raises(ValueError, match=xpr):
+            df["B"] = data[:5]
+
+    def test_setitem_tuple_index(self, data):
+        ser = pd.Series(data[:2], index=[(0, 0), (0, 1)])
+        expected = pd.Series(data.take([1, 1]), index=ser.index)
+        ser[(0, 0)] = data[1]
+        tm.assert_series_equal(ser, expected)
+
+    def test_setitem_slice(self, data, box_in_series):
+        arr = data[:5].copy()
+        expected = data.take([0, 0, 0, 3, 4])
+        if box_in_series:
+            arr = pd.Series(arr)
+            expected = pd.Series(expected)
+
+        arr[:3] = data[0]
+        tm.assert_equal(arr, expected)
+
+    def test_setitem_loc_iloc_slice(self, data):
+        arr = data[:5].copy()
+        s = pd.Series(arr, index=["a", "b", "c", "d", "e"])
+        expected = pd.Series(data.take([0, 0, 0, 3, 4]), index=s.index)
+
+        result = s.copy()
+        result.iloc[:3] = data[0]
+        tm.assert_equal(result, expected)
+
+        result = s.copy()
+        result.loc[:"c"] = data[0]
+        tm.assert_equal(result, expected)
+
+    def test_setitem_slice_mismatch_length_raises(self, data):
+        arr = data[:5]
+        with pytest.raises(ValueError):
+            arr[:1] = arr[:2]
+
+    def test_setitem_slice_array(self, data):
+        arr = data[:5].copy()
+        arr[:5] = data[-5:]
+        tm.assert_extension_array_equal(arr, data[-5:])
+
+    def test_setitem_scalar_key_sequence_raise(self, data):
+        arr = data[:5].copy()
+        with pytest.raises(ValueError):
+            arr[0] = arr[[0, 1]]
+
+    def test_setitem_preserves_views(self, data):
+        # GH#28150 setitem shouldn't swap the underlying data
+        view1 = data.view()
+        view2 = data[:]
+
+        data[0] = data[1]
+        assert view1[0] == data[1]
+        assert view2[0] == data[1]
+
+    def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):
+        # https://github.com/pandas-dev/pandas/issues/32395
+        df = expected = pd.DataFrame({0: pd.Series(data)})
+        result = pd.DataFrame(index=df.index)
+
+        key = full_indexer(df)
+        result.loc[key, 0] = df[0]
+
+        tm.assert_frame_equal(result, expected)
+
+    def test_setitem_with_expansion_row(self, data, na_value):
+        df = pd.DataFrame({"data": data[:1]})
+
+        df.loc[1, "data"] = data[1]
+        expected = pd.DataFrame({"data": data[:2]})
+        tm.assert_frame_equal(df, expected)
+
+        # https://github.com/pandas-dev/pandas/issues/47284
+        df.loc[2, "data"] = na_value
+        expected = pd.DataFrame(
+            {"data": pd.Series([data[0], data[1], na_value], dtype=data.dtype)}
+        )
+        tm.assert_frame_equal(df, expected)
+
+    def test_setitem_series(self, data, full_indexer):
+        # https://github.com/pandas-dev/pandas/issues/32395
+        ser = pd.Series(data, name="data")
+        result = pd.Series(index=ser.index, dtype=object, name="data")
+
+        # because result has object dtype, the attempt to do setting inplace
+        # is successful, and object dtype is retained
+        key = full_indexer(ser)
+        result.loc[key] = ser
+
+        expected = pd.Series(
+            data.astype(object), index=ser.index, name="data", dtype=object
+        )
+        tm.assert_series_equal(result, expected)
+
+    def test_setitem_frame_2d_values(self, data):
+        # GH#44514
+        df = pd.DataFrame({"A": data})
+
+        # Avoiding using_array_manager fixture
+        # https://github.com/pandas-dev/pandas/pull/44514#discussion_r754002410
+        using_array_manager = isinstance(df._mgr, pd.core.internals.ArrayManager)
+        using_copy_on_write = pd.options.mode.copy_on_write
+
+        blk_data = df._mgr.arrays[0]
+
+        orig = df.copy()
+
+        df.iloc[:] = df.copy()
+        tm.assert_frame_equal(df, orig)
+
+        df.iloc[:-1] = df.iloc[:-1].copy()
+        tm.assert_frame_equal(df, orig)
+
+        df.iloc[:] = df.values
+        tm.assert_frame_equal(df, orig)
+        if not using_array_manager and not using_copy_on_write:
+            # GH#33457 Check that this setting occurred in-place
+            # FIXME(ArrayManager): this should work there too
+            assert df._mgr.arrays[0] is blk_data
+
+        df.iloc[:-1] = df.values[:-1]
+        tm.assert_frame_equal(df, orig)
+
+    def test_delitem_series(self, data):
+        # GH#40763
+        ser = pd.Series(data, name="data")
+
+        taker = np.arange(len(ser))
+        taker = np.delete(taker, 1)
+
+        expected = ser[taker]
+        del ser[1]
+        tm.assert_series_equal(ser, expected)
+
+    def test_setitem_invalid(self, data, invalid_scalar):
+        msg = ""  # messages vary by subclass, so we do not test it
+        with pytest.raises((ValueError, TypeError), match=msg):
+            data[0] = invalid_scalar
+
+        with pytest.raises((ValueError, TypeError), match=msg):
+            data[:] = invalid_scalar
+
+    def test_setitem_2d_values(self, data):
+        # GH50085
+        original = data.copy()
+        df = pd.DataFrame({"a": data, "b": data})
+        df.loc[[0, 1], :] = df.loc[[1, 0], :].values
+        assert (df.loc[0, :] == original[1]).all()
+        assert (df.loc[1, :] == original[0]).all()
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/conftest.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/conftest.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5b1295ee4a7d4ad8f4a76b58e4b40837cd46d4c
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/conftest.py
@@ -0,0 +1,230 @@
+import operator
+
+import pytest
+
+from pandas._config.config import _get_option
+
+from pandas import (
+    Series,
+    options,
+)
+
+
+@pytest.fixture
+def dtype():
+    """A fixture providing the ExtensionDtype to validate."""
+    raise NotImplementedError
+
+
+@pytest.fixture
+def data():
+    """
+    Length-100 array for this type.
+
+    * data[0] and data[1] should both be non missing
+    * data[0] and data[1] should not be equal
+    """
+    raise NotImplementedError
+
+
+@pytest.fixture
+def data_for_twos(dtype):
+    """
+    Length-100 array in which all the elements are two.
+
+    Call pytest.skip in your fixture if the dtype does not support divmod.
+    """
+    if not (dtype._is_numeric or dtype.kind == "m"):
+        # Object-dtypes may want to allow this, but for the most part
+        # only numeric and timedelta-like dtypes will need to implement this.
+        pytest.skip(f"{dtype} is not a numeric dtype")
+
+    raise NotImplementedError
+
+
+@pytest.fixture
+def data_missing():
+    """Length-2 array with [NA, Valid]"""
+    raise NotImplementedError
+
+
+@pytest.fixture(params=["data", "data_missing"])
+def all_data(request, data, data_missing):
+    """Parametrized fixture giving 'data' and 'data_missing'"""
+    if request.param == "data":
+        return data
+    elif request.param == "data_missing":
+        return data_missing
+
+
+@pytest.fixture
+def data_repeated(data):
+    """
+    Generate many datasets.
+
+    Parameters
+    ----------
+    data : fixture implementing `data`
+
+    Returns
+    -------
+    Callable[[int], Generator]:
+        A callable that takes a `count` argument and
+        returns a generator yielding `count` datasets.
+    """
+
+    def gen(count):
+        for _ in range(count):
+            yield data
+
+    return gen
+
+
+@pytest.fixture
+def data_for_sorting():
+    """
+    Length-3 array with a known sort order.
+
+    This should be three items [B, C, A] with
+    A < B < C
+
+    For boolean dtypes (for which there are only 2 values available),
+    set B=C=True
+    """
+    raise NotImplementedError
+
+
+@pytest.fixture
+def data_missing_for_sorting():
+    """
+    Length-3 array with a known sort order.
+
+    This should be three items [B, NA, A] with
+    A < B and NA missing.
+    """
+    raise NotImplementedError
+
+
+@pytest.fixture
+def na_cmp():
+    """
+    Binary operator for comparing NA values.
+
+    Should return a function of two arguments that returns
+    True if both arguments are (scalar) NA for your type.
+
+    By default, uses ``operator.is_``
+    """
+    return operator.is_
+
+
+@pytest.fixture
+def na_value(dtype):
+    """
+    The scalar missing value for this type. Default dtype.na_value.
+
+    TODO: can be removed in 3.x (see https://github.com/pandas-dev/pandas/pull/54930)
+    """
+    return dtype.na_value
+
+
+@pytest.fixture
+def data_for_grouping():
+    """
+    Data for factorization, grouping, and unique tests.
+
+    Expected to be like [B, B, NA, NA, A, A, B, C]
+
+    Where A < B < C and NA is missing.
+
+    If a dtype has _is_boolean = True, i.e. only 2 unique non-NA entries,
+    then set C=B.
+    """
+    raise NotImplementedError
+
+
+@pytest.fixture(params=[True, False])
+def box_in_series(request):
+    """Whether to box the data in a Series"""
+    return request.param
+
+
+@pytest.fixture(
+    params=[
+        lambda x: 1,
+        lambda x: [1] * len(x),
+        lambda x: Series([1] * len(x)),
+        lambda x: x,
+    ],
+    ids=["scalar", "list", "series", "object"],
+)
+def groupby_apply_op(request):
+    """
+    Functions to test groupby.apply().
+    """
+    return request.param
+
+
+@pytest.fixture(params=[True, False])
+def as_frame(request):
+    """
+    Boolean fixture to support Series and Series.to_frame() comparison testing.
+    """
+    return request.param
+
+
+@pytest.fixture(params=[True, False])
+def as_series(request):
+    """
+    Boolean fixture to support arr and Series(arr) comparison testing.
+    """
+    return request.param
+
+
+@pytest.fixture(params=[True, False])
+def use_numpy(request):
+    """
+    Boolean fixture to support comparison testing of ExtensionDtype array
+    and numpy array.
+    """
+    return request.param
+
+
+@pytest.fixture(params=["ffill", "bfill"])
+def fillna_method(request):
+    """
+    Parametrized fixture giving method parameters 'ffill' and 'bfill' for
+    Series.fillna(method=) testing.
+    """
+    return request.param
+
+
+@pytest.fixture(params=[True, False])
+def as_array(request):
+    """
+    Boolean fixture to support ExtensionDtype _from_sequence method testing.
+    """
+    return request.param
+
+
+@pytest.fixture
+def invalid_scalar(data):
+    """
+    A scalar that *cannot* be held by this ExtensionArray.
+
+    The default should work for most subclasses, but is not guaranteed.
+
+    If the array can hold any item (i.e. object dtype), then use pytest.skip.
+    """
+    return object.__new__(object)
+
+
+@pytest.fixture
+def using_copy_on_write() -> bool:
+    """
+    Fixture to check if Copy-on-Write is enabled.
+ """ + return ( + options.mode.copy_on_write is True + and _get_option("mode.data_manager", silent=True) == "block" + ) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..2a8c7e9f57a5da982530b8db854edd37baf13b6b --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/__init__.py @@ -0,0 +1,6 @@ +from pandas.tests.extension.date.array import ( + DateArray, + DateDtype, +) + +__all__ = ["DateArray", "DateDtype"] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/array.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/array.py new file mode 100644 index 0000000000000000000000000000000000000000..2306f5974ba186587dedb1159d64374601f55c86 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/array.py @@ -0,0 +1,188 @@ +from __future__ import annotations + +import datetime as dt +from typing import ( + TYPE_CHECKING, + Any, + cast, +) + +import numpy as np + +from pandas.core.dtypes.dtypes import register_extension_dtype + +from pandas.api.extensions import ( + ExtensionArray, + ExtensionDtype, +) +from pandas.api.types import pandas_dtype + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + Dtype, + PositionalIndexer, + ) + + +@register_extension_dtype +class DateDtype(ExtensionDtype): + @property + def type(self): + return dt.date + + @property + def name(self): + return "DateDtype" + + @classmethod + def construct_from_string(cls, string: str): + if not isinstance(string, str): + raise TypeError( + f"'construct_from_string' expects a string, got {type(string)}" + ) + + if string == cls.__name__: + return cls() + else: + raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'") + + @classmethod + def construct_array_type(cls): + return DateArray + + @property + def na_value(self): + return dt.date.min + + def __repr__(self) -> str: + return self.name + + +class DateArray(ExtensionArray): + def __init__( + self, + dates: ( + dt.date + | Sequence[dt.date] + | tuple[np.ndarray, np.ndarray, np.ndarray] + | np.ndarray + ), + ) -> None: + if isinstance(dates, dt.date): + self._year = np.array([dates.year]) + self._month = np.array([dates.month]) + self._day = np.array([dates.year]) + return + + ldates = len(dates) + if isinstance(dates, list): + # pre-allocate the arrays since we know the size before hand + self._year = np.zeros(ldates, dtype=np.uint16) # 65535 (0, 9999) + self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31) + self._day = np.zeros(ldates, dtype=np.uint8) # 255 (1, 12) + # populate them + for i, (y, m, d) in enumerate( + (date.year, date.month, date.day) for date in dates + ): + self._year[i] = y + self._month[i] = m + self._day[i] = d + + elif isinstance(dates, tuple): + # only support triples + if ldates != 3: + raise ValueError("only triples are valid") + # check if all elements have the same type + if any(not isinstance(x, np.ndarray) for x in dates): + raise TypeError("invalid type") + ly, lm, ld = (len(cast(np.ndarray, d)) for d in dates) + if not ly == lm == ld: + raise ValueError( + f"tuple members must have the same length: {(ly, lm, ld)}" + ) + self._year = dates[0].astype(np.uint16) + self._month = dates[1].astype(np.uint8) + self._day = dates[2].astype(np.uint8) + + elif isinstance(dates, 
np.ndarray) and dates.dtype == "U10": + self._year = np.zeros(ldates, dtype=np.uint16) # 65535 (0, 9999) + self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31) + self._day = np.zeros(ldates, dtype=np.uint8) # 255 (1, 12) + + # error: "object_" object is not iterable + obj = np.char.split(dates, sep="-") + for (i,), (y, m, d) in np.ndenumerate(obj): # type: ignore[misc] + self._year[i] = int(y) + self._month[i] = int(m) + self._day[i] = int(d) + + else: + raise TypeError(f"{type(dates)} is not supported") + + @property + def dtype(self) -> ExtensionDtype: + return DateDtype() + + def astype(self, dtype, copy=True): + dtype = pandas_dtype(dtype) + + if isinstance(dtype, DateDtype): + data = self.copy() if copy else self + else: + data = self.to_numpy(dtype=dtype, copy=copy, na_value=dt.date.min) + + return data + + @property + def nbytes(self) -> int: + return self._year.nbytes + self._month.nbytes + self._day.nbytes + + def __len__(self) -> int: + return len(self._year) # all 3 arrays are enforced to have the same length + + def __getitem__(self, item: PositionalIndexer): + if isinstance(item, int): + return dt.date(self._year[item], self._month[item], self._day[item]) + else: + raise NotImplementedError("only ints are supported as indexes") + + def __setitem__(self, key: int | slice | np.ndarray, value: Any) -> None: + if not isinstance(key, int): + raise NotImplementedError("only ints are supported as indexes") + + if not isinstance(value, dt.date): + raise TypeError("you can only set datetime.date types") + + self._year[key] = value.year + self._month[key] = value.month + self._day[key] = value.day + + def __repr__(self) -> str: + return f"DateArray{list(zip(self._year, self._month, self._day))}" + + def copy(self) -> DateArray: + return DateArray((self._year.copy(), self._month.copy(), self._day.copy())) + + def isna(self) -> np.ndarray: + return np.logical_and( + np.logical_and( + self._year == dt.date.min.year, self._month == dt.date.min.month + ), + self._day == dt.date.min.day, + ) + + @classmethod + def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False): + if isinstance(scalars, dt.date): + raise TypeError + elif isinstance(scalars, DateArray): + if dtype is not None: + return scalars.astype(dtype, copy=copy) + if copy: + return scalars.copy() + return scalars[:] + elif isinstance(scalars, np.ndarray): + scalars = scalars.astype("U10") # 10 chars for yyyy-mm-dd + return DateArray(scalars) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..34727b43a7b0fb325143dfedee4db25c4b56f5db --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__init__.py @@ -0,0 +1,8 @@ +from pandas.tests.extension.decimal.array import ( + DecimalArray, + DecimalDtype, + make_data, + to_decimal, +) + +__all__ = ["DecimalArray", "DecimalDtype", "to_decimal", "make_data"] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3cc6e7dedccf6b686bc31e8cd1d5594584135ea5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/__init__.cpython-310.pyc 
differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea39e506115ed85d2f40190283688a97d5e776db Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/array.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/test_decimal.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/test_decimal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fdf6dbf6e8fbd1749aa2374dd2428612910a144b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/test_decimal.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/array.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/array.py new file mode 100644 index 0000000000000000000000000000000000000000..521c1ff0b96bc12672b64be0fa191e153692f6da --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/array.py @@ -0,0 +1,311 @@ +from __future__ import annotations + +import decimal +import numbers +import sys +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.base import ExtensionDtype +from pandas.core.dtypes.common import ( + is_dtype_equal, + is_float, + is_integer, + pandas_dtype, +) + +import pandas as pd +from pandas.api.extensions import ( + no_default, + register_extension_dtype, +) +from pandas.api.types import ( + is_list_like, + is_scalar, +) +from pandas.core import arraylike +from pandas.core.algorithms import value_counts_internal as value_counts +from pandas.core.arraylike import OpsMixin +from pandas.core.arrays import ( + ExtensionArray, + ExtensionScalarOpsMixin, +) +from pandas.core.indexers import check_array_indexer + +if TYPE_CHECKING: + from pandas._typing import type_t + + +@register_extension_dtype +class DecimalDtype(ExtensionDtype): + type = decimal.Decimal + name = "decimal" + na_value = decimal.Decimal("NaN") + _metadata = ("context",) + + def __init__(self, context=None) -> None: + self.context = context or decimal.getcontext() + + def __repr__(self) -> str: + return f"DecimalDtype(context={self.context})" + + @classmethod + def construct_array_type(cls) -> type_t[DecimalArray]: + """ + Return the array type associated with this dtype. 
+
+        Returns
+        -------
+        type
+        """
+        return DecimalArray
+
+    @property
+    def _is_numeric(self) -> bool:
+        return True
+
+
+class DecimalArray(OpsMixin, ExtensionScalarOpsMixin, ExtensionArray):
+    __array_priority__ = 1000
+
+    def __init__(self, values, dtype=None, copy=False, context=None) -> None:
+        for i, val in enumerate(values):
+            if is_float(val) or is_integer(val):
+                if np.isnan(val):
+                    values[i] = DecimalDtype.na_value
+                else:
+                    # error: Argument 1 has incompatible type "float | int |
+                    # integer[Any]"; expected "Decimal | float | str | tuple[int,
+                    # Sequence[int], int]"
+                    values[i] = DecimalDtype.type(val)  # type: ignore[arg-type]
+            elif not isinstance(val, decimal.Decimal):
+                raise TypeError("All values must be of type " + str(decimal.Decimal))
+        values = np.asarray(values, dtype=object)
+
+        self._data = values
+        # Some aliases for common attribute names to ensure pandas supports
+        # these
+        self._items = self.data = self._data
+        # those aliases are currently not working due to assumptions
+        # in internal code (GH-20735)
+        # self._values = self.values = self.data
+        self._dtype = DecimalDtype(context)
+
+    @property
+    def dtype(self):
+        return self._dtype
+
+    @classmethod
+    def _from_sequence(cls, scalars, *, dtype=None, copy=False):
+        return cls(scalars)
+
+    @classmethod
+    def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
+        return cls._from_sequence(
+            [decimal.Decimal(x) for x in strings], dtype=dtype, copy=copy
+        )
+
+    @classmethod
+    def _from_factorized(cls, values, original):
+        return cls(values)
+
+    _HANDLED_TYPES = (decimal.Decimal, numbers.Number, np.ndarray)
+
+    def to_numpy(
+        self,
+        dtype=None,
+        copy: bool = False,
+        na_value: object = no_default,
+        decimals=None,
+    ) -> np.ndarray:
+        result = np.asarray(self, dtype=dtype)
+        if decimals is not None:
+            result = np.asarray([round(x, decimals) for x in result])
+        return result
+
+    def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
+        #
+        if not all(
+            isinstance(t, self._HANDLED_TYPES + (DecimalArray,)) for t in inputs
+        ):
+            return NotImplemented
+
+        result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
+            self, ufunc, method, *inputs, **kwargs
+        )
+        if result is not NotImplemented:
+            # e.g. test_array_ufunc_series_scalar_other
+            return result
+
+        if "out" in kwargs:
+            return arraylike.dispatch_ufunc_with_out(
+                self, ufunc, method, *inputs, **kwargs
+            )
+
+        inputs = tuple(x._data if isinstance(x, DecimalArray) else x for x in inputs)
+        result = getattr(ufunc, method)(*inputs, **kwargs)
+
+        if method == "reduce":
+            result = arraylike.dispatch_reduction_ufunc(
+                self, ufunc, method, *inputs, **kwargs
+            )
+            if result is not NotImplemented:
+                return result
+
+        def reconstruct(x):
+            if isinstance(x, (decimal.Decimal, numbers.Number)):
+                return x
+            else:
+                return type(self)._from_sequence(x, dtype=self.dtype)
+
+        if ufunc.nout > 1:
+            return tuple(reconstruct(x) for x in result)
+        else:
+            return reconstruct(result)
+
+    def __getitem__(self, item):
+        if isinstance(item, numbers.Integral):
+            return self._data[item]
+        else:
+            # array, slice.
+            item = pd.api.indexers.check_array_indexer(self, item)
+            return type(self)(self._data[item])
+
+    def take(self, indexer, allow_fill=False, fill_value=None):
+        from pandas.api.extensions import take
+
+        data = self._data
+        if allow_fill and fill_value is None:
+            fill_value = self.dtype.na_value
+
+        result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill)
+        return self._from_sequence(result, dtype=self.dtype)
+
+    def copy(self):
+        return type(self)(self._data.copy(), dtype=self.dtype)
+
+    def astype(self, dtype, copy=True):
+        if is_dtype_equal(dtype, self._dtype):
+            if not copy:
+                return self
+        dtype = pandas_dtype(dtype)
+        if isinstance(dtype, type(self.dtype)):
+            return type(self)(self._data, copy=copy, context=dtype.context)
+
+        return super().astype(dtype, copy=copy)
+
+    def __setitem__(self, key, value) -> None:
+        if is_list_like(value):
+            if is_scalar(key):
+                raise ValueError("setting an array element with a sequence.")
+            value = [decimal.Decimal(v) for v in value]
+        else:
+            value = decimal.Decimal(value)
+
+        key = check_array_indexer(self, key)
+        self._data[key] = value
+
+    def __len__(self) -> int:
+        return len(self._data)
+
+    def __contains__(self, item) -> bool | np.bool_:
+        if not isinstance(item, decimal.Decimal):
+            return False
+        elif item.is_nan():
+            return self.isna().any()
+        else:
+            return super().__contains__(item)
+
+    @property
+    def nbytes(self) -> int:
+        n = len(self)
+        if n:
+            return n * sys.getsizeof(self[0])
+        return 0
+
+    def isna(self):
+        return np.array([x.is_nan() for x in self._data], dtype=bool)
+
+    @property
+    def _na_value(self):
+        return decimal.Decimal("NaN")
+
+    def _formatter(self, boxed=False):
+        if boxed:
+            return "Decimal: {}".format
+        return repr
+
+    @classmethod
+    def _concat_same_type(cls, to_concat):
+        return cls(np.concatenate([x._data for x in to_concat]))
+
+    def _reduce(
+        self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
+    ):
+        if skipna and self.isna().any():
+            # If we don't have any NAs, we can ignore skipna
+            other = self[~self.isna()]
+            result = other._reduce(name, **kwargs)
+        elif name == "sum" and len(self) == 0:
+            # GH#29630 avoid returning int 0 or np.bool_(False) on old numpy
+            result = decimal.Decimal(0)
+        else:
+            try:
+                op = getattr(self.data, name)
+            except AttributeError as err:
+                raise NotImplementedError(
+                    f"decimal does not support the {name} operation"
+                ) from err
+            result = op(axis=0)
+
+        if keepdims:
+            return type(self)([result])
+        else:
+            return result
+
+    def _cmp_method(self, other, op):
+        # For use with OpsMixin
+        def convert_values(param):
+            if isinstance(param, ExtensionArray) or is_list_like(param):
+                ovalues = param
+            else:
+                # Assume it's an object
+                ovalues = [param] * len(self)
+            return ovalues
+
+        lvalues = self
+        rvalues = convert_values(other)
+
+        # If the operator is not defined for the underlying objects,
+        # a TypeError should be raised
+        res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
+
+        return np.asarray(res, dtype=bool)
+
+    def value_counts(self, dropna: bool = True):
+        return value_counts(self.to_numpy(), dropna=dropna)
+
+    # We override fillna here to simulate a 3rd party EA that has done so. This
+    # lets us test the deprecation telling authors to implement _pad_or_backfill
+    # Simulate a 3rd-party EA that has not yet updated to include a "copy"
+    # keyword in its fillna method.
+ # error: Signature of "fillna" incompatible with supertype "ExtensionArray" + def fillna( # type: ignore[override] + self, + value=None, + method=None, + limit: int | None = None, + ): + return super().fillna(value=value, method=method, limit=limit, copy=True) + + +def to_decimal(values, context=None): + return DecimalArray([decimal.Decimal(x) for x in values], context=context) + + +def make_data(): + return [decimal.Decimal(val) for val in np.random.default_rng(2).random(100)] + + +DecimalArray._add_arithmetic_ops() diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/test_decimal.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/test_decimal.py new file mode 100644 index 0000000000000000000000000000000000000000..9907e345ada63e54c5e80eb8b2cb11a9417799a4 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/test_decimal.py @@ -0,0 +1,567 @@ +from __future__ import annotations + +import decimal +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.tests.extension import base +from pandas.tests.extension.decimal.array import ( + DecimalArray, + DecimalDtype, + make_data, + to_decimal, +) + + +@pytest.fixture +def dtype(): + return DecimalDtype() + + +@pytest.fixture +def data(): + return DecimalArray(make_data()) + + +@pytest.fixture +def data_for_twos(): + return DecimalArray([decimal.Decimal(2) for _ in range(100)]) + + +@pytest.fixture +def data_missing(): + return DecimalArray([decimal.Decimal("NaN"), decimal.Decimal(1)]) + + +@pytest.fixture +def data_for_sorting(): + return DecimalArray( + [decimal.Decimal("1"), decimal.Decimal("2"), decimal.Decimal("0")] + ) + + +@pytest.fixture +def data_missing_for_sorting(): + return DecimalArray( + [decimal.Decimal("1"), decimal.Decimal("NaN"), decimal.Decimal("0")] + ) + + +@pytest.fixture +def na_cmp(): + return lambda x, y: x.is_nan() and y.is_nan() + + +@pytest.fixture +def data_for_grouping(): + b = decimal.Decimal("1.0") + a = decimal.Decimal("0.0") + c = decimal.Decimal("2.0") + na = decimal.Decimal("NaN") + return DecimalArray([b, b, na, na, a, a, b, c]) + + +class TestDecimalArray(base.ExtensionTests): + def _get_expected_exception( + self, op_name: str, obj, other + ) -> type[Exception] | None: + return None + + def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool: + return True + + def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool): + if op_name == "count": + return super().check_reduce(ser, op_name, skipna) + else: + result = getattr(ser, op_name)(skipna=skipna) + expected = getattr(np.asarray(ser), op_name)() + tm.assert_almost_equal(result, expected) + + def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request): + if all_numeric_reductions in ["kurt", "skew", "sem", "median"]: + mark = pytest.mark.xfail(raises=NotImplementedError) + request.applymarker(mark) + super().test_reduce_series_numeric(data, all_numeric_reductions, skipna) + + def test_reduce_frame(self, data, all_numeric_reductions, skipna, request): + op_name = all_numeric_reductions + if op_name in ["skew", "median"]: + mark = pytest.mark.xfail(raises=NotImplementedError) + request.applymarker(mark) + + return super().test_reduce_frame(data, all_numeric_reductions, skipna) + + def test_compare_scalar(self, data, comparison_op): + ser = pd.Series(data) + self._compare_other(ser, data, comparison_op, 0.5) + + def test_compare_array(self, data, 
comparison_op): + ser = pd.Series(data) + + alter = np.random.default_rng(2).choice([-1, 0, 1], len(data)) + # Randomly double, halve or keep same value + other = pd.Series(data) * [decimal.Decimal(pow(2.0, i)) for i in alter] + self._compare_other(ser, data, comparison_op, other) + + def test_arith_series_with_array(self, data, all_arithmetic_operators): + op_name = all_arithmetic_operators + ser = pd.Series(data) + + context = decimal.getcontext() + divbyzerotrap = context.traps[decimal.DivisionByZero] + invalidoptrap = context.traps[decimal.InvalidOperation] + context.traps[decimal.DivisionByZero] = 0 + context.traps[decimal.InvalidOperation] = 0 + + # Decimal supports ops with int, but not float + other = pd.Series([int(d * 100) for d in data]) + self.check_opname(ser, op_name, other) + + if "mod" not in op_name: + self.check_opname(ser, op_name, ser * 2) + + self.check_opname(ser, op_name, 0) + self.check_opname(ser, op_name, 5) + context.traps[decimal.DivisionByZero] = divbyzerotrap + context.traps[decimal.InvalidOperation] = invalidoptrap + + def test_fillna_frame(self, data_missing): + msg = "ExtensionArray.fillna added a 'copy' keyword" + with tm.assert_produces_warning( + DeprecationWarning, match=msg, check_stacklevel=False + ): + super().test_fillna_frame(data_missing) + + def test_fillna_limit_pad(self, data_missing): + msg = "ExtensionArray.fillna 'method' keyword is deprecated" + with tm.assert_produces_warning( + DeprecationWarning, + match=msg, + check_stacklevel=False, + raise_on_extra_warnings=False, + ): + super().test_fillna_limit_pad(data_missing) + + msg = "The 'method' keyword in DecimalArray.fillna is deprecated" + with tm.assert_produces_warning( + FutureWarning, + match=msg, + check_stacklevel=False, + raise_on_extra_warnings=False, + ): + super().test_fillna_limit_pad(data_missing) + + @pytest.mark.parametrize( + "limit_area, input_ilocs, expected_ilocs", + [ + ("outside", [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]), + ("outside", [1, 0, 1, 0, 1], [1, 0, 1, 0, 1]), + ("outside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 1]), + ("outside", [0, 1, 0, 1, 0], [0, 1, 0, 1, 1]), + ("inside", [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]), + ("inside", [1, 0, 1, 0, 1], [1, 1, 1, 1, 1]), + ("inside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 0]), + ("inside", [0, 1, 0, 1, 0], [0, 1, 1, 1, 0]), + ], + ) + def test_ffill_limit_area( + self, data_missing, limit_area, input_ilocs, expected_ilocs + ): + # GH#56616 + msg = "ExtensionArray.fillna 'method' keyword is deprecated" + with tm.assert_produces_warning( + DeprecationWarning, + match=msg, + check_stacklevel=False, + raise_on_extra_warnings=False, + ): + msg = "DecimalArray does not implement limit_area" + with pytest.raises(NotImplementedError, match=msg): + super().test_ffill_limit_area( + data_missing, limit_area, input_ilocs, expected_ilocs + ) + + def test_fillna_limit_backfill(self, data_missing): + msg = "Series.fillna with 'method' is deprecated" + with tm.assert_produces_warning( + FutureWarning, + match=msg, + check_stacklevel=False, + raise_on_extra_warnings=False, + ): + super().test_fillna_limit_backfill(data_missing) + + msg = "ExtensionArray.fillna 'method' keyword is deprecated" + with tm.assert_produces_warning( + DeprecationWarning, + match=msg, + check_stacklevel=False, + raise_on_extra_warnings=False, + ): + super().test_fillna_limit_backfill(data_missing) + + msg = "The 'method' keyword in DecimalArray.fillna is deprecated" + with tm.assert_produces_warning( + FutureWarning, + match=msg, + check_stacklevel=False, + raise_on_extra_warnings=False, 
+        ):
+            super().test_fillna_limit_backfill(data_missing)
+
+    def test_fillna_no_op_returns_copy(self, data):
+        msg = "|".join(
+            [
+                "ExtensionArray.fillna 'method' keyword is deprecated",
+                "The 'method' keyword in DecimalArray.fillna is deprecated",
+            ]
+        )
+        with tm.assert_produces_warning(
+            (FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False
+        ):
+            super().test_fillna_no_op_returns_copy(data)
+
+    def test_fillna_series(self, data_missing):
+        msg = "ExtensionArray.fillna added a 'copy' keyword"
+        with tm.assert_produces_warning(
+            DeprecationWarning, match=msg, check_stacklevel=False
+        ):
+            super().test_fillna_series(data_missing)
+
+    def test_fillna_series_method(self, data_missing, fillna_method):
+        msg = "|".join(
+            [
+                "ExtensionArray.fillna 'method' keyword is deprecated",
+                "The 'method' keyword in DecimalArray.fillna is deprecated",
+            ]
+        )
+        with tm.assert_produces_warning(
+            (FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False
+        ):
+            super().test_fillna_series_method(data_missing, fillna_method)
+
+    def test_fillna_copy_frame(self, data_missing, using_copy_on_write):
+        warn = DeprecationWarning if not using_copy_on_write else None
+        msg = "ExtensionArray.fillna added a 'copy' keyword"
+        with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
+            super().test_fillna_copy_frame(data_missing)
+
+    def test_fillna_copy_series(self, data_missing, using_copy_on_write):
+        warn = DeprecationWarning if not using_copy_on_write else None
+        msg = "ExtensionArray.fillna added a 'copy' keyword"
+        with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
+            super().test_fillna_copy_series(data_missing)
+
+    @pytest.mark.parametrize("dropna", [True, False])
+    def test_value_counts(self, all_data, dropna, request):
+        all_data = all_data[:10]
+        if dropna:
+            other = np.array(all_data[~all_data.isna()])
+        else:
+            other = all_data
+
+        vcs = pd.Series(all_data).value_counts(dropna=dropna)
+        vcs_ex = pd.Series(other).value_counts(dropna=dropna)
+
+        with decimal.localcontext() as ctx:
+            # avoid raising when comparing Decimal("NAN") < Decimal(2)
+            ctx.traps[decimal.InvalidOperation] = False
+
+            result = vcs.sort_index()
+            expected = vcs_ex.sort_index()
+
+        tm.assert_series_equal(result, expected)
+
+    def test_series_repr(self, data):
+        # Overriding this base test to explicitly test that
+        # the custom _formatter is used
+        ser = pd.Series(data)
+        assert data.dtype.name in repr(ser)
+        assert "Decimal: " in repr(ser)
+
+    @pytest.mark.xfail(reason="Inconsistent array-vs-scalar behavior")
+    @pytest.mark.parametrize("ufunc", [np.positive, np.negative, np.abs])
+    def test_unary_ufunc_dunder_equivalence(self, data, ufunc):
+        super().test_unary_ufunc_dunder_equivalence(data, ufunc)
+
+
+def test_take_na_value_other_decimal():
+    arr = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("2.0")])
+    result = arr.take([0, -1], allow_fill=True, fill_value=decimal.Decimal("-1.0"))
+    expected = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("-1.0")])
+    tm.assert_extension_array_equal(result, expected)
+
+
+def test_series_constructor_coerce_data_to_extension_dtype():
+    dtype = DecimalDtype()
+    ser = pd.Series([0, 1, 2], dtype=dtype)
+
+    arr = DecimalArray(
+        [decimal.Decimal(0), decimal.Decimal(1), decimal.Decimal(2)],
+        dtype=dtype,
+    )
+    exp = pd.Series(arr)
+    tm.assert_series_equal(ser, exp)
+
+
+def test_series_constructor_with_dtype():
+    arr = DecimalArray([decimal.Decimal("10.0")])
+    result = pd.Series(arr, dtype=DecimalDtype())
+    expected = pd.Series(arr)
+    tm.assert_series_equal(result, expected)
+
+    result = pd.Series(arr, dtype="int64")
+    expected = pd.Series([10])
+    tm.assert_series_equal(result, expected)
+
+
+def test_dataframe_constructor_with_dtype():
+    arr = DecimalArray([decimal.Decimal("10.0")])
+
+    result = pd.DataFrame({"A": arr}, dtype=DecimalDtype())
+    expected = pd.DataFrame({"A": arr})
+    tm.assert_frame_equal(result, expected)
+
+    arr = DecimalArray([decimal.Decimal("10.0")])
+    result = pd.DataFrame({"A": arr}, dtype="int64")
+    expected = pd.DataFrame({"A": [10]})
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("frame", [True, False])
+def test_astype_dispatches(frame):
+    # This is a dtype-specific test that ensures Series[decimal].astype
+    # gets all the way through to ExtensionArray.astype
+    # Designing a reliable smoke test that works for arbitrary data types
+    # is difficult.
+    data = pd.Series(DecimalArray([decimal.Decimal(2)]), name="a")
+    ctx = decimal.Context()
+    ctx.prec = 5
+
+    if frame:
+        data = data.to_frame()
+
+    result = data.astype(DecimalDtype(ctx))
+
+    if frame:
+        result = result["a"]
+
+    assert result.dtype.context.prec == ctx.prec
+
+
+class DecimalArrayWithoutFromSequence(DecimalArray):
+    """Helper class for testing error handling in _from_sequence."""
+
+    @classmethod
+    def _from_sequence(cls, scalars, *, dtype=None, copy=False):
+        raise KeyError("For the test")
+
+
+class DecimalArrayWithoutCoercion(DecimalArrayWithoutFromSequence):
+    @classmethod
+    def _create_arithmetic_method(cls, op):
+        return cls._create_method(op, coerce_to_dtype=False)
+
+
+DecimalArrayWithoutCoercion._add_arithmetic_ops()
+
+
+def test_combine_from_sequence_raises(monkeypatch):
+    # https://github.com/pandas-dev/pandas/issues/22850
+    cls = DecimalArrayWithoutFromSequence
+
+    @classmethod
+    def construct_array_type(cls):
+        return DecimalArrayWithoutFromSequence
+
+    monkeypatch.setattr(DecimalDtype, "construct_array_type", construct_array_type)
+
+    arr = cls([decimal.Decimal("1.0"), decimal.Decimal("2.0")])
+    ser = pd.Series(arr)
+    result = ser.combine(ser, operator.add)
+
+    # note: object dtype
+    expected = pd.Series(
+        [decimal.Decimal("2.0"), decimal.Decimal("4.0")], dtype="object"
+    )
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize(
+    "class_", [DecimalArrayWithoutFromSequence, DecimalArrayWithoutCoercion]
+)
+def test_scalar_ops_from_sequence_raises(class_):
+    # op(EA, EA) should return an EA, or an ndarray if it's not possible
+    # to return an EA with the return values.
+ arr = class_([decimal.Decimal("1.0"), decimal.Decimal("2.0")]) + result = arr + arr + expected = np.array( + [decimal.Decimal("2.0"), decimal.Decimal("4.0")], dtype="object" + ) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "reverse, expected_div, expected_mod", + [(False, [0, 1, 1, 2], [1, 0, 1, 0]), (True, [2, 1, 0, 0], [0, 0, 2, 2])], +) +def test_divmod_array(reverse, expected_div, expected_mod): + # https://github.com/pandas-dev/pandas/issues/22930 + arr = to_decimal([1, 2, 3, 4]) + if reverse: + div, mod = divmod(2, arr) + else: + div, mod = divmod(arr, 2) + expected_div = to_decimal(expected_div) + expected_mod = to_decimal(expected_mod) + + tm.assert_extension_array_equal(div, expected_div) + tm.assert_extension_array_equal(mod, expected_mod) + + +def test_ufunc_fallback(data): + a = data[:5] + s = pd.Series(a, index=range(3, 8)) + result = np.abs(s) + expected = pd.Series(np.abs(a), index=range(3, 8)) + tm.assert_series_equal(result, expected) + + +def test_array_ufunc(): + a = to_decimal([1, 2, 3]) + result = np.exp(a) + expected = to_decimal(np.exp(a._data)) + tm.assert_extension_array_equal(result, expected) + + +def test_array_ufunc_series(): + a = to_decimal([1, 2, 3]) + s = pd.Series(a) + result = np.exp(s) + expected = pd.Series(to_decimal(np.exp(a._data))) + tm.assert_series_equal(result, expected) + + +def test_array_ufunc_series_scalar_other(): + # check _HANDLED_TYPES + a = to_decimal([1, 2, 3]) + s = pd.Series(a) + result = np.add(s, decimal.Decimal(1)) + expected = pd.Series(np.add(a, decimal.Decimal(1))) + tm.assert_series_equal(result, expected) + + +def test_array_ufunc_series_defer(): + a = to_decimal([1, 2, 3]) + s = pd.Series(a) + + expected = pd.Series(to_decimal([2, 4, 6])) + r1 = np.add(s, a) + r2 = np.add(a, s) + + tm.assert_series_equal(r1, expected) + tm.assert_series_equal(r2, expected) + + +def test_groupby_agg(): + # Ensure that the result of agg is inferred to be decimal dtype + # https://github.com/pandas-dev/pandas/issues/29141 + + data = make_data()[:5] + df = pd.DataFrame( + {"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)} + ) + + # single key, selected column + expected = pd.Series(to_decimal([data[0], data[3]])) + result = df.groupby("id1")["decimals"].agg(lambda x: x.iloc[0]) + tm.assert_series_equal(result, expected, check_names=False) + result = df["decimals"].groupby(df["id1"]).agg(lambda x: x.iloc[0]) + tm.assert_series_equal(result, expected, check_names=False) + + # multiple keys, selected column + expected = pd.Series( + to_decimal([data[0], data[1], data[3]]), + index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 1)]), + ) + result = df.groupby(["id1", "id2"])["decimals"].agg(lambda x: x.iloc[0]) + tm.assert_series_equal(result, expected, check_names=False) + result = df["decimals"].groupby([df["id1"], df["id2"]]).agg(lambda x: x.iloc[0]) + tm.assert_series_equal(result, expected, check_names=False) + + # multiple columns + expected = pd.DataFrame({"id2": [0, 1], "decimals": to_decimal([data[0], data[3]])}) + result = df.groupby("id1").agg(lambda x: x.iloc[0]) + tm.assert_frame_equal(result, expected, check_names=False) + + +def test_groupby_agg_ea_method(monkeypatch): + # Ensure that the result of agg is inferred to be decimal dtype + # https://github.com/pandas-dev/pandas/issues/29141 + + def DecimalArray__my_sum(self): + return np.sum(np.array(self)) + + monkeypatch.setattr(DecimalArray, "my_sum", DecimalArray__my_sum, raising=False) + + data = make_data()[:5] + df 
= pd.DataFrame({"id": [0, 0, 0, 1, 1], "decimals": DecimalArray(data)}) + expected = pd.Series(to_decimal([data[0] + data[1] + data[2], data[3] + data[4]])) + + result = df.groupby("id")["decimals"].agg(lambda x: x.values.my_sum()) + tm.assert_series_equal(result, expected, check_names=False) + s = pd.Series(DecimalArray(data)) + grouper = np.array([0, 0, 0, 1, 1], dtype=np.int64) + result = s.groupby(grouper).agg(lambda x: x.values.my_sum()) + tm.assert_series_equal(result, expected, check_names=False) + + +def test_indexing_no_materialize(monkeypatch): + # See https://github.com/pandas-dev/pandas/issues/29708 + # Ensure that indexing operations do not materialize (convert to a numpy + # array) the ExtensionArray unnecessary + + def DecimalArray__array__(self, dtype=None): + raise Exception("tried to convert a DecimalArray to a numpy array") + + monkeypatch.setattr(DecimalArray, "__array__", DecimalArray__array__, raising=False) + + data = make_data() + s = pd.Series(DecimalArray(data)) + df = pd.DataFrame({"a": s, "b": range(len(s))}) + + # ensure the following operations do not raise an error + s[s > 0.5] + df[s > 0.5] + s.at[0] + df.at[0, "a"] + + +def test_to_numpy_keyword(): + # test the extra keyword + values = [decimal.Decimal("1.1111"), decimal.Decimal("2.2222")] + expected = np.array( + [decimal.Decimal("1.11"), decimal.Decimal("2.22")], dtype="object" + ) + a = pd.array(values, dtype="decimal") + result = a.to_numpy(decimals=2) + tm.assert_numpy_array_equal(result, expected) + + result = pd.Series(a).to_numpy(decimals=2) + tm.assert_numpy_array_equal(result, expected) + + +def test_array_copy_on_write(using_copy_on_write): + df = pd.DataFrame({"a": [decimal.Decimal(2), decimal.Decimal(3)]}, dtype="object") + df2 = df.astype(DecimalDtype()) + df.iloc[0, 0] = 0 + if using_copy_on_write: + expected = pd.DataFrame( + {"a": [decimal.Decimal(2), decimal.Decimal(3)]}, dtype=DecimalDtype() + ) + tm.assert_equal(df2.values, expected.values) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0f3f2f35377882a0fae603edfc8edb46371429fe --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__init__.py @@ -0,0 +1,7 @@ +from pandas.tests.extension.list.array import ( + ListArray, + ListDtype, + make_data, +) + +__all__ = ["ListArray", "ListDtype", "make_data"] diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..959b10a9d30b74c4bb9afb84f7170e43a698051b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/array.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/array.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..871d6f175a3abb54c100e991c957b98d1f2ac228 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/array.cpython-310.pyc differ diff --git 
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/test_list.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/test_list.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f03d50d0d00a4c24d9dfbfb70a490bc5a1fd381b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/test_list.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/array.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/array.py new file mode 100644 index 0000000000000000000000000000000000000000..b3bb35c9396f4d1748fff37b7334c68a0b055daf --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/array.py @@ -0,0 +1,137 @@ +""" +Test extension array for storing nested data in a pandas container. + +The ListArray stores an ndarray of lists. +""" +from __future__ import annotations + +import numbers +import string +from typing import TYPE_CHECKING + +import numpy as np + +from pandas.core.dtypes.base import ExtensionDtype + +import pandas as pd +from pandas.api.types import ( + is_object_dtype, + is_string_dtype, +) +from pandas.core.arrays import ExtensionArray + +if TYPE_CHECKING: + from pandas._typing import type_t + + +class ListDtype(ExtensionDtype): + type = list + name = "list" + na_value = np.nan + + @classmethod + def construct_array_type(cls) -> type_t[ListArray]: + """ + Return the array type associated with this dtype. + + Returns + ------- + type + """ + return ListArray + + +class ListArray(ExtensionArray): + dtype = ListDtype() + __array_priority__ = 1000 + + def __init__(self, values, dtype=None, copy=False) -> None: + if not isinstance(values, np.ndarray): + raise TypeError("Need to pass a numpy array as values") + for val in values: + if not isinstance(val, self.dtype.type) and not pd.isna(val): + raise TypeError("All values must be of type " + str(self.dtype.type)) + self.data = values + + @classmethod + def _from_sequence(cls, scalars, *, dtype=None, copy=False): + data = np.empty(len(scalars), dtype=object) + data[:] = scalars + return cls(data) + + def __getitem__(self, item): + if isinstance(item, numbers.Integral): + return self.data[item] + else: + # slice, list-like, mask + return type(self)(self.data[item]) + + def __len__(self) -> int: + return len(self.data) + + def isna(self): + return np.array( + [not isinstance(x, list) and np.isnan(x) for x in self.data], dtype=bool + ) + + def take(self, indexer, allow_fill=False, fill_value=None): + # re-implement here, since NumPy has trouble setting + # sized objects like UserDicts into scalar slots of + # an ndarray. + indexer = np.asarray(indexer) + msg = ( + "Index is out of bounds or cannot do a " + "non-empty take from an empty array."
+ ) + + if allow_fill: + if fill_value is None: + fill_value = self.dtype.na_value + # bounds check + if (indexer < -1).any(): + raise ValueError + try: + output = [ + self.data[loc] if loc != -1 else fill_value for loc in indexer + ] + except IndexError as err: + raise IndexError(msg) from err + else: + try: + output = [self.data[loc] for loc in indexer] + except IndexError as err: + raise IndexError(msg) from err + + return self._from_sequence(output) + + def copy(self): + return type(self)(self.data[:]) + + def astype(self, dtype, copy=True): + if isinstance(dtype, type(self.dtype)) and dtype == self.dtype: + if copy: + return self.copy() + return self + elif is_string_dtype(dtype) and not is_object_dtype(dtype): + # numpy has problems with astype(str) for nested elements + return np.array([str(x) for x in self.data], dtype=dtype) + elif not copy: + return np.asarray(self.data, dtype=dtype) + else: + return np.array(self.data, dtype=dtype, copy=copy) + + @classmethod + def _concat_same_type(cls, to_concat): + data = np.concatenate([x.data for x in to_concat]) + return cls(data) + + +def make_data(): + # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer + rng = np.random.default_rng(2) + data = np.empty(100, dtype=object) + data[:] = [ + [rng.choice(list(string.ascii_letters)) for _ in range(rng.integers(0, 10))] + for _ in range(100) + ] + return data diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/test_list.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/test_list.py new file mode 100644 index 0000000000000000000000000000000000000000..ac396cd3c60d435d34f95d5027d80d116d4560d5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/test_list.py @@ -0,0 +1,33 @@ +import pytest + +import pandas as pd +from pandas.tests.extension.list.array import ( + ListArray, + ListDtype, + make_data, +) + + +@pytest.fixture +def dtype(): + return ListDtype() + + +@pytest.fixture +def data(): + """Length-100 ListArray for semantics test.""" + data = make_data() + + while len(data[0]) == len(data[1]): + data = make_data() + + return ListArray(data) + + +def test_to_csv(data): + # https://github.com/pandas-dev/pandas/issues/28840 + # array with list-likes fails when doing astype(str) on the numpy array + # which was done in get_values_for_csv + df = pd.DataFrame({"a": data}) + res = df.to_csv() + assert str(data[0]) in res diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_arrow.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_arrow.py new file mode 100644 index 0000000000000000000000000000000000000000..d9a3033b8380ef0217f23870c0ee5d918c069ff8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_arrow.py @@ -0,0 +1,3388 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`.
+""" +from __future__ import annotations + +from datetime import ( + date, + datetime, + time, + timedelta, +) +from decimal import Decimal +from io import ( + BytesIO, + StringIO, +) +import operator +import pickle +import re + +import numpy as np +import pytest + +from pandas._libs import lib +from pandas._libs.tslibs import timezones +from pandas.compat import ( + PY311, + PY312, + is_ci_environment, + is_platform_windows, + pa_version_under11p0, + pa_version_under13p0, + pa_version_under14p0, +) +import pandas.util._test_decorators as td + +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + CategoricalDtypeType, +) + +import pandas as pd +import pandas._testing as tm +from pandas.api.extensions import no_default +from pandas.api.types import ( + is_bool_dtype, + is_float_dtype, + is_integer_dtype, + is_numeric_dtype, + is_signed_integer_dtype, + is_string_dtype, + is_unsigned_integer_dtype, +) +from pandas.tests.extension import base + +pa = pytest.importorskip("pyarrow") + +from pandas.core.arrays.arrow.array import ArrowExtensionArray +from pandas.core.arrays.arrow.extension_types import ArrowPeriodType + + +def _require_timezone_database(request): + if is_platform_windows() and is_ci_environment(): + mark = pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason=( + "TODO: Set ARROW_TIMEZONE_DATABASE environment variable " + "on CI to path to the tzdata for pyarrow." + ), + ) + request.applymarker(mark) + + +@pytest.fixture(params=tm.ALL_PYARROW_DTYPES, ids=str) +def dtype(request): + return ArrowDtype(pyarrow_dtype=request.param) + + +@pytest.fixture +def data(dtype): + pa_dtype = dtype.pyarrow_dtype + if pa.types.is_boolean(pa_dtype): + data = [True, False] * 4 + [None] + [True, False] * 44 + [None] + [True, False] + elif pa.types.is_floating(pa_dtype): + data = [1.0, 0.0] * 4 + [None] + [-2.0, -1.0] * 44 + [None] + [0.5, 99.5] + elif pa.types.is_signed_integer(pa_dtype): + data = [1, 0] * 4 + [None] + [-2, -1] * 44 + [None] + [1, 99] + elif pa.types.is_unsigned_integer(pa_dtype): + data = [1, 0] * 4 + [None] + [2, 1] * 44 + [None] + [1, 99] + elif pa.types.is_decimal(pa_dtype): + data = ( + [Decimal("1"), Decimal("0.0")] * 4 + + [None] + + [Decimal("-2.0"), Decimal("-1.0")] * 44 + + [None] + + [Decimal("0.5"), Decimal("33.123")] + ) + elif pa.types.is_date(pa_dtype): + data = ( + [date(2022, 1, 1), date(1999, 12, 31)] * 4 + + [None] + + [date(2022, 1, 1), date(2022, 1, 1)] * 44 + + [None] + + [date(1999, 12, 31), date(1999, 12, 31)] + ) + elif pa.types.is_timestamp(pa_dtype): + data = ( + [datetime(2020, 1, 1, 1, 1, 1, 1), datetime(1999, 1, 1, 1, 1, 1, 1)] * 4 + + [None] + + [datetime(2020, 1, 1, 1), datetime(1999, 1, 1, 1)] * 44 + + [None] + + [datetime(2020, 1, 1), datetime(1999, 1, 1)] + ) + elif pa.types.is_duration(pa_dtype): + data = ( + [timedelta(1), timedelta(1, 1)] * 4 + + [None] + + [timedelta(-1), timedelta(0)] * 44 + + [None] + + [timedelta(-10), timedelta(10)] + ) + elif pa.types.is_time(pa_dtype): + data = ( + [time(12, 0), time(0, 12)] * 4 + + [None] + + [time(0, 0), time(1, 1)] * 44 + + [None] + + [time(0, 5), time(5, 0)] + ) + elif pa.types.is_string(pa_dtype): + data = ["a", "b"] * 4 + [None] + ["1", "2"] * 44 + [None] + ["!", ">"] + elif pa.types.is_binary(pa_dtype): + data = [b"a", b"b"] * 4 + [None] + [b"1", b"2"] * 44 + [None] + [b"!", b">"] + else: + raise NotImplementedError + return pd.array(data, dtype=dtype) + + +@pytest.fixture +def data_missing(data): + """Length-2 array with [NA, Valid]""" + return type(data)._from_sequence([None, data[0]], 
dtype=data.dtype) + + +@pytest.fixture(params=["data", "data_missing"]) +def all_data(request, data, data_missing): + """Parametrized fixture returning 'data' or 'data_missing' arrays. + + Used to test dtype conversion with and without missing values. + """ + if request.param == "data": + return data + elif request.param == "data_missing": + return data_missing + + +@pytest.fixture +def data_for_grouping(dtype): + """ + Data for factorization, grouping, and unique tests. + + Expected to be like [B, B, NA, NA, A, A, B, C] + + Where A < B < C and NA is missing + """ + pa_dtype = dtype.pyarrow_dtype + if pa.types.is_boolean(pa_dtype): + A = False + B = True + C = True + elif pa.types.is_floating(pa_dtype): + A = -1.1 + B = 0.0 + C = 1.1 + elif pa.types.is_signed_integer(pa_dtype): + A = -1 + B = 0 + C = 1 + elif pa.types.is_unsigned_integer(pa_dtype): + A = 0 + B = 1 + C = 10 + elif pa.types.is_date(pa_dtype): + A = date(1999, 12, 31) + B = date(2010, 1, 1) + C = date(2022, 1, 1) + elif pa.types.is_timestamp(pa_dtype): + A = datetime(1999, 1, 1, 1, 1, 1, 1) + B = datetime(2020, 1, 1) + C = datetime(2020, 1, 1, 1) + elif pa.types.is_duration(pa_dtype): + A = timedelta(-1) + B = timedelta(0) + C = timedelta(1, 4) + elif pa.types.is_time(pa_dtype): + A = time(0, 0) + B = time(0, 12) + C = time(12, 12) + elif pa.types.is_string(pa_dtype): + A = "a" + B = "b" + C = "c" + elif pa.types.is_binary(pa_dtype): + A = b"a" + B = b"b" + C = b"c" + elif pa.types.is_decimal(pa_dtype): + A = Decimal("-1.1") + B = Decimal("0.0") + C = Decimal("1.1") + else: + raise NotImplementedError + return pd.array([B, B, None, None, A, A, B, C], dtype=dtype) + + +@pytest.fixture +def data_for_sorting(data_for_grouping): + """ + Length-3 array with a known sort order. + + This should be three items [B, C, A] with + A < B < C + """ + return type(data_for_grouping)._from_sequence( + [data_for_grouping[0], data_for_grouping[7], data_for_grouping[4]], + dtype=data_for_grouping.dtype, + ) + + +@pytest.fixture
def data_missing_for_sorting(data_for_grouping): + """ + Length-3 array with a known sort order. + + This should be three items [B, NA, A] with + A < B and NA missing. + """ + return type(data_for_grouping)._from_sequence( + [data_for_grouping[0], data_for_grouping[2], data_for_grouping[4]], + dtype=data_for_grouping.dtype, + ) + + +@pytest.fixture +def data_for_twos(data): + """Length-100 array in which all the elements are two.""" + pa_dtype = data.dtype.pyarrow_dtype + if ( + pa.types.is_integer(pa_dtype) + or pa.types.is_floating(pa_dtype) + or pa.types.is_decimal(pa_dtype) + or pa.types.is_duration(pa_dtype) + ): + return pd.array([2] * 100, dtype=data.dtype) + # tests will be xfailed where 2 is not a valid scalar for pa_dtype + return data + # TODO: skip otherwise?
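+
+# Illustrative sketch, not exercised by the suite (values copied from the
+# fixtures above): the parametrized ``dtype`` fixture fans out over
+# ``tm.ALL_PYARROW_DTYPES``, and every other fixture derives from it.
+# For the int64 entry, for example:
+#
+#   dtype = ArrowDtype(pyarrow_dtype=pa.int64())
+#   data = pd.array([1, 0] * 4 + [None] + [-2, -1] * 44 + [None] + [1, 99],
+#                   dtype=dtype)  # length 100, missing at positions 8 and 97
+#   data_missing = type(data)._from_sequence([None, data[0]], dtype=dtype)
+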
+ + +class TestArrowArray(base.ExtensionTests): + def test_compare_scalar(self, data, comparison_op): + ser = pd.Series(data) + self._compare_other(ser, data, comparison_op, data[0]) + + @pytest.mark.parametrize("na_action", [None, "ignore"]) + def test_map(self, data_missing, na_action): + if data_missing.dtype.kind in "mM": + result = data_missing.map(lambda x: x, na_action=na_action) + expected = data_missing.to_numpy(dtype=object) + tm.assert_numpy_array_equal(result, expected) + else: + result = data_missing.map(lambda x: x, na_action=na_action) + if data_missing.dtype == "float32[pyarrow]": + # map roundtrips through objects, which converts to float64 + expected = data_missing.to_numpy(dtype="float64", na_value=np.nan) + else: + expected = data_missing.to_numpy() + tm.assert_numpy_array_equal(result, expected) + + def test_astype_str(self, data, request): + pa_dtype = data.dtype.pyarrow_dtype + if pa.types.is_binary(pa_dtype): + request.applymarker( + pytest.mark.xfail( + reason=f"For {pa_dtype} .astype(str) decodes.", + ) + ) + elif ( + pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is None + ) or pa.types.is_duration(pa_dtype): + request.applymarker( + pytest.mark.xfail( + reason="pd.Timestamp/pd.Timedelta repr different from numpy repr", + ) + ) + super().test_astype_str(data) + + @pytest.mark.parametrize( + "nullable_string_dtype", + [ + "string[python]", + pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")), + ], + ) + def test_astype_string(self, data, nullable_string_dtype, request): + pa_dtype = data.dtype.pyarrow_dtype + if ( + pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is None + ) or pa.types.is_duration(pa_dtype): + request.applymarker( + pytest.mark.xfail( + reason="pd.Timestamp/pd.Timedelta repr different from numpy repr", + ) + ) + super().test_astype_string(data, nullable_string_dtype) + + def test_from_dtype(self, data, request): + pa_dtype = data.dtype.pyarrow_dtype + if pa.types.is_string(pa_dtype) or pa.types.is_decimal(pa_dtype): + if pa.types.is_string(pa_dtype): + reason = "ArrowDtype(pa.string()) != StringDtype('pyarrow')" + else: + reason = f"pyarrow.type_for_alias cannot infer {pa_dtype}" + + request.applymarker( + pytest.mark.xfail( + reason=reason, + ) + ) + super().test_from_dtype(data) + + def test_from_sequence_pa_array(self, data): + # https://github.com/pandas-dev/pandas/pull/47034#discussion_r955500784 + # data._pa_array = pa.ChunkedArray + result = type(data)._from_sequence(data._pa_array, dtype=data.dtype) + tm.assert_extension_array_equal(result, data) + assert isinstance(result._pa_array, pa.ChunkedArray) + + result = type(data)._from_sequence( + data._pa_array.combine_chunks(), dtype=data.dtype + ) + tm.assert_extension_array_equal(result, data) + assert isinstance(result._pa_array, pa.ChunkedArray) + + def test_from_sequence_pa_array_notimplemented(self, request): + with pytest.raises(NotImplementedError, match="Converting strings to"): + ArrowExtensionArray._from_sequence_of_strings( + ["12-1"], dtype=pa.month_day_nano_interval() + ) + + def test_from_sequence_of_strings_pa_array(self, data, request): + pa_dtype = data.dtype.pyarrow_dtype + if pa.types.is_time64(pa_dtype) and pa_dtype.equals("time64[ns]") and not PY311: + request.applymarker( + pytest.mark.xfail( + reason="Nanosecond time parsing not supported.", + ) + ) + elif pa_version_under11p0 and ( + pa.types.is_duration(pa_dtype) or pa.types.is_decimal(pa_dtype) + ): + request.applymarker( + pytest.mark.xfail( + raises=pa.ArrowNotImplementedError, + reason=f"pyarrow 
doesn't support parsing {pa_dtype}", + ) + ) + elif pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is not None: + _require_timezone_database(request) + + pa_array = data._pa_array.cast(pa.string()) + result = type(data)._from_sequence_of_strings(pa_array, dtype=data.dtype) + tm.assert_extension_array_equal(result, data) + + pa_array = pa_array.combine_chunks() + result = type(data)._from_sequence_of_strings(pa_array, dtype=data.dtype) + tm.assert_extension_array_equal(result, data) + + def check_accumulate(self, ser, op_name, skipna): + result = getattr(ser, op_name)(skipna=skipna) + + pa_type = ser.dtype.pyarrow_dtype + if pa.types.is_temporal(pa_type): + # Just check that we match the integer behavior. + if pa_type.bit_width == 32: + int_type = "int32[pyarrow]" + else: + int_type = "int64[pyarrow]" + ser = ser.astype(int_type) + result = result.astype(int_type) + + result = result.astype("Float64") + expected = getattr(ser.astype("Float64"), op_name)(skipna=skipna) + tm.assert_series_equal(result, expected, check_dtype=False) + + def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool: + # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype" has no + # attribute "pyarrow_dtype" + pa_type = ser.dtype.pyarrow_dtype # type: ignore[union-attr] + + if ( + pa.types.is_string(pa_type) + or pa.types.is_binary(pa_type) + or pa.types.is_decimal(pa_type) + ): + if op_name in ["cumsum", "cumprod", "cummax", "cummin"]: + return False + elif pa.types.is_boolean(pa_type): + if op_name in ["cumprod", "cummax", "cummin"]: + return False + elif pa.types.is_temporal(pa_type): + if op_name == "cumsum" and not pa.types.is_duration(pa_type): + return False + elif op_name == "cumprod": + return False + return True + + @pytest.mark.parametrize("skipna", [True, False]) + def test_accumulate_series(self, data, all_numeric_accumulations, skipna, request): + pa_type = data.dtype.pyarrow_dtype + op_name = all_numeric_accumulations + ser = pd.Series(data) + + if not self._supports_accumulation(ser, op_name): + # The base class test will check that we raise + return super().test_accumulate_series( + data, all_numeric_accumulations, skipna + ) + + if pa_version_under13p0 and all_numeric_accumulations != "cumsum": + # xfailing takes a long time to run because pytest + # renders the exception messages even when not showing them + opt = request.config.option + if opt.markexpr and "not slow" in opt.markexpr: + pytest.skip( + f"{all_numeric_accumulations} not implemented for pyarrow < 9" + ) + mark = pytest.mark.xfail( + reason=f"{all_numeric_accumulations} not implemented for pyarrow < 9" + ) + request.applymarker(mark) + + elif all_numeric_accumulations == "cumsum" and ( + pa.types.is_boolean(pa_type) or pa.types.is_decimal(pa_type) + ): + request.applymarker( + pytest.mark.xfail( + reason=f"{all_numeric_accumulations} not implemented for {pa_type}", + raises=NotImplementedError, + ) + ) + + self.check_accumulate(ser, op_name, skipna) + + def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool: + dtype = ser.dtype + # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype" has + # no attribute "pyarrow_dtype" + pa_dtype = dtype.pyarrow_dtype # type: ignore[union-attr] + if pa.types.is_temporal(pa_dtype) and op_name in [ + "sum", + "var", + "skew", + "kurt", + "prod", + ]: + if pa.types.is_duration(pa_dtype) and op_name in ["sum"]: + # summing timedeltas is one case that *is* well-defined + pass + else: + return False + elif ( + pa.types.is_string(pa_dtype) or pa.types.is_binary(pa_dtype) 
+ ) and op_name in [ + "sum", + "mean", + "median", + "prod", + "std", + "sem", + "var", + "skew", + "kurt", + ]: + return False + + if ( + pa.types.is_temporal(pa_dtype) + and not pa.types.is_duration(pa_dtype) + and op_name in ["any", "all"] + ): + # xref GH#34479 we support this in our non-pyarrow datetime64 dtypes, + # but it isn't obvious we _should_. For now, we keep the pyarrow + # behavior which does not support this. + return False + + return True + + def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool): + # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype" has no + # attribute "pyarrow_dtype" + pa_dtype = ser.dtype.pyarrow_dtype # type: ignore[union-attr] + if pa.types.is_integer(pa_dtype) or pa.types.is_floating(pa_dtype): + alt = ser.astype("Float64") + else: + # TODO: in the opposite case, aren't we testing... nothing? For + # e.g. date/time dtypes trying to calculate 'expected' by converting + # to object will raise for mean, std etc + alt = ser + + # TODO: in the opposite case, aren't we testing... nothing? + if op_name == "count": + result = getattr(ser, op_name)() + expected = getattr(alt, op_name)() + else: + result = getattr(ser, op_name)(skipna=skipna) + expected = getattr(alt, op_name)(skipna=skipna) + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request): + dtype = data.dtype + pa_dtype = dtype.pyarrow_dtype + + xfail_mark = pytest.mark.xfail( + raises=TypeError, + reason=( + f"{all_numeric_reductions} is not implemented in " + f"pyarrow={pa.__version__} for {pa_dtype}" + ), + ) + if all_numeric_reductions in {"skew", "kurt"} and ( + dtype._is_numeric or dtype.kind == "b" + ): + request.applymarker(xfail_mark) + + elif pa.types.is_boolean(pa_dtype) and all_numeric_reductions in { + "sem", + "std", + "var", + "median", + }: + request.applymarker(xfail_mark) + super().test_reduce_series_numeric(data, all_numeric_reductions, skipna) + + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_series_boolean( + self, data, all_boolean_reductions, skipna, na_value, request + ): + pa_dtype = data.dtype.pyarrow_dtype + xfail_mark = pytest.mark.xfail( + raises=TypeError, + reason=( + f"{all_boolean_reductions} is not implemented in " + f"pyarrow={pa.__version__} for {pa_dtype}" + ), + ) + if pa.types.is_string(pa_dtype) or pa.types.is_binary(pa_dtype): + # We *might* want to make this behave like the non-pyarrow cases, + # but have not yet decided. 
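+ # e.g. pd.Series(["a", "b"], dtype=ArrowDtype(pa.string())).any() currently
+ # raises TypeError from pyarrow, whereas object-dtype strings return True.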
+ request.applymarker(xfail_mark) + + return super().test_reduce_series_boolean(data, all_boolean_reductions, skipna) + + def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool): + if op_name in ["max", "min"]: + cmp_dtype = arr.dtype + elif arr.dtype.name == "decimal128(7, 3)[pyarrow]": + if op_name not in ["median", "var", "std"]: + cmp_dtype = arr.dtype + else: + cmp_dtype = "float64[pyarrow]" + elif op_name in ["median", "var", "std", "mean", "skew"]: + cmp_dtype = "float64[pyarrow]" + else: + cmp_dtype = { + "i": "int64[pyarrow]", + "u": "uint64[pyarrow]", + "f": "float64[pyarrow]", + }[arr.dtype.kind] + return cmp_dtype + + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_frame(self, data, all_numeric_reductions, skipna, request): + op_name = all_numeric_reductions + if op_name == "skew": + if data.dtype._is_numeric: + mark = pytest.mark.xfail(reason="skew not implemented") + request.applymarker(mark) + return super().test_reduce_frame(data, all_numeric_reductions, skipna) + + @pytest.mark.parametrize("typ", ["int64", "uint64", "float64"]) + def test_median_not_approximate(self, typ): + # GH 52679 + result = pd.Series([1, 2], dtype=f"{typ}[pyarrow]").median() + assert result == 1.5 + + def test_in_numeric_groupby(self, data_for_grouping): + dtype = data_for_grouping.dtype + if is_string_dtype(dtype): + df = pd.DataFrame( + { + "A": [1, 1, 2, 2, 3, 3, 1, 4], + "B": data_for_grouping, + "C": [1, 1, 1, 1, 1, 1, 1, 1], + } + ) + + expected = pd.Index(["C"]) + msg = re.escape(f"agg function failed [how->sum,dtype->{dtype}") + with pytest.raises(TypeError, match=msg): + df.groupby("A").sum() + result = df.groupby("A").sum(numeric_only=True).columns + tm.assert_index_equal(result, expected) + else: + super().test_in_numeric_groupby(data_for_grouping) + + def test_construct_from_string_own_name(self, dtype, request): + pa_dtype = dtype.pyarrow_dtype + if pa.types.is_decimal(pa_dtype): + request.applymarker( + pytest.mark.xfail( + raises=NotImplementedError, + reason=f"pyarrow.type_for_alias cannot infer {pa_dtype}", + ) + ) + + if pa.types.is_string(pa_dtype): + # We still support StringDtype('pyarrow') over ArrowDtype(pa.string()) + msg = r"string\[pyarrow\] should be constructed by StringDtype" + with pytest.raises(TypeError, match=msg): + dtype.construct_from_string(dtype.name) + + return + + super().test_construct_from_string_own_name(dtype) + + def test_is_dtype_from_name(self, dtype, request): + pa_dtype = dtype.pyarrow_dtype + if pa.types.is_string(pa_dtype): + # We still support StringDtype('pyarrow') over ArrowDtype(pa.string()) + assert not type(dtype).is_dtype(dtype.name) + else: + if pa.types.is_decimal(pa_dtype): + request.applymarker( + pytest.mark.xfail( + raises=NotImplementedError, + reason=f"pyarrow.type_for_alias cannot infer {pa_dtype}", + ) + ) + super().test_is_dtype_from_name(dtype) + + def test_construct_from_string_another_type_raises(self, dtype): + msg = r"'another_type' must end with '\[pyarrow\]'" + with pytest.raises(TypeError, match=msg): + type(dtype).construct_from_string("another_type") + + def test_get_common_dtype(self, dtype, request): + pa_dtype = dtype.pyarrow_dtype + if ( + pa.types.is_date(pa_dtype) + or pa.types.is_time(pa_dtype) + or (pa.types.is_timestamp(pa_dtype) and pa_dtype.tz is not None) + or pa.types.is_binary(pa_dtype) + or pa.types.is_decimal(pa_dtype) + ): + request.applymarker( + pytest.mark.xfail( + reason=( + f"{pa_dtype} does not have associated numpy " + f"dtype findable by find_common_type" + ) + 
) + ) + super().test_get_common_dtype(dtype) + + def test_is_not_string_type(self, dtype): + pa_dtype = dtype.pyarrow_dtype + if pa.types.is_string(pa_dtype): + assert is_string_dtype(dtype) + else: + super().test_is_not_string_type(dtype) + + @pytest.mark.xfail( + reason="GH 45419: pyarrow.ChunkedArray does not support views.", run=False + ) + def test_view(self, data): + super().test_view(data) + + def test_fillna_no_op_returns_copy(self, data): + data = data[~data.isna()] + + valid = data[0] + result = data.fillna(valid) + assert result is not data + tm.assert_extension_array_equal(result, data) + + result = data.fillna(method="backfill") + assert result is not data + tm.assert_extension_array_equal(result, data) + + @pytest.mark.xfail( + reason="GH 45419: pyarrow.ChunkedArray does not support views", run=False + ) + def test_transpose(self, data): + super().test_transpose(data) + + @pytest.mark.xfail( + reason="GH 45419: pyarrow.ChunkedArray does not support views", run=False + ) + def test_setitem_preserves_views(self, data): + super().test_setitem_preserves_views(data) + + @pytest.mark.parametrize("dtype_backend", ["pyarrow", no_default]) + @pytest.mark.parametrize("engine", ["c", "python"]) + def test_EA_types(self, engine, data, dtype_backend, request): + pa_dtype = data.dtype.pyarrow_dtype + if pa.types.is_decimal(pa_dtype): + request.applymarker( + pytest.mark.xfail( + raises=NotImplementedError, + reason=f"Parameterized types {pa_dtype} not supported.", + ) + ) + elif pa.types.is_timestamp(pa_dtype) and pa_dtype.unit in ("us", "ns"): + request.applymarker( + pytest.mark.xfail( + raises=ValueError, + reason="https://github.com/pandas-dev/pandas/issues/49767", + ) + ) + elif pa.types.is_binary(pa_dtype): + request.applymarker( + pytest.mark.xfail(reason="CSV parsers don't correctly handle binary") + ) + df = pd.DataFrame({"with_dtype": pd.Series(data, dtype=str(data.dtype))}) + csv_output = df.to_csv(index=False, na_rep=np.nan) + if pa.types.is_binary(pa_dtype): + csv_output = BytesIO(csv_output) + else: + csv_output = StringIO(csv_output) + result = pd.read_csv( + csv_output, + dtype={"with_dtype": str(data.dtype)}, + engine=engine, + dtype_backend=dtype_backend, + ) + expected = df + tm.assert_frame_equal(result, expected) + + def test_invert(self, data, request): + pa_dtype = data.dtype.pyarrow_dtype + if not ( + pa.types.is_boolean(pa_dtype) + or pa.types.is_integer(pa_dtype) + or pa.types.is_string(pa_dtype) + ): + request.applymarker( + pytest.mark.xfail( + raises=pa.ArrowNotImplementedError, + reason=f"pyarrow.compute.invert does not support {pa_dtype}", + ) + ) + if PY312 and pa.types.is_boolean(pa_dtype): + with tm.assert_produces_warning( + DeprecationWarning, match="Bitwise inversion", check_stacklevel=False + ): + super().test_invert(data) + else: + super().test_invert(data) + + @pytest.mark.parametrize("periods", [1, -2]) + def test_diff(self, data, periods, request): + pa_dtype = data.dtype.pyarrow_dtype + if pa.types.is_unsigned_integer(pa_dtype) and periods == 1: + request.applymarker( + pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason=( + f"diff with {pa_dtype} and periods={periods} will overflow" + ), + ) + ) + super().test_diff(data, periods) + + def test_value_counts_returns_pyarrow_int64(self, data): + # GH 51462 + data = data[:10] + result = data.value_counts() + assert result.dtype == ArrowDtype(pa.int64()) + + _combine_le_expected_dtype = "bool[pyarrow]" + + divmod_exc = NotImplementedError + + def get_op_from_name(self, op_name): + short_opname =
op_name.strip("_") + if short_opname == "rtruediv": + # use the numpy version that won't raise on division by zero + + def rtruediv(x, y): + return np.divide(y, x) + + return rtruediv + elif short_opname == "rfloordiv": + return lambda x, y: np.floor_divide(y, x) + + return tm.get_op_from_name(op_name) + + def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result): + # BaseOpsUtil._combine can upcast expected dtype + # (because it generates expected on python scalars) + # while ArrowExtensionArray maintains original type + expected = pointwise_result + + if op_name in ["eq", "ne", "lt", "le", "gt", "ge"]: + return pointwise_result.astype("boolean[pyarrow]") + + was_frame = False + if isinstance(expected, pd.DataFrame): + was_frame = True + expected_data = expected.iloc[:, 0] + original_dtype = obj.iloc[:, 0].dtype + else: + expected_data = expected + original_dtype = obj.dtype + + orig_pa_type = original_dtype.pyarrow_dtype + if not was_frame and isinstance(other, pd.Series): + # i.e. test_arith_series_with_array + if not ( + pa.types.is_floating(orig_pa_type) + or ( + pa.types.is_integer(orig_pa_type) + and op_name not in ["__truediv__", "__rtruediv__"] + ) + or pa.types.is_duration(orig_pa_type) + or pa.types.is_timestamp(orig_pa_type) + or pa.types.is_date(orig_pa_type) + or pa.types.is_decimal(orig_pa_type) + ): + # base class _combine always returns int64, while + # ArrowExtensionArray does not upcast + return expected + elif not ( + (op_name == "__floordiv__" and pa.types.is_integer(orig_pa_type)) + or pa.types.is_duration(orig_pa_type) + or pa.types.is_timestamp(orig_pa_type) + or pa.types.is_date(orig_pa_type) + or pa.types.is_decimal(orig_pa_type) + ): + # base class _combine always returns int64, while + # ArrowExtensionArray does not upcast + return expected + + pa_expected = pa.array(expected_data._values) + + if pa.types.is_duration(pa_expected.type): + if pa.types.is_date(orig_pa_type): + if pa.types.is_date64(orig_pa_type): + # TODO: why is this different vs date32? + unit = "ms" + else: + unit = "s" + else: + # pyarrow sees sequence of datetime/timedelta objects and defaults + # to "us" but the non-pointwise op retains unit + # timestamp or duration + unit = orig_pa_type.unit + if type(other) in [datetime, timedelta] and unit in ["s", "ms"]: + # pydatetime/pytimedelta objects have microsecond reso, so we + # take the higher reso of the original and microsecond. Note + # this matches what we would do with DatetimeArray/TimedeltaArray + unit = "us" + + pa_expected = pa_expected.cast(f"duration[{unit}]") + + elif pa.types.is_decimal(pa_expected.type) and pa.types.is_decimal( + orig_pa_type + ): + # decimal precision can resize in the result type depending on data + # just compare the float values + alt = getattr(obj, op_name)(other) + alt_dtype = tm.get_dtype(alt) + assert isinstance(alt_dtype, ArrowDtype) + if op_name == "__pow__" and isinstance(other, Decimal): + # TODO: would it make more sense to retain Decimal here? + alt_dtype = ArrowDtype(pa.float64()) + elif ( + op_name == "__pow__" + and isinstance(other, pd.Series) + and other.dtype == original_dtype + ): + # TODO: would it make more sense to retain Decimal here? 
+ alt_dtype = ArrowDtype(pa.float64()) + else: + assert pa.types.is_decimal(alt_dtype.pyarrow_dtype) + return expected.astype(alt_dtype) + + else: + pa_expected = pa_expected.cast(orig_pa_type) + + pd_expected = type(expected_data._values)(pa_expected) + if was_frame: + expected = pd.DataFrame( + pd_expected, index=expected.index, columns=expected.columns + ) + else: + expected = pd.Series(pd_expected) + return expected + + def _is_temporal_supported(self, opname, pa_dtype): + return ( + ( + opname in ("__add__", "__radd__") + or ( + opname + in ("__truediv__", "__rtruediv__", "__floordiv__", "__rfloordiv__") + and not pa_version_under14p0 + ) + ) + and pa.types.is_duration(pa_dtype) + or opname in ("__sub__", "__rsub__") + and pa.types.is_temporal(pa_dtype) + ) + + def _get_expected_exception( + self, op_name: str, obj, other + ) -> type[Exception] | None: + if op_name in ("__divmod__", "__rdivmod__"): + return self.divmod_exc + + dtype = tm.get_dtype(obj) + # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype" has no + # attribute "pyarrow_dtype" + pa_dtype = dtype.pyarrow_dtype # type: ignore[union-attr] + + arrow_temporal_supported = self._is_temporal_supported(op_name, pa_dtype) + if op_name in { + "__mod__", + "__rmod__", + }: + exc = NotImplementedError + elif arrow_temporal_supported: + exc = None + elif op_name in ["__add__", "__radd__"] and ( + pa.types.is_string(pa_dtype) or pa.types.is_binary(pa_dtype) + ): + exc = None + elif not ( + pa.types.is_floating(pa_dtype) + or pa.types.is_integer(pa_dtype) + or pa.types.is_decimal(pa_dtype) + ): + # TODO: in many of these cases, e.g. non-duration temporal, + # these will *never* be allowed. Would it make more sense to + # re-raise as TypeError, more consistent with non-pyarrow cases? + exc = pa.ArrowNotImplementedError + else: + exc = None + return exc + + def _get_arith_xfail_marker(self, opname, pa_dtype): + mark = None + + arrow_temporal_supported = self._is_temporal_supported(opname, pa_dtype) + + if opname == "__rpow__" and ( + pa.types.is_floating(pa_dtype) + or pa.types.is_integer(pa_dtype) + or pa.types.is_decimal(pa_dtype) + ): + mark = pytest.mark.xfail( + reason=( + f"GH#29997: 1**pandas.NA == 1 while 1**pyarrow.NA == NULL " + f"for {pa_dtype}" + ) + ) + elif arrow_temporal_supported and ( + pa.types.is_time(pa_dtype) + or ( + opname + in ("__truediv__", "__rtruediv__", "__floordiv__", "__rfloordiv__") + and pa.types.is_duration(pa_dtype) + ) + ): + mark = pytest.mark.xfail( + raises=TypeError, + reason=( + f"{opname} not supported between " + f"pd.NA and {pa_dtype} Python scalar" + ), + ) + elif opname == "__rfloordiv__" and ( + pa.types.is_integer(pa_dtype) or pa.types.is_decimal(pa_dtype) + ): + mark = pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason="divide by 0", + ) + elif opname == "__rtruediv__" and pa.types.is_decimal(pa_dtype): + mark = pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason="divide by 0", + ) + + return mark + + def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request): + pa_dtype = data.dtype.pyarrow_dtype + + if all_arithmetic_operators == "__rmod__" and pa.types.is_binary(pa_dtype): + pytest.skip("Skip testing Python string formatting") + elif all_arithmetic_operators in ("__rmul__", "__mul__") and ( + pa.types.is_binary(pa_dtype) or pa.types.is_string(pa_dtype) + ): + request.applymarker( + pytest.mark.xfail( + raises=TypeError, reason="Can only string multiply by an integer."
+ ) + ) + + mark = self._get_arith_xfail_marker(all_arithmetic_operators, pa_dtype) + if mark is not None: + request.applymarker(mark) + + super().test_arith_series_with_scalar(data, all_arithmetic_operators) + + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request): + pa_dtype = data.dtype.pyarrow_dtype + + if all_arithmetic_operators == "__rmod__" and ( + pa.types.is_string(pa_dtype) or pa.types.is_binary(pa_dtype) + ): + pytest.skip("Skip testing Python string formatting") + elif all_arithmetic_operators in ("__rmul__", "__mul__") and ( + pa.types.is_binary(pa_dtype) or pa.types.is_string(pa_dtype) + ): + request.applymarker( + pytest.mark.xfail( + raises=TypeError, reason="Can only string multiply by an integer." + ) + ) + + mark = self._get_arith_xfail_marker(all_arithmetic_operators, pa_dtype) + if mark is not None: + request.applymarker(mark) + + super().test_arith_frame_with_scalar(data, all_arithmetic_operators) + + def test_arith_series_with_array(self, data, all_arithmetic_operators, request): + pa_dtype = data.dtype.pyarrow_dtype + + if all_arithmetic_operators in ( + "__sub__", + "__rsub__", + ) and pa.types.is_unsigned_integer(pa_dtype): + request.applymarker( + pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason=( + f"Implemented pyarrow.compute.subtract_checked " + f"which raises on overflow for {pa_dtype}" + ), + ) + ) + elif all_arithmetic_operators in ("__rmul__", "__mul__") and ( + pa.types.is_binary(pa_dtype) or pa.types.is_string(pa_dtype) + ): + request.applymarker( + pytest.mark.xfail( + raises=TypeError, reason="Can only string multiply by an integer." + ) + ) + + mark = self._get_arith_xfail_marker(all_arithmetic_operators, pa_dtype) + if mark is not None: + request.applymarker(mark) + + op_name = all_arithmetic_operators + ser = pd.Series(data) + # pd.Series([ser.iloc[0]] * len(ser)) may not return ArrowExtensionArray + # since ser.iloc[0] is a python scalar + other = pd.Series(pd.array([ser.iloc[0]] * len(ser), dtype=data.dtype)) + + self.check_opname(ser, op_name, other) + + def test_add_series_with_extension_array(self, data, request): + pa_dtype = data.dtype.pyarrow_dtype + + if pa_dtype.equals("int8"): + request.applymarker( + pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason=f"raises on overflow for {pa_dtype}", + ) + ) + super().test_add_series_with_extension_array(data) + + def test_invalid_other_comp(self, data, comparison_op): + # GH 48833 + with pytest.raises( + NotImplementedError, match=".* not implemented for <class 'object'>" + ): + comparison_op(data, object()) + + @pytest.mark.parametrize("masked_dtype", ["boolean", "Int64", "Float64"]) + def test_comp_masked_numpy(self, masked_dtype, comparison_op): + # GH 52625 + data = [1, 0, None] + ser_masked = pd.Series(data, dtype=masked_dtype) + ser_pa = pd.Series(data, dtype=f"{masked_dtype.lower()}[pyarrow]") + result = comparison_op(ser_pa, ser_masked) + if comparison_op in [operator.lt, operator.gt, operator.ne]: + exp = [False, False, None] + else: + exp = [True, True, None] + expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +class TestLogicalOps: + """Various Series and DataFrame logical ops methods.""" + + def test_kleene_or(self): + a = pd.Series([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean[pyarrow]") + b = pd.Series([True, False, None] * 3, dtype="boolean[pyarrow]") + result = a | b + expected = pd.Series( + [True, True, True, True, False, None, True, None, None], + dtype="boolean[pyarrow]", + ) +
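+ # Kleene logic: True | NA is True (the NA cannot change the result), while
+ # False | NA and NA | NA stay NA, hence the None entries in ``expected``.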
tm.assert_series_equal(result, expected) + + result = b | a + tm.assert_series_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_series_equal( + a, + pd.Series([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean[pyarrow]"), + ) + tm.assert_series_equal( + b, pd.Series([True, False, None] * 3, dtype="boolean[pyarrow]") + ) + + @pytest.mark.parametrize( + "other, expected", + [ + (None, [True, None, None]), + (pd.NA, [True, None, None]), + (True, [True, True, True]), + (np.bool_(True), [True, True, True]), + (False, [True, False, None]), + (np.bool_(False), [True, False, None]), + ], + ) + def test_kleene_or_scalar(self, other, expected): + a = pd.Series([True, False, None], dtype="boolean[pyarrow]") + result = a | other + expected = pd.Series(expected, dtype="boolean[pyarrow]") + tm.assert_series_equal(result, expected) + + result = other | a + tm.assert_series_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_series_equal( + a, pd.Series([True, False, None], dtype="boolean[pyarrow]") + ) + + def test_kleene_and(self): + a = pd.Series([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean[pyarrow]") + b = pd.Series([True, False, None] * 3, dtype="boolean[pyarrow]") + result = a & b + expected = pd.Series( + [True, False, None, False, False, False, None, False, None], + dtype="boolean[pyarrow]", + ) + tm.assert_series_equal(result, expected) + + result = b & a + tm.assert_series_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_series_equal( + a, + pd.Series([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean[pyarrow]"), + ) + tm.assert_series_equal( + b, pd.Series([True, False, None] * 3, dtype="boolean[pyarrow]") + ) + + @pytest.mark.parametrize( + "other, expected", + [ + (None, [None, False, None]), + (pd.NA, [None, False, None]), + (True, [True, False, None]), + (False, [False, False, False]), + (np.bool_(True), [True, False, None]), + (np.bool_(False), [False, False, False]), + ], + ) + def test_kleene_and_scalar(self, other, expected): + a = pd.Series([True, False, None], dtype="boolean[pyarrow]") + result = a & other + expected = pd.Series(expected, dtype="boolean[pyarrow]") + tm.assert_series_equal(result, expected) + + result = other & a + tm.assert_series_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_series_equal( + a, pd.Series([True, False, None], dtype="boolean[pyarrow]") + ) + + def test_kleene_xor(self): + a = pd.Series([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean[pyarrow]") + b = pd.Series([True, False, None] * 3, dtype="boolean[pyarrow]") + result = a ^ b + expected = pd.Series( + [False, True, None, True, False, None, None, None, None], + dtype="boolean[pyarrow]", + ) + tm.assert_series_equal(result, expected) + + result = b ^ a + tm.assert_series_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_series_equal( + a, + pd.Series([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean[pyarrow]"), + ) + tm.assert_series_equal( + b, pd.Series([True, False, None] * 3, dtype="boolean[pyarrow]") + ) + + @pytest.mark.parametrize( + "other, expected", + [ + (None, [None, None, None]), + (pd.NA, [None, None, None]), + (True, [False, True, None]), + (np.bool_(True), [False, True, None]), + (np.bool_(False), [True, False, None]), + ], + ) + def test_kleene_xor_scalar(self, other, expected): + a = pd.Series([True, False, None], dtype="boolean[pyarrow]") + result = a ^ other + expected = 
pd.Series(expected, dtype="boolean[pyarrow]") + tm.assert_series_equal(result, expected) + + result = other ^ a + tm.assert_series_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_series_equal( + a, pd.Series([True, False, None], dtype="boolean[pyarrow]") + ) + + @pytest.mark.parametrize( + "op, exp", + [ + ["__and__", True], + ["__or__", True], + ["__xor__", False], + ], + ) + def test_logical_masked_numpy(self, op, exp): + # GH 52625 + data = [True, False, None] + ser_masked = pd.Series(data, dtype="boolean") + ser_pa = pd.Series(data, dtype="boolean[pyarrow]") + result = getattr(ser_pa, op)(ser_masked) + expected = pd.Series([exp, False, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("pa_type", tm.ALL_INT_PYARROW_DTYPES) +def test_bitwise(pa_type): + # GH 54495 + dtype = ArrowDtype(pa_type) + left = pd.Series([1, None, 3, 4], dtype=dtype) + right = pd.Series([None, 3, 5, 4], dtype=dtype) + + result = left | right + expected = pd.Series([None, None, 3 | 5, 4 | 4], dtype=dtype) + tm.assert_series_equal(result, expected) + + result = left & right + expected = pd.Series([None, None, 3 & 5, 4 & 4], dtype=dtype) + tm.assert_series_equal(result, expected) + + result = left ^ right + expected = pd.Series([None, None, 3 ^ 5, 4 ^ 4], dtype=dtype) + tm.assert_series_equal(result, expected) + + result = ~left + expected = ~(left.fillna(0).to_numpy()) + expected = pd.Series(expected, dtype=dtype).mask(left.isnull()) + tm.assert_series_equal(result, expected) + + +def test_arrowdtype_construct_from_string_type_with_unsupported_parameters(): + with pytest.raises(NotImplementedError, match="Passing pyarrow type"): + ArrowDtype.construct_from_string("not_a_real_dype[s, tz=UTC][pyarrow]") + + with pytest.raises(NotImplementedError, match="Passing pyarrow type"): + ArrowDtype.construct_from_string("decimal(7, 2)[pyarrow]") + + +def test_arrowdtype_construct_from_string_supports_dt64tz(): + # as of GH#50689, timestamptz is supported + dtype = ArrowDtype.construct_from_string("timestamp[s, tz=UTC][pyarrow]") + expected = ArrowDtype(pa.timestamp("s", "UTC")) + assert dtype == expected + + +def test_arrowdtype_construct_from_string_type_only_one_pyarrow(): + # GH#51225 + invalid = "int64[pyarrow]foobar[pyarrow]" + msg = ( + r"Passing pyarrow type specific parameters \(\[pyarrow\]\) in the " + r"string is not supported\." 
+ ) + with pytest.raises(NotImplementedError, match=msg): + pd.Series(range(3), dtype=invalid) + + +def test_arrow_string_multiplication(): + # GH 56537 + binary = pd.Series(["abc", "defg"], dtype=ArrowDtype(pa.string())) + repeat = pd.Series([2, -2], dtype="int64[pyarrow]") + result = binary * repeat + expected = pd.Series(["abcabc", ""], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + reflected_result = repeat * binary + tm.assert_series_equal(result, reflected_result) + + +def test_arrow_string_multiplication_scalar_repeat(): + binary = pd.Series(["abc", "defg"], dtype=ArrowDtype(pa.string())) + result = binary * 2 + expected = pd.Series(["abcabc", "defgdefg"], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + reflected_result = 2 * binary + tm.assert_series_equal(reflected_result, expected) + + +@pytest.mark.parametrize( + "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"] +) +@pytest.mark.parametrize("quantile", [0.5, [0.5, 0.5]]) +def test_quantile(data, interpolation, quantile, request): + pa_dtype = data.dtype.pyarrow_dtype + + data = data.take([0, 0, 0]) + ser = pd.Series(data) + + if ( + pa.types.is_string(pa_dtype) + or pa.types.is_binary(pa_dtype) + or pa.types.is_boolean(pa_dtype) + ): + # For string, bytes, and bool, we don't *expect* to have quantile work + # Note this matches the non-pyarrow behavior + msg = r"Function 'quantile' has no kernel matching input types \(.*\)" + with pytest.raises(pa.ArrowNotImplementedError, match=msg): + ser.quantile(q=quantile, interpolation=interpolation) + return + + if ( + pa.types.is_integer(pa_dtype) + or pa.types.is_floating(pa_dtype) + or pa.types.is_decimal(pa_dtype) + ): + pass + elif pa.types.is_temporal(data._pa_array.type): + pass + else: + request.applymarker( + pytest.mark.xfail( + raises=pa.ArrowNotImplementedError, + reason=f"quantile not supported by pyarrow for {pa_dtype}", + ) + ) + data = data.take([0, 0, 0]) + ser = pd.Series(data) + result = ser.quantile(q=quantile, interpolation=interpolation) + + if pa.types.is_timestamp(pa_dtype) and interpolation not in ["lower", "higher"]: + # rounding error will make the check below fail + # (e.g. 
'2020-01-01 01:01:01.000001' vs '2020-01-01 01:01:01.000001024'), + # so we'll check for now that we match the numpy analogue + if pa_dtype.tz: + pd_dtype = f"M8[{pa_dtype.unit}, {pa_dtype.tz}]" + else: + pd_dtype = f"M8[{pa_dtype.unit}]" + ser_np = ser.astype(pd_dtype) + + expected = ser_np.quantile(q=quantile, interpolation=interpolation) + if quantile == 0.5: + if pa_dtype.unit == "us": + expected = expected.to_pydatetime(warn=False) + assert result == expected + else: + if pa_dtype.unit == "us": + expected = expected.dt.floor("us") + tm.assert_series_equal(result, expected.astype(data.dtype)) + return + + if quantile == 0.5: + assert result == data[0] + else: + # Just check the values + expected = pd.Series(data.take([0, 0]), index=[0.5, 0.5]) + if ( + pa.types.is_integer(pa_dtype) + or pa.types.is_floating(pa_dtype) + or pa.types.is_decimal(pa_dtype) + ): + expected = expected.astype("float64[pyarrow]") + result = result.astype("float64[pyarrow]") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "take_idx, exp_idx", + [[[0, 0, 2, 2, 4, 4], [4, 0]], [[0, 0, 0, 2, 4, 4], [0]]], + ids=["multi_mode", "single_mode"], +) +def test_mode_dropna_true(data_for_grouping, take_idx, exp_idx): + data = data_for_grouping.take(take_idx) + ser = pd.Series(data) + result = ser.mode(dropna=True) + expected = pd.Series(data_for_grouping.take(exp_idx)) + tm.assert_series_equal(result, expected) + + +def test_mode_dropna_false_mode_na(data): + # GH 50982 + more_nans = pd.Series([None, None, data[0]], dtype=data.dtype) + result = more_nans.mode(dropna=False) + expected = pd.Series([None], dtype=data.dtype) + tm.assert_series_equal(result, expected) + + expected = pd.Series([data[0], None], dtype=data.dtype) + result = expected.mode(dropna=False) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "arrow_dtype, expected_type", + [ + [pa.binary(), bytes], + [pa.binary(16), bytes], + [pa.large_binary(), bytes], + [pa.large_string(), str], + [pa.list_(pa.int64()), list], + [pa.large_list(pa.int64()), list], + [pa.map_(pa.string(), pa.int64()), list], + [pa.struct([("f1", pa.int8()), ("f2", pa.string())]), dict], + [pa.dictionary(pa.int64(), pa.int64()), CategoricalDtypeType], + ], +) +def test_arrow_dtype_type(arrow_dtype, expected_type): + # GH 51845 + # TODO: Redundant with test_getitem_scalar once arrow_dtype exists in data fixture + assert ArrowDtype(arrow_dtype).type == expected_type + + +def test_is_bool_dtype(): + # GH 22667 + data = ArrowExtensionArray(pa.array([True, False, True])) + assert is_bool_dtype(data) + assert pd.core.common.is_bool_indexer(data) + s = pd.Series(range(len(data))) + result = s[data] + expected = s[np.asarray(data)] + tm.assert_series_equal(result, expected) + + +def test_is_numeric_dtype(data): + # GH 50563 + pa_type = data.dtype.pyarrow_dtype + if ( + pa.types.is_floating(pa_type) + or pa.types.is_integer(pa_type) + or pa.types.is_decimal(pa_type) + ): + assert is_numeric_dtype(data) + else: + assert not is_numeric_dtype(data) + + +def test_is_integer_dtype(data): + # GH 50667 + pa_type = data.dtype.pyarrow_dtype + if pa.types.is_integer(pa_type): + assert is_integer_dtype(data) + else: + assert not is_integer_dtype(data) + + +def test_is_signed_integer_dtype(data): + pa_type = data.dtype.pyarrow_dtype + if pa.types.is_signed_integer(pa_type): + assert is_signed_integer_dtype(data) + else: + assert not is_signed_integer_dtype(data) + + +def test_is_unsigned_integer_dtype(data): + pa_type = data.dtype.pyarrow_dtype + if 
pa.types.is_unsigned_integer(pa_type): + assert is_unsigned_integer_dtype(data) + else: + assert not is_unsigned_integer_dtype(data) + + +def test_is_float_dtype(data): + pa_type = data.dtype.pyarrow_dtype + if pa.types.is_floating(pa_type): + assert is_float_dtype(data) + else: + assert not is_float_dtype(data) + + +def test_pickle_roundtrip(data): + # GH 42600 + expected = pd.Series(data) + expected_sliced = expected.head(2) + full_pickled = pickle.dumps(expected) + sliced_pickled = pickle.dumps(expected_sliced) + + assert len(full_pickled) > len(sliced_pickled) + + result = pickle.loads(full_pickled) + tm.assert_series_equal(result, expected) + + result_sliced = pickle.loads(sliced_pickled) + tm.assert_series_equal(result_sliced, expected_sliced) + + +def test_astype_from_non_pyarrow(data): + # GH49795 + pd_array = data._pa_array.to_pandas().array + result = pd_array.astype(data.dtype) + assert not isinstance(pd_array.dtype, ArrowDtype) + assert isinstance(result.dtype, ArrowDtype) + tm.assert_extension_array_equal(result, data) + + +def test_astype_float_from_non_pyarrow_str(): + # GH50430 + ser = pd.Series(["1.0"]) + result = ser.astype("float64[pyarrow]") + expected = pd.Series([1.0], dtype="float64[pyarrow]") + tm.assert_series_equal(result, expected) + + +def test_astype_errors_ignore(): + # GH 55399 + expected = pd.DataFrame({"col": [17000000]}, dtype="int32[pyarrow]") + result = expected.astype("float[pyarrow]", errors="ignore") + tm.assert_frame_equal(result, expected) + + +def test_to_numpy_with_defaults(data): + # GH49973 + result = data.to_numpy() + + pa_type = data._pa_array.type + if pa.types.is_duration(pa_type) or pa.types.is_timestamp(pa_type): + pytest.skip("Tested in test_to_numpy_temporal") + elif pa.types.is_date(pa_type): + expected = np.array(list(data)) + else: + expected = np.array(data._pa_array) + + if data._hasna and not is_numeric_dtype(data.dtype): + expected = expected.astype(object) + expected[pd.isna(data)] = pd.NA + + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_int_with_na(): + # GH51227: ensure to_numpy does not convert int to float + data = [1, None] + arr = pd.array(data, dtype="int64[pyarrow]") + result = arr.to_numpy() + expected = np.array([1, np.nan]) + assert isinstance(result[0], float) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("na_val, exp", [(lib.no_default, np.nan), (1, 1)]) +def test_to_numpy_null_array(na_val, exp): + # GH#52443 + arr = pd.array([pd.NA, pd.NA], dtype="null[pyarrow]") + result = arr.to_numpy(dtype="float64", na_value=na_val) + expected = np.array([exp] * 2, dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_null_array_no_dtype(): + # GH#52443 + arr = pd.array([pd.NA, pd.NA], dtype="null[pyarrow]") + result = arr.to_numpy(dtype=None) + expected = np.array([pd.NA] * 2, dtype="object") + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_without_dtype(): + # GH 54808 + arr = pd.array([True, pd.NA], dtype="boolean[pyarrow]") + result = arr.to_numpy(na_value=False) + expected = np.array([True, False], dtype=np.bool_) + tm.assert_numpy_array_equal(result, expected) + + arr = pd.array([1.0, pd.NA], dtype="float32[pyarrow]") + result = arr.to_numpy(na_value=0.0) + expected = np.array([1.0, 0.0], dtype=np.float32) + tm.assert_numpy_array_equal(result, expected) + + +def test_setitem_null_slice(data): + # GH50248 + orig = data.copy() + + result = orig.copy() + result[:] = data[0] + expected = 
ArrowExtensionArray._from_sequence( + [data[0]] * len(data), + dtype=data.dtype, + ) + tm.assert_extension_array_equal(result, expected) + + result = orig.copy() + result[:] = data[::-1] + expected = data[::-1] + tm.assert_extension_array_equal(result, expected) + + result = orig.copy() + result[:] = data.tolist() + expected = data + tm.assert_extension_array_equal(result, expected) + + +def test_setitem_invalid_dtype(data): + # GH50248 + pa_type = data._pa_array.type + if pa.types.is_string(pa_type) or pa.types.is_binary(pa_type): + fill_value = 123 + err = TypeError + msg = "Invalid value '123' for dtype" + elif ( + pa.types.is_integer(pa_type) + or pa.types.is_floating(pa_type) + or pa.types.is_boolean(pa_type) + ): + fill_value = "foo" + err = pa.ArrowInvalid + msg = "Could not convert" + else: + fill_value = "foo" + err = TypeError + msg = "Invalid value 'foo' for dtype" + with pytest.raises(err, match=msg): + data[:] = fill_value + + +def test_from_arrow_respecting_given_dtype(): + date_array = pa.array( + [pd.Timestamp("2019-12-31"), pd.Timestamp("2019-12-31")], type=pa.date32() + ) + result = date_array.to_pandas( + types_mapper={pa.date32(): ArrowDtype(pa.date64())}.get + ) + expected = pd.Series( + [pd.Timestamp("2019-12-31"), pd.Timestamp("2019-12-31")], + dtype=ArrowDtype(pa.date64()), + ) + tm.assert_series_equal(result, expected) + + +def test_from_arrow_respecting_given_dtype_unsafe(): + array = pa.array([1.5, 2.5], type=pa.float64()) + with pytest.raises(pa.ArrowInvalid, match="Float value 1.5 was truncated"): + array.to_pandas(types_mapper={pa.float64(): ArrowDtype(pa.int64())}.get) + + +def test_round(): + dtype = "float64[pyarrow]" + + ser = pd.Series([0.0, 1.23, 2.56, pd.NA], dtype=dtype) + result = ser.round(1) + expected = pd.Series([0.0, 1.2, 2.6, pd.NA], dtype=dtype) + tm.assert_series_equal(result, expected) + + ser = pd.Series([123.4, pd.NA, 56.78], dtype=dtype) + result = ser.round(-1) + expected = pd.Series([120.0, pd.NA, 60.0], dtype=dtype) + tm.assert_series_equal(result, expected) + + +def test_searchsorted_with_na_raises(data_for_sorting, as_series): + # GH50447 + b, c, a = data_for_sorting + arr = data_for_sorting.take([2, 0, 1]) # to get [a, b, c] + arr[-1] = pd.NA + + if as_series: + arr = pd.Series(arr) + + msg = ( + "searchsorted requires array to be sorted, " + "which is impossible with NAs present." 
+ ) + with pytest.raises(ValueError, match=msg): + arr.searchsorted(b) + + +def test_sort_values_dictionary(): + df = pd.DataFrame( + { + "a": pd.Series( + ["x", "y"], dtype=ArrowDtype(pa.dictionary(pa.int32(), pa.string())) + ), + "b": [1, 2], + }, + ) + expected = df.copy() + result = df.sort_values(by=["a", "b"]) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("pat", ["abc", "a[a-z]{2}"]) +def test_str_count(pat): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + result = ser.str.count(pat) + expected = pd.Series([1, None], dtype=ArrowDtype(pa.int32())) + tm.assert_series_equal(result, expected) + + +def test_str_count_flags_unsupported(): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + with pytest.raises(NotImplementedError, match="count not"): + ser.str.count("abc", flags=1) + + +@pytest.mark.parametrize( + "side, str_func", [["left", "rjust"], ["right", "ljust"], ["both", "center"]] +) +def test_str_pad(side, str_func): + ser = pd.Series(["a", None], dtype=ArrowDtype(pa.string())) + result = ser.str.pad(width=3, side=side, fillchar="x") + expected = pd.Series( + [getattr("a", str_func)(3, "x"), None], dtype=ArrowDtype(pa.string()) + ) + tm.assert_series_equal(result, expected) + + +def test_str_pad_invalid_side(): + ser = pd.Series(["a", None], dtype=ArrowDtype(pa.string())) + with pytest.raises(ValueError, match="Invalid side: foo"): + ser.str.pad(3, "foo", "x") + + +@pytest.mark.parametrize( + "pat, case, na, regex, exp", + [ + ["ab", False, None, False, [True, None]], + ["Ab", True, None, False, [False, None]], + ["ab", False, True, False, [True, True]], + ["a[a-z]{1}", False, None, True, [True, None]], + ["A[a-z]{1}", True, None, True, [False, None]], + ], +) +def test_str_contains(pat, case, na, regex, exp): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + result = ser.str.contains(pat, case=case, na=na, regex=regex) + expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +def test_str_contains_flags_unsupported(): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + with pytest.raises(NotImplementedError, match="contains not"): + ser.str.contains("a", flags=1) + + +@pytest.mark.parametrize( + "side, pat, na, exp", + [ + ["startswith", "ab", None, [True, None, False]], + ["startswith", "b", False, [False, False, False]], + ["endswith", "b", True, [False, True, False]], + ["endswith", "bc", None, [True, None, False]], + ["startswith", ("a", "e", "g"), None, [True, None, True]], + ["endswith", ("a", "c", "g"), None, [True, None, True]], + ["startswith", (), None, [False, None, False]], + ["endswith", (), None, [False, None, False]], + ], +) +def test_str_start_ends_with(side, pat, na, exp): + ser = pd.Series(["abc", None, "efg"], dtype=ArrowDtype(pa.string())) + result = getattr(ser.str, side)(pat, na=na) + expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("side", ("startswith", "endswith")) +def test_str_starts_ends_with_all_nulls_empty_tuple(side): + ser = pd.Series([None, None], dtype=ArrowDtype(pa.string())) + result = getattr(ser.str, side)(()) + + # bool datatype preserved for all nulls. 
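+ # (an empty tuple matches nothing, so non-null entries yield False; see the parametrized cases above)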
+ expected = pd.Series([None, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "arg_name, arg", + [["pat", re.compile("b")], ["repl", str], ["case", False], ["flags", 1]], +) +def test_str_replace_unsupported(arg_name, arg): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + kwargs = {"pat": "b", "repl": "x", "regex": True} + kwargs[arg_name] = arg + with pytest.raises(NotImplementedError, match="replace is not supported"): + ser.str.replace(**kwargs) + + +@pytest.mark.parametrize( + "pat, repl, n, regex, exp", + [ + ["a", "x", -1, False, ["xbxc", None]], + ["a", "x", 1, False, ["xbac", None]], + ["[a-b]", "x", -1, True, ["xxxc", None]], + ], +) +def test_str_replace(pat, repl, n, regex, exp): + ser = pd.Series(["abac", None], dtype=ArrowDtype(pa.string())) + result = ser.str.replace(pat, repl, n=n, regex=regex) + expected = pd.Series(exp, dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +def test_str_replace_negative_n(): + # GH 56404 + ser = pd.Series(["abc", "aaaaaa"], dtype=ArrowDtype(pa.string())) + actual = ser.str.replace("a", "", -3, True) + expected = pd.Series(["bc", ""], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(expected, actual) + + +def test_str_repeat_unsupported(): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + with pytest.raises(NotImplementedError, match="repeat is not"): + ser.str.repeat([1, 2]) + + +def test_str_repeat(): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + result = ser.str.repeat(2) + expected = pd.Series(["abcabc", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "pat, case, na, exp", + [ + ["ab", False, None, [True, None]], + ["Ab", True, None, [False, None]], + ["bc", True, None, [False, None]], + ["ab", False, True, [True, True]], + ["a[a-z]{1}", False, None, [True, None]], + ["A[a-z]{1}", True, None, [False, None]], + ], +) +def test_str_match(pat, case, na, exp): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + result = ser.str.match(pat, case=case, na=na) + expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "pat, case, na, exp", + [ + ["abc", False, None, [True, True, False, None]], + ["Abc", True, None, [False, False, False, None]], + ["bc", True, None, [False, False, False, None]], + ["ab", False, None, [True, True, False, None]], + ["a[a-z]{2}", False, None, [True, True, False, None]], + ["A[a-z]{1}", True, None, [False, False, False, None]], + # GH Issue: #56652 + ["abc$", False, None, [True, False, False, None]], + ["abc\\$", False, None, [False, True, False, None]], + ["Abc$", True, None, [False, False, False, None]], + ["Abc\\$", True, None, [False, False, False, None]], + ], +) +def test_str_fullmatch(pat, case, na, exp): + ser = pd.Series(["abc", "abc$", "$abc", None], dtype=ArrowDtype(pa.string())) + result = ser.str.match(pat, case=case, na=na) + expected = pd.Series(exp, dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "sub, start, end, exp, exp_typ", + [["ab", 0, None, [0, None], pa.int32()], ["bc", 1, 3, [1, None], pa.int64()]], +) +def test_str_find(sub, start, end, exp, exp_typ): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + result = ser.str.find(sub, start=start, end=end) + expected = pd.Series(exp, dtype=ArrowDtype(exp_typ)) + 
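# exp_typ differs by code path: plain find maps to pyarrow's find_substring, which returns int32, while the start/end path appears to add the offset back and upcast to int64 +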
tm.assert_series_equal(result, expected) + + +def test_str_find_negative_start(): + # GH 56411 + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + result = ser.str.find(sub="b", start=-1000, end=3) + expected = pd.Series([1, None], dtype=ArrowDtype(pa.int64())) + tm.assert_series_equal(result, expected) + + +def test_str_find_notimplemented(): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + with pytest.raises(NotImplementedError, match="find not implemented"): + ser.str.find("ab", start=1) + + +@pytest.mark.parametrize( + "i, exp", + [ + [1, ["b", "e", None]], + [-1, ["c", "e", None]], + [2, ["c", None, None]], + [-3, ["a", None, None]], + [4, [None, None, None]], + ], +) +def test_str_get(i, exp): + ser = pd.Series(["abc", "de", None], dtype=ArrowDtype(pa.string())) + result = ser.str.get(i) + expected = pd.Series(exp, dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.xfail( + reason="TODO: StringMethods._validate should support Arrow list types", + raises=AttributeError, +) +def test_str_join(): + ser = pd.Series(ArrowExtensionArray(pa.array([list("abc"), list("123"), None]))) + result = ser.str.join("=") + expected = pd.Series(["a=b=c", "1=2=3", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +def test_str_join_string_type(): + ser = pd.Series(ArrowExtensionArray(pa.array(["abc", "123", None]))) + result = ser.str.join("=") + expected = pd.Series(["a=b=c", "1=2=3", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "start, stop, step, exp", + [ + [None, 2, None, ["ab", None]], + [None, 2, 1, ["ab", None]], + [1, 3, 1, ["bc", None]], + ], +) +def test_str_slice(start, stop, step, exp): + ser = pd.Series(["abcd", None], dtype=ArrowDtype(pa.string())) + result = ser.str.slice(start, stop, step) + expected = pd.Series(exp, dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "start, stop, repl, exp", + [ + [1, 2, "x", ["axcd", None]], + [None, 2, "x", ["xcd", None]], + [None, 2, None, ["cd", None]], + ], +) +def test_str_slice_replace(start, stop, repl, exp): + ser = pd.Series(["abcd", None], dtype=ArrowDtype(pa.string())) + result = ser.str.slice_replace(start, stop, repl) + expected = pd.Series(exp, dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "value, method, exp", + [ + ["a1c", "isalnum", True], + ["!|,", "isalnum", False], + ["aaa", "isalpha", True], + ["!!!", "isalpha", False], + ["٠", "isdecimal", True], # noqa: RUF001 + ["~!", "isdecimal", False], + ["2", "isdigit", True], + ["~", "isdigit", False], + ["aaa", "islower", True], + ["aaA", "islower", False], + ["123", "isnumeric", True], + ["11I", "isnumeric", False], + [" ", "isspace", True], + ["", "isspace", False], + ["The That", "istitle", True], + ["the That", "istitle", False], + ["AAA", "isupper", True], + ["AAc", "isupper", False], + ], +) +def test_str_is_functions(value, method, exp): + ser = pd.Series([value, None], dtype=ArrowDtype(pa.string())) + result = getattr(ser.str, method)() + expected = pd.Series([exp, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "method, exp", + [ + ["capitalize", "Abc def"], + ["title", "Abc Def"], + ["swapcase", "AbC Def"], + ["lower", "abc def"], + ["upper", "ABC DEF"], + ["casefold", "abc def"], + ], +) +def 
test_str_transform_functions(method, exp): + ser = pd.Series(["aBc dEF", None], dtype=ArrowDtype(pa.string())) + result = getattr(ser.str, method)() + expected = pd.Series([exp, None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +def test_str_len(): + ser = pd.Series(["abcd", None], dtype=ArrowDtype(pa.string())) + result = ser.str.len() + expected = pd.Series([4, None], dtype=ArrowDtype(pa.int32())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "method, to_strip, val", + [ + ["strip", None, " abc "], + ["strip", "x", "xabcx"], + ["lstrip", None, " abc"], + ["lstrip", "x", "xabc"], + ["rstrip", None, "abc "], + ["rstrip", "x", "abcx"], + ], +) +def test_str_strip(method, to_strip, val): + ser = pd.Series([val, None], dtype=ArrowDtype(pa.string())) + result = getattr(ser.str, method)(to_strip=to_strip) + expected = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("val", ["abc123", "abc"]) +def test_str_removesuffix(val): + ser = pd.Series([val, None], dtype=ArrowDtype(pa.string())) + result = ser.str.removesuffix("123") + expected = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("val", ["123abc", "abc"]) +def test_str_removeprefix(val): + ser = pd.Series([val, None], dtype=ArrowDtype(pa.string())) + result = ser.str.removeprefix("123") + expected = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("errors", ["ignore", "strict"]) +@pytest.mark.parametrize( + "encoding, exp", + [ + ["utf8", b"abc"], + ["utf32", b"\xff\xfe\x00\x00a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00"], + ], +) +def test_str_encode(errors, encoding, exp): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + result = ser.str.encode(encoding, errors) + expected = pd.Series([exp, None], dtype=ArrowDtype(pa.binary())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("flags", [0, 2]) +def test_str_findall(flags): + ser = pd.Series(["abc", "efg", None], dtype=ArrowDtype(pa.string())) + result = ser.str.findall("b", flags=flags) + expected = pd.Series([["b"], [], None], dtype=ArrowDtype(pa.list_(pa.string()))) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["index", "rindex"]) +@pytest.mark.parametrize( + "start, end", + [ + [0, None], + [1, 4], + ], +) +def test_str_r_index(method, start, end): + ser = pd.Series(["abcba", None], dtype=ArrowDtype(pa.string())) + result = getattr(ser.str, method)("c", start, end) + expected = pd.Series([2, None], dtype=ArrowDtype(pa.int64())) + tm.assert_series_equal(result, expected) + + with pytest.raises(ValueError, match="substring not found"): + getattr(ser.str, method)("foo", start, end) + + +@pytest.mark.parametrize("form", ["NFC", "NFKC"]) +def test_str_normalize(form): + ser = pd.Series(["abc", None], dtype=ArrowDtype(pa.string())) + result = ser.str.normalize(form) + expected = ser.copy() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "start, end", + [ + [0, None], + [1, 4], + ], +) +def test_str_rfind(start, end): + ser = pd.Series(["abcba", "foo", None], dtype=ArrowDtype(pa.string())) + result = ser.str.rfind("c", start, end) + expected = pd.Series([2, -1, None], dtype=ArrowDtype(pa.int64())) + tm.assert_series_equal(result, expected) + + +def test_str_translate(): + ser = 
pd.Series(["abcba", None], dtype=ArrowDtype(pa.string())) + result = ser.str.translate({97: "b"}) + expected = pd.Series(["bbcbb", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +def test_str_wrap(): + ser = pd.Series(["abcba", None], dtype=ArrowDtype(pa.string())) + result = ser.str.wrap(3) + expected = pd.Series(["abc\nba", None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +def test_get_dummies(): + ser = pd.Series(["a|b", None, "a|c"], dtype=ArrowDtype(pa.string())) + result = ser.str.get_dummies() + expected = pd.DataFrame( + [[True, True, False], [False, False, False], [True, False, True]], + dtype=ArrowDtype(pa.bool_()), + columns=["a", "b", "c"], + ) + tm.assert_frame_equal(result, expected) + + +def test_str_partition(): + ser = pd.Series(["abcba", None], dtype=ArrowDtype(pa.string())) + result = ser.str.partition("b") + expected = pd.DataFrame( + [["a", "b", "cba"], [None, None, None]], dtype=ArrowDtype(pa.string()) + ) + tm.assert_frame_equal(result, expected) + + result = ser.str.partition("b", expand=False) + expected = pd.Series(ArrowExtensionArray(pa.array([["a", "b", "cba"], None]))) + tm.assert_series_equal(result, expected) + + result = ser.str.rpartition("b") + expected = pd.DataFrame( + [["abc", "b", "a"], [None, None, None]], dtype=ArrowDtype(pa.string()) + ) + tm.assert_frame_equal(result, expected) + + result = ser.str.rpartition("b", expand=False) + expected = pd.Series(ArrowExtensionArray(pa.array([["abc", "b", "a"], None]))) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["rsplit", "split"]) +def test_str_split_pat_none(method): + # GH 56271 + ser = pd.Series(["a1 cbc\nb", None], dtype=ArrowDtype(pa.string())) + result = getattr(ser.str, method)() + expected = pd.Series(ArrowExtensionArray(pa.array([["a1", "cbc", "b"], None]))) + tm.assert_series_equal(result, expected) + + +def test_str_split(): + # GH 52401 + ser = pd.Series(["a1cbcb", "a2cbcb", None], dtype=ArrowDtype(pa.string())) + result = ser.str.split("c") + expected = pd.Series( + ArrowExtensionArray(pa.array([["a1", "b", "b"], ["a2", "b", "b"], None])) + ) + tm.assert_series_equal(result, expected) + + result = ser.str.split("c", n=1) + expected = pd.Series( + ArrowExtensionArray(pa.array([["a1", "bcb"], ["a2", "bcb"], None])) + ) + tm.assert_series_equal(result, expected) + + result = ser.str.split("[1-2]", regex=True) + expected = pd.Series( + ArrowExtensionArray(pa.array([["a", "cbcb"], ["a", "cbcb"], None])) + ) + tm.assert_series_equal(result, expected) + + result = ser.str.split("[1-2]", regex=True, expand=True) + expected = pd.DataFrame( + { + 0: ArrowExtensionArray(pa.array(["a", "a", None])), + 1: ArrowExtensionArray(pa.array(["cbcb", "cbcb", None])), + } + ) + tm.assert_frame_equal(result, expected) + + result = ser.str.split("1", expand=True) + expected = pd.DataFrame( + { + 0: ArrowExtensionArray(pa.array(["a", "a2cbcb", None])), + 1: ArrowExtensionArray(pa.array(["cbcb", None, None])), + } + ) + tm.assert_frame_equal(result, expected) + + +def test_str_rsplit(): + # GH 52401 + ser = pd.Series(["a1cbcb", "a2cbcb", None], dtype=ArrowDtype(pa.string())) + result = ser.str.rsplit("c") + expected = pd.Series( + ArrowExtensionArray(pa.array([["a1", "b", "b"], ["a2", "b", "b"], None])) + ) + tm.assert_series_equal(result, expected) + + result = ser.str.rsplit("c", n=1) + expected = pd.Series( + ArrowExtensionArray(pa.array([["a1cb", "b"], ["a2cb", "b"], None])) + ) + 
tm.assert_series_equal(result, expected) + + result = ser.str.rsplit("c", n=1, expand=True) + expected = pd.DataFrame( + { + 0: ArrowExtensionArray(pa.array(["a1cb", "a2cb", None])), + 1: ArrowExtensionArray(pa.array(["b", "b", None])), + } + ) + tm.assert_frame_equal(result, expected) + + result = ser.str.rsplit("1", expand=True) + expected = pd.DataFrame( + { + 0: ArrowExtensionArray(pa.array(["a", "a2cbcb", None])), + 1: ArrowExtensionArray(pa.array(["cbcb", None, None])), + } + ) + tm.assert_frame_equal(result, expected) + + +def test_str_extract_non_symbolic(): + ser = pd.Series(["a1", "b2", "c3"], dtype=ArrowDtype(pa.string())) + with pytest.raises(ValueError, match="pat=.* must contain a symbolic group name."): + ser.str.extract(r"[ab](\d)") + + +@pytest.mark.parametrize("expand", [True, False]) +def test_str_extract(expand): + ser = pd.Series(["a1", "b2", "c3"], dtype=ArrowDtype(pa.string())) + result = ser.str.extract(r"(?P<letter>[ab])(?P<digit>\d)", expand=expand) + expected = pd.DataFrame( + { + "letter": ArrowExtensionArray(pa.array(["a", "b", None])), + "digit": ArrowExtensionArray(pa.array(["1", "2", None])), + } + ) + tm.assert_frame_equal(result, expected) + + +def test_str_extract_expand(): + ser = pd.Series(["a1", "b2", "c3"], dtype=ArrowDtype(pa.string())) + result = ser.str.extract(r"[ab](?P<digit>\d)", expand=True) + expected = pd.DataFrame( + { + "digit": ArrowExtensionArray(pa.array(["1", "2", None])), + } + ) + tm.assert_frame_equal(result, expected) + + result = ser.str.extract(r"[ab](?P<digit>\d)", expand=False) + expected = pd.Series(ArrowExtensionArray(pa.array(["1", "2", None])), name="digit") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"]) +def test_duration_from_strings_with_nat(unit): + # GH51175 + strings = ["1000", "NaT"] + pa_type = pa.duration(unit) + result = ArrowExtensionArray._from_sequence_of_strings(strings, dtype=pa_type) + expected = ArrowExtensionArray(pa.array([1000, None], type=pa_type)) + tm.assert_extension_array_equal(result, expected) + + +def test_unsupported_dt(data): + pa_dtype = data.dtype.pyarrow_dtype + if not pa.types.is_temporal(pa_dtype): + with pytest.raises( + AttributeError, match="Can only use .dt accessor with datetimelike values" + ): + pd.Series(data).dt + + +@pytest.mark.parametrize( + "prop, expected", + [ + ["year", 2023], + ["day", 2], + ["day_of_week", 0], + ["dayofweek", 0], + ["weekday", 0], + ["day_of_year", 2], + ["dayofyear", 2], + ["hour", 3], + ["minute", 4], + ["is_leap_year", False], + ["microsecond", 5], + ["month", 1], + ["nanosecond", 6], + ["quarter", 1], + ["second", 7], + ["date", date(2023, 1, 2)], + ["time", time(3, 4, 7, 5)], + ], +) +def test_dt_properties(prop, expected): + ser = pd.Series( + [ + pd.Timestamp( + year=2023, + month=1, + day=2, + hour=3, + minute=4, + second=7, + microsecond=5, + nanosecond=6, + ), + None, + ], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + result = getattr(ser.dt, prop) + exp_type = None + if isinstance(expected, date): + exp_type = pa.date32() + elif isinstance(expected, time): + exp_type = pa.time64("ns") + expected = pd.Series(ArrowExtensionArray(pa.array([expected, None], type=exp_type))) + tm.assert_series_equal(result, expected) + + +def test_dt_is_month_start_end(): + ser = pd.Series( + [ + datetime(year=2023, month=12, day=2, hour=3), + datetime(year=2023, month=1, day=1, hour=3), + datetime(year=2023, month=3, day=31, hour=3), + None, + ], + dtype=ArrowDtype(pa.timestamp("us")), + ) + result = ser.dt.is_month_start + expected = 
pd.Series([False, True, False, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + result = ser.dt.is_month_end + expected = pd.Series([False, False, True, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +def test_dt_is_year_start_end(): + ser = pd.Series( + [ + datetime(year=2023, month=12, day=31, hour=3), + datetime(year=2023, month=1, day=1, hour=3), + datetime(year=2023, month=3, day=31, hour=3), + None, + ], + dtype=ArrowDtype(pa.timestamp("us")), + ) + result = ser.dt.is_year_start + expected = pd.Series([False, True, False, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + result = ser.dt.is_year_end + expected = pd.Series([True, False, False, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +def test_dt_is_quarter_start_end(): + ser = pd.Series( + [ + datetime(year=2023, month=11, day=30, hour=3), + datetime(year=2023, month=1, day=1, hour=3), + datetime(year=2023, month=3, day=31, hour=3), + None, + ], + dtype=ArrowDtype(pa.timestamp("us")), + ) + result = ser.dt.is_quarter_start + expected = pd.Series([False, True, False, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + result = ser.dt.is_quarter_end + expected = pd.Series([False, False, True, None], dtype=ArrowDtype(pa.bool_())) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["days_in_month", "daysinmonth"]) +def test_dt_days_in_month(method): + ser = pd.Series( + [ + datetime(year=2023, month=3, day=30, hour=3), + datetime(year=2023, month=4, day=1, hour=3), + datetime(year=2023, month=2, day=3, hour=3), + None, + ], + dtype=ArrowDtype(pa.timestamp("us")), + ) + result = getattr(ser.dt, method) + expected = pd.Series([31, 30, 28, None], dtype=ArrowDtype(pa.int64())) + tm.assert_series_equal(result, expected) + + +def test_dt_normalize(): + ser = pd.Series( + [ + datetime(year=2023, month=3, day=30), + datetime(year=2023, month=4, day=1, hour=3), + datetime(year=2023, month=2, day=3, hour=23, minute=59, second=59), + None, + ], + dtype=ArrowDtype(pa.timestamp("us")), + ) + result = ser.dt.normalize() + expected = pd.Series( + [ + datetime(year=2023, month=3, day=30), + datetime(year=2023, month=4, day=1), + datetime(year=2023, month=2, day=3), + None, + ], + dtype=ArrowDtype(pa.timestamp("us")), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("unit", ["us", "ns"]) +def test_dt_time_preserve_unit(unit): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp(unit)), + ) + assert ser.dt.unit == unit + + result = ser.dt.time + expected = pd.Series( + ArrowExtensionArray(pa.array([time(3, 0), None], type=pa.time64(unit))) + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("tz", [None, "UTC", "US/Pacific"]) +def test_dt_tz(tz): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns", tz=tz)), + ) + result = ser.dt.tz + assert result == timezones.maybe_get_tz(tz) + + +def test_dt_isocalendar(): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + result = ser.dt.isocalendar() + expected = pd.DataFrame( + [[2023, 1, 1], [0, 0, 0]], + columns=["year", "week", "day"], + dtype="int64[pyarrow]", + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "method, exp", [["day_name", "Sunday"], 
["month_name", "January"]] +) +def test_dt_day_month_name(method, exp, request): + # GH 52388 + _require_timezone_database(request) + + ser = pd.Series([datetime(2023, 1, 1), None], dtype=ArrowDtype(pa.timestamp("ms"))) + result = getattr(ser.dt, method)() + expected = pd.Series([exp, None], dtype=ArrowDtype(pa.string())) + tm.assert_series_equal(result, expected) + + +def test_dt_strftime(request): + _require_timezone_database(request) + + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + result = ser.dt.strftime("%Y-%m-%dT%H:%M:%S") + expected = pd.Series( + ["2023-01-02T03:00:00.000000000", None], dtype=ArrowDtype(pa.string()) + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("method", ["ceil", "floor", "round"]) +def test_dt_roundlike_tz_options_not_supported(method): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + with pytest.raises(NotImplementedError, match="ambiguous is not supported."): + getattr(ser.dt, method)("1h", ambiguous="NaT") + + with pytest.raises(NotImplementedError, match="nonexistent is not supported."): + getattr(ser.dt, method)("1h", nonexistent="NaT") + + +@pytest.mark.parametrize("method", ["ceil", "floor", "round"]) +def test_dt_roundlike_unsupported_freq(method): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + with pytest.raises(ValueError, match="freq='1B' is not supported"): + getattr(ser.dt, method)("1B") + + with pytest.raises(ValueError, match="Must specify a valid frequency: None"): + getattr(ser.dt, method)(None) + + +@pytest.mark.parametrize("freq", ["D", "h", "min", "s", "ms", "us", "ns"]) +@pytest.mark.parametrize("method", ["ceil", "floor", "round"]) +def test_dt_ceil_year_floor(freq, method): + ser = pd.Series( + [datetime(year=2023, month=1, day=1), None], + ) + pa_dtype = ArrowDtype(pa.timestamp("ns")) + expected = getattr(ser.dt, method)(f"1{freq}").astype(pa_dtype) + result = getattr(ser.astype(pa_dtype).dt, method)(f"1{freq}") + tm.assert_series_equal(result, expected) + + +def test_dt_to_pydatetime(): + # GH 51859 + data = [datetime(2022, 1, 1), datetime(2023, 1, 1)] + ser = pd.Series(data, dtype=ArrowDtype(pa.timestamp("ns"))) + + msg = "The behavior of ArrowTemporalProperties.to_pydatetime is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.dt.to_pydatetime() + expected = np.array(data, dtype=object) + tm.assert_numpy_array_equal(result, expected) + assert all(type(res) is datetime for res in result) + + msg = "The behavior of DatetimeProperties.to_pydatetime is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + expected = ser.astype("datetime64[ns]").dt.to_pydatetime() + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("date_type", [32, 64]) +def test_dt_to_pydatetime_date_error(date_type): + # GH 52812 + ser = pd.Series( + [date(2022, 12, 31)], + dtype=ArrowDtype(getattr(pa, f"date{date_type}")()), + ) + msg = "The behavior of ArrowTemporalProperties.to_pydatetime is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pytest.raises(ValueError, match="to_pydatetime cannot be called with"): + ser.dt.to_pydatetime() + + +def test_dt_tz_localize_unsupported_tz_options(): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + with 
pytest.raises(NotImplementedError, match="ambiguous='NaT' is not supported"): + ser.dt.tz_localize("UTC", ambiguous="NaT") + + with pytest.raises(NotImplementedError, match="nonexistent='NaT' is not supported"): + ser.dt.tz_localize("UTC", nonexistent="NaT") + + +def test_dt_tz_localize_none(): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns", tz="US/Pacific")), + ) + result = ser.dt.tz_localize(None) + expected = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("unit", ["us", "ns"]) +def test_dt_tz_localize(unit, request): + _require_timezone_database(request) + + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp(unit)), + ) + result = ser.dt.tz_localize("US/Pacific") + exp_data = pa.array( + [datetime(year=2023, month=1, day=2, hour=3), None], type=pa.timestamp(unit) + ) + exp_data = pa.compute.assume_timezone(exp_data, "US/Pacific") + expected = pd.Series(ArrowExtensionArray(exp_data)) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "nonexistent, exp_date", + [ + ["shift_forward", datetime(year=2023, month=3, day=12, hour=3)], + ["shift_backward", pd.Timestamp("2023-03-12 01:59:59.999999999")], + ], +) +def test_dt_tz_localize_nonexistent(nonexistent, exp_date, request): + _require_timezone_database(request) + + ser = pd.Series( + [datetime(year=2023, month=3, day=12, hour=2, minute=30), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + result = ser.dt.tz_localize("US/Pacific", nonexistent=nonexistent) + exp_data = pa.array([exp_date, None], type=pa.timestamp("ns")) + exp_data = pa.compute.assume_timezone(exp_data, "US/Pacific") + expected = pd.Series(ArrowExtensionArray(exp_data)) + tm.assert_series_equal(result, expected) + + +def test_dt_tz_convert_not_tz_raises(): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + with pytest.raises(TypeError, match="Cannot convert tz-naive timestamps"): + ser.dt.tz_convert("UTC") + + +def test_dt_tz_convert_none(): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns", "US/Pacific")), + ) + result = ser.dt.tz_convert(None) + expected = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp("ns")), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("unit", ["us", "ns"]) +def test_dt_tz_convert(unit): + ser = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp(unit, "US/Pacific")), + ) + result = ser.dt.tz_convert("US/Eastern") + expected = pd.Series( + [datetime(year=2023, month=1, day=2, hour=3), None], + dtype=ArrowDtype(pa.timestamp(unit, "US/Eastern")), + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["timestamp[ms][pyarrow]", "duration[ms][pyarrow]"]) +def test_as_unit(dtype): + # GH 52284 + ser = pd.Series([1000, None], dtype=dtype) + result = ser.dt.as_unit("ns") + expected = ser.astype(dtype.replace("ms", "ns")) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "prop, expected", + [ + ["days", 1], + ["seconds", 2], + ["microseconds", 3], + ["nanoseconds", 4], + ], +) +def test_dt_timedelta_properties(prop, expected): + # GH 52284 + ser = pd.Series( + [ + 
pd.Timedelta( + days=1, + seconds=2, + microseconds=3, + nanoseconds=4, + ), + None, + ], + dtype=ArrowDtype(pa.duration("ns")), + ) + result = getattr(ser.dt, prop) + expected = pd.Series( + ArrowExtensionArray(pa.array([expected, None], type=pa.int32())) + ) + tm.assert_series_equal(result, expected) + + +def test_dt_timedelta_total_seconds(): + # GH 52284 + ser = pd.Series( + [ + pd.Timedelta( + days=1, + seconds=2, + microseconds=3, + nanoseconds=4, + ), + None, + ], + dtype=ArrowDtype(pa.duration("ns")), + ) + result = ser.dt.total_seconds() + expected = pd.Series( + ArrowExtensionArray(pa.array([86402.000003, None], type=pa.float64())) + ) + tm.assert_series_equal(result, expected) + + +def test_dt_to_pytimedelta(): + # GH 52284 + data = [timedelta(1, 2, 3), timedelta(1, 2, 4)] + ser = pd.Series(data, dtype=ArrowDtype(pa.duration("ns"))) + + result = ser.dt.to_pytimedelta() + expected = np.array(data, dtype=object) + tm.assert_numpy_array_equal(result, expected) + assert all(type(res) is timedelta for res in result) + + expected = ser.astype("timedelta64[ns]").dt.to_pytimedelta() + tm.assert_numpy_array_equal(result, expected) + + +def test_dt_components(): + # GH 52284 + ser = pd.Series( + [ + pd.Timedelta( + days=1, + seconds=2, + microseconds=3, + nanoseconds=4, + ), + None, + ], + dtype=ArrowDtype(pa.duration("ns")), + ) + result = ser.dt.components + expected = pd.DataFrame( + [[1, 0, 0, 2, 0, 3, 4], [None, None, None, None, None, None, None]], + columns=[ + "days", + "hours", + "minutes", + "seconds", + "milliseconds", + "microseconds", + "nanoseconds", + ], + dtype="int32[pyarrow]", + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("skipna", [True, False]) +def test_boolean_reduce_series_all_null(all_boolean_reductions, skipna): + # GH51624 + ser = pd.Series([None], dtype="float64[pyarrow]") + result = getattr(ser, all_boolean_reductions)(skipna=skipna) + if skipna: + expected = all_boolean_reductions == "all" + else: + expected = pd.NA + assert result is expected + + +def test_from_sequence_of_strings_boolean(): + true_strings = ["true", "TRUE", "True", "1", "1.0"] + false_strings = ["false", "FALSE", "False", "0", "0.0"] + nulls = [None] + strings = true_strings + false_strings + nulls + bools = ( + [True] * len(true_strings) + [False] * len(false_strings) + [None] * len(nulls) + ) + + result = ArrowExtensionArray._from_sequence_of_strings(strings, dtype=pa.bool_()) + expected = pd.array(bools, dtype="boolean[pyarrow]") + tm.assert_extension_array_equal(result, expected) + + strings = ["True", "foo"] + with pytest.raises(pa.ArrowInvalid, match="Failed to parse"): + ArrowExtensionArray._from_sequence_of_strings(strings, dtype=pa.bool_()) + + +def test_concat_empty_arrow_backed_series(dtype): + # GH#51734 + ser = pd.Series([], dtype=dtype) + expected = ser.copy() + result = pd.concat([ser[np.array([], dtype=np.bool_)]]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["string", "string[pyarrow]"]) +def test_series_from_string_array(dtype): + arr = pa.array("the quick brown fox".split()) + ser = pd.Series(arr, dtype=dtype) + expected = pd.Series(ArrowExtensionArray(arr), dtype=dtype) + tm.assert_series_equal(ser, expected) + + +# _data was renamed to _pa_data +class OldArrowExtensionArray(ArrowExtensionArray): + def __getstate__(self): + state = super().__getstate__() + state["_data"] = state.pop("_pa_array") + return state + + +def test_pickle_old_arrowextensionarray(): + data = pa.array([1]) + expected = 
OldArrowExtensionArray(data) + result = pickle.loads(pickle.dumps(expected)) + tm.assert_extension_array_equal(result, expected) + assert result._pa_array == pa.chunked_array(data) + assert not hasattr(result, "_data") + + +def test_setitem_boolean_replace_with_mask_segfault(): + # GH#52059 + N = 145_000 + arr = ArrowExtensionArray(pa.chunked_array([np.ones((N,), dtype=np.bool_)])) + expected = arr.copy() + arr[np.zeros((N,), dtype=np.bool_)] = False + assert arr._pa_array == expected._pa_array + + +@pytest.mark.parametrize( + "data, arrow_dtype", + [ + ([b"a", b"b"], pa.large_binary()), + (["a", "b"], pa.large_string()), + ], +) +def test_conversion_large_dtypes_from_numpy_array(data, arrow_dtype): + dtype = ArrowDtype(arrow_dtype) + result = pd.array(np.array(data), dtype=dtype) + expected = pd.array(data, dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_concat_null_array(): + df = pd.DataFrame({"a": [None, None]}, dtype=ArrowDtype(pa.null())) + df2 = pd.DataFrame({"a": [0, 1]}, dtype="int64[pyarrow]") + + result = pd.concat([df, df2], ignore_index=True) + expected = pd.DataFrame({"a": [None, None, 0, 1]}, dtype="int64[pyarrow]") + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize("pa_type", tm.ALL_INT_PYARROW_DTYPES + tm.FLOAT_PYARROW_DTYPES) +def test_describe_numeric_data(pa_type): + # GH 52470 + data = pd.Series([1, 2, 3], dtype=ArrowDtype(pa_type)) + result = data.describe() + expected = pd.Series( + [3, 2, 1, 1, 1.5, 2.0, 2.5, 3], + dtype=ArrowDtype(pa.float64()), + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("pa_type", tm.TIMEDELTA_PYARROW_DTYPES) +def test_describe_timedelta_data(pa_type): + # GH53001 + data = pd.Series(range(1, 10), dtype=ArrowDtype(pa_type)) + result = data.describe() + expected = pd.Series( + [9] + pd.to_timedelta([5, 2, 1, 3, 5, 7, 9], unit=pa_type.unit).tolist(), + dtype=object, + index=["count", "mean", "std", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("pa_type", tm.DATETIME_PYARROW_DTYPES) +def test_describe_datetime_data(pa_type): + # GH53001 + data = pd.Series(range(1, 10), dtype=ArrowDtype(pa_type)) + result = data.describe() + expected = pd.Series( + [9] + + [ + pd.Timestamp(v, tz=pa_type.tz, unit=pa_type.unit) + for v in [5, 1, 3, 5, 7, 9] + ], + dtype=object, + index=["count", "mean", "min", "25%", "50%", "75%", "max"], + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_quantile_temporal(pa_type): + # GH52678 + data = [1, 2, 3] + ser = pd.Series(data, dtype=ArrowDtype(pa_type)) + result = ser.quantile(0.1) + expected = ser[0] + assert result == expected + + +def test_date32_repr(): + # GH48238 + arrow_dt = pa.array([date.fromisoformat("2020-01-01")], type=pa.date32()) + ser = pd.Series(arrow_dt, dtype=ArrowDtype(arrow_dt.type)) + assert repr(ser) == "0 2020-01-01\ndtype: date32[day][pyarrow]" + + +def test_duration_overflow_from_ndarray_containing_nat(): + # GH52843 + data_ts = pd.to_datetime([1, None]) + data_td = pd.to_timedelta([1, None]) + ser_ts = pd.Series(data_ts, dtype=ArrowDtype(pa.timestamp("ns"))) + ser_td = pd.Series(data_td, dtype=ArrowDtype(pa.duration("ns"))) + result = ser_ts + ser_td + expected = pd.Series([2, None], dtype=ArrowDtype(pa.timestamp("ns"))) + tm.assert_series_equal(result, expected) + + +def 
test_infer_dtype_pyarrow_dtype(data, request): + res = lib.infer_dtype(data) + assert res != "unknown-array" + + if data._hasna and res in ["floating", "datetime64", "timedelta64"]: + mark = pytest.mark.xfail( + reason="in infer_dtype pd.NA is not ignored in these cases " + "even with skipna=True in the list(data) check below" + ) + request.applymarker(mark) + + assert res == lib.infer_dtype(list(data), skipna=True) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_from_sequence_temporal(pa_type): + # GH 53171 + val = 3 + unit = pa_type.unit + if pa.types.is_duration(pa_type): + seq = [pd.Timedelta(val, unit=unit).as_unit(unit)] + else: + seq = [pd.Timestamp(val, unit=unit, tz=pa_type.tz).as_unit(unit)] + + result = ArrowExtensionArray._from_sequence(seq, dtype=pa_type) + expected = ArrowExtensionArray(pa.array([val], type=pa_type)) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_setitem_temporal(pa_type): + # GH 53171 + unit = pa_type.unit + if pa.types.is_duration(pa_type): + val = pd.Timedelta(1, unit=unit).as_unit(unit) + else: + val = pd.Timestamp(1, unit=unit, tz=pa_type.tz).as_unit(unit) + + arr = ArrowExtensionArray(pa.array([1, 2, 3], type=pa_type)) + + result = arr.copy() + result[:] = val + expected = ArrowExtensionArray(pa.array([1, 1, 1], type=pa_type)) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_arithmetic_temporal(pa_type, request): + # GH 53171 + arr = ArrowExtensionArray(pa.array([1, 2, 3], type=pa_type)) + unit = pa_type.unit + result = arr - pd.Timedelta(1, unit=unit).as_unit(unit) + expected = ArrowExtensionArray(pa.array([0, 1, 2], type=pa_type)) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_comparison_temporal(pa_type): + # GH 53171 + unit = pa_type.unit + if pa.types.is_duration(pa_type): + val = pd.Timedelta(1, unit=unit).as_unit(unit) + else: + val = pd.Timestamp(1, unit=unit, tz=pa_type.tz).as_unit(unit) + + arr = ArrowExtensionArray(pa.array([1, 2, 3], type=pa_type)) + + result = arr > val + expected = ArrowExtensionArray(pa.array([False, True, True], type=pa.bool_())) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_getitem_temporal(pa_type): + # GH 53326 + arr = ArrowExtensionArray(pa.array([1, 2, 3], type=pa_type)) + result = arr[1] + if pa.types.is_duration(pa_type): + expected = pd.Timedelta(2, unit=pa_type.unit).as_unit(pa_type.unit) + assert isinstance(result, pd.Timedelta) + else: + expected = pd.Timestamp(2, unit=pa_type.unit, tz=pa_type.tz).as_unit( + pa_type.unit + ) + assert isinstance(result, pd.Timestamp) + assert result.unit == expected.unit + assert result == expected + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES +) +def test_iter_temporal(pa_type): + # GH 53326 + arr = ArrowExtensionArray(pa.array([1, None], type=pa_type)) + result = list(arr) + if pa.types.is_duration(pa_type): + expected = [ + pd.Timedelta(1, unit=pa_type.unit).as_unit(pa_type.unit), + pd.NA, + ] + assert isinstance(result[0], pd.Timedelta) + else: + expected = [ + pd.Timestamp(1, 
unit=pa_type.unit, tz=pa_type.tz).as_unit(pa_type.unit), + pd.NA, + ] + assert isinstance(result[0], pd.Timestamp) + assert result[0].unit == expected[0].unit + assert result == expected + + +def test_groupby_series_size_returns_pa_int(data): + # GH 54132 + ser = pd.Series(data[:3], index=["a", "a", "b"]) + result = ser.groupby(level=0).size() + expected = pd.Series([2, 1], dtype="int64[pyarrow]", index=["a", "b"]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "pa_type", tm.DATETIME_PYARROW_DTYPES + tm.TIMEDELTA_PYARROW_DTYPES, ids=repr +) +@pytest.mark.parametrize("dtype", [None, object]) +def test_to_numpy_temporal(pa_type, dtype): + # GH 53326 + # GH 55997: Return datetime64/timedelta64 types with NaT if possible + arr = ArrowExtensionArray(pa.array([1, None], type=pa_type)) + result = arr.to_numpy(dtype=dtype) + if pa.types.is_duration(pa_type): + value = pd.Timedelta(1, unit=pa_type.unit).as_unit(pa_type.unit) + else: + value = pd.Timestamp(1, unit=pa_type.unit, tz=pa_type.tz).as_unit(pa_type.unit) + + if dtype == object or (pa.types.is_timestamp(pa_type) and pa_type.tz is not None): + if dtype == object: + na = pd.NA + else: + na = pd.NaT + expected = np.array([value, na], dtype=object) + assert result[0].unit == value.unit + else: + na = pa_type.to_pandas_dtype().type("nat", pa_type.unit) + value = value.to_numpy() + expected = np.array([value, na]) + assert np.datetime_data(result[0])[0] == pa_type.unit + tm.assert_numpy_array_equal(result, expected) + + +def test_groupby_count_return_arrow_dtype(data_missing): + df = pd.DataFrame({"A": [1, 1], "B": data_missing, "C": data_missing}) + result = df.groupby("A").count() + expected = pd.DataFrame( + [[1, 1]], + index=pd.Index([1], name="A"), + columns=["B", "C"], + dtype="int64[pyarrow]", + ) + tm.assert_frame_equal(result, expected) + + +def test_fixed_size_list(): + # GH#55000 + ser = pd.Series( + [[1, 2], [3, 4]], dtype=ArrowDtype(pa.list_(pa.int64(), list_size=2)) + ) + result = ser.dtype.type + assert result == list + + +def test_arrowextensiondtype_dataframe_repr(): + # GH 54062 + df = pd.DataFrame( + pd.period_range("2012", periods=3), + columns=["col"], + dtype=ArrowDtype(ArrowPeriodType("D")), + ) + result = repr(df) + # TODO: repr value may not be expected; address how + # pyarrow.ExtensionType values are displayed + expected = " col\n0 15340\n1 15341\n2 15342" + assert result == expected + + +def test_pow_missing_operand(): + # GH 55512 + k = pd.Series([2, None], dtype="int64[pyarrow]") + result = k.pow(None, fill_value=3) + expected = pd.Series([8, None], dtype="int64[pyarrow]") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("pa_type", tm.TIMEDELTA_PYARROW_DTYPES) +def test_duration_fillna_numpy(pa_type): + # GH 54707 + ser1 = pd.Series([None, 2], dtype=ArrowDtype(pa_type)) + ser2 = pd.Series(np.array([1, 3], dtype=f"m8[{pa_type.unit}]")) + result = ser1.fillna(ser2) + expected = pd.Series([1, 2], dtype=ArrowDtype(pa_type)) + tm.assert_series_equal(result, expected) + + +def test_comparison_not_propagating_arrow_error(): + # GH#54944 + a = pd.Series([1 << 63], dtype="uint64[pyarrow]") + b = pd.Series([None], dtype="int64[pyarrow]") + with pytest.raises(pa.lib.ArrowInvalid, match="Integer value"): + a < b + + +def test_factorize_chunked_dictionary(): + # GH 54844 + pa_array = pa.chunked_array( + [pa.array(["a"]).dictionary_encode(), pa.array(["b"]).dictionary_encode()] + ) + ser = pd.Series(ArrowExtensionArray(pa_array)) + res_indices, res_uniques = ser.factorize() + 
exp_indices = np.array([0, 1], dtype=np.intp) + exp_uniques = pd.Index(ArrowExtensionArray(pa_array.combine_chunks())) + tm.assert_numpy_array_equal(res_indices, exp_indices) + tm.assert_index_equal(res_uniques, exp_uniques) + + +def test_dictionary_astype_categorical(): + # GH#56672 + arrs = [ + pa.array(np.array(["a", "x", "c", "a"])).dictionary_encode(), + pa.array(np.array(["a", "d", "c"])).dictionary_encode(), + ] + ser = pd.Series(ArrowExtensionArray(pa.chunked_array(arrs))) + result = ser.astype("category") + categories = pd.Index(["a", "x", "c", "d"], dtype=ArrowDtype(pa.string())) + expected = pd.Series( + ["a", "x", "c", "a", "a", "d", "c"], + dtype=pd.CategoricalDtype(categories=categories), + ) + tm.assert_series_equal(result, expected) + + +def test_arrow_floordiv(): + # GH 55561 + a = pd.Series([-7], dtype="int64[pyarrow]") + b = pd.Series([4], dtype="int64[pyarrow]") + expected = pd.Series([-2], dtype="int64[pyarrow]") + result = a // b + tm.assert_series_equal(result, expected) + + +def test_arrow_floordiv_large_values(): + # GH 56645 + a = pd.Series([1425801600000000000], dtype="int64[pyarrow]") + expected = pd.Series([1425801600000], dtype="int64[pyarrow]") + result = a // 1_000_000 + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["int64[pyarrow]", "uint64[pyarrow]"]) +def test_arrow_floordiv_large_integral_result(dtype): + # GH 56676 + a = pd.Series([18014398509481983], dtype=dtype) + result = a // 1 + tm.assert_series_equal(result, a) + + +@pytest.mark.parametrize("pa_type", tm.SIGNED_INT_PYARROW_DTYPES) +def test_arrow_floordiv_larger_divisor(pa_type): + # GH 56676 + dtype = ArrowDtype(pa_type) + a = pd.Series([-23], dtype=dtype) + result = a // 24 + expected = pd.Series([-1], dtype=dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("pa_type", tm.SIGNED_INT_PYARROW_DTYPES) +def test_arrow_floordiv_integral_invalid(pa_type): + # GH 56676 + min_value = np.iinfo(pa_type.to_pandas_dtype()).min + a = pd.Series([min_value], dtype=ArrowDtype(pa_type)) + # INT_MIN // -1 overflows because abs(INT_MIN) exceeds the dtype's max + with pytest.raises(pa.lib.ArrowInvalid, match="overflow|not in range"): + a // -1 + with pytest.raises(pa.lib.ArrowInvalid, match="divide by zero"): + a // 0 + + +@pytest.mark.parametrize("dtype", tm.FLOAT_PYARROW_DTYPES_STR_REPR) +def test_arrow_floordiv_floating_0_divisor(dtype): + # GH 56676 + a = pd.Series([2], dtype=dtype) + result = a // 0 + expected = pd.Series([float("inf")], dtype=dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["float64", "datetime64[ns]", "timedelta64[ns]"]) +def test_astype_int_with_null_to_numpy_dtype(dtype): + # GH 57093 + ser = pd.Series([1, None], dtype="int64[pyarrow]") + result = ser.astype(dtype) + expected = pd.Series([1, None], dtype=dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("pa_type", tm.ALL_INT_PYARROW_DTYPES) +def test_arrow_integral_floordiv_large_values(pa_type): + # GH 56676 + max_value = np.iinfo(pa_type.to_pandas_dtype()).max + dtype = ArrowDtype(pa_type) + a = pd.Series([max_value], dtype=dtype) + b = pd.Series([1], dtype=dtype) + result = a // b + tm.assert_series_equal(result, a) + + +@pytest.mark.parametrize("dtype", ["int64[pyarrow]", "uint64[pyarrow]"]) +def test_arrow_true_division_large_divisor(dtype): + # GH 56706 + a = pd.Series([0], dtype=dtype) + b = pd.Series([18014398509481983], dtype=dtype) + expected = pd.Series([0], dtype="float64[pyarrow]") + result = a / b + tm.assert_series_equal(result, expected) + + 
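+# Note the dtype contrast with true division above: "/" on integer pyarrow +# dtypes returns float64[pyarrow], while "//" below keeps the integer dtype. +# Illustrative sketch (hypothetical REPL, mirrors the adjacent tests): +#   pd.Series([0], dtype="int64[pyarrow]") / 2   # -> dtype float64[pyarrow] +#   pd.Series([0], dtype="int64[pyarrow]") // 2  # -> dtype int64[pyarrow]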
+@pytest.mark.parametrize("dtype", ["int64[pyarrow]", "uint64[pyarrow]"]) +def test_arrow_floor_division_large_divisor(dtype): + # GH 56706 + a = pd.Series([0], dtype=dtype) + b = pd.Series([18014398509481983], dtype=dtype) + expected = pd.Series([0], dtype=dtype) + result = a // b + tm.assert_series_equal(result, expected) + + +def test_string_to_datetime_parsing_cast(): + # GH 56266 + string_dates = ["2020-01-01 04:30:00", "2020-01-02 00:00:00", "2020-01-03 00:00:00"] + result = pd.Series(string_dates, dtype="timestamp[ns][pyarrow]") + expected = pd.Series( + ArrowExtensionArray(pa.array(pd.to_datetime(string_dates), from_pandas=True)) + ) + tm.assert_series_equal(result, expected) + + +def test_string_to_time_parsing_cast(): + # GH 56463 + string_times = ["11:41:43.076160"] + result = pd.Series(string_times, dtype="time64[us][pyarrow]") + expected = pd.Series( + ArrowExtensionArray(pa.array([time(11, 41, 43, 76160)], from_pandas=True)) + ) + tm.assert_series_equal(result, expected) + + +def test_to_numpy_float(): + # GH#56267 + ser = pd.Series([32, 40, None], dtype="float[pyarrow]") + result = ser.astype("float64") + expected = pd.Series([32, 40, np.nan], dtype="float64") + tm.assert_series_equal(result, expected) + + +def test_to_numpy_timestamp_to_int(): + # GH 55997 + ser = pd.Series(["2020-01-01 04:30:00"], dtype="timestamp[ns][pyarrow]") + result = ser.to_numpy(dtype=np.int64) + expected = np.array([1577853000000000000]) + tm.assert_numpy_array_equal(result, expected) + + +def test_map_numeric_na_action(): + ser = pd.Series([32, 40, None], dtype="int64[pyarrow]") + result = ser.map(lambda x: 42, na_action="ignore") + expected = pd.Series([42.0, 42.0, np.nan], dtype="float64") + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_categorical.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..6f33b18b19c51f0b2a552d8f046b99ee75f0b83c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_categorical.py @@ -0,0 +1,200 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. + +""" +import string + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +import pandas as pd +from pandas import Categorical +import pandas._testing as tm +from pandas.api.types import CategoricalDtype +from pandas.tests.extension import base + + +def make_data(): + while True: + values = np.random.default_rng(2).choice(list(string.ascii_letters), size=100) + # ensure we meet the requirements + # 1. first two not null + # 2. first and second are different + if values[0] != values[1]: + break + return values + + +@pytest.fixture +def dtype(): + return CategoricalDtype() + + +@pytest.fixture +def data(): + """Length-100 array for this type. 
+ + * data[0] and data[1] should both be non missing + * data[0] and data[1] should not be equal + """ + return Categorical(make_data()) + + +@pytest.fixture +def data_missing(): + """Length 2 array with [NA, Valid]""" + return Categorical([np.nan, "A"]) + + +@pytest.fixture +def data_for_sorting(): + return Categorical(["A", "B", "C"], categories=["C", "A", "B"], ordered=True) + + +@pytest.fixture +def data_missing_for_sorting(): + return Categorical(["A", None, "B"], categories=["B", "A"], ordered=True) + + +@pytest.fixture +def data_for_grouping(): + return Categorical(["a", "a", None, None, "b", "b", "a", "c"]) + + +class TestCategorical(base.ExtensionTests): + @pytest.mark.xfail(reason="Memory usage doesn't match") + def test_memory_usage(self, data): + # TODO: Is this deliberate? + super().test_memory_usage(data) + + def test_contains(self, data, data_missing): + # GH-37867 + # na value handling in Categorical.__contains__ is deprecated. + # See base.BaseInterfaceTests.test_contains for more details. + + na_value = data.dtype.na_value + # ensure data without missing values + data = data[~data.isna()] + + # first elements are non-missing + assert data[0] in data + assert data_missing[0] in data_missing + + # check the presence of na_value + assert na_value in data_missing + assert na_value not in data + + # Categoricals can contain other nan-likes than na_value + for na_value_obj in tm.NULL_OBJECTS: + if na_value_obj is na_value: + continue + assert na_value_obj not in data + # this section suffers from super method + if not using_pyarrow_string_dtype(): + assert na_value_obj in data_missing + + def test_empty(self, dtype): + cls = dtype.construct_array_type() + result = cls._empty((4,), dtype=dtype) + + assert isinstance(result, cls) + # the dtype we passed is not initialized, so will not match the + # dtype on our result. + assert result.dtype == CategoricalDtype([]) + + @pytest.mark.skip(reason="Backwards compatibility") + def test_getitem_scalar(self, data): + # CategoricalDtype.type isn't "correct" since it should + # be a parent of the elements (object). But don't want + # to break things by changing. 
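+ # (currently CategoricalDtype.type is CategoricalDtypeType, not the element type.)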
+ super().test_getitem_scalar(data) + + @pytest.mark.xfail(reason="Unobserved categories included") + def test_value_counts(self, all_data, dropna): + return super().test_value_counts(all_data, dropna) + + def test_combine_add(self, data_repeated): + # GH 20825 + # When adding categoricals in combine, result is a string + orig_data1, orig_data2 = data_repeated(2) + s1 = pd.Series(orig_data1) + s2 = pd.Series(orig_data2) + result = s1.combine(s2, lambda x1, x2: x1 + x2) + expected = pd.Series( + [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))] + ) + tm.assert_series_equal(result, expected) + + val = s1.iloc[0] + result = s1.combine(val, lambda x1, x2: x1 + x2) + expected = pd.Series([a + val for a in list(orig_data1)]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("na_action", [None, "ignore"]) + def test_map(self, data, na_action): + result = data.map(lambda x: x, na_action=na_action) + tm.assert_extension_array_equal(result, data) + + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request): + # frame & scalar + op_name = all_arithmetic_operators + if op_name == "__rmod__": + request.applymarker( + pytest.mark.xfail( + reason="rmod never called when string is first argument" + ) + ) + super().test_arith_frame_with_scalar(data, op_name) + + def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request): + op_name = all_arithmetic_operators + if op_name == "__rmod__": + request.applymarker( + pytest.mark.xfail( + reason="rmod never called when string is first argument" + ) + ) + super().test_arith_series_with_scalar(data, op_name) + + def _compare_other(self, ser: pd.Series, data, op, other): + op_name = f"__{op.__name__}__" + if op_name not in ["__eq__", "__ne__"]: + msg = "Unordered Categoricals can only compare equality or not" + with pytest.raises(TypeError, match=msg): + op(data, other) + else: + return super()._compare_other(ser, data, op, other) + + @pytest.mark.xfail(reason="Categorical overrides __repr__") + @pytest.mark.parametrize("size", ["big", "small"]) + def test_array_repr(self, data, size): + super().test_array_repr(data, size) + + @pytest.mark.xfail(reason="TBD") + @pytest.mark.parametrize("as_index", [True, False]) + def test_groupby_extension_agg(self, as_index, data_for_grouping): + super().test_groupby_extension_agg(as_index, data_for_grouping) + + +class Test2DCompat(base.NDArrayBacked2DTests): + def test_repr_2d(self, data): + # Categorical __repr__ doesn't include "Categorical", so we need + # to special-case + res = repr(data.reshape(1, -1)) + assert res.count("\nCategories") == 1 + + res = repr(data.reshape(-1, 1)) + assert res.count("\nCategories") == 1 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_common.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_common.py new file mode 100644 index 0000000000000000000000000000000000000000..5eda0f00f54cae1002b0e7e60e9de765870a9ad8 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_common.py @@ -0,0 +1,105 @@ +import numpy as np +import pytest + +from pandas.core.dtypes import dtypes +from pandas.core.dtypes.common import is_extension_array_dtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import ExtensionArray + + +class DummyDtype(dtypes.ExtensionDtype): + pass + + +class DummyArray(ExtensionArray): + def __init__(self, data) -> None: + self.data = data + + def __array__(self, dtype=None, copy=None): + return 
self.data + + @property + def dtype(self): + return DummyDtype() + + def astype(self, dtype, copy=True): + # we don't support anything but a single dtype + if isinstance(dtype, DummyDtype): + if copy: + return type(self)(self.data) + return self + elif not copy: + return np.asarray(self, dtype=dtype) + else: + return np.array(self, dtype=dtype, copy=copy) + + +class TestExtensionArrayDtype: + @pytest.mark.parametrize( + "values", + [ + pd.Categorical([]), + pd.Categorical([]).dtype, + pd.Series(pd.Categorical([])), + DummyDtype(), + DummyArray(np.array([1, 2])), + ], + ) + def test_is_extension_array_dtype(self, values): + assert is_extension_array_dtype(values) + + @pytest.mark.parametrize("values", [np.array([]), pd.Series(np.array([]))]) + def test_is_not_extension_array_dtype(self, values): + assert not is_extension_array_dtype(values) + + +def test_astype(): + arr = DummyArray(np.array([1, 2, 3])) + expected = np.array([1, 2, 3], dtype=object) + + result = arr.astype(object) + tm.assert_numpy_array_equal(result, expected) + + result = arr.astype("object") + tm.assert_numpy_array_equal(result, expected) + + +def test_astype_no_copy(): + arr = DummyArray(np.array([1, 2, 3], dtype=np.int64)) + result = arr.astype(arr.dtype, copy=False) + + assert arr is result + + result = arr.astype(arr.dtype) + assert arr is not result + + +@pytest.mark.parametrize("dtype", [dtypes.CategoricalDtype(), dtypes.IntervalDtype()]) +def test_is_extension_array_dtype(dtype): + assert isinstance(dtype, dtypes.ExtensionDtype) + assert is_extension_array_dtype(dtype) + + +class CapturingStringArray(pd.arrays.StringArray): + """Extend StringArray to capture arguments to __getitem__""" + + def __getitem__(self, item): + self.last_item_arg = item + return super().__getitem__(item) + + +def test_ellipsis_index(): + # GH#42430 1D slices over extension types turn into N-dimensional slices + # over ExtensionArrays + df = pd.DataFrame( + {"col1": CapturingStringArray(np.array(["hello", "world"], dtype=object))} + ) + _ = df.iloc[:1] + + # String comparison because there's no native way to compare slices. + # Before the fix for GH#42430, last_item_arg would get set to the 2D slice + # (Ellipsis, slice(None, 1, None)) + out = df["col1"].array.last_item_arg + assert str(out) == "slice(None, 1, None)" diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_datetime.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_datetime.py new file mode 100644 index 0000000000000000000000000000000000000000..7f70957007dad9cc589e6f589c48555fd90f527d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_datetime.py @@ -0,0 +1,144 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. 
+ +""" +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import DatetimeArray +from pandas.tests.extension import base + + +@pytest.fixture(params=["US/Central"]) +def dtype(request): + return DatetimeTZDtype(unit="ns", tz=request.param) + + +@pytest.fixture +def data(dtype): + data = DatetimeArray._from_sequence( + pd.date_range("2000", periods=100, tz=dtype.tz), dtype=dtype + ) + return data + + +@pytest.fixture +def data_missing(dtype): + return DatetimeArray._from_sequence( + np.array(["NaT", "2000-01-01"], dtype="datetime64[ns]"), dtype=dtype + ) + + +@pytest.fixture +def data_for_sorting(dtype): + a = pd.Timestamp("2000-01-01") + b = pd.Timestamp("2000-01-02") + c = pd.Timestamp("2000-01-03") + return DatetimeArray._from_sequence( + np.array([b, c, a], dtype="datetime64[ns]"), dtype=dtype + ) + + +@pytest.fixture +def data_missing_for_sorting(dtype): + a = pd.Timestamp("2000-01-01") + b = pd.Timestamp("2000-01-02") + return DatetimeArray._from_sequence( + np.array([b, "NaT", a], dtype="datetime64[ns]"), dtype=dtype + ) + + +@pytest.fixture +def data_for_grouping(dtype): + """ + Expected to be like [B, B, NA, NA, A, A, B, C] + + Where A < B < C and NA is missing + """ + a = pd.Timestamp("2000-01-01") + b = pd.Timestamp("2000-01-02") + c = pd.Timestamp("2000-01-03") + na = "NaT" + return DatetimeArray._from_sequence( + np.array([b, b, na, na, a, a, b, c], dtype="datetime64[ns]"), dtype=dtype + ) + + +@pytest.fixture +def na_cmp(): + def cmp(a, b): + return a is pd.NaT and a is b + + return cmp + + +# ---------------------------------------------------------------------------- +class TestDatetimeArray(base.ExtensionTests): + def _get_expected_exception(self, op_name, obj, other): + if op_name in ["__sub__", "__rsub__"]: + return None + return super()._get_expected_exception(op_name, obj, other) + + def _supports_accumulation(self, ser, op_name: str) -> bool: + return op_name in ["cummin", "cummax"] + + def _supports_reduction(self, obj, op_name: str) -> bool: + return op_name in ["min", "max", "median", "mean", "std", "any", "all"] + + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna): + meth = all_boolean_reductions + msg = f"'{meth}' with datetime64 dtypes is deprecated and will raise in" + with tm.assert_produces_warning( + FutureWarning, match=msg, check_stacklevel=False + ): + super().test_reduce_series_boolean(data, all_boolean_reductions, skipna) + + def test_series_constructor(self, data): + # Series construction drops any .freq attr + data = data._with_freq(None) + super().test_series_constructor(data) + + @pytest.mark.parametrize("na_action", [None, "ignore"]) + def test_map(self, data, na_action): + result = data.map(lambda x: x, na_action=na_action) + tm.assert_extension_array_equal(result, data) + + def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool): + if op_name in ["median", "mean", "std"]: + alt = ser.astype("int64") + + res_op = getattr(ser, op_name) + exp_op = getattr(alt, op_name) + result = res_op(skipna=skipna) + expected = exp_op(skipna=skipna) + if op_name in ["mean", "median"]: + # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype" + # has no attribute "tz" + tz = ser.dtype.tz # type: ignore[union-attr] + expected = pd.Timestamp(expected, tz=tz) + else: + expected = pd.Timedelta(expected) + tm.assert_almost_equal(result, expected) + + else: + 
return super().check_reduce(ser, op_name, skipna) + + +class Test2DCompat(base.NDArrayBacked2DTests): + pass diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_extension.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_extension.py new file mode 100644 index 0000000000000000000000000000000000000000..1ed626cd5108081eff7156275f439ececdf28241 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_extension.py @@ -0,0 +1,26 @@ +""" +Tests for behavior if an author does *not* implement EA methods. +""" +import numpy as np +import pytest + +from pandas.core.arrays import ExtensionArray + + +class MyEA(ExtensionArray): + def __init__(self, values) -> None: + self._values = values + + +@pytest.fixture +def data(): + arr = np.arange(10) + return MyEA(arr) + + +class TestExtensionArray: + def test_errors(self, data, all_arithmetic_operators): + # invalid ops + op_name = all_arithmetic_operators + with pytest.raises(AttributeError): + getattr(data, op_name) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_interval.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_interval.py new file mode 100644 index 0000000000000000000000000000000000000000..98dd1c5cb615faf160e27adffcd74d5cfad75349 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_interval.py @@ -0,0 +1,98 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. 
+ +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import IntervalDtype + +from pandas import Interval +from pandas.core.arrays import IntervalArray +from pandas.tests.extension import base + +if TYPE_CHECKING: + import pandas as pd + + +def make_data(): + N = 100 + left_array = np.random.default_rng(2).uniform(size=N).cumsum() + right_array = left_array + np.random.default_rng(2).uniform(size=N) + return [Interval(left, right) for left, right in zip(left_array, right_array)] + + +@pytest.fixture +def dtype(): + return IntervalDtype() + + +@pytest.fixture +def data(): + """Length-100 PeriodArray for semantics test.""" + return IntervalArray(make_data()) + + +@pytest.fixture +def data_missing(): + """Length 2 array with [NA, Valid]""" + return IntervalArray.from_tuples([None, (0, 1)]) + + +@pytest.fixture +def data_for_twos(): + pytest.skip("Interval is not a numeric dtype") + + +@pytest.fixture +def data_for_sorting(): + return IntervalArray.from_tuples([(1, 2), (2, 3), (0, 1)]) + + +@pytest.fixture +def data_missing_for_sorting(): + return IntervalArray.from_tuples([(1, 2), None, (0, 1)]) + + +@pytest.fixture +def data_for_grouping(): + a = (0, 1) + b = (1, 2) + c = (2, 3) + return IntervalArray.from_tuples([b, b, None, None, a, a, b, c]) + + +class TestIntervalArray(base.ExtensionTests): + divmod_exc = TypeError + + def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool: + return op_name in ["min", "max"] + + @pytest.mark.xfail( + reason="Raises with incorrect message bc it disallows *all* listlikes " + "instead of just wrong-length listlikes" + ) + def test_fillna_length_mismatch(self, data_missing): + super().test_fillna_length_mismatch(data_missing) + + +# TODO: either belongs in tests.arrays.interval or move into base tests. +def test_fillna_non_scalar_raises(data_missing): + msg = "can only insert Interval objects and NA into an IntervalArray" + with pytest.raises(TypeError, match=msg): + data_missing.fillna([1, 1]) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_masked.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_masked.py new file mode 100644 index 0000000000000000000000000000000000000000..651f783b44d1f788b57df4fc1ff0c8b2b33bc0f3 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_masked.py @@ -0,0 +1,417 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. 
+ +""" +import warnings + +import numpy as np +import pytest + +from pandas.compat import ( + IS64, + is_platform_windows, +) +from pandas.compat.numpy import np_version_gt2 + +from pandas.core.dtypes.common import ( + is_float_dtype, + is_signed_integer_dtype, + is_unsigned_integer_dtype, +) + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays.boolean import BooleanDtype +from pandas.core.arrays.floating import ( + Float32Dtype, + Float64Dtype, +) +from pandas.core.arrays.integer import ( + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, +) +from pandas.tests.extension import base + +is_windows_or_32bit = (is_platform_windows() and not np_version_gt2) or not IS64 + +pytestmark = [ + pytest.mark.filterwarnings( + "ignore:invalid value encountered in divide:RuntimeWarning" + ), + pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning"), + # overflow only relevant for Floating dtype cases cases + pytest.mark.filterwarnings("ignore:overflow encountered in reduce:RuntimeWarning"), +] + + +def make_data(): + return list(range(1, 9)) + [pd.NA] + list(range(10, 98)) + [pd.NA] + [99, 100] + + +def make_float_data(): + return ( + list(np.arange(0.1, 0.9, 0.1)) + + [pd.NA] + + list(np.arange(1, 9.8, 0.1)) + + [pd.NA] + + [9.9, 10.0] + ) + + +def make_bool_data(): + return [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False] + + +@pytest.fixture( + params=[ + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, + Float32Dtype, + Float64Dtype, + BooleanDtype, + ] +) +def dtype(request): + return request.param() + + +@pytest.fixture +def data(dtype): + if dtype.kind == "f": + data = make_float_data() + elif dtype.kind == "b": + data = make_bool_data() + else: + data = make_data() + return pd.array(data, dtype=dtype) + + +@pytest.fixture +def data_for_twos(dtype): + if dtype.kind == "b": + return pd.array(np.ones(100), dtype=dtype) + return pd.array(np.ones(100) * 2, dtype=dtype) + + +@pytest.fixture +def data_missing(dtype): + if dtype.kind == "f": + return pd.array([pd.NA, 0.1], dtype=dtype) + elif dtype.kind == "b": + return pd.array([np.nan, True], dtype=dtype) + return pd.array([pd.NA, 1], dtype=dtype) + + +@pytest.fixture +def data_for_sorting(dtype): + if dtype.kind == "f": + return pd.array([0.1, 0.2, 0.0], dtype=dtype) + elif dtype.kind == "b": + return pd.array([True, True, False], dtype=dtype) + return pd.array([1, 2, 0], dtype=dtype) + + +@pytest.fixture +def data_missing_for_sorting(dtype): + if dtype.kind == "f": + return pd.array([0.1, pd.NA, 0.0], dtype=dtype) + elif dtype.kind == "b": + return pd.array([True, np.nan, False], dtype=dtype) + return pd.array([1, pd.NA, 0], dtype=dtype) + + +@pytest.fixture +def na_cmp(): + # we are pd.NA + return lambda x, y: x is pd.NA and y is pd.NA + + +@pytest.fixture +def data_for_grouping(dtype): + if dtype.kind == "f": + b = 0.1 + a = 0.0 + c = 0.2 + elif dtype.kind == "b": + b = True + a = False + c = b + else: + b = 1 + a = 0 + c = 2 + + na = pd.NA + return pd.array([b, b, na, na, a, a, b, c], dtype=dtype) + + +class TestMaskedArrays(base.ExtensionTests): + @pytest.mark.parametrize("na_action", [None, "ignore"]) + def test_map(self, data_missing, na_action): + result = data_missing.map(lambda x: x, na_action=na_action) + if data_missing.dtype == Float32Dtype(): + # map roundtrips through objects, which converts to float64 + expected = 
data_missing.to_numpy(dtype="float64", na_value=np.nan) + else: + expected = data_missing.to_numpy() + tm.assert_numpy_array_equal(result, expected) + + def test_map_na_action_ignore(self, data_missing_for_sorting): + zero = data_missing_for_sorting[2] + result = data_missing_for_sorting.map(lambda x: zero, na_action="ignore") + if data_missing_for_sorting.dtype.kind == "b": + expected = np.array([False, pd.NA, False], dtype=object) + else: + expected = np.array([zero, np.nan, zero]) + tm.assert_numpy_array_equal(result, expected) + + def _get_expected_exception(self, op_name, obj, other): + try: + dtype = tm.get_dtype(obj) + except AttributeError: + # passed arguments reversed + dtype = tm.get_dtype(other) + + if dtype.kind == "b": + if op_name.strip("_").lstrip("r") in ["pow", "truediv", "floordiv"]: + # match behavior with non-masked bool dtype + return NotImplementedError + elif op_name in ["__sub__", "__rsub__"]: + # exception message would include "numpy boolean subtract" + return TypeError + return None + return None + + def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result): + sdtype = tm.get_dtype(obj) + expected = pointwise_result + + if op_name in ("eq", "ne", "le", "ge", "lt", "gt"): + return expected.astype("boolean") + + if sdtype.kind in "iu": + if op_name in ("__rtruediv__", "__truediv__", "__div__"): + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + "Downcasting object dtype arrays", + category=FutureWarning, + ) + filled = expected.fillna(np.nan) + expected = filled.astype("Float64") + else: + # combine method results in 'biggest' (int64) dtype + expected = expected.astype(sdtype) + elif sdtype.kind == "b": + if op_name in ( + "__floordiv__", + "__rfloordiv__", + "__pow__", + "__rpow__", + "__mod__", + "__rmod__", + ): + # combine keeps boolean type, while the masked op returns Int8 + expected = expected.astype("Int8") + + elif op_name in ("__truediv__", "__rtruediv__"): + # combine with bools does not generate the correct result + # (numpy behaviour for div is to regard the bools as numeric) + op = self.get_op_from_name(op_name) + expected = self._combine(obj.astype(float), other, op) + expected = expected.astype("Float64") + + if op_name == "__rpow__": + # for rpow, combine does not propagate NaN + result = getattr(obj, op_name)(other) + expected[result.isna()] = np.nan + else: + # combine method results in 'biggest' (float64) dtype + expected = expected.astype(sdtype) + return expected + + def test_divmod_series_array(self, data, data_for_twos, request): + if data.dtype.kind == "b": + mark = pytest.mark.xfail( + reason="Inconsistency between floordiv and divmod; we raise for " + "floordiv but not for divmod. This matches what we do for " + "non-masked bool dtype." + ) + request.applymarker(mark) + super().test_divmod_series_array(data, data_for_twos) + + def test_combine_le(self, data_repeated): + # TODO: patching self is a bad pattern here + orig_data1, orig_data2 = data_repeated(2) + if orig_data1.dtype.kind == "b": + self._combine_le_expected_dtype = "boolean" + else: + # TODO: can we make this boolean?
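+ # Series.combine evaluates elementwise through object, so for the numeric + # masked dtypes the <= result currently comes back as object dtype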
+ self._combine_le_expected_dtype = object + super().test_combine_le(data_repeated) + + def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool: + if op_name in ["any", "all"] and ser.dtype.kind != "b": + pytest.skip(reason="Tested in tests/reductions/test_reductions.py") + return True + + def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool): + # overwrite to ensure pd.NA is tested instead of np.nan + # https://github.com/pandas-dev/pandas/issues/30958 + + cmp_dtype = "int64" + if ser.dtype.kind == "f": + # Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has + # no attribute "numpy_dtype" + cmp_dtype = ser.dtype.numpy_dtype # type: ignore[union-attr] + elif ser.dtype.kind == "b": + if op_name in ["min", "max"]: + cmp_dtype = "bool" + + # TODO: prod with integer dtypes does *not* match the result we would + # get if we used object for cmp_dtype. In that case the object result + # is a large integer while the non-object case overflows and returns 0 + alt = ser.dropna().astype(cmp_dtype) + if op_name == "count": + result = getattr(ser, op_name)() + expected = getattr(alt, op_name)() + else: + result = getattr(ser, op_name)(skipna=skipna) + expected = getattr(alt, op_name)(skipna=skipna) + if not skipna and ser.isna().any() and op_name not in ["any", "all"]: + expected = pd.NA + tm.assert_almost_equal(result, expected) + + def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool): + if is_float_dtype(arr.dtype): + cmp_dtype = arr.dtype.name + elif op_name in ["mean", "median", "var", "std", "skew"]: + cmp_dtype = "Float64" + elif op_name in ["max", "min"]: + cmp_dtype = arr.dtype.name + elif arr.dtype in ["Int64", "UInt64"]: + cmp_dtype = arr.dtype.name + elif is_signed_integer_dtype(arr.dtype): + # TODO: Why does the Windows NumPy 2.0 dtype depend on skipna?
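+ # restating the condition below: on Windows with NumPy < 2 (or with + # skipna=False) and on 32-bit builds, integer reductions accumulate in + # 32 bits, so the expected dtype shrinks to Int32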
+ cmp_dtype = ( + "Int32" + if (is_platform_windows() and (not np_version_gt2 or not skipna)) + or not IS64 + else "Int64" + ) + elif is_unsigned_integer_dtype(arr.dtype): + cmp_dtype = ( + "UInt32" + if (is_platform_windows() and (not np_version_gt2 or not skipna)) + or not IS64 + else "UInt64" + ) + elif arr.dtype.kind == "b": + if op_name in ["mean", "median", "var", "std", "skew"]: + cmp_dtype = "Float64" + elif op_name in ["min", "max"]: + cmp_dtype = "boolean" + elif op_name in ["sum", "prod"]: + cmp_dtype = ( + "Int32" + if (is_platform_windows() and (not np_version_gt2 or not skipna)) + or not IS64 + else "Int64" + ) + else: + raise TypeError("not supposed to reach this") + else: + raise TypeError("not supposed to reach this") + return cmp_dtype + + def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool: + return True + + def check_accumulate(self, ser: pd.Series, op_name: str, skipna: bool): + # overwrite to ensure pd.NA is tested instead of np.nan + # https://github.com/pandas-dev/pandas/issues/30958 + length = 64 + if is_windows_or_32bit: + # Item "ExtensionDtype" of "Union[dtype[Any], ExtensionDtype]" has + # no attribute "itemsize" + if not ser.dtype.itemsize == 8: # type: ignore[union-attr] + length = 32 + + if ser.dtype.name.startswith("U"): + expected_dtype = f"UInt{length}" + elif ser.dtype.name.startswith("I"): + expected_dtype = f"Int{length}" + elif ser.dtype.name.startswith("F"): + # Incompatible types in assignment (expression has type + # "Union[dtype[Any], ExtensionDtype]", variable has type "str") + expected_dtype = ser.dtype # type: ignore[assignment] + elif ser.dtype.kind == "b": + if op_name in ("cummin", "cummax"): + expected_dtype = "boolean" + else: + expected_dtype = f"Int{length}" + + if expected_dtype == "Float32" and op_name == "cumprod" and skipna: + # TODO: xfail? + pytest.skip( + f"Float32 precision leads to large differences with op {op_name} " + f"and skipna={skipna}" + ) + + if op_name == "cumsum": + result = getattr(ser, op_name)(skipna=skipna) + expected = pd.Series( + pd.array( + getattr(ser.astype("float64"), op_name)(skipna=skipna), + dtype=expected_dtype, + ) + ) + tm.assert_series_equal(result, expected) + elif op_name in ["cummax", "cummin"]: + result = getattr(ser, op_name)(skipna=skipna) + expected = pd.Series( + pd.array( + getattr(ser.astype("float64"), op_name)(skipna=skipna), + dtype=ser.dtype, + ) + ) + tm.assert_series_equal(result, expected) + elif op_name == "cumprod": + result = getattr(ser[:12], op_name)(skipna=skipna) + expected = pd.Series( + pd.array( + getattr(ser[:12].astype("float64"), op_name)(skipna=skipna), + dtype=expected_dtype, + ) + ) + tm.assert_series_equal(result, expected) + + else: + raise NotImplementedError(f"{op_name} not supported") + + +class Test2DCompat(base.Dim2CompatTests): + pass diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_numpy.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_numpy.py new file mode 100644 index 0000000000000000000000000000000000000000..e38144f4c615b22c864a5b385e3b73fd74374f83 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_numpy.py @@ -0,0 +1,426 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`.
+ +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. + +Note: we do not bother with base.BaseIndexTests because NumpyExtensionArray +will never be held in an Index. +""" +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import NumpyEADtype + +import pandas as pd +import pandas._testing as tm +from pandas.api.types import is_object_dtype +from pandas.core.arrays.numpy_ import NumpyExtensionArray +from pandas.tests.extension import base + +orig_assert_attr_equal = tm.assert_attr_equal + + +def _assert_attr_equal(attr: str, left, right, obj: str = "Attributes"): + """ + patch tm.assert_attr_equal so NumpyEADtype("object") is close enough to + np.dtype("object") + """ + if attr == "dtype": + lattr = getattr(left, "dtype", None) + rattr = getattr(right, "dtype", None) + if isinstance(lattr, NumpyEADtype) and not isinstance(rattr, NumpyEADtype): + left = left.astype(lattr.numpy_dtype) + elif isinstance(rattr, NumpyEADtype) and not isinstance(lattr, NumpyEADtype): + right = right.astype(rattr.numpy_dtype) + + orig_assert_attr_equal(attr, left, right, obj) + + +@pytest.fixture(params=["float", "object"]) +def dtype(request): + return NumpyEADtype(np.dtype(request.param)) + + +@pytest.fixture +def allow_in_pandas(monkeypatch): + """ + A monkeypatch that tells pandas to let us in. + + By default, passing a NumpyExtensionArray to an index / series / frame + constructor will unbox that NumpyExtensionArray to an ndarray, and treat + it as a non-EA column. We don't want people using EAs without + reason. + + The mechanism for this is a check against ABCNumpyExtensionArray + in each constructor. + + But, for testing, we need to allow them in pandas. So we patch + the _typ of NumpyExtensionArray, so that we evade the ABCNumpyExtensionArray + check. + """ + with monkeypatch.context() as m: + m.setattr(NumpyExtensionArray, "_typ", "extension") + m.setattr(tm.asserters, "assert_attr_equal", _assert_attr_equal) + yield + + +@pytest.fixture +def data(allow_in_pandas, dtype): + if dtype.numpy_dtype == "object": + return pd.Series([(i,) for i in range(100)]).array + return NumpyExtensionArray(np.arange(1, 101, dtype=dtype._dtype)) + + +@pytest.fixture +def data_missing(allow_in_pandas, dtype): + if dtype.numpy_dtype == "object": + return NumpyExtensionArray(np.array([np.nan, (1,)], dtype=object)) + return NumpyExtensionArray(np.array([np.nan, 1.0])) + + +@pytest.fixture +def na_cmp(): + def cmp(a, b): + return np.isnan(a) and np.isnan(b) + + return cmp + + +@pytest.fixture +def data_for_sorting(allow_in_pandas, dtype): + """Length-3 array with a known sort order. + + This should be three items [B, C, A] with + A < B < C + """ + if dtype.numpy_dtype == "object": + # Use an empty tuple for first element, then remove, + # to disable np.array's shape inference. + return NumpyExtensionArray(np.array([(), (2,), (3,), (1,)], dtype=object)[1:]) + return NumpyExtensionArray(np.array([1, 2, 0])) + + +@pytest.fixture +def data_missing_for_sorting(allow_in_pandas, dtype): + """Length-3 array with a known sort order. + + This should be three items [B, NA, A] with + A < B and NA missing.
+ """ + if dtype.numpy_dtype == "object": + return NumpyExtensionArray(np.array([(1,), np.nan, (0,)], dtype=object)) + return NumpyExtensionArray(np.array([1, np.nan, 0])) + + +@pytest.fixture +def data_for_grouping(allow_in_pandas, dtype): + """Data for factorization, grouping, and unique tests. + + Expected to be like [B, B, NA, NA, A, A, B, C] + + Where A < B < C and NA is missing + """ + if dtype.numpy_dtype == "object": + a, b, c = (1,), (2,), (3,) + else: + a, b, c = np.arange(3) + return NumpyExtensionArray( + np.array([b, b, np.nan, np.nan, a, a, b, c], dtype=dtype.numpy_dtype) + ) + + +@pytest.fixture +def data_for_twos(dtype): + if dtype.kind == "O": + pytest.skip(f"{dtype} is not a numeric dtype") + arr = np.ones(100) * 2 + return NumpyExtensionArray._from_sequence(arr, dtype=dtype) + + +@pytest.fixture +def skip_numpy_object(dtype, request): + """ + Tests for NumpyExtensionArray with nested data. Users typically won't create + these objects via `pd.array`, but they can show up through `.array` + on a Series with nested data. Many of the base tests fail, as they aren't + appropriate for nested data. + + This fixture allows these tests to be skipped when used as a usefixtures + marker to either an individual test or a test class. + """ + if dtype == "object": + mark = pytest.mark.xfail(reason="Fails for object dtype") + request.applymarker(mark) + + +skip_nested = pytest.mark.usefixtures("skip_numpy_object") + + +class TestNumpyExtensionArray(base.ExtensionTests): + @pytest.mark.skip(reason="We don't register our dtype") + # We don't want to register. This test should probably be split in two. + def test_from_dtype(self, data): + pass + + @skip_nested + def test_series_constructor_scalar_with_index(self, data, dtype): + # ValueError: Length of passed values is 1, index implies 3. + super().test_series_constructor_scalar_with_index(data, dtype) + + def test_check_dtype(self, data, request, using_infer_string): + if data.dtype.numpy_dtype == "object": + request.applymarker( + pytest.mark.xfail( + reason=f"NumpyExtensionArray expectedly clashes with a " + f"NumPy name: {data.dtype.numpy_dtype}" + ) + ) + super().test_check_dtype(data) + + def test_is_not_object_type(self, dtype, request): + if dtype.numpy_dtype == "object": + # Different from BaseDtypeTests.test_is_not_object_type + # because NumpyEADtype(object) is an object type + assert is_object_dtype(dtype) + else: + super().test_is_not_object_type(dtype) + + @skip_nested + def test_getitem_scalar(self, data): + # AssertionError + super().test_getitem_scalar(data) + + @skip_nested + def test_shift_fill_value(self, data): + # np.array shape inference. Shift implementation fails. + super().test_shift_fill_value(data) + + @skip_nested + def test_fillna_copy_frame(self, data_missing): + # The "scalar" for this array isn't a scalar. + super().test_fillna_copy_frame(data_missing) + + @skip_nested + def test_fillna_copy_series(self, data_missing): + # The "scalar" for this array isn't a scalar. + super().test_fillna_copy_series(data_missing) + + @skip_nested + def test_searchsorted(self, data_for_sorting, as_series): + # TODO: NumpyExtensionArray.searchsorted calls ndarray.searchsorted which + # isn't quite what we want in nested data cases. Instead we need to + # adapt something like libindex._bin_search. 
+ super().test_searchsorted(data_for_sorting, as_series) + + @pytest.mark.xfail(reason="NumpyExtensionArray.diff may fail on dtype") + def test_diff(self, data, periods): + return super().test_diff(data, periods) + + def test_insert(self, data, request): + if data.dtype.numpy_dtype == object: + mark = pytest.mark.xfail(reason="Dimension mismatch in np.concatenate") + request.applymarker(mark) + + super().test_insert(data) + + @skip_nested + def test_insert_invalid(self, data, invalid_scalar): + # NumpyExtensionArray[object] can hold anything, so skip + super().test_insert_invalid(data, invalid_scalar) + + divmod_exc = None + series_scalar_exc = None + frame_scalar_exc = None + series_array_exc = None + + def test_divmod(self, data): + divmod_exc = None + if data.dtype.kind == "O": + divmod_exc = TypeError + self.divmod_exc = divmod_exc + super().test_divmod(data) + + def test_divmod_series_array(self, data): + ser = pd.Series(data) + exc = None + if data.dtype.kind == "O": + exc = TypeError + self.divmod_exc = exc + self._check_divmod_op(ser, divmod, data) + + def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request): + opname = all_arithmetic_operators + series_scalar_exc = None + if data.dtype.numpy_dtype == object: + if opname in ["__mul__", "__rmul__"]: + mark = pytest.mark.xfail( + reason="the Series.combine step raises but not the Series method." + ) + request.node.add_marker(mark) + series_scalar_exc = TypeError + self.series_scalar_exc = series_scalar_exc + super().test_arith_series_with_scalar(data, all_arithmetic_operators) + + def test_arith_series_with_array(self, data, all_arithmetic_operators): + opname = all_arithmetic_operators + series_array_exc = None + if data.dtype.numpy_dtype == object and opname not in ["__add__", "__radd__"]: + series_array_exc = TypeError + self.series_array_exc = series_array_exc + super().test_arith_series_with_array(data, all_arithmetic_operators) + + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request): + opname = all_arithmetic_operators + frame_scalar_exc = None + if data.dtype.numpy_dtype == object: + if opname in ["__mul__", "__rmul__"]: + mark = pytest.mark.xfail( + reason="the Series.combine step raises but not the Series method." + ) + request.node.add_marker(mark) + frame_scalar_exc = TypeError + self.frame_scalar_exc = frame_scalar_exc + super().test_arith_frame_with_scalar(data, all_arithmetic_operators) + + def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool: + if ser.dtype.kind == "O": + return op_name in ["sum", "min", "max", "any", "all"] + return True + + def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool): + res_op = getattr(ser, op_name) + # avoid coercing int -> float. Just cast to the actual numpy type. + # error: Item "ExtensionDtype" of "dtype[Any] | ExtensionDtype" has + # no attribute "numpy_dtype" + cmp_dtype = ser.dtype.numpy_dtype # type: ignore[union-attr] + alt = ser.astype(cmp_dtype) + exp_op = getattr(alt, op_name) + if op_name == "count": + result = res_op() + expected = exp_op() + else: + result = res_op(skipna=skipna) + expected = exp_op(skipna=skipna) + tm.assert_almost_equal(result, expected) + + @pytest.mark.skip("TODO: tests not written yet") + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_frame(self, data, all_numeric_reductions, skipna): + pass + + @skip_nested + def test_fillna_series(self, data_missing): + # Non-scalar "scalar" values. 
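+ # (for the object-dtype fixture each element is a tuple, so the "scalar" + # fill value used by the base test is itself list-like)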
+ super().test_fillna_series(data_missing) + + @skip_nested + def test_fillna_frame(self, data_missing): + # Non-scalar "scalar" values. + super().test_fillna_frame(data_missing) + + @skip_nested + def test_setitem_invalid(self, data, invalid_scalar): + # object dtype can hold anything, so doesn't raise + super().test_setitem_invalid(data, invalid_scalar) + + @skip_nested + def test_setitem_sequence_broadcasts(self, data, box_in_series): + # ValueError: cannot set using a list-like indexer with a different + # length than the value + super().test_setitem_sequence_broadcasts(data, box_in_series) + + @skip_nested + @pytest.mark.parametrize("setter", ["loc", None]) + def test_setitem_mask_broadcast(self, data, setter): + # ValueError: cannot set using a list-like indexer with a different + # length than the value + super().test_setitem_mask_broadcast(data, setter) + + @skip_nested + def test_setitem_scalar_key_sequence_raise(self, data): + # Failed: DID NOT RAISE + super().test_setitem_scalar_key_sequence_raise(data) + + # TODO: there is some issue with NumpyExtensionArray, therefore, + # skip the setitem test for now, and fix it later (GH 31446) + + @skip_nested + @pytest.mark.parametrize( + "mask", + [ + np.array([True, True, True, False, False]), + pd.array([True, True, True, False, False], dtype="boolean"), + ], + ids=["numpy-array", "boolean-array"], + ) + def test_setitem_mask(self, data, mask, box_in_series): + super().test_setitem_mask(data, mask, box_in_series) + + @skip_nested + @pytest.mark.parametrize( + "idx", + [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])], + ids=["list", "integer-array", "numpy-array"], + ) + def test_setitem_integer_array(self, data, idx, box_in_series): + super().test_setitem_integer_array(data, idx, box_in_series) + + @pytest.mark.parametrize( + "idx, box_in_series", + [ + ([0, 1, 2, pd.NA], False), + pytest.param([0, 1, 2, pd.NA], True, marks=pytest.mark.xfail), + (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False), + pytest.param( + pd.array([0, 1, 2, pd.NA], dtype="Int64"), + True, + marks=pytest.mark.xfail, + ), + ], + ids=["list-False", "list-True", "integer-array-False", "integer-array-True"], + ) + def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series): + super().test_setitem_integer_with_missing_raises(data, idx, box_in_series) + + @skip_nested + def test_setitem_slice(self, data, box_in_series): + super().test_setitem_slice(data, box_in_series) + + @skip_nested + def test_setitem_loc_iloc_slice(self, data): + super().test_setitem_loc_iloc_slice(data) + + def test_setitem_with_expansion_dataframe_column(self, data, full_indexer): + # https://github.com/pandas-dev/pandas/issues/32395 + df = expected = pd.DataFrame({"data": pd.Series(data)}) + result = pd.DataFrame(index=df.index) + + # because result has object dtype, the attempt to do setting inplace + # is successful, and object dtype is retained + key = full_indexer(df) + result.loc[key, "data"] = df["data"] + + # base class method has expected = df; NumpyExtensionArray behaves oddly because + # we patch _typ for these tests.
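+ # for non-object dtypes the inplace-setting path is not taken, so the + # expected frame is rebuilt below from the raw numpy values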
+ if data.dtype.numpy_dtype != object: + if not isinstance(key, slice) or key != slice(None): + expected = pd.DataFrame({"data": data.to_numpy()}) + tm.assert_frame_equal(result, expected, check_column_type=False) + + @pytest.mark.xfail(reason="NumpyEADtype is unpacked") + def test_index_from_listlike_with_dtype(self, data): + super().test_index_from_listlike_with_dtype(data) + + @skip_nested + @pytest.mark.parametrize("engine", ["c", "python"]) + def test_EA_types(self, engine, data, request): + super().test_EA_types(engine, data, request) + + +class Test2DCompat(base.NDArrayBacked2DTests): + pass diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_period.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_period.py new file mode 100644 index 0000000000000000000000000000000000000000..2d1d213322bac02e65f710ac77943876425102a5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_period.py @@ -0,0 +1,119 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. + +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np +import pytest + +from pandas._libs import ( + Period, + iNaT, +) +from pandas.compat import is_platform_windows +from pandas.compat.numpy import np_version_gte1p24 + +from pandas.core.dtypes.dtypes import PeriodDtype + +import pandas._testing as tm +from pandas.core.arrays import PeriodArray +from pandas.tests.extension import base + +if TYPE_CHECKING: + import pandas as pd + + +@pytest.fixture(params=["D", "2D"]) +def dtype(request): + return PeriodDtype(freq=request.param) + + +@pytest.fixture +def data(dtype): + return PeriodArray(np.arange(1970, 2070), dtype=dtype) + + +@pytest.fixture +def data_for_sorting(dtype): + return PeriodArray([2018, 2019, 2017], dtype=dtype) + + +@pytest.fixture +def data_missing(dtype): + return PeriodArray([iNaT, 2017], dtype=dtype) + + +@pytest.fixture +def data_missing_for_sorting(dtype): + return PeriodArray([2018, iNaT, 2017], dtype=dtype) + + +@pytest.fixture +def data_for_grouping(dtype): + B = 2018 + NA = iNaT + A = 2017 + C = 2019 + return PeriodArray([B, B, NA, NA, A, A, B, C], dtype=dtype) + + +class TestPeriodArray(base.ExtensionTests): + def _get_expected_exception(self, op_name, obj, other): + if op_name in ("__sub__", "__rsub__"): + return None + return super()._get_expected_exception(op_name, obj, other) + + def _supports_accumulation(self, ser, op_name: str) -> bool: + return op_name in ["cummin", "cummax"] + + def _supports_reduction(self, obj, op_name: str) -> bool: + return op_name in ["min", "max", "median"] + + def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool): + if op_name == "median": + res_op = getattr(ser, op_name) + + alt = ser.astype("int64") + + exp_op = getattr(alt, op_name) + result = res_op(skipna=skipna) + expected = exp_op(skipna=skipna) + # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype" 
has no + # attribute "freq" + freq = ser.dtype.freq # type: ignore[union-attr] + expected = Period._from_ordinal(int(expected), freq=freq) + tm.assert_almost_equal(result, expected) + + else: + return super().check_reduce(ser, op_name, skipna) + + @pytest.mark.parametrize("periods", [1, -2]) + def test_diff(self, data, periods): + if is_platform_windows() and np_version_gte1p24: + with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False): + super().test_diff(data, periods) + else: + super().test_diff(data, periods) + + @pytest.mark.parametrize("na_action", [None, "ignore"]) + def test_map(self, data, na_action): + result = data.map(lambda x: x, na_action=na_action) + tm.assert_extension_array_equal(result, data) + + +class Test2DCompat(base.NDArrayBacked2DTests): + pass diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_sparse.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_sparse.py new file mode 100644 index 0000000000000000000000000000000000000000..4039a5d01f372d175eca2ec049ab3895ed59886c --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_sparse.py @@ -0,0 +1,498 @@ +""" +This file contains a minimal set of tests for compliance with the extension +array interface test suite, and should contain no other tests. +The test suite for the full functionality of the array is located in +`pandas/tests/arrays/`. + +The tests in this file are inherited from the BaseExtensionTests, and only +minimal tweaks should be applied to get the tests passing (by overwriting a +parent method). + +Additional tests should either be added to one of the BaseExtensionTests +classes (if they are relevant for the extension interface for all dtypes), or +be added to the array-specific tests in `pandas/tests/arrays/`. 
+ +""" + +import numpy as np +import pytest + +from pandas.errors import PerformanceWarning + +import pandas as pd +from pandas import SparseDtype +import pandas._testing as tm +from pandas.arrays import SparseArray +from pandas.tests.extension import base + + +def make_data(fill_value): + rng = np.random.default_rng(2) + if np.isnan(fill_value): + data = rng.uniform(size=100) + else: + data = rng.integers(1, 100, size=100, dtype=int) + if data[0] == data[1]: + data[0] += 1 + + data[2::3] = fill_value + return data + + +@pytest.fixture +def dtype(): + return SparseDtype() + + +@pytest.fixture(params=[0, np.nan]) +def data(request): + """Length-100 PeriodArray for semantics test.""" + res = SparseArray(make_data(request.param), fill_value=request.param) + return res + + +@pytest.fixture +def data_for_twos(): + return SparseArray(np.ones(100) * 2) + + +@pytest.fixture(params=[0, np.nan]) +def data_missing(request): + """Length 2 array with [NA, Valid]""" + return SparseArray([np.nan, 1], fill_value=request.param) + + +@pytest.fixture(params=[0, np.nan]) +def data_repeated(request): + """Return different versions of data for count times""" + + def gen(count): + for _ in range(count): + yield SparseArray(make_data(request.param), fill_value=request.param) + + yield gen + + +@pytest.fixture(params=[0, np.nan]) +def data_for_sorting(request): + return SparseArray([2, 3, 1], fill_value=request.param) + + +@pytest.fixture(params=[0, np.nan]) +def data_missing_for_sorting(request): + return SparseArray([2, np.nan, 1], fill_value=request.param) + + +@pytest.fixture +def na_cmp(): + return lambda left, right: pd.isna(left) and pd.isna(right) + + +@pytest.fixture(params=[0, np.nan]) +def data_for_grouping(request): + return SparseArray([1, 1, np.nan, np.nan, 2, 2, 1, 3], fill_value=request.param) + + +@pytest.fixture(params=[0, np.nan]) +def data_for_compare(request): + return SparseArray([0, 0, np.nan, -2, -1, 4, 2, 3, 0, 0], fill_value=request.param) + + +class TestSparseArray(base.ExtensionTests): + def _supports_reduction(self, obj, op_name: str) -> bool: + return True + + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request): + if all_numeric_reductions in [ + "prod", + "median", + "var", + "std", + "sem", + "skew", + "kurt", + ]: + mark = pytest.mark.xfail( + reason="This should be viable but is not implemented" + ) + request.node.add_marker(mark) + elif ( + all_numeric_reductions in ["sum", "max", "min", "mean"] + and data.dtype.kind == "f" + and not skipna + ): + mark = pytest.mark.xfail(reason="getting a non-nan float") + request.node.add_marker(mark) + + super().test_reduce_series_numeric(data, all_numeric_reductions, skipna) + + @pytest.mark.parametrize("skipna", [True, False]) + def test_reduce_frame(self, data, all_numeric_reductions, skipna, request): + if all_numeric_reductions in [ + "prod", + "median", + "var", + "std", + "sem", + "skew", + "kurt", + ]: + mark = pytest.mark.xfail( + reason="This should be viable but is not implemented" + ) + request.node.add_marker(mark) + elif ( + all_numeric_reductions in ["sum", "max", "min", "mean"] + and data.dtype.kind == "f" + and not skipna + ): + mark = pytest.mark.xfail(reason="ExtensionArray NA mask are different") + request.node.add_marker(mark) + + super().test_reduce_frame(data, all_numeric_reductions, skipna) + + def _check_unsupported(self, data): + if data.dtype == SparseDtype(int, 0): + pytest.skip("Can't store nan in int array.") + + def 
test_concat_mixed_dtypes(self, data): + # https://github.com/pandas-dev/pandas/issues/20762 + # This should be the same, aside from concat([sparse, float]) + df1 = pd.DataFrame({"A": data[:3]}) + df2 = pd.DataFrame({"A": [1, 2, 3]}) + df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category") + dfs = [df1, df2, df3] + + # dataframes + result = pd.concat(dfs) + expected = pd.concat( + [x.apply(lambda s: np.asarray(s).astype(object)) for x in dfs] + ) + tm.assert_frame_equal(result, expected) + + @pytest.mark.filterwarnings( + "ignore:The previous implementation of stack is deprecated" + ) + @pytest.mark.parametrize( + "columns", + [ + ["A", "B"], + pd.MultiIndex.from_tuples( + [("A", "a"), ("A", "b")], names=["outer", "inner"] + ), + ], + ) + @pytest.mark.parametrize("future_stack", [True, False]) + def test_stack(self, data, columns, future_stack): + super().test_stack(data, columns, future_stack) + + def test_concat_columns(self, data, na_value): + self._check_unsupported(data) + super().test_concat_columns(data, na_value) + + def test_concat_extension_arrays_copy_false(self, data, na_value): + self._check_unsupported(data) + super().test_concat_extension_arrays_copy_false(data, na_value) + + def test_align(self, data, na_value): + self._check_unsupported(data) + super().test_align(data, na_value) + + def test_align_frame(self, data, na_value): + self._check_unsupported(data) + super().test_align_frame(data, na_value) + + def test_align_series_frame(self, data, na_value): + self._check_unsupported(data) + super().test_align_series_frame(data, na_value) + + def test_merge(self, data, na_value): + self._check_unsupported(data) + super().test_merge(data, na_value) + + def test_get(self, data): + ser = pd.Series(data, index=[2 * i for i in range(len(data))]) + if np.isnan(ser.values.fill_value): + assert np.isnan(ser.get(4)) and np.isnan(ser.iloc[2]) + else: + assert ser.get(4) == ser.iloc[2] + assert ser.get(2) == ser.iloc[1] + + def test_reindex(self, data, na_value): + self._check_unsupported(data) + super().test_reindex(data, na_value) + + def test_isna(self, data_missing): + sarr = SparseArray(data_missing) + expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value)) + expected = SparseArray([True, False], dtype=expected_dtype) + result = sarr.isna() + tm.assert_sp_array_equal(result, expected) + + # test isna for arr without na + sarr = sarr.fillna(0) + expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value)) + expected = SparseArray([False, False], fill_value=False, dtype=expected_dtype) + tm.assert_equal(sarr.isna(), expected) + + def test_fillna_limit_backfill(self, data_missing): + warns = (PerformanceWarning, FutureWarning) + with tm.assert_produces_warning(warns, check_stacklevel=False): + super().test_fillna_limit_backfill(data_missing) + + def test_fillna_no_op_returns_copy(self, data, request): + if np.isnan(data.fill_value): + request.applymarker( + pytest.mark.xfail(reason="returns array with different fill value") + ) + super().test_fillna_no_op_returns_copy(data) + + @pytest.mark.xfail(reason="Unsupported") + def test_fillna_series(self, data_missing): + # this one looks doable. + # TODO: this fails bc we do not pass through data_missing. If we did, + # the 0-fill case would xpass + super().test_fillna_series() + + def test_fillna_frame(self, data_missing): + # Have to override to specify that fill_value will change. 
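+ # when the original fill_value is NaN, filling promotes the SparseDtype's + # fill_value to the filled scalar, so the expected dtype is rebuilt below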
+ fill_value = data_missing[1] + + result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value) + + if pd.isna(data_missing.fill_value): + dtype = SparseDtype(data_missing.dtype, fill_value) + else: + dtype = data_missing.dtype + + expected = pd.DataFrame( + { + "A": data_missing._from_sequence([fill_value, fill_value], dtype=dtype), + "B": [1, 2], + } + ) + + tm.assert_frame_equal(result, expected) + + _combine_le_expected_dtype = "Sparse[bool]" + + def test_fillna_copy_frame(self, data_missing, using_copy_on_write): + arr = data_missing.take([1, 1]) + df = pd.DataFrame({"A": arr}, copy=False) + + filled_val = df.iloc[0, 0] + result = df.fillna(filled_val) + + if hasattr(df._mgr, "blocks"): + if using_copy_on_write: + assert df.values.base is result.values.base + else: + assert df.values.base is not result.values.base + assert df.A._values.to_dense() is arr.to_dense() + + def test_fillna_copy_series(self, data_missing, using_copy_on_write): + arr = data_missing.take([1, 1]) + ser = pd.Series(arr, copy=False) + + filled_val = ser[0] + result = ser.fillna(filled_val) + + if using_copy_on_write: + assert ser._values is result._values + + else: + assert ser._values is not result._values + assert ser._values.to_dense() is arr.to_dense() + + @pytest.mark.xfail(reason="Not Applicable") + def test_fillna_length_mismatch(self, data_missing): + super().test_fillna_length_mismatch(data_missing) + + def test_where_series(self, data, na_value): + assert data[0] != data[1] + cls = type(data) + a, b = data[:2] + + ser = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype)) + + cond = np.array([True, True, False, False]) + result = ser.where(cond) + + new_dtype = SparseDtype("float", 0.0) + expected = pd.Series( + cls._from_sequence([a, a, na_value, na_value], dtype=new_dtype) + ) + tm.assert_series_equal(result, expected) + + other = cls._from_sequence([a, b, a, b], dtype=data.dtype) + cond = np.array([True, False, True, True]) + result = ser.where(cond, other) + expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype)) + tm.assert_series_equal(result, expected) + + def test_searchsorted(self, data_for_sorting, as_series): + with tm.assert_produces_warning(PerformanceWarning, check_stacklevel=False): + super().test_searchsorted(data_for_sorting, as_series) + + def test_shift_0_periods(self, data): + # GH#33856 shifting with periods=0 should return a copy, not same obj + result = data.shift(0) + + data._sparse_values[0] = data._sparse_values[1] + assert result._sparse_values[0] != result._sparse_values[1] + + @pytest.mark.parametrize("method", ["argmax", "argmin"]) + def test_argmin_argmax_all_na(self, method, data, na_value): + # overriding because Sparse[int64, 0] cannot handle na_value + self._check_unsupported(data) + super().test_argmin_argmax_all_na(method, data, na_value) + + @pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame]) + def test_equals(self, data, na_value, as_series, box): + self._check_unsupported(data) + super().test_equals(data, na_value, as_series, box) + + @pytest.mark.parametrize( + "func, na_action, expected", + [ + (lambda x: x, None, SparseArray([1.0, np.nan])), + (lambda x: x, "ignore", SparseArray([1.0, np.nan])), + (str, None, SparseArray(["1.0", "nan"], fill_value="nan")), + (str, "ignore", SparseArray(["1.0", np.nan])), + ], + ) + def test_map(self, func, na_action, expected): + # GH52096 + data = SparseArray([1, np.nan]) + result = data.map(func, na_action=na_action) + tm.assert_extension_array_equal(result, expected) + 
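+ # note: SparseArray.map transforms the fill_value as well (see the str cases + # above); a mapping that sends sparse values onto the fill value is rejected + # with a ValueError, as exercised below +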
+ @pytest.mark.parametrize("na_action", [None, "ignore"]) + def test_map_raises(self, data, na_action): + # GH52096 + msg = "fill value in the sparse values not supported" + with pytest.raises(ValueError, match=msg): + data.map(lambda x: np.nan, na_action=na_action) + + @pytest.mark.xfail(raises=TypeError, reason="no sparse StringDtype") + def test_astype_string(self, data, nullable_string_dtype): + # TODO: this fails bc we do not pass through nullable_string_dtype; + # If we did, the 0-cases would xpass + super().test_astype_string(data) + + series_scalar_exc = None + frame_scalar_exc = None + divmod_exc = None + series_array_exc = None + + def _skip_if_different_combine(self, data): + if data.fill_value == 0: + # arith ops call on dtype.fill_value so that the sparsity + # is maintained. Combine can't be called on a dtype in + # general, so we can't make the expected. This is tested elsewhere + pytest.skip("Incorrected expected from Series.combine and tested elsewhere") + + def test_arith_series_with_scalar(self, data, all_arithmetic_operators): + self._skip_if_different_combine(data) + super().test_arith_series_with_scalar(data, all_arithmetic_operators) + + def test_arith_series_with_array(self, data, all_arithmetic_operators): + self._skip_if_different_combine(data) + super().test_arith_series_with_array(data, all_arithmetic_operators) + + def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request): + if data.dtype.fill_value != 0: + pass + elif all_arithmetic_operators.strip("_") not in [ + "mul", + "rmul", + "floordiv", + "rfloordiv", + "pow", + "mod", + "rmod", + ]: + mark = pytest.mark.xfail(reason="result dtype.fill_value mismatch") + request.applymarker(mark) + super().test_arith_frame_with_scalar(data, all_arithmetic_operators) + + def _compare_other( + self, ser: pd.Series, data_for_compare: SparseArray, comparison_op, other + ): + op = comparison_op + + result = op(data_for_compare, other) + if isinstance(other, pd.Series): + assert isinstance(result, pd.Series) + assert isinstance(result.dtype, SparseDtype) + else: + assert isinstance(result, SparseArray) + assert result.dtype.subtype == np.bool_ + + if isinstance(other, pd.Series): + fill_value = op(data_for_compare.fill_value, other._values.fill_value) + expected = SparseArray( + op(data_for_compare.to_dense(), np.asarray(other)), + fill_value=fill_value, + dtype=np.bool_, + ) + + else: + fill_value = np.all( + op(np.asarray(data_for_compare.fill_value), np.asarray(other)) + ) + + expected = SparseArray( + op(data_for_compare.to_dense(), np.asarray(other)), + fill_value=fill_value, + dtype=np.bool_, + ) + if isinstance(other, pd.Series): + # error: Incompatible types in assignment + expected = pd.Series(expected) # type: ignore[assignment] + tm.assert_equal(result, expected) + + def test_scalar(self, data_for_compare: SparseArray, comparison_op): + ser = pd.Series(data_for_compare) + self._compare_other(ser, data_for_compare, comparison_op, 0) + self._compare_other(ser, data_for_compare, comparison_op, 1) + self._compare_other(ser, data_for_compare, comparison_op, -1) + self._compare_other(ser, data_for_compare, comparison_op, np.nan) + + def test_array(self, data_for_compare: SparseArray, comparison_op, request): + if data_for_compare.dtype.fill_value == 0 and comparison_op.__name__ in [ + "eq", + "ge", + "le", + ]: + mark = pytest.mark.xfail(reason="Wrong fill_value") + request.applymarker(mark) + + arr = np.linspace(-4, 5, 10) + ser = pd.Series(data_for_compare) + self._compare_other(ser, 
+
+    def test_sparse_array(self, data_for_compare: SparseArray, comparison_op, request):
+        if data_for_compare.dtype.fill_value == 0 and comparison_op.__name__ != "gt":
+            mark = pytest.mark.xfail(reason="Wrong fill_value")
+            request.applymarker(mark)
+
+        ser = pd.Series(data_for_compare)
+        arr = data_for_compare + 1
+        self._compare_other(ser, data_for_compare, comparison_op, arr)
+        arr = data_for_compare * 2
+        self._compare_other(ser, data_for_compare, comparison_op, arr)
+
+    @pytest.mark.xfail(reason="Different repr")
+    def test_array_repr(self, data, size):
+        super().test_array_repr(data, size)
+
+    @pytest.mark.xfail(reason="result does not match expected")
+    @pytest.mark.parametrize("as_index", [True, False])
+    def test_groupby_extension_agg(self, as_index, data_for_grouping):
+        super().test_groupby_extension_agg(as_index, data_for_grouping)
+
+
+def test_array_type_with_arg(dtype):
+    assert dtype.construct_array_type() is SparseArray
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_string.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_string.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d5a134f8560a17b915486313730ea6c1be28b40
--- /dev/null
+++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_string.py
@@ -0,0 +1,242 @@
+"""
+This file contains a minimal set of tests for compliance with the extension
+array interface test suite, and should contain no other tests.
+The test suite for the full functionality of the array is located in
+`pandas/tests/arrays/`.
+
+The tests in this file are inherited from the BaseExtensionTests, and only
+minimal tweaks should be applied to get the tests passing (by overriding a
+parent method).
+
+Additional tests should either be added to one of the BaseExtensionTests
+classes (if they are relevant for the extension interface for all dtypes), or
+be added to the array-specific tests in `pandas/tests/arrays/`.
+ +""" +from __future__ import annotations + +import string +from typing import cast + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.api.types import is_string_dtype +from pandas.core.arrays import ArrowStringArray +from pandas.core.arrays.string_ import StringDtype +from pandas.tests.extension import base + + +def maybe_split_array(arr, chunked): + if not chunked: + return arr + elif arr.dtype.storage != "pyarrow": + return arr + + pa = pytest.importorskip("pyarrow") + + arrow_array = arr._pa_array + split = len(arrow_array) // 2 + arrow_array = pa.chunked_array( + [*arrow_array[:split].chunks, *arrow_array[split:].chunks] + ) + assert arrow_array.num_chunks == 2 + return type(arr)(arrow_array) + + +@pytest.fixture(params=[True, False]) +def chunked(request): + return request.param + + +@pytest.fixture +def dtype(string_storage): + return StringDtype(storage=string_storage) + + +@pytest.fixture +def data(dtype, chunked): + strings = np.random.default_rng(2).choice(list(string.ascii_letters), size=100) + while strings[0] == strings[1]: + strings = np.random.default_rng(2).choice(list(string.ascii_letters), size=100) + + arr = dtype.construct_array_type()._from_sequence(strings, dtype=dtype) + return maybe_split_array(arr, chunked) + + +@pytest.fixture +def data_missing(dtype, chunked): + """Length 2 array with [NA, Valid]""" + arr = dtype.construct_array_type()._from_sequence([pd.NA, "A"], dtype=dtype) + return maybe_split_array(arr, chunked) + + +@pytest.fixture +def data_for_sorting(dtype, chunked): + arr = dtype.construct_array_type()._from_sequence(["B", "C", "A"], dtype=dtype) + return maybe_split_array(arr, chunked) + + +@pytest.fixture +def data_missing_for_sorting(dtype, chunked): + arr = dtype.construct_array_type()._from_sequence(["B", pd.NA, "A"], dtype=dtype) + return maybe_split_array(arr, chunked) + + +@pytest.fixture +def data_for_grouping(dtype, chunked): + arr = dtype.construct_array_type()._from_sequence( + ["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"], dtype=dtype + ) + return maybe_split_array(arr, chunked) + + +class TestStringArray(base.ExtensionTests): + def test_eq_with_str(self, dtype): + assert dtype == f"string[{dtype.storage}]" + super().test_eq_with_str(dtype) + + def test_is_not_string_type(self, dtype): + # Different from BaseDtypeTests.test_is_not_string_type + # because StringDtype is a string type + assert is_string_dtype(dtype) + + def test_view(self, data, request, arrow_string_storage): + if data.dtype.storage in arrow_string_storage: + pytest.skip(reason="2D support not implemented for ArrowStringArray") + super().test_view(data) + + def test_from_dtype(self, data): + # base test uses string representation of dtype + pass + + def test_transpose(self, data, request, arrow_string_storage): + if data.dtype.storage in arrow_string_storage: + pytest.skip(reason="2D support not implemented for ArrowStringArray") + super().test_transpose(data) + + def test_setitem_preserves_views(self, data, request, arrow_string_storage): + if data.dtype.storage in arrow_string_storage: + pytest.skip(reason="2D support not implemented for ArrowStringArray") + super().test_setitem_preserves_views(data) + + def test_dropna_array(self, data_missing): + result = data_missing.dropna() + expected = data_missing[[1]] + tm.assert_extension_array_equal(result, expected) + + def test_fillna_no_op_returns_copy(self, data): + data = data[~data.isna()] + + valid = data[0] + result = data.fillna(valid) + assert result is not data + 
tm.assert_extension_array_equal(result, data) + + result = data.fillna(method="backfill") + assert result is not data + tm.assert_extension_array_equal(result, data) + + def _get_expected_exception( + self, op_name: str, obj, other + ) -> type[Exception] | None: + if op_name in ["__divmod__", "__rdivmod__"]: + if isinstance(obj, pd.Series) and cast( + StringDtype, tm.get_dtype(obj) + ).storage in [ + "pyarrow", + "pyarrow_numpy", + ]: + # TODO: re-raise as TypeError? + return NotImplementedError + elif isinstance(other, pd.Series) and cast( + StringDtype, tm.get_dtype(other) + ).storage in [ + "pyarrow", + "pyarrow_numpy", + ]: + # TODO: re-raise as TypeError? + return NotImplementedError + return TypeError + elif op_name in ["__mod__", "__rmod__", "__pow__", "__rpow__"]: + if cast(StringDtype, tm.get_dtype(obj)).storage in [ + "pyarrow", + "pyarrow_numpy", + ]: + return NotImplementedError + return TypeError + elif op_name in ["__mul__", "__rmul__"]: + # Can only multiply strings by integers + return TypeError + elif op_name in [ + "__truediv__", + "__rtruediv__", + "__floordiv__", + "__rfloordiv__", + "__sub__", + "__rsub__", + ]: + if cast(StringDtype, tm.get_dtype(obj)).storage in [ + "pyarrow", + "pyarrow_numpy", + ]: + import pyarrow as pa + + # TODO: better to re-raise as TypeError? + return pa.ArrowNotImplementedError + return TypeError + + return None + + def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool: + return ( + op_name in ["min", "max"] + or ser.dtype.storage == "pyarrow_numpy" # type: ignore[union-attr] + and op_name in ("any", "all") + ) + + def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result): + dtype = cast(StringDtype, tm.get_dtype(obj)) + if op_name in ["__add__", "__radd__"]: + cast_to = dtype + elif dtype.storage == "pyarrow": + cast_to = "boolean[pyarrow]" # type: ignore[assignment] + elif dtype.storage == "pyarrow_numpy": + cast_to = np.bool_ # type: ignore[assignment] + else: + cast_to = "boolean" # type: ignore[assignment] + return pointwise_result.astype(cast_to) + + def test_compare_scalar(self, data, comparison_op): + ser = pd.Series(data) + self._compare_other(ser, data, comparison_op, "abc") + + @pytest.mark.filterwarnings("ignore:Falling back:pandas.errors.PerformanceWarning") + def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op): + super().test_groupby_extension_apply(data_for_grouping, groupby_apply_op) + + +class Test2DCompat(base.Dim2CompatTests): + @pytest.fixture(autouse=True) + def arrow_not_supported(self, data): + if isinstance(data, ArrowStringArray): + pytest.skip(reason="2D support not implemented for ArrowStringArray") + + +def test_searchsorted_with_na_raises(data_for_sorting, as_series): + # GH50447 + b, c, a = data_for_sorting + arr = data_for_sorting.take([2, 0, 1]) # to get [a, b, c] + arr[-1] = pd.NA + + if as_series: + arr = pd.Series(arr) + + msg = ( + "searchsorted requires array to be sorted, " + "which is impossible with NAs present." 
+ ) + with pytest.raises(ValueError, match=msg): + arr.searchsorted(b) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5604b04ef35cbc95fdf4946c60ae6c61197523e6 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/__init__.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_impl.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_impl.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e2df68823b853b92667a8aa009b56bd50cbbeda Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_impl.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_spec_conformance.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_spec_conformance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d6ed583d2200fb7f1160d15fb9c80444ab3ec5b Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_spec_conformance.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_utils.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..886b2dd2317dde8efc012b8bbc317f8e18bdec16 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_utils.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/test_impl.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/test_impl.py new file mode 100644 index 0000000000000000000000000000000000000000..25418b8bb2b37d3241ffe0d066f8877db80dded5 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/test_impl.py @@ -0,0 +1,604 @@ +from datetime import ( + datetime, + timezone, +) + +import numpy as np +import pytest + +from pandas._libs.tslibs import iNaT +from pandas.compat import ( + is_ci_environment, + is_platform_windows, +) +from pandas.compat.numpy import np_version_lt1p23 + +import pandas as pd +import pandas._testing as tm +from pandas.core.interchange.column import PandasColumn +from pandas.core.interchange.dataframe_protocol import ( + ColumnNullType, + DtypeKind, +) +from pandas.core.interchange.from_dataframe import from_dataframe +from pandas.core.interchange.utils import ArrowCTypes + + +@pytest.fixture +def data_categorical(): + return { + "ordered": pd.Categorical(list("testdata") * 30, ordered=True), + "unordered": pd.Categorical(list("testdata") * 30, ordered=False), + } + + +@pytest.fixture +def string_data(): + return { + "separator data": [ + "abC|DeF,Hik", + 
"234,3245.67", + "gSaf,qWer|Gre", + "asd3,4sad|", + np.nan, + ] + } + + +@pytest.mark.parametrize("data", [("ordered", True), ("unordered", False)]) +def test_categorical_dtype(data, data_categorical): + df = pd.DataFrame({"A": (data_categorical[data[0]])}) + + col = df.__dataframe__().get_column_by_name("A") + assert col.dtype[0] == DtypeKind.CATEGORICAL + assert col.null_count == 0 + assert col.describe_null == (ColumnNullType.USE_SENTINEL, -1) + assert col.num_chunks() == 1 + desc_cat = col.describe_categorical + assert desc_cat["is_ordered"] == data[1] + assert desc_cat["is_dictionary"] is True + assert isinstance(desc_cat["categories"], PandasColumn) + tm.assert_series_equal( + desc_cat["categories"]._col, pd.Series(["a", "d", "e", "s", "t"]) + ) + + tm.assert_frame_equal(df, from_dataframe(df.__dataframe__())) + + +def test_categorical_pyarrow(): + # GH 49889 + pa = pytest.importorskip("pyarrow", "11.0.0") + + arr = ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", "Sun"] + table = pa.table({"weekday": pa.array(arr).dictionary_encode()}) + exchange_df = table.__dataframe__() + result = from_dataframe(exchange_df) + weekday = pd.Categorical( + arr, categories=["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"] + ) + expected = pd.DataFrame({"weekday": weekday}) + tm.assert_frame_equal(result, expected) + + +def test_empty_categorical_pyarrow(): + # https://github.com/pandas-dev/pandas/issues/53077 + pa = pytest.importorskip("pyarrow", "11.0.0") + + arr = [None] + table = pa.table({"arr": pa.array(arr, "float64").dictionary_encode()}) + exchange_df = table.__dataframe__() + result = pd.api.interchange.from_dataframe(exchange_df) + expected = pd.DataFrame({"arr": pd.Categorical([np.nan])}) + tm.assert_frame_equal(result, expected) + + +def test_large_string_pyarrow(): + # GH 52795 + pa = pytest.importorskip("pyarrow", "11.0.0") + + arr = ["Mon", "Tue"] + table = pa.table({"weekday": pa.array(arr, "large_string")}) + exchange_df = table.__dataframe__() + result = from_dataframe(exchange_df) + expected = pd.DataFrame({"weekday": ["Mon", "Tue"]}) + tm.assert_frame_equal(result, expected) + + # check round-trip + assert pa.Table.equals(pa.interchange.from_dataframe(result), table) + + +@pytest.mark.parametrize( + ("offset", "length", "expected_values"), + [ + (0, None, [3.3, float("nan"), 2.1]), + (1, None, [float("nan"), 2.1]), + (2, None, [2.1]), + (0, 2, [3.3, float("nan")]), + (0, 1, [3.3]), + (1, 1, [float("nan")]), + ], +) +def test_bitmasks_pyarrow(offset, length, expected_values): + # GH 52795 + pa = pytest.importorskip("pyarrow", "11.0.0") + + arr = [3.3, None, 2.1] + table = pa.table({"arr": arr}).slice(offset, length) + exchange_df = table.__dataframe__() + result = from_dataframe(exchange_df) + expected = pd.DataFrame({"arr": expected_values}) + tm.assert_frame_equal(result, expected) + + # check round-trip + assert pa.Table.equals(pa.interchange.from_dataframe(result), table) + + +@pytest.mark.parametrize( + "data", + [ + lambda: np.random.default_rng(2).integers(-100, 100), + lambda: np.random.default_rng(2).integers(1, 100), + lambda: np.random.default_rng(2).random(), + lambda: np.random.default_rng(2).choice([True, False]), + lambda: datetime( + year=np.random.default_rng(2).integers(1900, 2100), + month=np.random.default_rng(2).integers(1, 12), + day=np.random.default_rng(2).integers(1, 20), + ), + ], +) +def test_dataframe(data): + NCOLS, NROWS = 10, 20 + data = { + f"col{int((i - NCOLS / 2) % NCOLS + 1)}": [data() for _ in range(NROWS)] + for i in range(NCOLS) + } + 
df = pd.DataFrame(data) + + df2 = df.__dataframe__() + + assert df2.num_columns() == NCOLS + assert df2.num_rows() == NROWS + + assert list(df2.column_names()) == list(data.keys()) + + indices = (0, 2) + names = tuple(list(data.keys())[idx] for idx in indices) + + result = from_dataframe(df2.select_columns(indices)) + expected = from_dataframe(df2.select_columns_by_name(names)) + tm.assert_frame_equal(result, expected) + + assert isinstance(result.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"], list) + assert isinstance(expected.attrs["_INTERCHANGE_PROTOCOL_BUFFERS"], list) + + +def test_missing_from_masked(): + df = pd.DataFrame( + { + "x": np.array([1.0, 2.0, 3.0, 4.0, 0.0]), + "y": np.array([1.5, 2.5, 3.5, 4.5, 0]), + "z": np.array([1.0, 0.0, 1.0, 1.0, 1.0]), + } + ) + + rng = np.random.default_rng(2) + dict_null = {col: rng.integers(low=0, high=len(df)) for col in df.columns} + for col, num_nulls in dict_null.items(): + null_idx = df.index[ + rng.choice(np.arange(len(df)), size=num_nulls, replace=False) + ] + df.loc[null_idx, col] = None + + df2 = df.__dataframe__() + + assert df2.get_column_by_name("x").null_count == dict_null["x"] + assert df2.get_column_by_name("y").null_count == dict_null["y"] + assert df2.get_column_by_name("z").null_count == dict_null["z"] + + +@pytest.mark.parametrize( + "data", + [ + {"x": [1.5, 2.5, 3.5], "y": [9.2, 10.5, 11.8]}, + {"x": [1, 2, 0], "y": [9.2, 10.5, 11.8]}, + { + "x": np.array([True, True, False]), + "y": np.array([1, 2, 0]), + "z": np.array([9.2, 10.5, 11.8]), + }, + ], +) +def test_mixed_data(data): + df = pd.DataFrame(data) + df2 = df.__dataframe__() + + for col_name in df.columns: + assert df2.get_column_by_name(col_name).null_count == 0 + + +def test_mixed_missing(): + df = pd.DataFrame( + { + "x": np.array([True, None, False, None, True]), + "y": np.array([None, 2, None, 1, 2]), + "z": np.array([9.2, 10.5, None, 11.8, None]), + } + ) + + df2 = df.__dataframe__() + + for col_name in df.columns: + assert df2.get_column_by_name(col_name).null_count == 2 + + +def test_string(string_data): + test_str_data = string_data["separator data"] + [""] + df = pd.DataFrame({"A": test_str_data}) + col = df.__dataframe__().get_column_by_name("A") + + assert col.size() == 6 + assert col.null_count == 1 + assert col.dtype[0] == DtypeKind.STRING + assert col.describe_null == (ColumnNullType.USE_BYTEMASK, 0) + + df_sliced = df[1:] + col = df_sliced.__dataframe__().get_column_by_name("A") + assert col.size() == 5 + assert col.null_count == 1 + assert col.dtype[0] == DtypeKind.STRING + assert col.describe_null == (ColumnNullType.USE_BYTEMASK, 0) + + +def test_nonstring_object(): + df = pd.DataFrame({"A": ["a", 10, 1.0, ()]}) + col = df.__dataframe__().get_column_by_name("A") + with pytest.raises(NotImplementedError, match="not supported yet"): + col.dtype + + +def test_datetime(): + df = pd.DataFrame({"A": [pd.Timestamp("2022-01-01"), pd.NaT]}) + col = df.__dataframe__().get_column_by_name("A") + + assert col.size() == 2 + assert col.null_count == 1 + assert col.dtype[0] == DtypeKind.DATETIME + assert col.describe_null == (ColumnNullType.USE_SENTINEL, iNaT) + + tm.assert_frame_equal(df, from_dataframe(df.__dataframe__())) + + +@pytest.mark.skipif(np_version_lt1p23, reason="Numpy > 1.23 required") +def test_categorical_to_numpy_dlpack(): + # https://github.com/pandas-dev/pandas/issues/48393 + df = pd.DataFrame({"A": pd.Categorical(["a", "b", "a"])}) + col = df.__dataframe__().get_column_by_name("A") + result = np.from_dlpack(col.get_buffers()["data"][0]) + expected = 
np.array([0, 1, 0], dtype="int8") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("data", [{}, {"a": []}]) +def test_empty_pyarrow(data): + # GH 53155 + pytest.importorskip("pyarrow", "11.0.0") + from pyarrow.interchange import from_dataframe as pa_from_dataframe + + expected = pd.DataFrame(data) + arrow_df = pa_from_dataframe(expected) + result = from_dataframe(arrow_df) + tm.assert_frame_equal(result, expected) + + +def test_multi_chunk_pyarrow() -> None: + pa = pytest.importorskip("pyarrow", "11.0.0") + n_legs = pa.chunked_array([[2, 2, 4], [4, 5, 100]]) + names = ["n_legs"] + table = pa.table([n_legs], names=names) + with pytest.raises( + RuntimeError, + match="To join chunks a copy is required which is " + "forbidden by allow_copy=False", + ): + pd.api.interchange.from_dataframe(table, allow_copy=False) + + +def test_multi_chunk_column() -> None: + pytest.importorskip("pyarrow", "11.0.0") + ser = pd.Series([1, 2, None], dtype="Int64[pyarrow]") + df = pd.concat([ser, ser], ignore_index=True).to_frame("a") + df_orig = df.copy() + with pytest.raises( + RuntimeError, match="Found multi-chunk pyarrow array, but `allow_copy` is False" + ): + pd.api.interchange.from_dataframe(df.__dataframe__(allow_copy=False)) + result = pd.api.interchange.from_dataframe(df.__dataframe__(allow_copy=True)) + # Interchange protocol defaults to creating numpy-backed columns, so currently this + # is 'float64'. + expected = pd.DataFrame({"a": [1.0, 2.0, None, 1.0, 2.0, None]}, dtype="float64") + tm.assert_frame_equal(result, expected) + + # Check that the rechunking we did didn't modify the original DataFrame. + tm.assert_frame_equal(df, df_orig) + assert len(df["a"].array._pa_array.chunks) == 2 + assert len(df_orig["a"].array._pa_array.chunks) == 2 + + +def test_timestamp_ns_pyarrow(): + # GH 56712 + pytest.importorskip("pyarrow", "11.0.0") + timestamp_args = { + "year": 2000, + "month": 1, + "day": 1, + "hour": 1, + "minute": 1, + "second": 1, + } + df = pd.Series( + [datetime(**timestamp_args)], + dtype="timestamp[ns][pyarrow]", + name="col0", + ).to_frame() + + dfi = df.__dataframe__() + result = pd.api.interchange.from_dataframe(dfi)["col0"].item() + + expected = pd.Timestamp(**timestamp_args) + assert result == expected + + +@pytest.mark.parametrize("tz", ["UTC", "US/Pacific"]) +@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) +def test_datetimetzdtype(tz, unit): + # GH 54239 + tz_data = ( + pd.date_range("2018-01-01", periods=5, freq="D").tz_localize(tz).as_unit(unit) + ) + df = pd.DataFrame({"ts_tz": tz_data}) + tm.assert_frame_equal(df, from_dataframe(df.__dataframe__())) + + +def test_interchange_from_non_pandas_tz_aware(request): + # GH 54239, 54287 + pa = pytest.importorskip("pyarrow", "11.0.0") + import pyarrow.compute as pc + + if is_platform_windows() and is_ci_environment(): + mark = pytest.mark.xfail( + raises=pa.ArrowInvalid, + reason=( + "TODO: Set ARROW_TIMEZONE_DATABASE environment variable " + "on CI to path to the tzdata for pyarrow." 
+ ), + ) + request.applymarker(mark) + + arr = pa.array([datetime(2020, 1, 1), None, datetime(2020, 1, 2)]) + arr = pc.assume_timezone(arr, "Asia/Kathmandu") + table = pa.table({"arr": arr}) + exchange_df = table.__dataframe__() + result = from_dataframe(exchange_df) + + expected = pd.DataFrame( + ["2020-01-01 00:00:00+05:45", "NaT", "2020-01-02 00:00:00+05:45"], + columns=["arr"], + dtype="datetime64[us, Asia/Kathmandu]", + ) + tm.assert_frame_equal(expected, result) + + +def test_interchange_from_corrected_buffer_dtypes(monkeypatch) -> None: + # https://github.com/pandas-dev/pandas/issues/54781 + df = pd.DataFrame({"a": ["foo", "bar"]}).__dataframe__() + interchange = df.__dataframe__() + column = interchange.get_column_by_name("a") + buffers = column.get_buffers() + buffers_data = buffers["data"] + buffer_dtype = buffers_data[1] + buffer_dtype = ( + DtypeKind.UINT, + 8, + ArrowCTypes.UINT8, + buffer_dtype[3], + ) + buffers["data"] = (buffers_data[0], buffer_dtype) + column.get_buffers = lambda: buffers + interchange.get_column_by_name = lambda _: column + monkeypatch.setattr(df, "__dataframe__", lambda allow_copy: interchange) + pd.api.interchange.from_dataframe(df) + + +def test_empty_string_column(): + # https://github.com/pandas-dev/pandas/issues/56703 + df = pd.DataFrame({"a": []}, dtype=str) + df2 = df.__dataframe__() + result = pd.api.interchange.from_dataframe(df2) + tm.assert_frame_equal(df, result) + + +def test_large_string(): + # GH#56702 + pytest.importorskip("pyarrow") + df = pd.DataFrame({"a": ["x"]}, dtype="large_string[pyarrow]") + result = pd.api.interchange.from_dataframe(df.__dataframe__()) + expected = pd.DataFrame({"a": ["x"]}, dtype="object") + tm.assert_frame_equal(result, expected) + + +def test_non_str_names(): + # https://github.com/pandas-dev/pandas/issues/56701 + df = pd.Series([1, 2, 3], name=0).to_frame() + names = df.__dataframe__().column_names() + assert names == ["0"] + + +def test_non_str_names_w_duplicates(): + # https://github.com/pandas-dev/pandas/issues/56701 + df = pd.DataFrame({"0": [1, 2, 3], 0: [4, 5, 6]}) + dfi = df.__dataframe__() + with pytest.raises( + TypeError, + match=( + "Expected a Series, got a DataFrame. This likely happened because you " + "called __dataframe__ on a DataFrame which, after converting column " + r"names to string, resulted in duplicated names: Index\(\['0', '0'\], " + r"dtype='object'\). Please rename these columns before using the " + "interchange protocol." 
+ ), + ): + pd.api.interchange.from_dataframe(dfi, allow_copy=False) + + +@pytest.mark.parametrize( + ("data", "dtype", "expected_dtype"), + [ + ([1, 2, None], "Int64", "int64"), + ([1, 2, None], "Int64[pyarrow]", "int64"), + ([1, 2, None], "Int8", "int8"), + ([1, 2, None], "Int8[pyarrow]", "int8"), + ( + [1, 2, None], + "UInt64", + "uint64", + ), + ( + [1, 2, None], + "UInt64[pyarrow]", + "uint64", + ), + ([1.0, 2.25, None], "Float32", "float32"), + ([1.0, 2.25, None], "Float32[pyarrow]", "float32"), + ([True, False, None], "boolean", "bool"), + ([True, False, None], "boolean[pyarrow]", "bool"), + (["much ado", "about", None], "string[pyarrow_numpy]", "large_string"), + (["much ado", "about", None], "string[pyarrow]", "large_string"), + ( + [datetime(2020, 1, 1), datetime(2020, 1, 2), None], + "timestamp[ns][pyarrow]", + "timestamp[ns]", + ), + ( + [datetime(2020, 1, 1), datetime(2020, 1, 2), None], + "timestamp[us][pyarrow]", + "timestamp[us]", + ), + ( + [ + datetime(2020, 1, 1, tzinfo=timezone.utc), + datetime(2020, 1, 2, tzinfo=timezone.utc), + None, + ], + "timestamp[us, Asia/Kathmandu][pyarrow]", + "timestamp[us, tz=Asia/Kathmandu]", + ), + ], +) +def test_pandas_nullable_with_missing_values( + data: list, dtype: str, expected_dtype: str +) -> None: + # https://github.com/pandas-dev/pandas/issues/57643 + # https://github.com/pandas-dev/pandas/issues/57664 + pa = pytest.importorskip("pyarrow", "11.0.0") + import pyarrow.interchange as pai + + if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]": + expected_dtype = pa.timestamp("us", "Asia/Kathmandu") + + df = pd.DataFrame({"a": data}, dtype=dtype) + result = pai.from_dataframe(df.__dataframe__())["a"] + assert result.type == expected_dtype + assert result[0].as_py() == data[0] + assert result[1].as_py() == data[1] + assert result[2].as_py() is None + + +@pytest.mark.parametrize( + ("data", "dtype", "expected_dtype"), + [ + ([1, 2, 3], "Int64", "int64"), + ([1, 2, 3], "Int64[pyarrow]", "int64"), + ([1, 2, 3], "Int8", "int8"), + ([1, 2, 3], "Int8[pyarrow]", "int8"), + ( + [1, 2, 3], + "UInt64", + "uint64", + ), + ( + [1, 2, 3], + "UInt64[pyarrow]", + "uint64", + ), + ([1.0, 2.25, 5.0], "Float32", "float32"), + ([1.0, 2.25, 5.0], "Float32[pyarrow]", "float32"), + ([True, False, False], "boolean", "bool"), + ([True, False, False], "boolean[pyarrow]", "bool"), + (["much ado", "about", "nothing"], "string[pyarrow_numpy]", "large_string"), + (["much ado", "about", "nothing"], "string[pyarrow]", "large_string"), + ( + [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)], + "timestamp[ns][pyarrow]", + "timestamp[ns]", + ), + ( + [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)], + "timestamp[us][pyarrow]", + "timestamp[us]", + ), + ( + [ + datetime(2020, 1, 1, tzinfo=timezone.utc), + datetime(2020, 1, 2, tzinfo=timezone.utc), + datetime(2020, 1, 3, tzinfo=timezone.utc), + ], + "timestamp[us, Asia/Kathmandu][pyarrow]", + "timestamp[us, tz=Asia/Kathmandu]", + ), + ], +) +def test_pandas_nullable_without_missing_values( + data: list, dtype: str, expected_dtype: str +) -> None: + # https://github.com/pandas-dev/pandas/issues/57643 + pa = pytest.importorskip("pyarrow", "11.0.0") + import pyarrow.interchange as pai + + if expected_dtype == "timestamp[us, tz=Asia/Kathmandu]": + expected_dtype = pa.timestamp("us", "Asia/Kathmandu") + + df = pd.DataFrame({"a": data}, dtype=dtype) + result = pai.from_dataframe(df.__dataframe__())["a"] + assert result.type == expected_dtype + assert result[0].as_py() == data[0] + assert 
result[1].as_py() == data[1] + assert result[2].as_py() == data[2] + + +def test_string_validity_buffer() -> None: + # https://github.com/pandas-dev/pandas/issues/57761 + pytest.importorskip("pyarrow", "11.0.0") + df = pd.DataFrame({"a": ["x"]}, dtype="large_string[pyarrow]") + result = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"] + assert result is None + + +def test_string_validity_buffer_no_missing() -> None: + # https://github.com/pandas-dev/pandas/issues/57762 + pytest.importorskip("pyarrow", "11.0.0") + df = pd.DataFrame({"a": ["x", None]}, dtype="large_string[pyarrow]") + validity = df.__dataframe__().get_column_by_name("a").get_buffers()["validity"] + assert validity is not None + result = validity[1] + expected = (DtypeKind.BOOL, 1, ArrowCTypes.BOOL, "=") + assert result == expected + + +def test_empty_dataframe(): + # https://github.com/pandas-dev/pandas/issues/56700 + df = pd.DataFrame({"a": []}, dtype="int8") + dfi = df.__dataframe__() + result = pd.api.interchange.from_dataframe(dfi, allow_copy=False) + expected = pd.DataFrame({"a": []}, dtype="int8") + tm.assert_frame_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/test_spec_conformance.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/test_spec_conformance.py new file mode 100644 index 0000000000000000000000000000000000000000..7c02379c118539032cb79d682d4baa2c7ae1fb81 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/test_spec_conformance.py @@ -0,0 +1,175 @@ +""" +A verbatim copy (vendored) of the spec tests. +Taken from https://github.com/data-apis/dataframe-api +""" +import ctypes +import math + +import pytest + +import pandas as pd + + +@pytest.fixture +def df_from_dict(): + def maker(dct, is_categorical=False): + df = pd.DataFrame(dct) + return df.astype("category") if is_categorical else df + + return maker + + +@pytest.mark.parametrize( + "test_data", + [ + {"a": ["foo", "bar"], "b": ["baz", "qux"]}, + {"a": [1.5, 2.5, 3.5], "b": [9.2, 10.5, 11.8]}, + {"A": [1, 2, 3, 4], "B": [1, 2, 3, 4]}, + ], + ids=["str_data", "float_data", "int_data"], +) +def test_only_one_dtype(test_data, df_from_dict): + columns = list(test_data.keys()) + df = df_from_dict(test_data) + dfX = df.__dataframe__() + + column_size = len(test_data[columns[0]]) + for column in columns: + null_count = dfX.get_column_by_name(column).null_count + assert null_count == 0 + assert isinstance(null_count, int) + assert dfX.get_column_by_name(column).size() == column_size + assert dfX.get_column_by_name(column).offset == 0 + + +def test_mixed_dtypes(df_from_dict): + df = df_from_dict( + { + "a": [1, 2, 3], # dtype kind INT = 0 + "b": [3, 4, 5], # dtype kind INT = 0 + "c": [1.5, 2.5, 3.5], # dtype kind FLOAT = 2 + "d": [9, 10, 11], # dtype kind INT = 0 + "e": [True, False, True], # dtype kind BOOLEAN = 20 + "f": ["a", "", "c"], # dtype kind STRING = 21 + } + ) + dfX = df.__dataframe__() + # for meanings of dtype[0] see the spec; we cannot import the spec here as this + # file is expected to be vendored *anywhere*; + # values for dtype[0] are explained above + columns = {"a": 0, "b": 0, "c": 2, "d": 0, "e": 20, "f": 21} + + for column, kind in columns.items(): + colX = dfX.get_column_by_name(column) + assert colX.null_count == 0 + assert isinstance(colX.null_count, int) + assert colX.size() == 3 + assert colX.offset == 0 + + assert colX.dtype[0] == kind + + assert dfX.get_column_by_name("c").dtype[1] == 64 + + +def 
test_na_float(df_from_dict): + df = df_from_dict({"a": [1.0, math.nan, 2.0]}) + dfX = df.__dataframe__() + colX = dfX.get_column_by_name("a") + assert colX.null_count == 1 + assert isinstance(colX.null_count, int) + + +def test_noncategorical(df_from_dict): + df = df_from_dict({"a": [1, 2, 3]}) + dfX = df.__dataframe__() + colX = dfX.get_column_by_name("a") + with pytest.raises(TypeError, match=".*categorical.*"): + colX.describe_categorical + + +def test_categorical(df_from_dict): + df = df_from_dict( + {"weekday": ["Mon", "Tue", "Mon", "Wed", "Mon", "Thu", "Fri", "Sat", "Sun"]}, + is_categorical=True, + ) + + colX = df.__dataframe__().get_column_by_name("weekday") + categorical = colX.describe_categorical + assert isinstance(categorical["is_ordered"], bool) + assert isinstance(categorical["is_dictionary"], bool) + + +def test_dataframe(df_from_dict): + df = df_from_dict( + {"x": [True, True, False], "y": [1, 2, 0], "z": [9.2, 10.5, 11.8]} + ) + dfX = df.__dataframe__() + + assert dfX.num_columns() == 3 + assert dfX.num_rows() == 3 + assert dfX.num_chunks() == 1 + assert list(dfX.column_names()) == ["x", "y", "z"] + assert list(dfX.select_columns((0, 2)).column_names()) == list( + dfX.select_columns_by_name(("x", "z")).column_names() + ) + + +@pytest.mark.parametrize(["size", "n_chunks"], [(10, 3), (12, 3), (12, 5)]) +def test_df_get_chunks(size, n_chunks, df_from_dict): + df = df_from_dict({"x": list(range(size))}) + dfX = df.__dataframe__() + chunks = list(dfX.get_chunks(n_chunks)) + assert len(chunks) == n_chunks + assert sum(chunk.num_rows() for chunk in chunks) == size + + +@pytest.mark.parametrize(["size", "n_chunks"], [(10, 3), (12, 3), (12, 5)]) +def test_column_get_chunks(size, n_chunks, df_from_dict): + df = df_from_dict({"x": list(range(size))}) + dfX = df.__dataframe__() + chunks = list(dfX.get_column(0).get_chunks(n_chunks)) + assert len(chunks) == n_chunks + assert sum(chunk.size() for chunk in chunks) == size + + +def test_get_columns(df_from_dict): + df = df_from_dict({"a": [0, 1], "b": [2.5, 3.5]}) + dfX = df.__dataframe__() + for colX in dfX.get_columns(): + assert colX.size() == 2 + assert colX.num_chunks() == 1 + # for meanings of dtype[0] see the spec; we cannot import the spec here as this + # file is expected to be vendored *anywhere* + assert dfX.get_column(0).dtype[0] == 0 # INT + assert dfX.get_column(1).dtype[0] == 2 # FLOAT + + +def test_buffer(df_from_dict): + arr = [0, 1, -1] + df = df_from_dict({"a": arr}) + dfX = df.__dataframe__() + colX = dfX.get_column(0) + bufX = colX.get_buffers() + + dataBuf, dataDtype = bufX["data"] + + assert dataBuf.bufsize > 0 + assert dataBuf.ptr != 0 + device, _ = dataBuf.__dlpack_device__() + + # for meanings of dtype[0] see the spec; we cannot import the spec here as this + # file is expected to be vendored *anywhere* + assert dataDtype[0] == 0 # INT + + if device == 1: # CPU-only as we're going to directly read memory here + bitwidth = dataDtype[1] + ctype = { + 8: ctypes.c_int8, + 16: ctypes.c_int16, + 32: ctypes.c_int32, + 64: ctypes.c_int64, + }[bitwidth] + + for idx, truth in enumerate(arr): + val = ctype.from_address(dataBuf.ptr + idx * (bitwidth // 8)).value + assert val == truth, f"Buffer at index {idx} mismatch" diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/test_utils.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a47bc2752ff32f5eb7630a3960e7611242cb73e3 --- /dev/null +++ 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/test_utils.py
@@ -0,0 +1,89 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas.core.interchange.utils import dtype_to_arrow_c_fmt
+
+# TODO: use ArrowSchema to get the reference C-string.
+# At the time of writing, there is no way to access an ArrowSchema holding a
+# type format string from Python. The only way to access it is to export the
+# structure to a C-pointer; see the DataType._export_to_c() method defined in
+# https://github.com/apache/arrow/blob/master/python/pyarrow/types.pxi
+
+
+@pytest.mark.parametrize(
+    "pandas_dtype, c_string",
+    [
+        (np.dtype("bool"), "b"),
+        (np.dtype("int8"), "c"),
+        (np.dtype("uint8"), "C"),
+        (np.dtype("int16"), "s"),
+        (np.dtype("uint16"), "S"),
+        (np.dtype("int32"), "i"),
+        (np.dtype("uint32"), "I"),
+        (np.dtype("int64"), "l"),
+        (np.dtype("uint64"), "L"),
+        (np.dtype("float16"), "e"),
+        (np.dtype("float32"), "f"),
+        (np.dtype("float64"), "g"),
+        (pd.Series(["a"]).dtype, "u"),
+        (
+            pd.Series([0]).astype("datetime64[ns]").dtype,
+            "tsn:",
+        ),
+        (pd.CategoricalDtype(["a"]), "l"),
+        (np.dtype("O"), "u"),
+    ],
+)
+def test_dtype_to_arrow_c_fmt(pandas_dtype, c_string):  # PR01
+    """Test ``dtype_to_arrow_c_fmt`` utility function."""
+    assert dtype_to_arrow_c_fmt(pandas_dtype) == c_string
+
+
+@pytest.mark.parametrize(
+    "pa_dtype, args_kwargs, c_string",
+    [
+        ["null", {}, "n"],
+        ["bool_", {}, "b"],
+        ["uint8", {}, "C"],
+        ["uint16", {}, "S"],
+        ["uint32", {}, "I"],
+        ["uint64", {}, "L"],
+        ["int8", {}, "c"],
+        ["int16", {}, "s"],
+        ["int32", {}, "i"],
+        ["int64", {}, "l"],
+        ["float16", {}, "e"],
+        ["float32", {}, "f"],
+        ["float64", {}, "g"],
+        ["string", {}, "u"],
+        ["binary", {}, "z"],
+        ["time32", ("s",), "tts"],
+        ["time32", ("ms",), "ttm"],
+        ["time64", ("us",), "ttu"],
+        ["time64", ("ns",), "ttn"],
+        ["date32", {}, "tdD"],
+        ["date64", {}, "tdm"],
+        ["timestamp", {"unit": "s"}, "tss:"],
+        ["timestamp", {"unit": "ms"}, "tsm:"],
+        ["timestamp", {"unit": "us"}, "tsu:"],
+        ["timestamp", {"unit": "ns"}, "tsn:"],
+        ["timestamp", {"unit": "ns", "tz": "UTC"}, "tsn:UTC"],
+        ["duration", ("s",), "tDs"],
+        ["duration", ("ms",), "tDm"],
+        ["duration", ("us",), "tDu"],
+        ["duration", ("ns",), "tDn"],
+        ["decimal128", {"precision": 4, "scale": 2}, "d:4,2"],
+    ],
+)
+def test_dtype_to_arrow_c_fmt_arrowdtype(pa_dtype, args_kwargs, c_string):
+    # GH 52323
+    pa = pytest.importorskip("pyarrow")
+    if not args_kwargs:
+        pa_type = getattr(pa, pa_dtype)()
+    elif isinstance(args_kwargs, tuple):
+        pa_type = getattr(pa, pa_dtype)(*args_kwargs)
+    else:
+        pa_type = getattr(pa, pa_dtype)(**args_kwargs)
+    arrow_type = pd.ArrowDtype(pa_type)
+    assert dtype_to_arrow_c_fmt(arrow_type) == c_string
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__init__.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/__init__.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..210a98b1fbe120aea2116ef1c3604d77dd7aa85f
Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/__init__.cpython-310.pyc differ
diff --git
a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_datetime.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_datetime.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5cd9d9a18d7a3dba225aa451f64aca2b34fb78b4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_datetime.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_numeric.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_numeric.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..82a705bde7c6c56af8af7846c53c6e0d06a9a93e Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_numeric.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_time.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_time.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f1aa335950da85fd107bdf345f42691f9e6ac28 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_time.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_timedelta.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_timedelta.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7c749c3689781f29441bbae6448d935e049d39c Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/__pycache__/test_to_timedelta.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/test_to_datetime.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/test_to_datetime.py new file mode 100644 index 0000000000000000000000000000000000000000..a1ed996dade8e976607537e30c322d470ffcae2d --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/test_to_datetime.py @@ -0,0 +1,3900 @@ +""" test to_datetime """ + +import calendar +from collections import deque +from datetime import ( + date, + datetime, + timedelta, + timezone, +) +from decimal import Decimal +import locale + +from dateutil.parser import parse +from dateutil.tz.tz import tzoffset +import numpy as np +import pytest +import pytz + +from pandas._libs import tslib +from pandas._libs.tslibs import ( + iNaT, + parsing, +) +from pandas.errors import ( + OutOfBoundsDatetime, + OutOfBoundsTimedelta, +) +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import is_datetime64_ns_dtype + +import pandas as pd +from pandas import ( + DataFrame, + DatetimeIndex, + Index, + NaT, + Series, + Timestamp, + date_range, + isna, + to_datetime, +) +import pandas._testing as tm +from pandas.core.arrays import DatetimeArray +from pandas.core.tools import datetimes as tools +from pandas.core.tools.datetimes import start_caching_at + +PARSING_ERR_MSG = ( + r"You might want to try:\n" + r" - passing `format` if your strings have a consistent format;\n" + r" - passing `format=\'ISO8601\'` if your strings are all ISO8601 " + r"but not necessarily in exactly the same format;\n" + r" - passing `format=\'mixed\'`, and the format will be inferred " + r"for each element 
individually. You might want to use `dayfirst` "
+    r"alongside this."
+)
+
+pytestmark = pytest.mark.filterwarnings(
+    "ignore:errors='ignore' is deprecated:FutureWarning"
+)
+
+
+@pytest.fixture(params=[True, False])
+def cache(request):
+    """
+    cache keyword to pass to to_datetime.
+    """
+    return request.param
+
+
+class TestTimeConversionFormats:
+    @pytest.mark.parametrize("readonly", [True, False])
+    def test_to_datetime_readonly(self, readonly):
+        # GH#34857
+        arr = np.array([], dtype=object)
+        if readonly:
+            arr.setflags(write=False)
+        result = to_datetime(arr)
+        expected = to_datetime([])
+        tm.assert_index_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "format, expected",
+        [
+            [
+                "%d/%m/%Y",
+                [Timestamp("20000101"), Timestamp("20000201"), Timestamp("20000301")],
+            ],
+            [
+                "%m/%d/%Y",
+                [Timestamp("20000101"), Timestamp("20000102"), Timestamp("20000103")],
+            ],
+        ],
+    )
+    def test_to_datetime_format(self, cache, index_or_series, format, expected):
+        values = index_or_series(["1/1/2000", "1/2/2000", "1/3/2000"])
+        result = to_datetime(values, format=format, cache=cache)
+        expected = index_or_series(expected)
+        tm.assert_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "arg, expected, format",
+        [
+            ["1/1/2000", "20000101", "%d/%m/%Y"],
+            ["1/1/2000", "20000101", "%m/%d/%Y"],
+            ["1/2/2000", "20000201", "%d/%m/%Y"],
+            ["1/2/2000", "20000102", "%m/%d/%Y"],
+            ["1/3/2000", "20000301", "%d/%m/%Y"],
+            ["1/3/2000", "20000103", "%m/%d/%Y"],
+        ],
+    )
+    def test_to_datetime_format_scalar(self, cache, arg, expected, format):
+        result = to_datetime(arg, format=format, cache=cache)
+        expected = Timestamp(expected)
+        assert result == expected
+
+    def test_to_datetime_format_YYYYMMDD(self, cache):
+        ser = Series([19801222, 19801222] + [19810105] * 5)
+        expected = Series([Timestamp(x) for x in ser.apply(str)])
+
+        result = to_datetime(ser, format="%Y%m%d", cache=cache)
+        tm.assert_series_equal(result, expected)
+
+        result = to_datetime(ser.apply(str), format="%Y%m%d", cache=cache)
+        tm.assert_series_equal(result, expected)
+
+    def test_to_datetime_format_YYYYMMDD_with_nat(self, cache):
+        # Cast to float explicitly, since setting np.nan below would
+        # otherwise trigger an implicit cast
+        ser = Series([19801222, 19801222] + [19810105] * 5, dtype="float")
+        # with NaT
+        expected = Series(
+            [Timestamp("19801222"), Timestamp("19801222")] + [Timestamp("19810105")] * 5
+        )
+        expected[2] = np.nan
+        ser[2] = np.nan
+
+        result = to_datetime(ser, format="%Y%m%d", cache=cache)
+        tm.assert_series_equal(result, expected)
+
+        # string with NaT
+        ser2 = ser.apply(str)
+        ser2[2] = "nat"
+        with pytest.raises(
+            ValueError,
+            match=(
+                'unconverted data remains when parsing with format "%Y%m%d": ".0", '
+                "at position 0"
+            ),
+        ):
+            # https://github.com/pandas-dev/pandas/issues/50051
+            to_datetime(ser2, format="%Y%m%d", cache=cache)
+
+    def test_to_datetime_format_YYYYMM_with_nat(self, cache):
+        # https://github.com/pandas-dev/pandas/issues/50237
+        # Cast to float explicitly, since setting np.nan below would
+        # otherwise trigger an implicit cast
+        ser = Series([198012, 198012] + [198101] * 5, dtype="float")
+        expected = Series(
+            [Timestamp("19801201"), Timestamp("19801201")] + [Timestamp("19810101")] * 5
+        )
+        expected[2] = np.nan
+        ser[2] = np.nan
+        result = to_datetime(ser, format="%Y%m", cache=cache)
+        tm.assert_series_equal(result, expected)
+
+    def test_to_datetime_format_YYYYMMDD_ignore(self, cache):
+        # GH 7930, GH 14487
+        ser = Series([20121231, 20141231, 99991231])
+        result = to_datetime(ser, format="%Y%m%d", errors="ignore", cache=cache)
+        expected
= Series( + [20121231, 20141231, 99991231], + dtype=object, + ) + tm.assert_series_equal(result, expected) + + def test_to_datetime_format_YYYYMMDD_ignore_with_outofbounds(self, cache): + # https://github.com/pandas-dev/pandas/issues/26493 + result = to_datetime( + ["15010101", "20150101", np.nan], + format="%Y%m%d", + errors="ignore", + cache=cache, + ) + expected = Index(["15010101", "20150101", np.nan], dtype=object) + tm.assert_index_equal(result, expected) + + def test_to_datetime_format_YYYYMMDD_coercion(self, cache): + # coercion + # GH 7930 + ser = Series([20121231, 20141231, 99991231]) + result = to_datetime(ser, format="%Y%m%d", errors="coerce", cache=cache) + expected = Series(["20121231", "20141231", "NaT"], dtype="M8[ns]") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "input_s", + [ + # Null values with Strings + ["19801222", "20010112", None], + ["19801222", "20010112", np.nan], + ["19801222", "20010112", NaT], + ["19801222", "20010112", "NaT"], + # Null values with Integers + [19801222, 20010112, None], + [19801222, 20010112, np.nan], + [19801222, 20010112, NaT], + [19801222, 20010112, "NaT"], + ], + ) + def test_to_datetime_format_YYYYMMDD_with_none(self, input_s): + # GH 30011 + # format='%Y%m%d' + # with None + expected = Series([Timestamp("19801222"), Timestamp("20010112"), NaT]) + result = Series(to_datetime(input_s, format="%Y%m%d")) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "input_s, expected", + [ + # NaN before strings with invalid date values + [ + Series(["19801222", np.nan, "20010012", "10019999"]), + Series([Timestamp("19801222"), np.nan, np.nan, np.nan]), + ], + # NaN after strings with invalid date values + [ + Series(["19801222", "20010012", "10019999", np.nan]), + Series([Timestamp("19801222"), np.nan, np.nan, np.nan]), + ], + # NaN before integers with invalid date values + [ + Series([20190813, np.nan, 20010012, 20019999]), + Series([Timestamp("20190813"), np.nan, np.nan, np.nan]), + ], + # NaN after integers with invalid date values + [ + Series([20190813, 20010012, np.nan, 20019999]), + Series([Timestamp("20190813"), np.nan, np.nan, np.nan]), + ], + ], + ) + def test_to_datetime_format_YYYYMMDD_overflow(self, input_s, expected): + # GH 25512 + # format='%Y%m%d', errors='coerce' + result = to_datetime(input_s, format="%Y%m%d", errors="coerce") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "data, format, expected", + [ + ([pd.NA], "%Y%m%d%H%M%S", DatetimeIndex(["NaT"])), + ([pd.NA], None, DatetimeIndex(["NaT"])), + ( + [pd.NA, "20210202202020"], + "%Y%m%d%H%M%S", + DatetimeIndex(["NaT", "2021-02-02 20:20:20"]), + ), + (["201010", pd.NA], "%y%m%d", DatetimeIndex(["2020-10-10", "NaT"])), + (["201010", pd.NA], "%d%m%y", DatetimeIndex(["2010-10-20", "NaT"])), + ([None, np.nan, pd.NA], None, DatetimeIndex(["NaT", "NaT", "NaT"])), + ([None, np.nan, pd.NA], "%Y%m%d", DatetimeIndex(["NaT", "NaT", "NaT"])), + ], + ) + def test_to_datetime_with_NA(self, data, format, expected): + # GH#42957 + result = to_datetime(data, format=format) + tm.assert_index_equal(result, expected) + + def test_to_datetime_with_NA_with_warning(self): + # GH#42957 + result = to_datetime(["201010", pd.NA]) + expected = DatetimeIndex(["2010-10-20", "NaT"]) + tm.assert_index_equal(result, expected) + + def test_to_datetime_format_integer(self, cache): + # GH 10178 + ser = Series([2000, 2001, 2002]) + expected = Series([Timestamp(x) for x in ser.apply(str)]) + + result = to_datetime(ser, format="%Y", 
cache=cache) + tm.assert_series_equal(result, expected) + + ser = Series([200001, 200105, 200206]) + expected = Series([Timestamp(x[:4] + "-" + x[4:]) for x in ser.apply(str)]) + + result = to_datetime(ser, format="%Y%m", cache=cache) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "int_date, expected", + [ + # valid date, length == 8 + [20121030, datetime(2012, 10, 30)], + # short valid date, length == 6 + [199934, datetime(1999, 3, 4)], + # long integer date partially parsed to datetime(2012,1,1), length > 8 + [2012010101, 2012010101], + # invalid date partially parsed to datetime(2012,9,9), length == 8 + [20129930, 20129930], + # short integer date partially parsed to datetime(2012,9,9), length < 8 + [2012993, 2012993], + # short invalid date, length == 4 + [2121, 2121], + ], + ) + def test_int_to_datetime_format_YYYYMMDD_typeerror(self, int_date, expected): + # GH 26583 + result = to_datetime(int_date, format="%Y%m%d", errors="ignore") + assert result == expected + + def test_to_datetime_format_microsecond(self, cache): + month_abbr = calendar.month_abbr[4] + val = f"01-{month_abbr}-2011 00:00:01.978" + + format = "%d-%b-%Y %H:%M:%S.%f" + result = to_datetime(val, format=format, cache=cache) + exp = datetime.strptime(val, format) + assert result == exp + + @pytest.mark.parametrize( + "value, format, dt", + [ + ["01/10/2010 15:20", "%m/%d/%Y %H:%M", Timestamp("2010-01-10 15:20")], + ["01/10/2010 05:43", "%m/%d/%Y %I:%M", Timestamp("2010-01-10 05:43")], + [ + "01/10/2010 13:56:01", + "%m/%d/%Y %H:%M:%S", + Timestamp("2010-01-10 13:56:01"), + ], + # The 3 tests below are locale-dependent. + # They pass, except when the machine locale is zh_CN or it_IT . + pytest.param( + "01/10/2010 08:14 PM", + "%m/%d/%Y %I:%M %p", + Timestamp("2010-01-10 20:14"), + marks=pytest.mark.xfail( + locale.getlocale()[0] in ("zh_CN", "it_IT"), + reason="fail on a CI build with LC_ALL=zh_CN.utf8/it_IT.utf8", + strict=False, + ), + ), + pytest.param( + "01/10/2010 07:40 AM", + "%m/%d/%Y %I:%M %p", + Timestamp("2010-01-10 07:40"), + marks=pytest.mark.xfail( + locale.getlocale()[0] in ("zh_CN", "it_IT"), + reason="fail on a CI build with LC_ALL=zh_CN.utf8/it_IT.utf8", + strict=False, + ), + ), + pytest.param( + "01/10/2010 09:12:56 AM", + "%m/%d/%Y %I:%M:%S %p", + Timestamp("2010-01-10 09:12:56"), + marks=pytest.mark.xfail( + locale.getlocale()[0] in ("zh_CN", "it_IT"), + reason="fail on a CI build with LC_ALL=zh_CN.utf8/it_IT.utf8", + strict=False, + ), + ), + ], + ) + def test_to_datetime_format_time(self, cache, value, format, dt): + assert to_datetime(value, format=format, cache=cache) == dt + + @td.skip_if_not_us_locale + def test_to_datetime_with_non_exact(self, cache): + # GH 10834 + # 8904 + # exact kw + ser = Series( + ["19MAY11", "foobar19MAY11", "19MAY11:00:00:00", "19MAY11 00:00:00Z"] + ) + result = to_datetime(ser, format="%d%b%y", exact=False, cache=cache) + expected = to_datetime( + ser.str.extract(r"(\d+\w+\d+)", expand=False), format="%d%b%y", cache=cache + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "format, expected", + [ + ("%Y-%m-%d", Timestamp(2000, 1, 3)), + ("%Y-%d-%m", Timestamp(2000, 3, 1)), + ("%Y-%m-%d %H", Timestamp(2000, 1, 3, 12)), + ("%Y-%d-%m %H", Timestamp(2000, 3, 1, 12)), + ("%Y-%m-%d %H:%M", Timestamp(2000, 1, 3, 12, 34)), + ("%Y-%d-%m %H:%M", Timestamp(2000, 3, 1, 12, 34)), + ("%Y-%m-%d %H:%M:%S", Timestamp(2000, 1, 3, 12, 34, 56)), + ("%Y-%d-%m %H:%M:%S", Timestamp(2000, 3, 1, 12, 34, 56)), + ("%Y-%m-%d %H:%M:%S.%f", 
Timestamp(2000, 1, 3, 12, 34, 56, 123456)), + ("%Y-%d-%m %H:%M:%S.%f", Timestamp(2000, 3, 1, 12, 34, 56, 123456)), + ( + "%Y-%m-%d %H:%M:%S.%f%z", + Timestamp(2000, 1, 3, 12, 34, 56, 123456, tz="UTC+01:00"), + ), + ( + "%Y-%d-%m %H:%M:%S.%f%z", + Timestamp(2000, 3, 1, 12, 34, 56, 123456, tz="UTC+01:00"), + ), + ], + ) + def test_non_exact_doesnt_parse_whole_string(self, cache, format, expected): + # https://github.com/pandas-dev/pandas/issues/50412 + # the formats alternate between ISO8601 and non-ISO8601 to check both paths + result = to_datetime( + "2000-01-03 12:34:56.123456+01:00", format=format, exact=False + ) + assert result == expected + + @pytest.mark.parametrize( + "arg", + [ + "2012-01-01 09:00:00.000000001", + "2012-01-01 09:00:00.000001", + "2012-01-01 09:00:00.001", + "2012-01-01 09:00:00.001000", + "2012-01-01 09:00:00.001000000", + ], + ) + def test_parse_nanoseconds_with_formula(self, cache, arg): + # GH8989 + # truncating the nanoseconds when a format was provided + expected = to_datetime(arg, cache=cache) + result = to_datetime(arg, format="%Y-%m-%d %H:%M:%S.%f", cache=cache) + assert result == expected + + @pytest.mark.parametrize( + "value,fmt,expected", + [ + ["2009324", "%Y%W%w", Timestamp("2009-08-13")], + ["2013020", "%Y%U%w", Timestamp("2013-01-13")], + ], + ) + def test_to_datetime_format_weeks(self, value, fmt, expected, cache): + assert to_datetime(value, format=fmt, cache=cache) == expected + + @pytest.mark.parametrize( + "fmt,dates,expected_dates", + [ + [ + "%Y-%m-%d %H:%M:%S %Z", + ["2010-01-01 12:00:00 UTC"] * 2, + [Timestamp("2010-01-01 12:00:00", tz="UTC")] * 2, + ], + [ + "%Y-%m-%d %H:%M:%S%z", + ["2010-01-01 12:00:00+0100"] * 2, + [ + Timestamp( + "2010-01-01 12:00:00", tzinfo=timezone(timedelta(minutes=60)) + ) + ] + * 2, + ], + [ + "%Y-%m-%d %H:%M:%S %z", + ["2010-01-01 12:00:00 +0100"] * 2, + [ + Timestamp( + "2010-01-01 12:00:00", tzinfo=timezone(timedelta(minutes=60)) + ) + ] + * 2, + ], + [ + "%Y-%m-%d %H:%M:%S %z", + ["2010-01-01 12:00:00 Z", "2010-01-01 12:00:00 Z"], + [ + Timestamp( + "2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0) + ), # pytz coerces to UTC + Timestamp("2010-01-01 12:00:00", tzinfo=pytz.FixedOffset(0)), + ], + ], + ], + ) + def test_to_datetime_parse_tzname_or_tzoffset(self, fmt, dates, expected_dates): + # GH 13486 + result = to_datetime(dates, format=fmt) + expected = Index(expected_dates) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "fmt,dates,expected_dates", + [ + [ + "%Y-%m-%d %H:%M:%S %Z", + [ + "2010-01-01 12:00:00 UTC", + "2010-01-01 12:00:00 GMT", + "2010-01-01 12:00:00 US/Pacific", + ], + [ + Timestamp("2010-01-01 12:00:00", tz="UTC"), + Timestamp("2010-01-01 12:00:00", tz="GMT"), + Timestamp("2010-01-01 12:00:00", tz="US/Pacific"), + ], + ], + [ + "%Y-%m-%d %H:%M:%S %z", + ["2010-01-01 12:00:00 +0100", "2010-01-01 12:00:00 -0100"], + [ + Timestamp( + "2010-01-01 12:00:00", tzinfo=timezone(timedelta(minutes=60)) + ), + Timestamp( + "2010-01-01 12:00:00", tzinfo=timezone(timedelta(minutes=-60)) + ), + ], + ], + ], + ) + def test_to_datetime_parse_tzname_or_tzoffset_utc_false_deprecated( + self, fmt, dates, expected_dates + ): + # GH 13486, 50887 + msg = "parsing datetimes with mixed time zones will raise an error" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_datetime(dates, format=fmt) + expected = Index(expected_dates) + tm.assert_equal(result, expected) + + def test_to_datetime_parse_tzname_or_tzoffset_different_tz_to_utc(self): + # GH 32792 + dates = [ + 
"2010-01-01 12:00:00 +0100", + "2010-01-01 12:00:00 -0100", + "2010-01-01 12:00:00 +0300", + "2010-01-01 12:00:00 +0400", + ] + expected_dates = [ + "2010-01-01 11:00:00+00:00", + "2010-01-01 13:00:00+00:00", + "2010-01-01 09:00:00+00:00", + "2010-01-01 08:00:00+00:00", + ] + fmt = "%Y-%m-%d %H:%M:%S %z" + + result = to_datetime(dates, format=fmt, utc=True) + expected = DatetimeIndex(expected_dates) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "offset", ["+0", "-1foo", "UTCbar", ":10", "+01:000:01", ""] + ) + def test_to_datetime_parse_timezone_malformed(self, offset): + fmt = "%Y-%m-%d %H:%M:%S %z" + date = "2010-01-01 12:00:00 " + offset + + msg = "|".join( + [ + r'^time data ".*" doesn\'t match format ".*", at position 0. ' + f"{PARSING_ERR_MSG}$", + r'^unconverted data remains when parsing with format ".*": ".*", ' + f"at position 0. {PARSING_ERR_MSG}$", + ] + ) + with pytest.raises(ValueError, match=msg): + to_datetime([date], format=fmt) + + def test_to_datetime_parse_timezone_keeps_name(self): + # GH 21697 + fmt = "%Y-%m-%d %H:%M:%S %z" + arg = Index(["2010-01-01 12:00:00 Z"], name="foo") + result = to_datetime(arg, format=fmt) + expected = DatetimeIndex(["2010-01-01 12:00:00"], tz="UTC", name="foo") + tm.assert_index_equal(result, expected) + + +class TestToDatetime: + @pytest.mark.filterwarnings("ignore:Could not infer format") + def test_to_datetime_overflow(self): + # we should get an OutOfBoundsDatetime, NOT OverflowError + # TODO: Timestamp raises ValueError("could not convert string to Timestamp") + # can we make these more consistent? + arg = "08335394550" + msg = 'Parsing "08335394550" to datetime overflows, at position 0' + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(arg) + + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime([arg]) + + res = to_datetime(arg, errors="coerce") + assert res is NaT + res = to_datetime([arg], errors="coerce") + tm.assert_index_equal(res, Index([NaT])) + + res = to_datetime(arg, errors="ignore") + assert isinstance(res, str) and res == arg + res = to_datetime([arg], errors="ignore") + tm.assert_index_equal(res, Index([arg], dtype=object)) + + def test_to_datetime_mixed_datetime_and_string(self): + # GH#47018 adapted old doctest with new behavior + d1 = datetime(2020, 1, 1, 17, tzinfo=timezone(-timedelta(hours=1))) + d2 = datetime(2020, 1, 1, 18, tzinfo=timezone(-timedelta(hours=1))) + res = to_datetime(["2020-01-01 17:00 -0100", d2]) + expected = to_datetime([d1, d2]).tz_convert(timezone(timedelta(minutes=-60))) + tm.assert_index_equal(res, expected) + + def test_to_datetime_mixed_string_and_numeric(self): + # GH#55780 np.array(vals) would incorrectly cast the number to str + vals = ["2016-01-01", 0] + expected = DatetimeIndex([Timestamp(x) for x in vals]) + result = to_datetime(vals, format="mixed") + result2 = to_datetime(vals[::-1], format="mixed")[::-1] + result3 = DatetimeIndex(vals) + result4 = DatetimeIndex(vals[::-1])[::-1] + + tm.assert_index_equal(result, expected) + tm.assert_index_equal(result2, expected) + tm.assert_index_equal(result3, expected) + tm.assert_index_equal(result4, expected) + + @pytest.mark.parametrize( + "format", ["%Y-%m-%d", "%Y-%d-%m"], ids=["ISO8601", "non-ISO8601"] + ) + def test_to_datetime_mixed_date_and_string(self, format): + # https://github.com/pandas-dev/pandas/issues/50108 + d1 = date(2020, 1, 2) + res = to_datetime(["2020-01-01", d1], format=format) + expected = DatetimeIndex(["2020-01-01", "2020-01-02"], dtype="M8[ns]") + 
tm.assert_index_equal(res, expected) + + @pytest.mark.parametrize( + "fmt", + ["%Y-%d-%m %H:%M:%S%z", "%Y-%m-%d %H:%M:%S%z"], + ids=["non-ISO8601 format", "ISO8601 format"], + ) + @pytest.mark.parametrize( + "utc, args, expected", + [ + pytest.param( + True, + ["2000-01-01 01:00:00-08:00", "2000-01-01 02:00:00-08:00"], + DatetimeIndex( + ["2000-01-01 09:00:00+00:00", "2000-01-01 10:00:00+00:00"], + dtype="datetime64[ns, UTC]", + ), + id="all tz-aware, with utc", + ), + pytest.param( + False, + ["2000-01-01 01:00:00+00:00", "2000-01-01 02:00:00+00:00"], + DatetimeIndex( + ["2000-01-01 01:00:00+00:00", "2000-01-01 02:00:00+00:00"], + ), + id="all tz-aware, without utc", + ), + pytest.param( + True, + ["2000-01-01 01:00:00-08:00", "2000-01-01 02:00:00+00:00"], + DatetimeIndex( + ["2000-01-01 09:00:00+00:00", "2000-01-01 02:00:00+00:00"], + dtype="datetime64[ns, UTC]", + ), + id="all tz-aware, mixed offsets, with utc", + ), + pytest.param( + True, + ["2000-01-01 01:00:00", "2000-01-01 02:00:00+00:00"], + DatetimeIndex( + ["2000-01-01 01:00:00+00:00", "2000-01-01 02:00:00+00:00"], + dtype="datetime64[ns, UTC]", + ), + id="tz-aware string, naive pydatetime, with utc", + ), + ], + ) + @pytest.mark.parametrize( + "constructor", + [Timestamp, lambda x: Timestamp(x).to_pydatetime()], + ) + def test_to_datetime_mixed_datetime_and_string_with_format( + self, fmt, utc, args, expected, constructor + ): + # https://github.com/pandas-dev/pandas/issues/49298 + # https://github.com/pandas-dev/pandas/issues/50254 + # note: ISO8601 formats go down a fastpath, so we need to check both + # an ISO8601 format and a non-ISO8601 one + ts1 = constructor(args[0]) + ts2 = args[1] + result = to_datetime([ts1, ts2], format=fmt, utc=utc) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "fmt", + ["%Y-%d-%m %H:%M:%S%z", "%Y-%m-%d %H:%M:%S%z"], + ids=["non-ISO8601 format", "ISO8601 format"], + ) + @pytest.mark.parametrize( + "constructor", + [Timestamp, lambda x: Timestamp(x).to_pydatetime()], + ) + def test_to_datetime_mixed_datetime_and_string_with_format_mixed_offsets_utc_false( + self, fmt, constructor + ): + # https://github.com/pandas-dev/pandas/issues/49298 + # https://github.com/pandas-dev/pandas/issues/50254 + # note: ISO8601 formats go down a fastpath, so we need to check both + # an ISO8601 format and a non-ISO8601 one + args = ["2000-01-01 01:00:00", "2000-01-01 02:00:00+00:00"] + ts1 = constructor(args[0]) + ts2 = args[1] + msg = "parsing datetimes with mixed time zones will raise an error" + + expected = Index( + [ + Timestamp("2000-01-01 01:00:00"), + Timestamp("2000-01-01 02:00:00+0000", tz="UTC"), + ], + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_datetime([ts1, ts2], format=fmt, utc=False) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "fmt, expected", + [ + pytest.param( + "%Y-%m-%d %H:%M:%S%z", + Index( + [ + Timestamp("2000-01-01 09:00:00+0100", tz="UTC+01:00"), + Timestamp("2000-01-02 02:00:00+0200", tz="UTC+02:00"), + NaT, + ] + ), + id="ISO8601, non-UTC", + ), + pytest.param( + "%Y-%d-%m %H:%M:%S%z", + Index( + [ + Timestamp("2000-01-01 09:00:00+0100", tz="UTC+01:00"), + Timestamp("2000-02-01 02:00:00+0200", tz="UTC+02:00"), + NaT, + ] + ), + id="non-ISO8601, non-UTC", + ), + ], + ) + def test_to_datetime_mixed_offsets_with_none_tz(self, fmt, expected): + # https://github.com/pandas-dev/pandas/issues/50071 + msg = "parsing datetimes with mixed time zones will raise an error" + + with
tm.assert_produces_warning(FutureWarning, match=msg): + result = to_datetime( + ["2000-01-01 09:00:00+01:00", "2000-01-02 02:00:00+02:00", None], + format=fmt, + utc=False, + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "fmt, expected", + [ + pytest.param( + "%Y-%m-%d %H:%M:%S%z", + DatetimeIndex( + ["2000-01-01 08:00:00+00:00", "2000-01-02 00:00:00+00:00", "NaT"], + dtype="datetime64[ns, UTC]", + ), + id="ISO8601, UTC", + ), + pytest.param( + "%Y-%d-%m %H:%M:%S%z", + DatetimeIndex( + ["2000-01-01 08:00:00+00:00", "2000-02-01 00:00:00+00:00", "NaT"], + dtype="datetime64[ns, UTC]", + ), + id="non-ISO8601, UTC", + ), + ], + ) + def test_to_datetime_mixed_offsets_with_none(self, fmt, expected): + # https://github.com/pandas-dev/pandas/issues/50071 + result = to_datetime( + ["2000-01-01 09:00:00+01:00", "2000-01-02 02:00:00+02:00", None], + format=fmt, + utc=True, + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "fmt", + ["%Y-%d-%m %H:%M:%S%z", "%Y-%m-%d %H:%M:%S%z"], + ids=["non-ISO8601 format", "ISO8601 format"], + ) + @pytest.mark.parametrize( + "args", + [ + pytest.param( + ["2000-01-01 01:00:00-08:00", "2000-01-01 02:00:00-07:00"], + id="all tz-aware, mixed timezones, without utc", + ), + ], + ) + @pytest.mark.parametrize( + "constructor", + [Timestamp, lambda x: Timestamp(x).to_pydatetime()], + ) + def test_to_datetime_mixed_datetime_and_string_with_format_raises( + self, fmt, args, constructor + ): + # https://github.com/pandas-dev/pandas/issues/49298 + # note: ISO8601 formats go down a fastpath, so we need to check both + # an ISO8601 format and a non-ISO8601 one + ts1 = constructor(args[0]) + ts2 = constructor(args[1]) + with pytest.raises( + ValueError, match="cannot be converted to datetime64 unless utc=True" + ): + to_datetime([ts1, ts2], format=fmt, utc=False) + + def test_to_datetime_np_str(self): + # GH#32264 + # GH#48969 + value = np.str_("2019-02-04 10:18:46.297000+0000") + + ser = Series([value]) + + exp = Timestamp("2019-02-04 10:18:46.297000", tz="UTC") + + assert to_datetime(value) == exp + assert to_datetime(ser.iloc[0]) == exp + + res = to_datetime([value]) + expected = Index([exp]) + tm.assert_index_equal(res, expected) + + res = to_datetime(ser) + expected = Series(expected) + tm.assert_series_equal(res, expected) + + @pytest.mark.parametrize( + "s, _format, dt", + [ + ["2015-1-1", "%G-%V-%u", datetime(2014, 12, 29, 0, 0)], + ["2015-1-4", "%G-%V-%u", datetime(2015, 1, 1, 0, 0)], + ["2015-1-7", "%G-%V-%u", datetime(2015, 1, 4, 0, 0)], + ], + ) + def test_to_datetime_iso_week_year_format(self, s, _format, dt): + # See GH#16607 + assert to_datetime(s, format=_format) == dt + + @pytest.mark.parametrize( + "msg, s, _format", + [ + [ + "ISO week directive '%V' is incompatible with the year directive " + "'%Y'. Use the ISO year '%G' instead.", + "1999 50", + "%Y %V", + ], + [ + "ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 51", + "%G %V", + ], + [ + "ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 Monday", + "%G %A", + ], + [ + "ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 Mon", + "%G %a", + ], + [ + "ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 6", + "%G %w", + ], + [ + "ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 6", + "%G %u", + ], + [ + "ISO year directive '%G' must be used with the ISO week directive " + "'%V' and a weekday directive '%A', '%a', '%w', or '%u'.", + "2051", + "%G", + ], + [ + "Day of the year directive '%j' is not compatible with ISO year " + "directive '%G'. Use '%Y' instead.", + "1999 51 6 256", + "%G %V %u %j", + ], + [ + "ISO week directive '%V' is incompatible with the year directive " + "'%Y'. Use the ISO year '%G' instead.", + "1999 51 Sunday", + "%Y %V %A", + ], + [ + "ISO week directive '%V' is incompatible with the year directive " + "'%Y'. Use the ISO year '%G' instead.", + "1999 51 Sun", + "%Y %V %a", + ], + [ + "ISO week directive '%V' is incompatible with the year directive " + "'%Y'. Use the ISO year '%G' instead.", + "1999 51 1", + "%Y %V %w", + ], + [ + "ISO week directive '%V' is incompatible with the year directive " + "'%Y'. Use the ISO year '%G' instead.", + "1999 51 1", + "%Y %V %u", + ], + [ + "ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", + "20", + "%V", + ], + [ + "ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 51 Sunday", + "%V %A", + ], + [ + "ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 51 Sun", + "%V %a", + ], + [ + "ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 51 1", + "%V %w", + ], + [ + "ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", + "1999 51 1", + "%V %u", + ], + [ + "Day of the year directive '%j' is not compatible with ISO year " + "directive '%G'. Use '%Y' instead.", + "1999 50", + "%G %j", + ], + [ + "ISO week directive '%V' must be used with the ISO year directive " + "'%G' and a weekday directive '%A', '%a', '%w', or '%u'.", + "20 Monday", + "%V %A", + ], + ], + ) + @pytest.mark.parametrize("errors", ["raise", "coerce", "ignore"]) + def test_error_iso_week_year(self, msg, s, _format, errors): + # See GH#16607, GH#50308 + # This test checks the errors raised when an incompatible format is given. + # However, as discussed in PR#25541, overriding the locale makes a + # different error message appear, because directives like '%A' are + # locale-specific while the test data is in English. + # As a workaround, the test only runs when the locale has not been + # overridden.
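+ # Added sketch (illustrative, not part of the upstream test): the ISO + # triple %G/%V/%u parses cleanly, while mixing %V with %Y is rejected: + # >>> to_datetime("1999 51 1", format="%G %V %u") + # Timestamp('1999-12-20 00:00:00') + # >>> to_datetime("1999 51", format="%Y %V")  # raises ValueError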
+ if locale.getlocale() != ("zh_CN", "UTF-8") and locale.getlocale() != ( + "it_IT", + "UTF-8", + ): + with pytest.raises(ValueError, match=msg): + to_datetime(s, format=_format, errors=errors) + + @pytest.mark.parametrize("tz", [None, "US/Central"]) + def test_to_datetime_dtarr(self, tz): + # DatetimeArray + dti = date_range("1965-04-03", periods=19, freq="2W", tz=tz) + arr = dti._data + + result = to_datetime(arr) + assert result is arr + + # Doesn't work on Windows since tzpath is not set correctly + @td.skip_if_windows + @pytest.mark.parametrize("arg_class", [Series, Index]) + @pytest.mark.parametrize("utc", [True, False]) + @pytest.mark.parametrize("tz", [None, "US/Central"]) + def test_to_datetime_arrow(self, tz, utc, arg_class): + pa = pytest.importorskip("pyarrow") + + dti = date_range("1965-04-03", periods=19, freq="2W", tz=tz) + dti = arg_class(dti) + + dti_arrow = dti.astype(pd.ArrowDtype(pa.timestamp(unit="ns", tz=tz))) + + result = to_datetime(dti_arrow, utc=utc) + expected = to_datetime(dti, utc=utc).astype( + pd.ArrowDtype(pa.timestamp(unit="ns", tz=tz if not utc else "UTC")) + ) + if not utc and arg_class is not Series: + # Doesn't hold for utc=True, since that will astype; + # to_datetime also returns a new object for Series + assert result is dti_arrow + if arg_class is Series: + tm.assert_series_equal(result, expected) + else: + tm.assert_index_equal(result, expected) + + def test_to_datetime_pydatetime(self): + actual = to_datetime(datetime(2008, 1, 15)) + assert actual == datetime(2008, 1, 15) + + def test_to_datetime_YYYYMMDD(self): + actual = to_datetime("20080115") + assert actual == datetime(2008, 1, 15) + + def test_to_datetime_unparsable_ignore(self): + # unparsable + ser = "Month 1, 1999" + assert to_datetime(ser, errors="ignore") == ser + + @td.skip_if_windows # `tm.set_timezone` does not work on Windows + def test_to_datetime_now(self): + # See GH#18666 + with tm.set_timezone("US/Eastern"): + # GH#18705 + now = Timestamp("now").as_unit("ns") + pdnow = to_datetime("now") + pdnow2 = to_datetime(["now"])[0] + + # These should all be equal with infinite perf; this gives + # a generous margin of 10 seconds + assert abs(pdnow._value - now._value) < 1e10 + assert abs(pdnow2._value - now._value) < 1e10 + + assert pdnow.tzinfo is None + assert pdnow2.tzinfo is None + + @td.skip_if_windows # `tm.set_timezone` does not work on Windows + @pytest.mark.parametrize("tz", ["Pacific/Auckland", "US/Samoa"]) + def test_to_datetime_today(self, tz): + # See GH#18666 + # Test with one timezone far ahead of UTC and another far behind, so + # one of these will _almost_ always be in a different day from UTC. + # Unfortunately, between 12 and 1 AM Samoa time both of these + # timezones _and_ UTC will be on the same day, so during that window + # this test will not detect the regression introduced in #18666.
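+ # Added note (offset arithmetic): US/Samoa is UTC-11 and Pacific/Auckland + # is UTC+12 on standard time, so 12-1 AM in Samoa is 11:00-12:00 UTC and + # 23:00-24:00 in Auckland -- the one window in which all three clocks + # still share a calendar day.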
+ with tm.set_timezone(tz): + nptoday = np.datetime64("today").astype("datetime64[ns]").astype(np.int64) + pdtoday = to_datetime("today") + pdtoday2 = to_datetime(["today"])[0] + + tstoday = Timestamp("today").as_unit("ns") + tstoday2 = Timestamp.today().as_unit("ns") + + # These should all be equal with infinite perf; this gives + # a generous margin of 10 seconds + assert abs(pdtoday.normalize()._value - nptoday) < 1e10 + assert abs(pdtoday2.normalize()._value - nptoday) < 1e10 + assert abs(pdtoday._value - tstoday._value) < 1e10 + assert abs(pdtoday._value - tstoday2._value) < 1e10 + + assert pdtoday.tzinfo is None + assert pdtoday2.tzinfo is None + + @pytest.mark.parametrize("arg", ["now", "today"]) + def test_to_datetime_today_now_unicode_bytes(self, arg): + to_datetime([arg]) + + @pytest.mark.parametrize( + "format, expected_ds", + [ + ("%Y-%m-%d %H:%M:%S%z", "2020-01-03"), + ("%Y-%d-%m %H:%M:%S%z", "2020-03-01"), + (None, "2020-01-03"), + ], + ) + @pytest.mark.parametrize( + "string, attribute", + [ + ("now", "utcnow"), + ("today", "today"), + ], + ) + def test_to_datetime_now_with_format(self, format, expected_ds, string, attribute): + # https://github.com/pandas-dev/pandas/issues/50359 + result = to_datetime(["2020-01-03 00:00:00Z", string], format=format, utc=True) + expected = DatetimeIndex( + [expected_ds, getattr(Timestamp, attribute)()], dtype="datetime64[ns, UTC]" + ) + assert (expected - result).max().total_seconds() < 1 + + @pytest.mark.parametrize( + "dt", [np.datetime64("2000-01-01"), np.datetime64("2000-01-02")] + ) + def test_to_datetime_dt64s(self, cache, dt): + assert to_datetime(dt, cache=cache) == Timestamp(dt) + + @pytest.mark.parametrize( + "arg, format", + [ + ("2001-01-01", "%Y-%m-%d"), + ("01-01-2001", "%d-%m-%Y"), + ], + ) + def test_to_datetime_dt64s_and_str(self, arg, format): + # https://github.com/pandas-dev/pandas/issues/50036 + result = to_datetime([arg, np.datetime64("2020-01-01")], format=format) + expected = DatetimeIndex(["2001-01-01", "2020-01-01"]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "dt", [np.datetime64("1000-01-01"), np.datetime64("5000-01-02")] + ) + @pytest.mark.parametrize("errors", ["raise", "ignore", "coerce"]) + def test_to_datetime_dt64s_out_of_ns_bounds(self, cache, dt, errors): + # GH#50369 We cast to the nearest supported reso, i.e. "s" + ts = to_datetime(dt, errors=errors, cache=cache) + assert isinstance(ts, Timestamp) + assert ts.unit == "s" + assert ts.asm8 == dt + + ts = Timestamp(dt) + assert ts.unit == "s" + assert ts.asm8 == dt + + @pytest.mark.skip_ubsan + def test_to_datetime_dt64d_out_of_bounds(self, cache): + dt64 = np.datetime64(np.iinfo(np.int64).max, "D") + + msg = "Out of bounds second timestamp: 25252734927768524-07-27" + with pytest.raises(OutOfBoundsDatetime, match=msg): + Timestamp(dt64) + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(dt64, errors="raise", cache=cache) + + assert to_datetime(dt64, errors="coerce", cache=cache) is NaT + + @pytest.mark.parametrize("unit", ["s", "D"]) + def test_to_datetime_array_of_dt64s(self, cache, unit): + # https://github.com/pandas-dev/pandas/issues/31491 + # Need at least 50 to ensure cache is used. 
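+ # Added note: should_cache only enables the cache once the input has more + # than start_caching_at (50) elements, hence the `* 30` below -- 60 values + # comfortably cross that threshold.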
+ dts = [ + np.datetime64("2000-01-01", unit), + np.datetime64("2000-01-02", unit), + ] * 30 + # Assuming all datetimes are in bounds, to_datetime() returns + # an array that is equal to Timestamp() parsing + result = to_datetime(dts, cache=cache) + if cache: + # FIXME: behavior should not depend on cache + expected = DatetimeIndex([Timestamp(x).asm8 for x in dts], dtype="M8[s]") + else: + expected = DatetimeIndex([Timestamp(x).asm8 for x in dts], dtype="M8[ns]") + + tm.assert_index_equal(result, expected) + + # A list of datetimes where the last one is out of bounds + dts_with_oob = dts + [np.datetime64("9999-01-01")] + + # As of GH#51978 we do not raise in this case + to_datetime(dts_with_oob, errors="raise") + + result = to_datetime(dts_with_oob, errors="coerce", cache=cache) + if not cache: + # FIXME: shouldn't depend on cache! + expected = DatetimeIndex( + [Timestamp(dts_with_oob[0]).asm8, Timestamp(dts_with_oob[1]).asm8] * 30 + + [NaT], + ) + else: + expected = DatetimeIndex(np.array(dts_with_oob, dtype="M8[s]")) + tm.assert_index_equal(result, expected) + + # With errors='ignore', out of bounds datetime64s + # are converted to their .item(), which depending on the version of + # numpy is either a python datetime.datetime or datetime.date + result = to_datetime(dts_with_oob, errors="ignore", cache=cache) + if not cache: + # FIXME: shouldn't depend on cache! + expected = Index(dts_with_oob) + tm.assert_index_equal(result, expected) + + def test_out_of_bounds_errors_ignore(self): + # https://github.com/pandas-dev/pandas/issues/50587 + result = to_datetime(np.datetime64("9999-01-01"), errors="ignore") + expected = np.datetime64("9999-01-01") + assert result == expected + + def test_out_of_bounds_errors_ignore2(self): + # GH#12424 + msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = to_datetime( + Series(["2362-01-01", np.nan], dtype=object), errors="ignore" + ) + exp = Series(["2362-01-01", np.nan], dtype=object) + tm.assert_series_equal(res, exp) + + def test_to_datetime_tz(self, cache): + # xref 8260 + # uniform returns a DatetimeIndex + arr = [ + Timestamp("2013-01-01 13:00:00-0800", tz="US/Pacific"), + Timestamp("2013-01-02 14:00:00-0800", tz="US/Pacific"), + ] + result = to_datetime(arr, cache=cache) + expected = DatetimeIndex( + ["2013-01-01 13:00:00", "2013-01-02 14:00:00"], tz="US/Pacific" + ) + tm.assert_index_equal(result, expected) + + def test_to_datetime_tz_mixed(self, cache): + # mixed tzs will raise if errors='raise' + # https://github.com/pandas-dev/pandas/issues/50585 + arr = [ + Timestamp("2013-01-01 13:00:00", tz="US/Pacific"), + Timestamp("2013-01-02 14:00:00", tz="US/Eastern"), + ] + msg = ( + "Tz-aware datetime.datetime cannot be " + "converted to datetime64 unless utc=True" + ) + with pytest.raises(ValueError, match=msg): + to_datetime(arr, cache=cache) + + depr_msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + result = to_datetime(arr, cache=cache, errors="ignore") + expected = Index( + [ + Timestamp("2013-01-01 13:00:00-08:00"), + Timestamp("2013-01-02 14:00:00-05:00"), + ], + dtype="object", + ) + tm.assert_index_equal(result, expected) + result = to_datetime(arr, cache=cache, errors="coerce") + expected = DatetimeIndex( + ["2013-01-01 13:00:00-08:00", "NaT"], dtype="datetime64[ns, US/Pacific]" + ) + tm.assert_index_equal(result, expected) + + def test_to_datetime_different_offsets(self, cache): + # inspired by asv timeseries.ToDatetimeNONISO8601 
benchmark + # see GH-26097 for more + ts_string_1 = "March 1, 2018 12:00:00+0400" + ts_string_2 = "March 1, 2018 12:00:00+0500" + arr = [ts_string_1] * 5 + [ts_string_2] * 5 + expected = Index([parse(x) for x in arr]) + msg = "parsing datetimes with mixed time zones will raise an error" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_datetime(arr, cache=cache) + tm.assert_index_equal(result, expected) + + def test_to_datetime_tz_pytz(self, cache): + # see gh-8260 + us_eastern = pytz.timezone("US/Eastern") + arr = np.array( + [ + us_eastern.localize( + datetime(year=2000, month=1, day=1, hour=3, minute=0) + ), + us_eastern.localize( + datetime(year=2000, month=6, day=1, hour=3, minute=0) + ), + ], + dtype=object, + ) + result = to_datetime(arr, utc=True, cache=cache) + expected = DatetimeIndex( + ["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"], + dtype="datetime64[ns, UTC]", + freq=None, + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "init_constructor, end_constructor", + [ + (Index, DatetimeIndex), + (list, DatetimeIndex), + (np.array, DatetimeIndex), + (Series, Series), + ], + ) + def test_to_datetime_utc_true(self, cache, init_constructor, end_constructor): + # See gh-11934 & gh-6415 + data = ["20100102 121314", "20100102 121315"] + expected_data = [ + Timestamp("2010-01-02 12:13:14", tz="utc"), + Timestamp("2010-01-02 12:13:15", tz="utc"), + ] + + result = to_datetime( + init_constructor(data), format="%Y%m%d %H%M%S", utc=True, cache=cache + ) + expected = end_constructor(expected_data) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize( + "scalar, expected", + [ + ["20100102 121314", Timestamp("2010-01-02 12:13:14", tz="utc")], + ["20100102 121315", Timestamp("2010-01-02 12:13:15", tz="utc")], + ], + ) + def test_to_datetime_utc_true_scalar(self, cache, scalar, expected): + # Test scalar case as well + result = to_datetime(scalar, format="%Y%m%d %H%M%S", utc=True, cache=cache) + assert result == expected + + def test_to_datetime_utc_true_with_series_single_value(self, cache): + # GH 15760 UTC=True with Series + ts = 1.5e18 + result = to_datetime(Series([ts]), utc=True, cache=cache) + expected = Series([Timestamp(ts, tz="utc")]) + tm.assert_series_equal(result, expected) + + def test_to_datetime_utc_true_with_series_tzaware_string(self, cache): + ts = "2013-01-01 00:00:00-01:00" + expected_ts = "2013-01-01 01:00:00" + data = Series([ts] * 3) + result = to_datetime(data, utc=True, cache=cache) + expected = Series([Timestamp(expected_ts, tz="utc")] * 3) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "date, dtype", + [ + ("2013-01-01 01:00:00", "datetime64[ns]"), + ("2013-01-01 01:00:00", "datetime64[ns, UTC]"), + ], + ) + def test_to_datetime_utc_true_with_series_datetime_ns(self, cache, date, dtype): + expected = Series( + [Timestamp("2013-01-01 01:00:00", tz="UTC")], dtype="M8[ns, UTC]" + ) + result = to_datetime(Series([date], dtype=dtype), utc=True, cache=cache) + tm.assert_series_equal(result, expected) + + def test_to_datetime_tz_psycopg2(self, request, cache): + # xref 8260 + psycopg2_tz = pytest.importorskip("psycopg2.tz") + + # misc cases + tz1 = psycopg2_tz.FixedOffsetTimezone(offset=-300, name=None) + tz2 = psycopg2_tz.FixedOffsetTimezone(offset=-240, name=None) + arr = np.array( + [ + datetime(2000, 1, 1, 3, 0, tzinfo=tz1), + datetime(2000, 6, 1, 3, 0, tzinfo=tz2), + ], + dtype=object, + ) + + result = to_datetime(arr, errors="coerce", utc=True, cache=cache) + 
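# Added note (worked offsets): -300 and -240 minutes are UTC-05:00 and + # UTC-04:00, so 03:00 local maps to 08:00 and 07:00 UTC respectively, + # matching the expected values below. +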
expected = DatetimeIndex( + ["2000-01-01 08:00:00+00:00", "2000-06-01 07:00:00+00:00"], + dtype="datetime64[ns, UTC]", + freq=None, + ) + tm.assert_index_equal(result, expected) + + # dtype coercion + i = DatetimeIndex( + ["2000-01-01 08:00:00"], + tz=psycopg2_tz.FixedOffsetTimezone(offset=-300, name=None), + ) + assert is_datetime64_ns_dtype(i) + + # tz coercion + result = to_datetime(i, errors="coerce", cache=cache) + tm.assert_index_equal(result, i) + + result = to_datetime(i, errors="coerce", utc=True, cache=cache) + expected = DatetimeIndex(["2000-01-01 13:00:00"], dtype="datetime64[ns, UTC]") + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("arg", [True, False]) + def test_datetime_bool(self, cache, arg): + # GH13176 + msg = r"dtype bool cannot be converted to datetime64\[ns\]" + with pytest.raises(TypeError, match=msg): + to_datetime(arg) + assert to_datetime(arg, errors="coerce", cache=cache) is NaT + assert to_datetime(arg, errors="ignore", cache=cache) is arg + + def test_datetime_bool_arrays_mixed(self, cache): + msg = f"{type(cache)} is not convertible to datetime" + with pytest.raises(TypeError, match=msg): + to_datetime([False, datetime.today()], cache=cache) + with pytest.raises( + ValueError, + match=( + r'^time data "True" doesn\'t match format "%Y%m%d", ' + f"at position 1. {PARSING_ERR_MSG}$" + ), + ): + to_datetime(["20130101", True], cache=cache) + tm.assert_index_equal( + to_datetime([0, False, NaT, 0.0], errors="coerce", cache=cache), + DatetimeIndex( + [to_datetime(0, cache=cache), NaT, NaT, to_datetime(0, cache=cache)] + ), + ) + + @pytest.mark.parametrize("arg", [bool, to_datetime]) + def test_datetime_invalid_datatype(self, arg): + # GH13176 + msg = "is not convertible to datetime" + with pytest.raises(TypeError, match=msg): + to_datetime(arg) + + @pytest.mark.parametrize("errors", ["coerce", "raise", "ignore"]) + def test_invalid_format_raises(self, errors): + # https://github.com/pandas-dev/pandas/issues/50255 + with pytest.raises( + ValueError, match="':' is a bad directive in format 'H%:M%:S%" + ): + to_datetime(["00:00:00"], format="H%:M%:S%", errors=errors) + + @pytest.mark.parametrize("value", ["a", "00:01:99"]) + @pytest.mark.parametrize("format", [None, "%H:%M:%S"]) + def test_datetime_invalid_scalar(self, value, format): + # GH24763 + res = to_datetime(value, errors="ignore", format=format) + assert res == value + + res = to_datetime(value, errors="coerce", format=format) + assert res is NaT + + msg = "|".join( + [ + r'^time data "a" doesn\'t match format "%H:%M:%S", at position 0. ' + f"{PARSING_ERR_MSG}$", + r'^Given date string "a" not likely a datetime, at position 0$', + r'^unconverted data remains when parsing with format "%H:%M:%S": "9", ' + f"at position 0. {PARSING_ERR_MSG}$", + r"^second must be in 0..59: 00:01:99, at position 0$", + ] + ) + with pytest.raises(ValueError, match=msg): + to_datetime(value, errors="raise", format=format) + + @pytest.mark.parametrize("value", ["3000/12/11 00:00:00"]) + @pytest.mark.parametrize("format", [None, "%H:%M:%S"]) + def test_datetime_outofbounds_scalar(self, value, format): + # GH24763 + res = to_datetime(value, errors="ignore", format=format) + assert res == value + + res = to_datetime(value, errors="coerce", format=format) + assert res is NaT + + if format is not None: + msg = r'^time data ".*" doesn\'t match format ".*", at position 0.' 
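+ # Added note: with an explicit format the string fails to match before any + # range check runs, hence ValueError here; only the format=None branch gets + # far enough to overflow the ns bounds (Timestamp.max is 2262-04-11).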
+ with pytest.raises(ValueError, match=msg): + to_datetime(value, errors="raise", format=format) + else: + msg = "^Out of bounds .*, at position 0$" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(value, errors="raise", format=format) + + @pytest.mark.parametrize( + ("values"), [(["a"]), (["00:01:99"]), (["a", "b", "99:00:00"])] + ) + @pytest.mark.parametrize("format", [(None), ("%H:%M:%S")]) + def test_datetime_invalid_index(self, values, format): + # GH24763 + # Not great to have logic in tests, but this one's hard to + # parametrise over + if format is None and len(values) > 1: + warn = UserWarning + else: + warn = None + with tm.assert_produces_warning( + warn, match="Could not infer format", raise_on_extra_warnings=False + ): + res = to_datetime(values, errors="ignore", format=format) + tm.assert_index_equal(res, Index(values, dtype=object)) + + with tm.assert_produces_warning( + warn, match="Could not infer format", raise_on_extra_warnings=False + ): + res = to_datetime(values, errors="coerce", format=format) + tm.assert_index_equal(res, DatetimeIndex([NaT] * len(values))) + + msg = "|".join( + [ + r'^Given date string "a" not likely a datetime, at position 0$', + r'^time data "a" doesn\'t match format "%H:%M:%S", at position 0. ' + f"{PARSING_ERR_MSG}$", + r'^unconverted data remains when parsing with format "%H:%M:%S": "9", ' + f"at position 0. {PARSING_ERR_MSG}$", + r"^second must be in 0..59: 00:01:99, at position 0$", + ] + ) + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning( + warn, match="Could not infer format", raise_on_extra_warnings=False + ): + to_datetime(values, errors="raise", format=format) + + @pytest.mark.parametrize("utc", [True, None]) + @pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None]) + @pytest.mark.parametrize("constructor", [list, tuple, np.array, Index, deque]) + def test_to_datetime_cache(self, utc, format, constructor): + date = "20130101 00:00:00" + test_dates = [date] * 10**5 + data = constructor(test_dates) + + result = to_datetime(data, utc=utc, format=format, cache=True) + expected = to_datetime(data, utc=utc, format=format, cache=False) + + tm.assert_index_equal(result, expected) + + def test_to_datetime_from_deque(self): + # GH 29403 + result = to_datetime(deque([Timestamp("2010-06-02 09:30:00")] * 51)) + expected = to_datetime([Timestamp("2010-06-02 09:30:00")] * 51) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("utc", [True, None]) + @pytest.mark.parametrize("format", ["%Y%m%d %H:%M:%S", None]) + def test_to_datetime_cache_series(self, utc, format): + date = "20130101 00:00:00" + test_dates = [date] * 10**5 + data = Series(test_dates) + result = to_datetime(data, utc=utc, format=format, cache=True) + expected = to_datetime(data, utc=utc, format=format, cache=False) + tm.assert_series_equal(result, expected) + + def test_to_datetime_cache_scalar(self): + date = "20130101 00:00:00" + result = to_datetime(date, cache=True) + expected = Timestamp("20130101 00:00:00") + assert result == expected + + @pytest.mark.parametrize( + "datetimelikes,expected_values", + ( + ( + (None, np.nan) + (NaT,) * start_caching_at, + (NaT,) * (start_caching_at + 2), + ), + ( + (None, Timestamp("2012-07-26")) + (NaT,) * start_caching_at, + (NaT, Timestamp("2012-07-26")) + (NaT,) * start_caching_at, + ), + ( + (None,) + + (NaT,) * start_caching_at + + ("2012 July 26", Timestamp("2012-07-26")), + (NaT,) * (start_caching_at + 1) + + (Timestamp("2012-07-26"), Timestamp("2012-07-26")), + ), + 
), + ) + def test_convert_object_to_datetime_with_cache( + self, datetimelikes, expected_values + ): + # GH#39882 + ser = Series( + datetimelikes, + dtype="object", + ) + result_series = to_datetime(ser, errors="coerce") + expected_series = Series( + expected_values, + dtype="datetime64[ns]", + ) + tm.assert_series_equal(result_series, expected_series) + + @pytest.mark.parametrize("cache", [True, False]) + @pytest.mark.parametrize( + "input", + [ + Series([NaT] * 20 + [None] * 20, dtype="object"), + Series([NaT] * 60 + [None] * 60, dtype="object"), + Series([None] * 20), + Series([None] * 60), + Series([""] * 20), + Series([""] * 60), + Series([pd.NA] * 20), + Series([pd.NA] * 60), + Series([np.nan] * 20), + Series([np.nan] * 60), + ], + ) + def test_to_datetime_converts_null_like_to_nat(self, cache, input): + # GH35888 + expected = Series([NaT] * len(input), dtype="M8[ns]") + result = to_datetime(input, cache=cache) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "date, format", + [ + ("2017-20", "%Y-%W"), + ("20 Sunday", "%W %A"), + ("20 Sun", "%W %a"), + ("2017-21", "%Y-%U"), + ("20 Sunday", "%U %A"), + ("20 Sun", "%U %a"), + ], + ) + def test_week_without_day_and_calendar_year(self, date, format): + # GH16774 + + msg = "Cannot use '%W' or '%U' without day and year" + with pytest.raises(ValueError, match=msg): + to_datetime(date, format=format) + + def test_to_datetime_coerce(self): + # GH 26122 + ts_strings = [ + "March 1, 2018 12:00:00+0400", + "March 1, 2018 12:00:00+0500", + "20100240", + ] + msg = "parsing datetimes with mixed time zones will raise an error" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_datetime(ts_strings, errors="coerce") + expected = Index( + [ + datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 14400)), + datetime(2018, 3, 1, 12, 0, tzinfo=tzoffset(None, 18000)), + NaT, + ] + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "string_arg, format", + [("March 1, 2018", "%B %d, %Y"), ("2018-03-01", "%Y-%m-%d")], + ) + @pytest.mark.parametrize( + "outofbounds", + [ + datetime(9999, 1, 1), + date(9999, 1, 1), + np.datetime64("9999-01-01"), + "January 1, 9999", + "9999-01-01", + ], + ) + def test_to_datetime_coerce_oob(self, string_arg, format, outofbounds): + # https://github.com/pandas-dev/pandas/issues/50255 + ts_strings = [string_arg, outofbounds] + result = to_datetime(ts_strings, errors="coerce", format=format) + expected = DatetimeIndex([datetime(2018, 3, 1), NaT]) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "errors, expected", + [ + ("coerce", Index([NaT, NaT])), + ("ignore", Index(["200622-12-31", "111111-24-11"], dtype=object)), + ], + ) + def test_to_datetime_malformed_no_raise(self, errors, expected): + # GH 28299 + # GH 48633 + ts_strings = ["200622-12-31", "111111-24-11"] + with tm.assert_produces_warning( + UserWarning, match="Could not infer format", raise_on_extra_warnings=False + ): + result = to_datetime(ts_strings, errors=errors) + tm.assert_index_equal(result, expected) + + def test_to_datetime_malformed_raise(self): + # GH 48633 + ts_strings = ["200622-12-31", "111111-24-11"] + msg = ( + 'Parsed string "200622-12-31" gives an invalid tzoffset, which must ' + r"be between -timedelta\(hours=24\) and timedelta\(hours=24\), " + "at position 0" + ) + with pytest.raises( + ValueError, + match=msg, + ): + with tm.assert_produces_warning( + UserWarning, match="Could not infer format" + ): + to_datetime( + ts_strings, + errors="raise", + ) + + 
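# Added sketch (illustrative, not from the upstream suite) previewing the + # uniform-offset behavior asserted next; on a default build, where pytz + # supplies fixed offsets, the result keeps a single offset instead of + # falling back to object dtype: + # >>> to_datetime(["2015-11-18 15:30:00+05:30"] * 2).tz + # pytz.FixedOffset(330) + +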
def test_iso_8601_strings_with_same_offset(self): + # GH 17697, 11736 + ts_str = "2015-11-18 15:30:00+05:30" + result = to_datetime(ts_str) + expected = Timestamp(ts_str) + assert result == expected + + expected = DatetimeIndex([Timestamp(ts_str)] * 2) + result = to_datetime([ts_str] * 2) + tm.assert_index_equal(result, expected) + + result = DatetimeIndex([ts_str] * 2) + tm.assert_index_equal(result, expected) + + def test_iso_8601_strings_with_different_offsets(self): + # GH 17697, 11736, 50887 + ts_strings = ["2015-11-18 15:30:00+05:30", "2015-11-18 16:30:00+06:30", NaT] + msg = "parsing datetimes with mixed time zones will raise an error" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_datetime(ts_strings) + expected = np.array( + [ + datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 19800)), + datetime(2015, 11, 18, 16, 30, tzinfo=tzoffset(None, 23400)), + NaT, + ], + dtype=object, + ) + # GH 21864 + expected = Index(expected) + tm.assert_index_equal(result, expected) + + def test_iso_8601_strings_with_different_offsets_utc(self): + ts_strings = ["2015-11-18 15:30:00+05:30", "2015-11-18 16:30:00+06:30", NaT] + result = to_datetime(ts_strings, utc=True) + expected = DatetimeIndex( + [Timestamp(2015, 11, 18, 10), Timestamp(2015, 11, 18, 10), NaT], tz="UTC" + ) + tm.assert_index_equal(result, expected) + + def test_mixed_offsets_with_native_datetime_raises(self): + # GH 25978 + + vals = [ + "nan", + Timestamp("1990-01-01"), + "2015-03-14T16:15:14.123-08:00", + "2019-03-04T21:56:32.620-07:00", + None, + "today", + "now", + ] + ser = Series(vals) + assert all(ser[i] is vals[i] for i in range(len(vals))) # GH#40111 + + now = Timestamp("now") + today = Timestamp("today") + msg = "parsing datetimes with mixed time zones will raise an error" + with tm.assert_produces_warning(FutureWarning, match=msg): + mixed = to_datetime(ser) + expected = Series( + [ + "NaT", + Timestamp("1990-01-01"), + Timestamp("2015-03-14T16:15:14.123-08:00").to_pydatetime(), + Timestamp("2019-03-04T21:56:32.620-07:00").to_pydatetime(), + None, + ], + dtype=object, + ) + tm.assert_series_equal(mixed[:-2], expected) + # we'll check mixed[-1] and mixed[-2] match now and today to within + # call-timing tolerances + assert (now - mixed.iloc[-1]).total_seconds() <= 0.1 + assert (today - mixed.iloc[-2]).total_seconds() <= 0.1 + + with pytest.raises(ValueError, match="Tz-aware datetime.datetime"): + to_datetime(mixed) + + def test_non_iso_strings_with_tz_offset(self): + result = to_datetime(["March 1, 2018 12:00:00+0400"] * 2) + expected = DatetimeIndex( + [datetime(2018, 3, 1, 12, tzinfo=timezone(timedelta(minutes=240)))] * 2 + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "ts, expected", + [ + (Timestamp("2018-01-01"), Timestamp("2018-01-01", tz="UTC")), + ( + Timestamp("2018-01-01", tz="US/Pacific"), + Timestamp("2018-01-01 08:00", tz="UTC"), + ), + ], + ) + def test_timestamp_utc_true(self, ts, expected): + # GH 24415 + result = to_datetime(ts, utc=True) + assert result == expected + + @pytest.mark.parametrize("dt_str", ["00010101", "13000101", "30000101", "99990101"]) + def test_to_datetime_with_format_out_of_bounds(self, dt_str): + # GH 9107 + msg = "Out of bounds nanosecond timestamp" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(dt_str, format="%Y%m%d") + + def test_to_datetime_utc(self): + arr = np.array([parse("2012-06-13T01:39:00Z")], dtype=object) + + result = to_datetime(arr, utc=True) + assert result.tz is timezone.utc + + def 
test_to_datetime_fixed_offset(self): + from pandas.tests.indexes.datetimes.test_timezones import FixedOffset + + fixed_off = FixedOffset(-420, "-07:00") + + dates = [ + datetime(2000, 1, 1, tzinfo=fixed_off), + datetime(2000, 1, 2, tzinfo=fixed_off), + datetime(2000, 1, 3, tzinfo=fixed_off), + ] + result = to_datetime(dates) + assert result.tz == fixed_off + + @pytest.mark.parametrize( + "date", + [ + ["2020-10-26 00:00:00+06:00", "2020-10-26 00:00:00+01:00"], + ["2020-10-26 00:00:00+06:00", Timestamp("2018-01-01", tz="US/Pacific")], + [ + "2020-10-26 00:00:00+06:00", + datetime(2020, 1, 1, 18, tzinfo=pytz.timezone("Australia/Melbourne")), + ], + ], + ) + def test_to_datetime_mixed_offsets_with_utc_false_deprecated(self, date): + # GH 50887 + msg = "parsing datetimes with mixed time zones will raise an error" + with tm.assert_produces_warning(FutureWarning, match=msg): + to_datetime(date, utc=False) + + +class TestToDatetimeUnit: + @pytest.mark.parametrize("unit", ["Y", "M"]) + @pytest.mark.parametrize("item", [150, float(150)]) + def test_to_datetime_month_or_year_unit_int(self, cache, unit, item, request): + # GH#50870 Note we have separate tests that pd.Timestamp gets these right + ts = Timestamp(item, unit=unit) + expected = DatetimeIndex([ts], dtype="M8[ns]") + + result = to_datetime([item], unit=unit, cache=cache) + tm.assert_index_equal(result, expected) + + result = to_datetime(np.array([item], dtype=object), unit=unit, cache=cache) + tm.assert_index_equal(result, expected) + + result = to_datetime(np.array([item]), unit=unit, cache=cache) + tm.assert_index_equal(result, expected) + + # with a nan! + result = to_datetime(np.array([item, np.nan]), unit=unit, cache=cache) + assert result.isna()[1] + tm.assert_index_equal(result[:1], expected) + + @pytest.mark.parametrize("unit", ["Y", "M"]) + def test_to_datetime_month_or_year_unit_non_round_float(self, cache, unit): + # GH#50301 + # Match Timestamp behavior in disallowing non-round floats with + # Y or M unit + warn_msg = "strings will be parsed as datetime strings" + msg = f"Conversion of non-round float with unit={unit} is ambiguous" + with pytest.raises(ValueError, match=msg): + to_datetime([1.5], unit=unit, errors="raise") + with pytest.raises(ValueError, match=msg): + to_datetime(np.array([1.5]), unit=unit, errors="raise") + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + to_datetime(["1.5"], unit=unit, errors="raise") + + # with errors="ignore" we also end up raising within the Timestamp + # constructor; this may not be ideal + with pytest.raises(ValueError, match=msg): + to_datetime([1.5], unit=unit, errors="ignore") + + res = to_datetime([1.5], unit=unit, errors="coerce") + expected = Index([NaT], dtype="M8[ns]") + tm.assert_index_equal(res, expected) + + with tm.assert_produces_warning(FutureWarning, match=warn_msg): + res = to_datetime(["1.5"], unit=unit, errors="coerce") + tm.assert_index_equal(res, expected) + + # round floats are OK + res = to_datetime([1.0], unit=unit) + expected = to_datetime([1], unit=unit) + tm.assert_index_equal(res, expected) + + def test_unit(self, cache): + # GH 11758 + # test proper behavior with errors + msg = "cannot specify both format and unit" + with pytest.raises(ValueError, match=msg): + to_datetime([1], unit="D", format="%Y%m%d", cache=cache) + + def test_unit_str(self, cache): + # GH 57051 + # Test that strs aren't dropping precision to 32-bit accidentally. 
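+ # Added note (arithmetic): a float32 mantissa holds 24 bits, so integers + # above 2**24 round to a power-of-two step; 1704660000 needs 31 bits and + # would come back as 1704659968 (a step of 128) if narrowed, which is the + # precision loss guarded against here.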
+ with tm.assert_produces_warning(FutureWarning): + res = to_datetime(["1704660000"], unit="s", origin="unix") + expected = to_datetime([1704660000], unit="s", origin="unix") + tm.assert_index_equal(res, expected) + + def test_unit_array_mixed_nans(self, cache): + values = [11111111111111111, 1, 1.0, iNaT, NaT, np.nan, "NaT", ""] + result = to_datetime(values, unit="D", errors="ignore", cache=cache) + expected = Index( + [ + 11111111111111111, + Timestamp("1970-01-02"), + Timestamp("1970-01-02"), + NaT, + NaT, + NaT, + NaT, + NaT, + ], + dtype=object, + ) + tm.assert_index_equal(result, expected) + + result = to_datetime(values, unit="D", errors="coerce", cache=cache) + expected = DatetimeIndex( + ["NaT", "1970-01-02", "1970-01-02", "NaT", "NaT", "NaT", "NaT", "NaT"], + dtype="M8[ns]", + ) + tm.assert_index_equal(result, expected) + + msg = "cannot convert input 11111111111111111 with the unit 'D'" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(values, unit="D", errors="raise", cache=cache) + + def test_unit_array_mixed_nans_large_int(self, cache): + values = [1420043460000000000000000, iNaT, NaT, np.nan, "NaT"] + + result = to_datetime(values, errors="ignore", unit="s", cache=cache) + expected = Index([1420043460000000000000000, NaT, NaT, NaT, NaT], dtype=object) + tm.assert_index_equal(result, expected) + + result = to_datetime(values, errors="coerce", unit="s", cache=cache) + expected = DatetimeIndex(["NaT", "NaT", "NaT", "NaT", "NaT"], dtype="M8[ns]") + tm.assert_index_equal(result, expected) + + msg = "cannot convert input 1420043460000000000000000 with the unit 's'" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(values, errors="raise", unit="s", cache=cache) + + def test_to_datetime_invalid_str_not_out_of_bounds_valuerror(self, cache): + # if we have a string, then we raise a ValueError + # and NOT an OutOfBoundsDatetime + msg = "non convertible value foo with the unit 's'" + with pytest.raises(ValueError, match=msg): + to_datetime("foo", errors="raise", unit="s", cache=cache) + + @pytest.mark.parametrize("error", ["raise", "coerce", "ignore"]) + def test_unit_consistency(self, cache, error): + # consistency of conversions + expected = Timestamp("1970-05-09 14:25:11") + result = to_datetime(11111111, unit="s", errors=error, cache=cache) + assert result == expected + assert isinstance(result, Timestamp) + + @pytest.mark.parametrize("errors", ["ignore", "raise", "coerce"]) + @pytest.mark.parametrize("dtype", ["float64", "int64"]) + def test_unit_with_numeric(self, cache, errors, dtype): + # GH 13180 + # coercions from floats/ints are ok + expected = DatetimeIndex( + ["2015-06-19 05:33:20", "2015-05-27 22:33:20"], dtype="M8[ns]" + ) + arr = np.array([1.434692e18, 1.432766e18]).astype(dtype) + result = to_datetime(arr, errors=errors, cache=cache) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "exp, arr, warning", + [ + [ + ["NaT", "2015-06-19 05:33:20", "2015-05-27 22:33:20"], + ["foo", 1.434692e18, 1.432766e18], + UserWarning, + ], + [ + ["2015-06-19 05:33:20", "2015-05-27 22:33:20", "NaT", "NaT"], + [1.434692e18, 1.432766e18, "foo", "NaT"], + None, + ], + ], + ) + def test_unit_with_numeric_coerce(self, cache, exp, arr, warning): + # but we want to make sure that we are coercing + # if we have ints/strings + expected = DatetimeIndex(exp, dtype="M8[ns]") + with tm.assert_produces_warning(warning, match="Could not infer format"): + result = to_datetime(arr, errors="coerce", cache=cache) + tm.assert_index_equal(result, 
expected) + + @pytest.mark.parametrize( + "arr", + [ + [Timestamp("20130101"), 1.434692e18, 1.432766e18], + [1.434692e18, 1.432766e18, Timestamp("20130101")], + ], + ) + def test_unit_mixed(self, cache, arr): + # GH#50453 pre-2.0 with mixed numeric/datetimes and errors="coerce" + # the numeric entries would be coerced to NaT, was never clear exactly + # why. + # mixed integers/datetimes + expected = Index([Timestamp(x) for x in arr], dtype="M8[ns]") + result = to_datetime(arr, errors="coerce", cache=cache) + tm.assert_index_equal(result, expected) + + # GH#49037 pre-2.0 this raised, but it always worked with Series, + # was never clear why it was disallowed + result = to_datetime(arr, errors="raise", cache=cache) + tm.assert_index_equal(result, expected) + + result = DatetimeIndex(arr) + tm.assert_index_equal(result, expected) + + def test_unit_rounding(self, cache): + # GH 14156 & GH 20445: argument will incur floating point errors + # but no premature rounding + value = 1434743731.8770001 + result = to_datetime(value, unit="s", cache=cache) + expected = Timestamp("2015-06-19 19:55:31.877000093") + assert result == expected + + alt = Timestamp(value, unit="s") + assert alt == result + + def test_unit_ignore_keeps_name(self, cache): + # GH 21697 + expected = Index([15e9] * 2, name="name") + result = to_datetime(expected, errors="ignore", unit="s", cache=cache) + tm.assert_index_equal(result, expected) + + def test_to_datetime_errors_ignore_utc_true(self): + # GH#23758 + result = to_datetime([1], unit="s", utc=True, errors="ignore") + expected = DatetimeIndex(["1970-01-01 00:00:01"], dtype="M8[ns, UTC]") + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("dtype", [int, float]) + def test_to_datetime_unit(self, dtype): + epoch = 1370745748 + ser = Series([epoch + t for t in range(20)]).astype(dtype) + result = to_datetime(ser, unit="s") + expected = Series( + [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) + for t in range(20) + ], + dtype="M8[ns]", + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("null", [iNaT, np.nan]) + def test_to_datetime_unit_with_nulls(self, null): + epoch = 1370745748 + ser = Series([epoch + t for t in range(20)] + [null]) + result = to_datetime(ser, unit="s") + expected = Series( + [Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) for t in range(20)] + + [NaT], + dtype="M8[ns]", + ) + tm.assert_series_equal(result, expected) + + def test_to_datetime_unit_fractional_seconds(self): + # GH13834 + epoch = 1370745748 + ser = Series([epoch + t for t in np.arange(0, 2, 0.25)] + [iNaT]).astype(float) + result = to_datetime(ser, unit="s") + expected = Series( + [ + Timestamp("2013-06-09 02:42:28") + timedelta(seconds=t) + for t in np.arange(0, 2, 0.25) + ] + + [NaT], + dtype="M8[ns]", + ) + # GH20455 argument will incur floating point errors but no premature rounding + result = result.round("ms") + tm.assert_series_equal(result, expected) + + def test_to_datetime_unit_na_values(self): + result = to_datetime([1, 2, "NaT", NaT, np.nan], unit="D") + expected = DatetimeIndex( + [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 3, + dtype="M8[ns]", + ) + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("bad_val", ["foo", 111111111]) + def test_to_datetime_unit_invalid(self, bad_val): + msg = f"{bad_val} with the unit 'D'" + with pytest.raises(ValueError, match=msg): + to_datetime([1, 2, bad_val], unit="D") + + @pytest.mark.parametrize("bad_val", ["foo", 111111111]) + def 
test_to_datetime_unit_coerce(self, bad_val): + # with errors="coerce" the valid entries are still processed + expected = DatetimeIndex( + [Timestamp("1970-01-02"), Timestamp("1970-01-03")] + ["NaT"] * 1, + dtype="M8[ns]", + ) + result = to_datetime([1, 2, bad_val], unit="D", errors="coerce") + tm.assert_index_equal(result, expected) + + def test_float_to_datetime_raise_near_bounds(self): + # GH50183 + msg = "cannot convert input with unit 'D'" + oneday_in_ns = 1e9 * 60 * 60 * 24 + tsmax_in_days = 2**63 / oneday_in_ns  # 2**63 ns, in days + # just in bounds + should_succeed = Series( + [0, tsmax_in_days - 0.005, -tsmax_in_days + 0.005], dtype=float + ) + expected = (should_succeed * oneday_in_ns).astype(np.int64) + for error_mode in ["raise", "coerce", "ignore"]: + result1 = to_datetime(should_succeed, unit="D", errors=error_mode) + # Cast to `np.float64` so that `rtol` and inexact checking kick in + # (`check_exact` doesn't take place for integer dtypes) + tm.assert_almost_equal( + result1.astype(np.int64).astype(np.float64), + expected.astype(np.float64), + rtol=1e-10, + ) + # just out of bounds + should_fail1 = Series([0, tsmax_in_days + 0.005], dtype=float) + should_fail2 = Series([0, -tsmax_in_days - 0.005], dtype=float) + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(should_fail1, unit="D", errors="raise") + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(should_fail2, unit="D", errors="raise") + + +class TestToDatetimeDataFrame: + @pytest.fixture + def df(self): + return DataFrame( + { + "year": [2015, 2016], + "month": [2, 3], + "day": [4, 5], + "hour": [6, 7], + "minute": [58, 59], + "second": [10, 11], + "ms": [1, 1], + "us": [2, 2], + "ns": [3, 3], + } + ) + + def test_dataframe(self, df, cache): + result = to_datetime( + {"year": df["year"], "month": df["month"], "day": df["day"]}, cache=cache + ) + expected = Series( + [Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")] + ) + tm.assert_series_equal(result, expected) + + # dict-like + result = to_datetime(df[["year", "month", "day"]].to_dict(), cache=cache) + tm.assert_series_equal(result, expected) + + def test_dataframe_dict_with_constructable(self, df, cache): + # dict but with constructable + df2 = df[["year", "month", "day"]].to_dict() + df2["month"] = 2 + result = to_datetime(df2, cache=cache) + expected2 = Series( + [Timestamp("20150204 00:00:00"), Timestamp("20160205 00:00:00")] + ) + tm.assert_series_equal(result, expected2) + + @pytest.mark.parametrize( + "unit", + [ + { + "year": "years", + "month": "months", + "day": "days", + "hour": "hours", + "minute": "minutes", + "second": "seconds", + }, + { + "year": "year", + "month": "month", + "day": "day", + "hour": "hour", + "minute": "minute", + "second": "second", + }, + ], + ) + def test_dataframe_field_aliases_column_subset(self, df, cache, unit): + # unit mappings + result = to_datetime(df[list(unit.keys())].rename(columns=unit), cache=cache) + expected = Series( + [Timestamp("20150204 06:58:10"), Timestamp("20160305 07:59:11")], + dtype="M8[ns]", + ) + tm.assert_series_equal(result, expected) + + def test_dataframe_field_aliases(self, df, cache): + d = { + "year": "year", + "month": "month", + "day": "day", + "hour": "hour", + "minute": "minute", + "second": "second", + "ms": "ms", + "us": "us", + "ns": "ns", + } + + result = to_datetime(df.rename(columns=d), cache=cache) + expected = Series( + [ + Timestamp("20150204 06:58:10.001002003"), + Timestamp("20160305 07:59:11.001002003"), + ] + ) + tm.assert_series_equal(result, expected) + + def
test_dataframe_str_dtype(self, df, cache): + # str columns are coerced back to numeric for assembly + result = to_datetime(df.astype(str), cache=cache) + expected = Series( + [ + Timestamp("20150204 06:58:10.001002003"), + Timestamp("20160305 07:59:11.001002003"), + ] + ) + tm.assert_series_equal(result, expected) + + def test_dataframe_coerce(self, cache): + # passing coerce + df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]}) + + msg = ( + r'^cannot assemble the datetimes: time data ".+" doesn\'t ' + r'match format "%Y%m%d", at position 1\.' + ) + with pytest.raises(ValueError, match=msg): + to_datetime(df2, cache=cache) + + result = to_datetime(df2, errors="coerce", cache=cache) + expected = Series([Timestamp("20150204 00:00:00"), NaT]) + tm.assert_series_equal(result, expected) + + def test_dataframe_extra_keys_raises(self, df, cache): + # extra columns + msg = r"extra keys have been passed to the datetime assemblage: \[foo\]" + with pytest.raises(ValueError, match=msg): + df2 = df.copy() + df2["foo"] = 1 + to_datetime(df2, cache=cache) + + @pytest.mark.parametrize( + "cols", + [ + ["year"], + ["year", "month"], + ["year", "month", "second"], + ["month", "day"], + ["year", "day", "second"], + ], + ) + def test_dataframe_missing_keys_raises(self, df, cache, cols): + # not enough + msg = ( + r"to assemble mappings requires at least that \[year, month, " + r"day\] be specified: \[.+\] is missing" + ) + with pytest.raises(ValueError, match=msg): + to_datetime(df[cols], cache=cache) + + def test_dataframe_duplicate_columns_raises(self, cache): + # duplicates + msg = "cannot assemble with duplicate keys" + df2 = DataFrame({"year": [2015, 2016], "month": [2, 20], "day": [4, 5]}) + df2.columns = ["year", "year", "day"] + with pytest.raises(ValueError, match=msg): + to_datetime(df2, cache=cache) + + df2 = DataFrame( + {"year": [2015, 2016], "month": [2, 20], "day": [4, 5], "hour": [4, 5]} + ) + df2.columns = ["year", "month", "day", "day"] + with pytest.raises(ValueError, match=msg): + to_datetime(df2, cache=cache) + + def test_dataframe_int16(self, cache): + # GH#13451 + df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}) + + # int16 + result = to_datetime(df.astype("int16"), cache=cache) + expected = Series( + [Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")] + ) + tm.assert_series_equal(result, expected) + + def test_dataframe_mixed(self, cache): + # mixed dtypes + df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}) + df["month"] = df["month"].astype("int8") + df["day"] = df["day"].astype("int8") + result = to_datetime(df, cache=cache) + expected = Series( + [Timestamp("20150204 00:00:00"), Timestamp("20160305 00:00:00")] + ) + tm.assert_series_equal(result, expected) + + def test_dataframe_float(self, cache): + # float + df = DataFrame({"year": [2000, 2001], "month": [1.5, 1], "day": [1, 1]}) + msg = ( + r"^cannot assemble the datetimes: unconverted data remains when parsing " + r'with format ".*": "1", at position 0.'
+ ) + with pytest.raises(ValueError, match=msg): + to_datetime(df, cache=cache) + + def test_dataframe_utc_true(self): + # GH#23760 + df = DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]}) + result = to_datetime(df, utc=True) + expected = Series( + np.array(["2015-02-04", "2016-03-05"], dtype="datetime64[ns]") + ).dt.tz_localize("UTC") + tm.assert_series_equal(result, expected) + + +class TestToDatetimeMisc: + def test_to_datetime_barely_out_of_bounds(self): + # GH#19529 + # GH#19382 close enough to bounds that dropping nanos would result + # in an in-bounds datetime + arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object) + + msg = "^Out of bounds nanosecond timestamp: .*, at position 0" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime(arr) + + @pytest.mark.parametrize( + "arg, exp_str", + [ + ["2012-01-01 00:00:00", "2012-01-01 00:00:00"], + ["20121001", "2012-10-01"], # bad iso 8601 + ], + ) + def test_to_datetime_iso8601(self, cache, arg, exp_str): + result = to_datetime([arg], cache=cache) + exp = Timestamp(exp_str) + assert result[0] == exp + + @pytest.mark.parametrize( + "input, format", + [ + ("2012", "%Y-%m"), + ("2012-01", "%Y-%m-%d"), + ("2012-01-01", "%Y-%m-%d %H"), + ("2012-01-01 10", "%Y-%m-%d %H:%M"), + ("2012-01-01 10:00", "%Y-%m-%d %H:%M:%S"), + ("2012-01-01 10:00:00", "%Y-%m-%d %H:%M:%S.%f"), + ("2012-01-01 10:00:00.123", "%Y-%m-%d %H:%M:%S.%f%z"), + (0, "%Y-%m-%d"), + ], + ) + @pytest.mark.parametrize("exact", [True, False]) + def test_to_datetime_iso8601_fails(self, input, format, exact): + # https://github.com/pandas-dev/pandas/issues/12649 + # `format` is longer than the string, so this fails regardless of `exact` + with pytest.raises( + ValueError, + match=( + rf"time data \"{input}\" doesn't match format " + rf"\"{format}\", at position 0" + ), + ): + to_datetime(input, format=format, exact=exact) + + @pytest.mark.parametrize( + "input, format", + [ + ("2012-01-01", "%Y-%m"), + ("2012-01-01 10", "%Y-%m-%d"), + ("2012-01-01 10:00", "%Y-%m-%d %H"), + ("2012-01-01 10:00:00", "%Y-%m-%d %H:%M"), + (0, "%Y-%m-%d"), + ], + ) + def test_to_datetime_iso8601_exact_fails(self, input, format): + # https://github.com/pandas-dev/pandas/issues/12649 + # `format` is shorter than the date string, so only fails with `exact=True` + msg = "|".join( + [ + '^unconverted data remains when parsing with format ".*": ".*"' + f", at position 0. {PARSING_ERR_MSG}$", + f'^time data ".*" doesn\'t match format ".*", at position 0. 
' + f"{PARSING_ERR_MSG}$", + ] + ) + with pytest.raises( + ValueError, + match=(msg), + ): + to_datetime(input, format=format) + + @pytest.mark.parametrize( + "input, format", + [ + ("2012-01-01", "%Y-%m"), + ("2012-01-01 00", "%Y-%m-%d"), + ("2012-01-01 00:00", "%Y-%m-%d %H"), + ("2012-01-01 00:00:00", "%Y-%m-%d %H:%M"), + ], + ) + def test_to_datetime_iso8601_non_exact(self, input, format): + # https://github.com/pandas-dev/pandas/issues/12649 + expected = Timestamp(2012, 1, 1) + result = to_datetime(input, format=format, exact=False) + assert result == expected + + @pytest.mark.parametrize( + "input, format", + [ + ("2020-01", "%Y/%m"), + ("2020-01-01", "%Y/%m/%d"), + ("2020-01-01 00", "%Y/%m/%dT%H"), + ("2020-01-01T00", "%Y/%m/%d %H"), + ("2020-01-01 00:00", "%Y/%m/%dT%H:%M"), + ("2020-01-01T00:00", "%Y/%m/%d %H:%M"), + ("2020-01-01 00:00:00", "%Y/%m/%dT%H:%M:%S"), + ("2020-01-01T00:00:00", "%Y/%m/%d %H:%M:%S"), + ], + ) + def test_to_datetime_iso8601_separator(self, input, format): + # https://github.com/pandas-dev/pandas/issues/12649 + with pytest.raises( + ValueError, + match=( + rf"time data \"{input}\" doesn\'t match format " + rf"\"{format}\", at position 0" + ), + ): + to_datetime(input, format=format) + + @pytest.mark.parametrize( + "input, format", + [ + ("2020-01", "%Y-%m"), + ("2020-01-01", "%Y-%m-%d"), + ("2020-01-01 00", "%Y-%m-%d %H"), + ("2020-01-01T00", "%Y-%m-%dT%H"), + ("2020-01-01 00:00", "%Y-%m-%d %H:%M"), + ("2020-01-01T00:00", "%Y-%m-%dT%H:%M"), + ("2020-01-01 00:00:00", "%Y-%m-%d %H:%M:%S"), + ("2020-01-01T00:00:00", "%Y-%m-%dT%H:%M:%S"), + ("2020-01-01T00:00:00.000", "%Y-%m-%dT%H:%M:%S.%f"), + ("2020-01-01T00:00:00.000000", "%Y-%m-%dT%H:%M:%S.%f"), + ("2020-01-01T00:00:00.000000000", "%Y-%m-%dT%H:%M:%S.%f"), + ], + ) + def test_to_datetime_iso8601_valid(self, input, format): + # https://github.com/pandas-dev/pandas/issues/12649 + expected = Timestamp(2020, 1, 1) + result = to_datetime(input, format=format) + assert result == expected + + @pytest.mark.parametrize( + "input, format", + [ + ("2020-1", "%Y-%m"), + ("2020-1-1", "%Y-%m-%d"), + ("2020-1-1 0", "%Y-%m-%d %H"), + ("2020-1-1T0", "%Y-%m-%dT%H"), + ("2020-1-1 0:0", "%Y-%m-%d %H:%M"), + ("2020-1-1T0:0", "%Y-%m-%dT%H:%M"), + ("2020-1-1 0:0:0", "%Y-%m-%d %H:%M:%S"), + ("2020-1-1T0:0:0", "%Y-%m-%dT%H:%M:%S"), + ("2020-1-1T0:0:0.000", "%Y-%m-%dT%H:%M:%S.%f"), + ("2020-1-1T0:0:0.000000", "%Y-%m-%dT%H:%M:%S.%f"), + ("2020-1-1T0:0:0.000000000", "%Y-%m-%dT%H:%M:%S.%f"), + ], + ) + def test_to_datetime_iso8601_non_padded(self, input, format): + # https://github.com/pandas-dev/pandas/issues/21422 + expected = Timestamp(2020, 1, 1) + result = to_datetime(input, format=format) + assert result == expected + + @pytest.mark.parametrize( + "input, format", + [ + ("2020-01-01T00:00:00.000000000+00:00", "%Y-%m-%dT%H:%M:%S.%f%z"), + ("2020-01-01T00:00:00+00:00", "%Y-%m-%dT%H:%M:%S%z"), + ("2020-01-01T00:00:00Z", "%Y-%m-%dT%H:%M:%S%z"), + ], + ) + def test_to_datetime_iso8601_with_timezone_valid(self, input, format): + # https://github.com/pandas-dev/pandas/issues/12649 + expected = Timestamp(2020, 1, 1, tzinfo=pytz.UTC) + result = to_datetime(input, format=format) + assert result == expected + + def test_to_datetime_default(self, cache): + rs = to_datetime("2001", cache=cache) + xp = datetime(2001, 1, 1) + assert rs == xp + + @pytest.mark.xfail(reason="fails to enforce dayfirst=True, which would raise") + def test_to_datetime_respects_dayfirst(self, cache): + # dayfirst is essentially broken + + # The msg here is not important 
since it isn't actually raised yet. + msg = "Invalid date specified" + with pytest.raises(ValueError, match=msg): + # if dayfirst is respected, then this would parse as month=13, which + # would raise + with tm.assert_produces_warning(UserWarning, match="Provide format"): + to_datetime("01-13-2012", dayfirst=True, cache=cache) + + def test_to_datetime_on_datetime64_series(self, cache): + # #2699 + ser = Series(date_range("1/1/2000", periods=10)) + + result = to_datetime(ser, cache=cache) + assert result[0] == ser[0] + + def test_to_datetime_with_space_in_series(self, cache): + # GH 6428 + ser = Series(["10/18/2006", "10/18/2008", " "]) + msg = ( + r'^time data " " doesn\'t match format "%m/%d/%Y", ' + rf"at position 2. {PARSING_ERR_MSG}$" + ) + with pytest.raises(ValueError, match=msg): + to_datetime(ser, errors="raise", cache=cache) + result_coerce = to_datetime(ser, errors="coerce", cache=cache) + expected_coerce = Series([datetime(2006, 10, 18), datetime(2008, 10, 18), NaT]) + tm.assert_series_equal(result_coerce, expected_coerce) + result_ignore = to_datetime(ser, errors="ignore", cache=cache) + tm.assert_series_equal(result_ignore, ser) + + @td.skip_if_not_us_locale + def test_to_datetime_with_apply(self, cache): + # this test is only run with US/None locales + # GH 5195 + # with a format and coerce a single item to_datetime fails + td = Series(["May 04", "Jun 02", "Dec 11"], index=[1, 2, 3]) + expected = to_datetime(td, format="%b %y", cache=cache) + result = td.apply(to_datetime, format="%b %y", cache=cache) + tm.assert_series_equal(result, expected) + + def test_to_datetime_timezone_name(self): + # https://github.com/pandas-dev/pandas/issues/49748 + result = to_datetime("2020-01-01 00:00:00UTC", format="%Y-%m-%d %H:%M:%S%Z") + expected = Timestamp(2020, 1, 1).tz_localize("UTC") + assert result == expected + + @td.skip_if_not_us_locale + @pytest.mark.parametrize("errors", ["raise", "coerce", "ignore"]) + def test_to_datetime_with_apply_with_empty_str(self, cache, errors): + # this test is only run with US/None locales + # GH 5195, GH50251 + # with a format and coerce a single item to_datetime fails + td = Series(["May 04", "Jun 02", ""], index=[1, 2, 3]) + expected = to_datetime(td, format="%b %y", errors=errors, cache=cache) + + result = td.apply( + lambda x: to_datetime(x, format="%b %y", errors="coerce", cache=cache) + ) + tm.assert_series_equal(result, expected) + + def test_to_datetime_empty_str(self, cache): + # empty string + result = to_datetime("", cache=cache) + assert result is NaT + + def test_to_datetime_empty_str_list(self, cache): + result = to_datetime(["", ""], cache=cache) + assert isna(result).all() + + def test_to_datetime_zero(self, cache): + # ints + result = Timestamp(0) + expected = to_datetime(0, cache=cache) + assert result == expected + + def test_to_datetime_strings(self, cache): + # GH 3888 (strings) + expected = to_datetime(["2012"], cache=cache)[0] + result = to_datetime("2012", cache=cache) + assert result == expected + + def test_to_datetime_strings_variation(self, cache): + array = ["2012", "20120101", "20120101 12:01:01"] + expected = [to_datetime(dt_str, cache=cache) for dt_str in array] + result = [Timestamp(date_str) for date_str in array] + tm.assert_almost_equal(result, expected) + + @pytest.mark.parametrize("result", [Timestamp("2012"), to_datetime("2012")]) + def test_to_datetime_strings_vs_constructor(self, result): + expected = Timestamp(2012, 1, 1) + assert result == expected + + def test_to_datetime_unprocessable_input(self,
cache): + # GH 4928 + # GH 21864 + result = to_datetime([1, "1"], errors="ignore", cache=cache) + + expected = Index(np.array([1, "1"], dtype="O")) + tm.assert_equal(result, expected) + msg = '^Given date string "1" not likely a datetime, at position 1$' + with pytest.raises(ValueError, match=msg): + to_datetime([1, "1"], errors="raise", cache=cache) + + def test_to_datetime_unhashable_input(self, cache): + series = Series([["a"]] * 100) + result = to_datetime(series, errors="ignore", cache=cache) + tm.assert_series_equal(series, result) + + def test_to_datetime_other_datetime64_units(self): + # 5/25/2012 + scalar = np.int64(1337904000000000).view("M8[us]") + as_obj = scalar.astype("O") + + index = DatetimeIndex([scalar]) + assert index[0] == scalar.astype("O") + + value = Timestamp(scalar) + assert value == as_obj + + def test_to_datetime_list_of_integers(self): + rng = date_range("1/1/2000", periods=20) + rng = DatetimeIndex(rng.values) + + ints = list(rng.asi8) + + result = DatetimeIndex(ints) + + tm.assert_index_equal(rng, result) + + def test_to_datetime_overflow(self): + # gh-17637 + # we are overflowing Timedelta range here + msg = "Cannot cast 139999 days 00:00:00 to unit='ns' without overflow" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + date_range(start="1/1/1700", freq="B", periods=100000) + + def test_string_invalid_operation(self, cache): + invalid = np.array(["87156549591102612381000001219H5"], dtype=object) + # GH #51084 + + with pytest.raises(ValueError, match="Unknown datetime string format"): + to_datetime(invalid, errors="raise", cache=cache) + + def test_string_na_nat_conversion(self, cache): + # GH #999, #858 + + strings = np.array(["1/1/2000", "1/2/2000", np.nan, "1/4/2000"], dtype=object) + + expected = np.empty(4, dtype="M8[ns]") + for i, val in enumerate(strings): + if isna(val): + expected[i] = iNaT + else: + expected[i] = parse(val) + + result = tslib.array_to_datetime(strings)[0] + tm.assert_almost_equal(result, expected) + + result2 = to_datetime(strings, cache=cache) + assert isinstance(result2, DatetimeIndex) + tm.assert_numpy_array_equal(result, result2.values) + + def test_string_na_nat_conversion_malformed(self, cache): + malformed = np.array(["1/100/2000", np.nan], dtype=object) + + # GH 10636, default is now 'raise' + msg = r"Unknown datetime string format" + with pytest.raises(ValueError, match=msg): + to_datetime(malformed, errors="raise", cache=cache) + + result = to_datetime(malformed, errors="ignore", cache=cache) + # GH 21864 + expected = Index(malformed, dtype=object) + tm.assert_index_equal(result, expected) + + with pytest.raises(ValueError, match=msg): + to_datetime(malformed, errors="raise", cache=cache) + + def test_string_na_nat_conversion_with_name(self, cache): + idx = ["a", "b", "c", "d", "e"] + series = Series( + ["1/1/2000", np.nan, "1/3/2000", np.nan, "1/5/2000"], index=idx, name="foo" + ) + dseries = Series( + [ + to_datetime("1/1/2000", cache=cache), + np.nan, + to_datetime("1/3/2000", cache=cache), + np.nan, + to_datetime("1/5/2000", cache=cache), + ], + index=idx, + name="foo", + ) + + result = to_datetime(series, cache=cache) + dresult = to_datetime(dseries, cache=cache) + + expected = Series(np.empty(5, dtype="M8[ns]"), index=idx) + for i in range(5): + x = series.iloc[i] + if isna(x): + expected.iloc[i] = NaT + else: + expected.iloc[i] = to_datetime(x, cache=cache) + + tm.assert_series_equal(result, expected, check_names=False) + assert result.name == "foo" + + tm.assert_series_equal(dresult, expected, 
check_names=False) + assert dresult.name == "foo" + + @pytest.mark.parametrize( + "unit", + ["h", "m", "s", "ms", "us", "ns"], + ) + def test_dti_constructor_numpy_timeunits(self, cache, unit): + # GH 9114 + dtype = np.dtype(f"M8[{unit}]") + base = to_datetime(["2000-01-01T00:00", "2000-01-02T00:00", "NaT"], cache=cache) + + values = base.values.astype(dtype) + + if unit in ["h", "m"]: + # we cast to closest supported unit + unit = "s" + exp_dtype = np.dtype(f"M8[{unit}]") + expected = DatetimeIndex(base.astype(exp_dtype)) + assert expected.dtype == exp_dtype + + tm.assert_index_equal(DatetimeIndex(values), expected) + tm.assert_index_equal(to_datetime(values, cache=cache), expected) + + def test_dayfirst(self, cache): + # GH 5917 + arr = ["10/02/2014", "11/02/2014", "12/02/2014"] + expected = DatetimeIndex( + [datetime(2014, 2, 10), datetime(2014, 2, 11), datetime(2014, 2, 12)] + ) + idx1 = DatetimeIndex(arr, dayfirst=True) + idx2 = DatetimeIndex(np.array(arr), dayfirst=True) + idx3 = to_datetime(arr, dayfirst=True, cache=cache) + idx4 = to_datetime(np.array(arr), dayfirst=True, cache=cache) + idx5 = DatetimeIndex(Index(arr), dayfirst=True) + idx6 = DatetimeIndex(Series(arr), dayfirst=True) + tm.assert_index_equal(expected, idx1) + tm.assert_index_equal(expected, idx2) + tm.assert_index_equal(expected, idx3) + tm.assert_index_equal(expected, idx4) + tm.assert_index_equal(expected, idx5) + tm.assert_index_equal(expected, idx6) + + def test_dayfirst_warnings_valid_input(self): + # GH 12585 + warning_msg = ( + "Parsing dates in .* format when dayfirst=.* was specified. " + "Pass `dayfirst=.*` or specify a format to silence this warning." + ) + + # CASE 1: valid input + arr = ["31/12/2014", "10/03/2011"] + expected = DatetimeIndex( + ["2014-12-31", "2011-03-10"], dtype="datetime64[ns]", freq=None + ) + + # A. dayfirst arg correct, no warning + res1 = to_datetime(arr, dayfirst=True) + tm.assert_index_equal(expected, res1) + + # B. dayfirst arg incorrect, warning + with tm.assert_produces_warning(UserWarning, match=warning_msg): + res2 = to_datetime(arr, dayfirst=False) + tm.assert_index_equal(expected, res2) + + def test_dayfirst_warnings_invalid_input(self): + # CASE 2: invalid input + # cannot consistently process with single format + # ValueError *always* raised + + # first in DD/MM/YYYY, second in MM/DD/YYYY + arr = ["31/12/2014", "03/30/2011"] + + with pytest.raises( + ValueError, + match=( + r'^time data "03/30/2011" doesn\'t match format ' + rf'"%d/%m/%Y", at position 1. 
{PARSING_ERR_MSG}$' + ), + ): + to_datetime(arr, dayfirst=True) + + @pytest.mark.parametrize("klass", [DatetimeIndex, DatetimeArray._from_sequence]) + def test_to_datetime_dta_tz(self, klass): + # GH#27733 + dti = date_range("2015-04-05", periods=3).rename("foo") + expected = dti.tz_localize("UTC") + + obj = klass(dti) + expected = klass(expected) + + result = to_datetime(obj, utc=True) + tm.assert_equal(result, expected) + + +class TestGuessDatetimeFormat: + @pytest.mark.parametrize( + "test_list", + [ + [ + "2011-12-30 00:00:00.000000", + "2011-12-30 00:00:00.000000", + "2011-12-30 00:00:00.000000", + ], + [np.nan, np.nan, "2011-12-30 00:00:00.000000"], + ["", "2011-12-30 00:00:00.000000"], + ["NaT", "2011-12-30 00:00:00.000000"], + ["2011-12-30 00:00:00.000000", "random_string"], + ["now", "2011-12-30 00:00:00.000000"], + ["today", "2011-12-30 00:00:00.000000"], + ], + ) + def test_guess_datetime_format_for_array(self, test_list): + expected_format = "%Y-%m-%d %H:%M:%S.%f" + test_array = np.array(test_list, dtype=object) + assert tools._guess_datetime_format_for_array(test_array) == expected_format + + @td.skip_if_not_us_locale + def test_guess_datetime_format_for_array_all_nans(self): + format_for_string_of_nans = tools._guess_datetime_format_for_array( + np.array([np.nan, np.nan, np.nan], dtype="O") + ) + assert format_for_string_of_nans is None + + +class TestToDatetimeInferFormat: + @pytest.mark.parametrize( + "test_format", ["%m-%d-%Y", "%m/%d/%Y %H:%M:%S.%f", "%Y-%m-%dT%H:%M:%S.%f"] + ) + def test_to_datetime_infer_datetime_format_consistent_format( + self, cache, test_format + ): + ser = Series(date_range("20000101", periods=50, freq="h")) + + s_as_dt_strings = ser.apply(lambda x: x.strftime(test_format)) + + with_format = to_datetime(s_as_dt_strings, format=test_format, cache=cache) + without_format = to_datetime(s_as_dt_strings, cache=cache) + + # Whether the format is explicitly passed, or + # it is inferred, the results should all be the same + tm.assert_series_equal(with_format, without_format) + + def test_to_datetime_inconsistent_format(self, cache): + data = ["01/01/2011 00:00:00", "01-02-2011 00:00:00", "2011-01-03T00:00:00"] + ser = Series(np.array(data)) + msg = ( + r'^time data "01-02-2011 00:00:00" doesn\'t match format ' + rf'"%m/%d/%Y %H:%M:%S", at position 1. 
{PARSING_ERR_MSG}$' + ) + with pytest.raises(ValueError, match=msg): + to_datetime(ser, cache=cache) + + def test_to_datetime_consistent_format(self, cache): + data = ["Jan/01/2011", "Feb/01/2011", "Mar/01/2011"] + ser = Series(np.array(data)) + result = to_datetime(ser, cache=cache) + expected = Series( + ["2011-01-01", "2011-02-01", "2011-03-01"], dtype="datetime64[ns]" + ) + tm.assert_series_equal(result, expected) + + def test_to_datetime_series_with_nans(self, cache): + ser = Series( + np.array( + ["01/01/2011 00:00:00", np.nan, "01/03/2011 00:00:00", np.nan], + dtype=object, + ) + ) + result = to_datetime(ser, cache=cache) + expected = Series( + ["2011-01-01", NaT, "2011-01-03", NaT], dtype="datetime64[ns]" + ) + tm.assert_series_equal(result, expected) + + def test_to_datetime_series_start_with_nans(self, cache): + ser = Series( + np.array( + [ + np.nan, + np.nan, + "01/01/2011 00:00:00", + "01/02/2011 00:00:00", + "01/03/2011 00:00:00", + ], + dtype=object, + ) + ) + + result = to_datetime(ser, cache=cache) + expected = Series( + [NaT, NaT, "2011-01-01", "2011-01-02", "2011-01-03"], dtype="datetime64[ns]" + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "tz_name, offset", + [("UTC", 0), ("UTC-3", 180), ("UTC+3", -180)], + ) + def test_infer_datetime_format_tz_name(self, tz_name, offset): + # GH 33133 + ser = Series([f"2019-02-02 08:07:13 {tz_name}"]) + result = to_datetime(ser) + tz = timezone(timedelta(minutes=offset)) + expected = Series([Timestamp("2019-02-02 08:07:13").tz_localize(tz)]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "ts,zero_tz", + [ + ("2019-02-02 08:07:13", "Z"), + ("2019-02-02 08:07:13", ""), + ("2019-02-02 08:07:13.012345", "Z"), + ("2019-02-02 08:07:13.012345", ""), + ], + ) + def test_infer_datetime_format_zero_tz(self, ts, zero_tz): + # GH 41047 + ser = Series([ts + zero_tz]) + result = to_datetime(ser) + tz = pytz.utc if zero_tz == "Z" else None + expected = Series([Timestamp(ts, tz=tz)]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("format", [None, "%Y-%m-%d"]) + def test_to_datetime_iso8601_noleading_0s(self, cache, format): + # GH 11871 + ser = Series(["2014-1-1", "2014-2-2", "2015-3-3"]) + expected = Series( + [ + Timestamp("2014-01-01"), + Timestamp("2014-02-02"), + Timestamp("2015-03-03"), + ] + ) + result = to_datetime(ser, format=format, cache=cache) + tm.assert_series_equal(result, expected) + + def test_parse_dates_infer_datetime_format_warning(self): + # GH 49024 + with tm.assert_produces_warning( + UserWarning, + match="The argument 'infer_datetime_format' is deprecated", + ): + to_datetime(["10-10-2000"], infer_datetime_format=True) + + +class TestDaysInMonth: + # tests for issue #10154 + + @pytest.mark.parametrize( + "arg, format", + [ + ["2015-02-29", None], + ["2015-02-29", "%Y-%m-%d"], + ["2015-02-32", "%Y-%m-%d"], + ["2015-04-31", "%Y-%m-%d"], + ], + ) + def test_day_not_in_month_coerce(self, cache, arg, format): + assert isna(to_datetime(arg, errors="coerce", format=format, cache=cache)) + + def test_day_not_in_month_raise(self, cache): + msg = "day is out of range for month: 2015-02-29, at position 0" + with pytest.raises(ValueError, match=msg): + to_datetime("2015-02-29", errors="raise", cache=cache) + + @pytest.mark.parametrize( + "arg, format, msg", + [ + ( + "2015-02-29", + "%Y-%m-%d", + f"^day is out of range for month, at position 0. {PARSING_ERR_MSG}$", + ), + ( + "2015-29-02", + "%Y-%d-%m", + f"^day is out of range for month, at position 0. 
{PARSING_ERR_MSG}$", + ), + ( + "2015-02-32", + "%Y-%m-%d", + '^unconverted data remains when parsing with format "%Y-%m-%d": "2", ' + f"at position 0. {PARSING_ERR_MSG}$", + ), + ( + "2015-32-02", + "%Y-%d-%m", + '^time data "2015-32-02" doesn\'t match format "%Y-%d-%m", ' + f"at position 0. {PARSING_ERR_MSG}$", + ), + ( + "2015-04-31", + "%Y-%m-%d", + f"^day is out of range for month, at position 0. {PARSING_ERR_MSG}$", + ), + ( + "2015-31-04", + "%Y-%d-%m", + f"^day is out of range for month, at position 0. {PARSING_ERR_MSG}$", + ), + ], + ) + def test_day_not_in_month_raise_value(self, cache, arg, format, msg): + # https://github.com/pandas-dev/pandas/issues/50462 + with pytest.raises(ValueError, match=msg): + to_datetime(arg, errors="raise", format=format, cache=cache) + + @pytest.mark.parametrize( + "expected, format", + [ + ["2015-02-29", None], + ["2015-02-29", "%Y-%m-%d"], + ["2015-02-29", "%Y-%m-%d"], + ["2015-04-31", "%Y-%m-%d"], + ], + ) + def test_day_not_in_month_ignore(self, cache, expected, format): + result = to_datetime(expected, errors="ignore", format=format, cache=cache) + assert result == expected + + +class TestDatetimeParsingWrappers: + @pytest.mark.parametrize( + "date_str, expected", + [ + ("2011-01-01", datetime(2011, 1, 1)), + ("2Q2005", datetime(2005, 4, 1)), + ("2Q05", datetime(2005, 4, 1)), + ("2005Q1", datetime(2005, 1, 1)), + ("05Q1", datetime(2005, 1, 1)), + ("2011Q3", datetime(2011, 7, 1)), + ("11Q3", datetime(2011, 7, 1)), + ("3Q2011", datetime(2011, 7, 1)), + ("3Q11", datetime(2011, 7, 1)), + # quarterly without space + ("2000Q4", datetime(2000, 10, 1)), + ("00Q4", datetime(2000, 10, 1)), + ("4Q2000", datetime(2000, 10, 1)), + ("4Q00", datetime(2000, 10, 1)), + ("2000q4", datetime(2000, 10, 1)), + ("2000-Q4", datetime(2000, 10, 1)), + ("00-Q4", datetime(2000, 10, 1)), + ("4Q-2000", datetime(2000, 10, 1)), + ("4Q-00", datetime(2000, 10, 1)), + ("00q4", datetime(2000, 10, 1)), + ("2005", datetime(2005, 1, 1)), + ("2005-11", datetime(2005, 11, 1)), + ("2005 11", datetime(2005, 11, 1)), + ("11-2005", datetime(2005, 11, 1)), + ("11 2005", datetime(2005, 11, 1)), + ("200511", datetime(2020, 5, 11)), + ("20051109", datetime(2005, 11, 9)), + ("20051109 10:15", datetime(2005, 11, 9, 10, 15)), + ("20051109 08H", datetime(2005, 11, 9, 8, 0)), + ("2005-11-09 10:15", datetime(2005, 11, 9, 10, 15)), + ("2005-11-09 08H", datetime(2005, 11, 9, 8, 0)), + ("2005/11/09 10:15", datetime(2005, 11, 9, 10, 15)), + ("2005/11/09 10:15:32", datetime(2005, 11, 9, 10, 15, 32)), + ("2005/11/09 10:15:32 AM", datetime(2005, 11, 9, 10, 15, 32)), + ("2005/11/09 10:15:32 PM", datetime(2005, 11, 9, 22, 15, 32)), + ("2005/11/09 08H", datetime(2005, 11, 9, 8, 0)), + ("Thu Sep 25 10:36:28 2003", datetime(2003, 9, 25, 10, 36, 28)), + ("Thu Sep 25 2003", datetime(2003, 9, 25)), + ("Sep 25 2003", datetime(2003, 9, 25)), + ("January 1 2014", datetime(2014, 1, 1)), + # GH#10537 + ("2014-06", datetime(2014, 6, 1)), + ("06-2014", datetime(2014, 6, 1)), + ("2014-6", datetime(2014, 6, 1)), + ("6-2014", datetime(2014, 6, 1)), + ("20010101 12", datetime(2001, 1, 1, 12)), + ("20010101 1234", datetime(2001, 1, 1, 12, 34)), + ("20010101 123456", datetime(2001, 1, 1, 12, 34, 56)), + ], + ) + def test_parsers(self, date_str, expected, cache): + # dateutil >= 2.5.0 defaults to yearfirst=True + # https://github.com/dateutil/dateutil/issues/217 + yearfirst = True + + result1, _ = parsing.parse_datetime_string_with_reso( + date_str, yearfirst=yearfirst + ) + result2 = to_datetime(date_str, yearfirst=yearfirst) 
+ result3 = to_datetime([date_str], yearfirst=yearfirst) + # result5 is used below + result4 = to_datetime( + np.array([date_str], dtype=object), yearfirst=yearfirst, cache=cache + ) + result6 = DatetimeIndex([date_str], yearfirst=yearfirst) + # result7 is used below + result8 = DatetimeIndex(Index([date_str]), yearfirst=yearfirst) + result9 = DatetimeIndex(Series([date_str]), yearfirst=yearfirst) + + for res in [result1, result2]: + assert res == expected + for res in [result3, result4, result6, result8, result9]: + exp = DatetimeIndex([Timestamp(expected)]) + tm.assert_index_equal(res, exp) + + # these really need to have yearfirst, but we don't support it here + if not yearfirst: + result5 = Timestamp(date_str) + assert result5 == expected + result7 = date_range(date_str, freq="S", periods=1, yearfirst=yearfirst) + assert result7 == expected + + def test_na_values_with_cache( + self, cache, unique_nulls_fixture, unique_nulls_fixture2 + ): + # GH22305 + expected = Index([NaT, NaT], dtype="datetime64[ns]") + result = to_datetime([unique_nulls_fixture, unique_nulls_fixture2], cache=cache) + tm.assert_index_equal(result, expected) + + def test_parsers_nat(self): + # Test that each of several string-accepting methods returns pd.NaT + result1, _ = parsing.parse_datetime_string_with_reso("NaT") + result2 = to_datetime("NaT") + result3 = Timestamp("NaT") + result4 = DatetimeIndex(["NaT"])[0] + assert result1 is NaT + assert result2 is NaT + assert result3 is NaT + assert result4 is NaT + + @pytest.mark.parametrize( + "date_str, dayfirst, yearfirst, expected", + [ + ("10-11-12", False, False, datetime(2012, 10, 11)), + ("10-11-12", True, False, datetime(2012, 11, 10)), + ("10-11-12", False, True, datetime(2010, 11, 12)), + ("10-11-12", True, True, datetime(2010, 12, 11)), + ("20/12/21", False, False, datetime(2021, 12, 20)), + ("20/12/21", True, False, datetime(2021, 12, 20)), + ("20/12/21", False, True, datetime(2020, 12, 21)), + ("20/12/21", True, True, datetime(2020, 12, 21)), + ], + ) + def test_parsers_dayfirst_yearfirst( + self, cache, date_str, dayfirst, yearfirst, expected + ): + # OK + # 2.5.1 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00 + # 2.5.2 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00 + # 2.5.3 10-11-12 [dayfirst=0, yearfirst=0] -> 2012-10-11 00:00:00 + + # OK + # 2.5.1 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00 + # 2.5.2 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00 + # 2.5.3 10-11-12 [dayfirst=0, yearfirst=1] -> 2010-11-12 00:00:00 + + # bug fix in 2.5.2 + # 2.5.1 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-11-12 00:00:00 + # 2.5.2 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00 + # 2.5.3 10-11-12 [dayfirst=1, yearfirst=1] -> 2010-12-11 00:00:00 + + # OK + # 2.5.1 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00 + # 2.5.2 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00 + # 2.5.3 10-11-12 [dayfirst=1, yearfirst=0] -> 2012-11-10 00:00:00 + + # OK + # 2.5.1 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00 + # 2.5.2 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00 + # 2.5.3 20/12/21 [dayfirst=0, yearfirst=0] -> 2021-12-20 00:00:00 + + # OK + # 2.5.1 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00 + # 2.5.2 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00 + # 2.5.3 20/12/21 [dayfirst=0, yearfirst=1] -> 2020-12-21 00:00:00 + + # revert of bug in 2.5.2 + # 2.5.1 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00 + # 2.5.2 20/12/21 [dayfirst=1, yearfirst=1] -> month must
be in 1..12 + # 2.5.3 20/12/21 [dayfirst=1, yearfirst=1] -> 2020-12-21 00:00:00 + + # OK + # 2.5.1 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00 + # 2.5.2 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00 + # 2.5.3 20/12/21 [dayfirst=1, yearfirst=0] -> 2021-12-20 00:00:00 + + # compare with dateutil result + dateutil_result = parse(date_str, dayfirst=dayfirst, yearfirst=yearfirst) + assert dateutil_result == expected + + result1, _ = parsing.parse_datetime_string_with_reso( + date_str, dayfirst=dayfirst, yearfirst=yearfirst + ) + + # Timestamp does not support dayfirst/yearfirst, so only check it in + # the default case: + if not dayfirst and not yearfirst: + result2 = Timestamp(date_str) + assert result2 == expected + + result3 = to_datetime( + date_str, dayfirst=dayfirst, yearfirst=yearfirst, cache=cache + ) + + result4 = DatetimeIndex([date_str], dayfirst=dayfirst, yearfirst=yearfirst)[0] + + assert result1 == expected + assert result3 == expected + assert result4 == expected + + @pytest.mark.parametrize( + "date_str, exp_def", + [["10:15", datetime(1, 1, 1, 10, 15)], ["9:05", datetime(1, 1, 1, 9, 5)]], + ) + def test_parsers_timestring(self, date_str, exp_def): + # must be the same as dateutil result + exp_now = parse(date_str) + + result1, _ = parsing.parse_datetime_string_with_reso(date_str) + result2 = to_datetime(date_str) + result3 = to_datetime([date_str]) + result4 = Timestamp(date_str) + result5 = DatetimeIndex([date_str])[0] + # parse_datetime_string_with_reso parses a bare time against the default + # date (0001-01-01); the others use the current date, which can't be + # changed because time series plotting relies on it + assert result1 == exp_def + assert result2 == exp_now + assert result3 == exp_now + assert result4 == exp_now + assert result5 == exp_now + + @pytest.mark.parametrize( + "dt_string, tz, dt_string_repr", + [ + ( + "2013-01-01 05:45+0545", + timezone(timedelta(minutes=345)), + "Timestamp('2013-01-01 05:45:00+0545', tz='UTC+05:45')", + ), + ( + "2013-01-01 05:30+0530", + timezone(timedelta(minutes=330)), + "Timestamp('2013-01-01 05:30:00+0530', tz='UTC+05:30')", + ), + ], + ) + def test_parsers_timezone_minute_offsets_roundtrip( + self, cache, dt_string, tz, dt_string_repr + ): + # GH11708 + base = to_datetime("2013-01-01 00:00:00", cache=cache) + base = base.tz_localize("UTC").tz_convert(tz) + dt_time = to_datetime(dt_string, cache=cache) + assert base == dt_time + assert dt_string_repr == repr(dt_time) + + +@pytest.fixture(params=["D", "s", "ms", "us", "ns"]) +def units(request): + """Day and some time units. + + * D + * s + * ms + * us + * ns + """ + return request.param + + +@pytest.fixture +def epoch_1960(): + """Timestamp at 1960-01-01.""" + return Timestamp("1960-01-01") + + +@pytest.fixture +def units_from_epochs(): + return list(range(5)) + + +@pytest.fixture(params=["timestamp", "pydatetime", "datetime64", "str_1960"]) +def epochs(epoch_1960, request): + """Timestamp at 1960-01-01 in various forms.
+ + * Timestamp + * datetime.datetime + * numpy.datetime64 + * str + """ + assert request.param in {"timestamp", "pydatetime", "datetime64", "str_1960"} + if request.param == "timestamp": + return epoch_1960 + elif request.param == "pydatetime": + return epoch_1960.to_pydatetime() + elif request.param == "datetime64": + return epoch_1960.to_datetime64() + else: + return str(epoch_1960) + + +@pytest.fixture +def julian_dates(): + return date_range("2014-1-1", periods=10).to_julian_date().values + + +class TestOrigin: + def test_origin_and_unit(self): + # GH#42624 + ts = to_datetime(1, unit="s", origin=1) + expected = Timestamp("1970-01-01 00:00:02") + assert ts == expected + + ts = to_datetime(1, unit="s", origin=1_000_000_000) + expected = Timestamp("2001-09-09 01:46:41") + assert ts == expected + + def test_julian(self, julian_dates): + # gh-11276, gh-11745 + # for origin as julian + + result = Series(to_datetime(julian_dates, unit="D", origin="julian")) + expected = Series( + to_datetime(julian_dates - Timestamp(0).to_julian_date(), unit="D") + ) + tm.assert_series_equal(result, expected) + + def test_unix(self): + result = Series(to_datetime([0, 1, 2], unit="D", origin="unix")) + expected = Series( + [Timestamp("1970-01-01"), Timestamp("1970-01-02"), Timestamp("1970-01-03")], + dtype="M8[ns]", + ) + tm.assert_series_equal(result, expected) + + def test_julian_round_trip(self): + result = to_datetime(2456658, origin="julian", unit="D") + assert result.to_julian_date() == 2456658 + + # out-of-bounds + msg = "1 is Out of Bounds for origin='julian'" + with pytest.raises(ValueError, match=msg): + to_datetime(1, origin="julian", unit="D") + + def test_invalid_unit(self, units, julian_dates): + # checking for invalid combination of origin='julian' and unit != D + if units != "D": + msg = "unit must be 'D' for origin='julian'" + with pytest.raises(ValueError, match=msg): + to_datetime(julian_dates, unit=units, origin="julian") + + @pytest.mark.parametrize("unit", ["ns", "D"]) + def test_invalid_origin(self, unit): + # need to have a numeric specified + msg = "it must be numeric with a unit specified" + with pytest.raises(ValueError, match=msg): + to_datetime("2005-01-01", origin="1960-01-01", unit=unit) + + def test_epoch(self, units, epochs, epoch_1960, units_from_epochs): + expected = Series( + [pd.Timedelta(x, unit=units) + epoch_1960 for x in units_from_epochs] + ) + + result = Series(to_datetime(units_from_epochs, unit=units, origin=epochs)) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "origin, exc", + [ + ("random_string", ValueError), + ("epoch", ValueError), + ("13-24-1990", ValueError), + (datetime(1, 1, 1), OutOfBoundsDatetime), + ], + ) + def test_invalid_origins(self, origin, exc, units, units_from_epochs): + msg = "|".join( + [ + f"origin {origin} is Out of Bounds", + f"origin {origin} cannot be converted to a Timestamp", + "Cannot cast .* to unit='ns' without overflow", + ] + ) + with pytest.raises(exc, match=msg): + to_datetime(units_from_epochs, unit=units, origin=origin) + + def test_invalid_origins_tzinfo(self): + # GH16842 + with pytest.raises(ValueError, match="must be tz-naive"): + to_datetime(1, unit="D", origin=datetime(2000, 1, 1, tzinfo=pytz.utc)) + + def test_incorrect_value_exception(self): + # GH47495 + msg = ( + "Unknown datetime string format, unable to parse: yesterday, at position 1" + ) + with pytest.raises(ValueError, match=msg): + to_datetime(["today", "yesterday"]) + + @pytest.mark.parametrize( + "format, warning", + [ + (None, 
UserWarning), + ("%Y-%m-%d %H:%M:%S", None), + ("%Y-%d-%m %H:%M:%S", None), + ], + ) + def test_to_datetime_out_of_bounds_with_format_arg(self, format, warning): + # see gh-23830 + msg = r"^Out of bounds nanosecond timestamp: 2417-10-10 00:00:00, at position 0" + with pytest.raises(OutOfBoundsDatetime, match=msg): + to_datetime("2417-10-10 00:00:00", format=format) + + @pytest.mark.parametrize( + "arg, origin, expected_str", + [ + [200 * 365, "unix", "2169-11-13 00:00:00"], + [200 * 365, "1870-01-01", "2069-11-13 00:00:00"], + [300 * 365, "1870-01-01", "2169-10-20 00:00:00"], + ], + ) + def test_processing_order(self, arg, origin, expected_str): + # make sure we handle out-of-bounds *before* + # constructing the dates + + result = to_datetime(arg, unit="D", origin=origin) + expected = Timestamp(expected_str) + assert result == expected + + result = to_datetime(200 * 365, unit="D", origin="1870-01-01") + expected = Timestamp("2069-11-13 00:00:00") + assert result == expected + + result = to_datetime(300 * 365, unit="D", origin="1870-01-01") + expected = Timestamp("2169-10-20 00:00:00") + assert result == expected + + @pytest.mark.parametrize( + "offset,utc,exp", + [ + ["Z", True, "2019-01-01T00:00:00.000Z"], + ["Z", None, "2019-01-01T00:00:00.000Z"], + ["-01:00", True, "2019-01-01T01:00:00.000Z"], + ["-01:00", None, "2019-01-01T00:00:00.000-01:00"], + ], + ) + def test_arg_tz_ns_unit(self, offset, utc, exp): + # GH 25546 + arg = "2019-01-01T00:00:00.000" + offset + result = to_datetime([arg], unit="ns", utc=utc) + expected = to_datetime([exp]).as_unit("ns") + tm.assert_index_equal(result, expected) + + +class TestShouldCache: + @pytest.mark.parametrize( + "listlike,do_caching", + [ + ([1, 2, 3, 4, 5, 6, 7, 8, 9, 0], False), + ([1, 1, 1, 1, 4, 5, 6, 7, 8, 9], True), + ], + ) + def test_should_cache(self, listlike, do_caching): + assert ( + tools.should_cache(listlike, check_count=len(listlike), unique_share=0.7) + == do_caching + ) + + @pytest.mark.parametrize( + "unique_share,check_count, err_message", + [ + (0.5, 11, r"check_count must be in next bounds: \[0; len\(arg\)\]"), + (10, 2, r"unique_share must be in next bounds: \(0; 1\)"), + ], + ) + def test_should_cache_errors(self, unique_share, check_count, err_message): + arg = [5] * 10 + + with pytest.raises(AssertionError, match=err_message): + tools.should_cache(arg, unique_share, check_count) + + @pytest.mark.parametrize( + "listlike", + [ + (deque([Timestamp("2010-06-02 09:30:00")] * 51)), + ([Timestamp("2010-06-02 09:30:00")] * 51), + (tuple([Timestamp("2010-06-02 09:30:00")] * 51)), + ], + ) + def test_no_slicing_errors_in_should_cache(self, listlike): + # GH#29403 + assert tools.should_cache(listlike) is True + + +def test_nullable_integer_to_datetime(): + # Test for #30050 + ser = Series([1, 2, None, 2**61, None]) + ser = ser.astype("Int64") + ser_copy = ser.copy() + + res = to_datetime(ser, unit="ns") + + expected = Series( + [ + np.datetime64("1970-01-01 00:00:00.000000001"), + np.datetime64("1970-01-01 00:00:00.000000002"), + np.datetime64("NaT"), + np.datetime64("2043-01-25 23:56:49.213693952"), + np.datetime64("NaT"), + ] + ) + tm.assert_series_equal(res, expected) + # Check that ser isn't mutated + tm.assert_series_equal(ser, ser_copy) + + +@pytest.mark.parametrize("klass", [np.array, list]) +def test_na_to_datetime(nulls_fixture, klass): + if isinstance(nulls_fixture, Decimal): + with pytest.raises(TypeError, match="not convertible to datetime"): + to_datetime(klass([nulls_fixture])) + + else: + result = 
to_datetime(klass([nulls_fixture])) + + assert result[0] is NaT + + +@pytest.mark.parametrize("errors", ["raise", "coerce", "ignore"]) +@pytest.mark.parametrize( + "args, format", + [ + (["03/24/2016", "03/25/2016", ""], "%m/%d/%Y"), + (["2016-03-24", "2016-03-25", ""], "%Y-%m-%d"), + ], + ids=["non-ISO8601", "ISO8601"], +) +def test_empty_string_datetime(errors, args, format): + # GH13044, GH50251 + td = Series(args) + + # coerce empty string to pd.NaT + result = to_datetime(td, format=format, errors=errors) + expected = Series(["2016-03-24", "2016-03-25", NaT], dtype="datetime64[ns]") + tm.assert_series_equal(expected, result) + + +def test_empty_string_datetime_coerce__unit(): + # GH13044 + # coerce empty string to pd.NaT + result = to_datetime([1, ""], unit="s", errors="coerce") + expected = DatetimeIndex(["1970-01-01 00:00:01", "NaT"], dtype="datetime64[ns]") + tm.assert_index_equal(expected, result) + + # verify that no exception is raised even when errors='raise' is set + result = to_datetime([1, ""], unit="s", errors="raise") + tm.assert_index_equal(expected, result) + + +@pytest.mark.parametrize("cache", [True, False]) +def test_to_datetime_monotonic_increasing_index(cache): + # GH28238 + cstart = start_caching_at + times = date_range(Timestamp("1980"), periods=cstart, freq="YS") + times = times.to_frame(index=False, name="DT").sample(n=cstart, random_state=1) + times.index = times.index.to_series().astype(float) / 1000 + result = to_datetime(times.iloc[:, 0], cache=cache) + expected = times.iloc[:, 0] + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "series_length", + [40, start_caching_at, (start_caching_at + 1), (start_caching_at + 5)], +) +def test_to_datetime_cache_coerce_50_lines_outofbounds(series_length): + # GH#45319 + ser = Series( + [datetime.fromisoformat("1446-04-12 00:00:00+00:00")] + + ([datetime.fromisoformat("1991-10-20 00:00:00+00:00")] * series_length), + dtype=object, + ) + result1 = to_datetime(ser, errors="coerce", utc=True) + + expected1 = Series( + [NaT] + ([Timestamp("1991-10-20 00:00:00+00:00")] * series_length) + ) + + tm.assert_series_equal(result1, expected1) + + result2 = to_datetime(ser, errors="ignore", utc=True) + + expected2 = Series( + [datetime.fromisoformat("1446-04-12 00:00:00+00:00")] + + ([datetime.fromisoformat("1991-10-20 00:00:00+00:00")] * series_length) + ) + + tm.assert_series_equal(result2, expected2) + + with pytest.raises(OutOfBoundsDatetime, match="Out of bounds nanosecond timestamp"): + to_datetime(ser, errors="raise", utc=True) + + +def test_to_datetime_format_f_parse_nanos(): + # GH 48767 + timestamp = "15/02/2020 02:03:04.123456789" + timestamp_format = "%d/%m/%Y %H:%M:%S.%f" + result = to_datetime(timestamp, format=timestamp_format) + expected = Timestamp( + year=2020, + month=2, + day=15, + hour=2, + minute=3, + second=4, + microsecond=123456, + nanosecond=789, + ) + assert result == expected + + +def test_to_datetime_mixed_iso8601(): + # https://github.com/pandas-dev/pandas/issues/50411 + result = to_datetime(["2020-01-01", "2020-01-01 05:00:00"], format="ISO8601") + expected = DatetimeIndex(["2020-01-01 00:00:00", "2020-01-01 05:00:00"]) + tm.assert_index_equal(result, expected) + + +def test_to_datetime_mixed_other(): + # https://github.com/pandas-dev/pandas/issues/50411 + result = to_datetime(["01/11/2000", "12 January 2000"], format="mixed") + expected = DatetimeIndex(["2000-01-11", "2000-01-12"]) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("exact", [True, False]) 
+@pytest.mark.parametrize("format", ["ISO8601", "mixed"]) +def test_to_datetime_mixed_or_iso_exact(exact, format): + msg = "Cannot use 'exact' when 'format' is 'mixed' or 'ISO8601'" + with pytest.raises(ValueError, match=msg): + to_datetime(["2020-01-01"], exact=exact, format=format) + + +def test_to_datetime_mixed_not_necessarily_iso8601_raise(): + # https://github.com/pandas-dev/pandas/issues/50411 + with pytest.raises( + ValueError, match="Time data 01-01-2000 is not ISO8601 format, at position 1" + ): + to_datetime(["2020-01-01", "01-01-2000"], format="ISO8601") + + +@pytest.mark.parametrize( + ("errors", "expected"), + [ + ("coerce", DatetimeIndex(["2020-01-01 00:00:00", NaT])), + ("ignore", Index(["2020-01-01", "01-01-2000"], dtype=object)), + ], +) +def test_to_datetime_mixed_not_necessarily_iso8601_coerce(errors, expected): + # https://github.com/pandas-dev/pandas/issues/50411 + result = to_datetime(["2020-01-01", "01-01-2000"], format="ISO8601", errors=errors) + tm.assert_index_equal(result, expected) + + +def test_ignoring_unknown_tz_deprecated(): + # GH#18702, GH#51476 + dtstr = "2014 Jan 9 05:15 FAKE" + msg = 'un-recognized timezone "FAKE". Dropping unrecognized timezones is deprecated' + with tm.assert_produces_warning(FutureWarning, match=msg): + res = Timestamp(dtstr) + assert res == Timestamp(dtstr[:-5]) + + with tm.assert_produces_warning(FutureWarning): + res = to_datetime(dtstr) + assert res == to_datetime(dtstr[:-5]) + with tm.assert_produces_warning(FutureWarning): + res = to_datetime([dtstr]) + tm.assert_index_equal(res, to_datetime([dtstr[:-5]])) + + +def test_from_numeric_arrow_dtype(any_numeric_ea_dtype): + # GH 52425 + pytest.importorskip("pyarrow") + ser = Series([1, 2], dtype=f"{any_numeric_ea_dtype.lower()}[pyarrow]") + result = to_datetime(ser) + expected = Series([1, 2], dtype="datetime64[ns]") + tm.assert_series_equal(result, expected) + + +def test_to_datetime_with_empty_str_utc_false_format_mixed(): + # GH 50887 + vals = ["2020-01-01 00:00+00:00", ""] + result = to_datetime(vals, format="mixed") + expected = Index([Timestamp("2020-01-01 00:00+00:00"), "NaT"], dtype="M8[ns, UTC]") + tm.assert_index_equal(result, expected) + + # Check that a couple of other similar paths work the same way + alt = to_datetime(vals) + tm.assert_index_equal(alt, expected) + alt2 = DatetimeIndex(vals) + tm.assert_index_equal(alt2, expected) + + +def test_to_datetime_with_empty_str_utc_false_offsets_and_format_mixed(): + # GH 50887 + msg = "parsing datetimes with mixed time zones will raise an error" + + with tm.assert_produces_warning(FutureWarning, match=msg): + to_datetime( + ["2020-01-01 00:00+00:00", "2020-01-01 00:00+02:00", ""], format="mixed" + ) + + +def test_to_datetime_mixed_tzs_mixed_types(): + # GH#55793, GH#55693 mismatched tzs but one is str and other is + # datetime object + ts = Timestamp("2016-01-02 03:04:05", tz="US/Pacific") + dtstr = "2023-10-30 15:06+01" + arr = [ts, dtstr] + + msg = ( + "Mixed timezones detected. 
pass utc=True in to_datetime or tz='UTC' " + "in DatetimeIndex to convert to a common timezone" + ) + with pytest.raises(ValueError, match=msg): + to_datetime(arr) + with pytest.raises(ValueError, match=msg): + to_datetime(arr, format="mixed") + with pytest.raises(ValueError, match=msg): + DatetimeIndex(arr) + + +def test_to_datetime_mixed_types_matching_tzs(): + # GH#55793 + dtstr = "2023-11-01 09:22:03-07:00" + ts = Timestamp(dtstr) + arr = [ts, dtstr] + res1 = to_datetime(arr) + res2 = to_datetime(arr[::-1])[::-1] + res3 = to_datetime(arr, format="mixed") + res4 = DatetimeIndex(arr) + + expected = DatetimeIndex([ts, ts]) + tm.assert_index_equal(res1, expected) + tm.assert_index_equal(res2, expected) + tm.assert_index_equal(res3, expected) + tm.assert_index_equal(res4, expected) + + +dtstr = "2020-01-01 00:00+00:00" +ts = Timestamp(dtstr) + + +@pytest.mark.filterwarnings("ignore:Could not infer format:UserWarning") +@pytest.mark.parametrize( + "aware_val", + [dtstr, Timestamp(dtstr)], + ids=lambda x: type(x).__name__, +) +@pytest.mark.parametrize( + "naive_val", + [dtstr[:-6], ts.tz_localize(None), ts.date(), ts.asm8, ts.value, float(ts.value)], + ids=lambda x: type(x).__name__, +) +@pytest.mark.parametrize("naive_first", [True, False]) +def test_to_datetime_mixed_awareness_mixed_types(aware_val, naive_val, naive_first): + # GH#55793, GH#55693 + # Empty string parses to NaT + vals = [aware_val, naive_val, ""] + + vec = vals + if naive_first: + # alas, the behavior is order-dependent, so we test both ways + vec = [naive_val, aware_val, ""] + + # both_strs-> paths that were previously already deprecated with warning + # issued in _array_to_datetime_object + both_strs = isinstance(aware_val, str) and isinstance(naive_val, str) + has_numeric = isinstance(naive_val, (int, float)) + + depr_msg = "In a future version of pandas, parsing datetimes with mixed time zones" + + first_non_null = next(x for x in vec if x != "") + # if first_non_null is a not a string, _guess_datetime_format_for_array + # doesn't guess a format so we don't go through array_strptime + if not isinstance(first_non_null, str): + # that case goes through array_strptime which has different behavior + msg = "Cannot mix tz-aware with tz-naive values" + if naive_first and isinstance(aware_val, Timestamp): + if isinstance(naive_val, Timestamp): + msg = "Tz-aware datetime.datetime cannot be converted to datetime64" + with pytest.raises(ValueError, match=msg): + to_datetime(vec) + else: + with pytest.raises(ValueError, match=msg): + to_datetime(vec) + + # No warning/error with utc=True + to_datetime(vec, utc=True) + + elif has_numeric and vec.index(aware_val) < vec.index(naive_val): + msg = "time data .* doesn't match format" + with pytest.raises(ValueError, match=msg): + to_datetime(vec) + with pytest.raises(ValueError, match=msg): + to_datetime(vec, utc=True) + + elif both_strs and vec.index(aware_val) < vec.index(naive_val): + msg = r"time data \"2020-01-01 00:00\" doesn't match format" + with pytest.raises(ValueError, match=msg): + to_datetime(vec) + with pytest.raises(ValueError, match=msg): + to_datetime(vec, utc=True) + + elif both_strs and vec.index(naive_val) < vec.index(aware_val): + msg = "unconverted data remains when parsing with format" + with pytest.raises(ValueError, match=msg): + to_datetime(vec) + with pytest.raises(ValueError, match=msg): + to_datetime(vec, utc=True) + + else: + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + to_datetime(vec) + + # No warning/error with utc=True + 
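# (utc=True converts every value to a common UTC-aware result, so mixed + # awareness is accepted) +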
to_datetime(vec, utc=True) + + if both_strs: + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + to_datetime(vec, format="mixed") + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + msg = "DatetimeIndex has mixed timezones" + with pytest.raises(TypeError, match=msg): + DatetimeIndex(vec) + else: + msg = "Cannot mix tz-aware with tz-naive values" + if naive_first and isinstance(aware_val, Timestamp): + if isinstance(naive_val, Timestamp): + msg = "Tz-aware datetime.datetime cannot be converted to datetime64" + with pytest.raises(ValueError, match=msg): + to_datetime(vec, format="mixed") + with pytest.raises(ValueError, match=msg): + DatetimeIndex(vec) + else: + with pytest.raises(ValueError, match=msg): + to_datetime(vec, format="mixed") + with pytest.raises(ValueError, match=msg): + DatetimeIndex(vec) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/test_to_numeric.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/test_to_numeric.py new file mode 100644 index 0000000000000000000000000000000000000000..c452382ec572bd24cf704c445f24f9af87947141 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/test_to_numeric.py @@ -0,0 +1,978 @@ +import decimal + +import numpy as np +from numpy import iinfo +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +from pandas import ( + ArrowDtype, + DataFrame, + Index, + Series, + option_context, + to_numeric, +) +import pandas._testing as tm + + +@pytest.fixture(params=[None, "ignore", "raise", "coerce"]) +def errors(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def signed(request): + return request.param + + +@pytest.fixture(params=[lambda x: x, str], ids=["identity", "str"]) +def transform(request): + return request.param + + +@pytest.fixture(params=[47393996303418497800, 100000000000000000000]) +def large_val(request): + return request.param + + +@pytest.fixture(params=[True, False]) +def multiple_elts(request): + return request.param + + +@pytest.fixture( + params=[ + (lambda x: Index(x, name="idx"), tm.assert_index_equal), + (lambda x: Series(x, name="ser"), tm.assert_series_equal), + (lambda x: np.array(Index(x).values), tm.assert_numpy_array_equal), + ] +) +def transform_assert_equal(request): + return request.param + + +@pytest.mark.parametrize( + "input_kwargs,result_kwargs", + [ + ({}, {"dtype": np.int64}), + ({"errors": "coerce", "downcast": "integer"}, {"dtype": np.int8}), + ], +) +def test_empty(input_kwargs, result_kwargs): + # see gh-16302 + ser = Series([], dtype=object) + result = to_numeric(ser, **input_kwargs) + + expected = Series([], **result_kwargs) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))] +) +@pytest.mark.parametrize("last_val", ["7", 7]) +def test_series(last_val, infer_string): + with option_context("future.infer_string", infer_string): + ser = Series(["1", "-3.14", last_val]) + result = to_numeric(ser) + + expected = Series([1, -3.14, 7]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "data", + [ + [1, 3, 4, 5], + [1.0, 3.0, 4.0, 5.0], + # Bool is regarded as numeric. 
+ [True, False, True, True], + ], +) +def test_series_numeric(data): + ser = Series(data, index=list("ABCD"), name="EFG") + + result = to_numeric(ser) + tm.assert_series_equal(result, ser) + + +@pytest.mark.parametrize( + "data,msg", + [ + ([1, -3.14, "apple"], 'Unable to parse string "apple" at position 2'), + ( + ["orange", 1, -3.14, "apple"], + 'Unable to parse string "orange" at position 0', + ), + ], +) +def test_error(data, msg): + ser = Series(data) + + with pytest.raises(ValueError, match=msg): + to_numeric(ser, errors="raise") + + +@pytest.mark.parametrize( + "errors,exp_data", [("ignore", [1, -3.14, "apple"]), ("coerce", [1, -3.14, np.nan])] +) +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_ignore_error(errors, exp_data): + ser = Series([1, -3.14, "apple"]) + result = to_numeric(ser, errors=errors) + + expected = Series(exp_data) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "errors,exp", + [ + ("raise", 'Unable to parse string "apple" at position 2'), + ("ignore", [True, False, "apple"]), + # Coerces to float. + ("coerce", [1.0, 0.0, np.nan]), + ], +) +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_bool_handling(errors, exp): + ser = Series([True, False, "apple"]) + + if isinstance(exp, str): + with pytest.raises(ValueError, match=exp): + to_numeric(ser, errors=errors) + else: + result = to_numeric(ser, errors=errors) + expected = Series(exp) + + tm.assert_series_equal(result, expected) + + +def test_list(): + ser = ["1", "-3.14", "7"] + res = to_numeric(ser) + + expected = np.array([1, -3.14, 7]) + tm.assert_numpy_array_equal(res, expected) + + +@pytest.mark.parametrize( + "data,arr_kwargs", + [ + ([1, 3, 4, 5], {"dtype": np.int64}), + ([1.0, 3.0, 4.0, 5.0], {}), + # Boolean is regarded as numeric. + ([True, False, True, True], {}), + ], +) +def test_list_numeric(data, arr_kwargs): + result = to_numeric(data) + expected = np.array(data, **arr_kwargs) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("kwargs", [{"dtype": "O"}, {}]) +def test_numeric(kwargs): + data = [1, -3.14, 7] + + ser = Series(data, **kwargs) + result = to_numeric(ser) + + expected = Series(data) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "columns", + [ + # One column. + "a", + # Multiple columns. 
+ ["a", "b"], + ], +) +def test_numeric_df_columns(columns): + # see gh-14827 + df = DataFrame( + { + "a": [1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), "0.1"], + "b": [1.0, 2.0, 3.0, 4.0], + } + ) + + expected = DataFrame({"a": [1.2, 3.14, np.inf, 0.1], "b": [1.0, 2.0, 3.0, 4.0]}) + + df_copy = df.copy() + df_copy[columns] = df_copy[columns].apply(to_numeric) + + tm.assert_frame_equal(df_copy, expected) + + +@pytest.mark.parametrize( + "data,exp_data", + [ + ( + [[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1], + [[3.14, 1.0], 1.6, 0.1], + ), + ([np.array([decimal.Decimal(3.14), 1.0]), 0.1], [[3.14, 1.0], 0.1]), + ], +) +def test_numeric_embedded_arr_likes(data, exp_data): + # Test to_numeric with embedded lists and arrays + df = DataFrame({"a": data}) + df["a"] = df["a"].apply(to_numeric) + + expected = DataFrame({"a": exp_data}) + tm.assert_frame_equal(df, expected) + + +def test_all_nan(): + ser = Series(["a", "b", "c"]) + result = to_numeric(ser, errors="coerce") + + expected = Series([np.nan, np.nan, np.nan]) + tm.assert_series_equal(result, expected) + + +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_type_check(errors): + # see gh-11776 + df = DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]}) + kwargs = {"errors": errors} if errors is not None else {} + with pytest.raises(TypeError, match="1-d array"): + to_numeric(df, **kwargs) + + +@pytest.mark.parametrize("val", [1, 1.1, 20001]) +def test_scalar(val, signed, transform): + val = -val if signed else val + assert to_numeric(transform(val)) == float(val) + + +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_really_large_scalar(large_val, signed, transform, errors): + # see gh-24910 + kwargs = {"errors": errors} if errors is not None else {} + val = -large_val if signed else large_val + + val = transform(val) + val_is_string = isinstance(val, str) + + if val_is_string and errors in (None, "raise"): + msg = "Integer out of range. at position 0" + with pytest.raises(ValueError, match=msg): + to_numeric(val, **kwargs) + else: + expected = float(val) if (errors == "coerce" and val_is_string) else val + tm.assert_almost_equal(to_numeric(val, **kwargs), expected) + + +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_really_large_in_arr(large_val, signed, transform, multiple_elts, errors): + # see gh-24910 + kwargs = {"errors": errors} if errors is not None else {} + val = -large_val if signed else large_val + val = transform(val) + + extra_elt = "string" + arr = [val] + multiple_elts * [extra_elt] + + val_is_string = isinstance(val, str) + coercing = errors == "coerce" + + if errors in (None, "raise") and (val_is_string or multiple_elts): + if val_is_string: + msg = "Integer out of range. 
at position 0" + else: + msg = 'Unable to parse string "string" at position 1' + + with pytest.raises(ValueError, match=msg): + to_numeric(arr, **kwargs) + else: + result = to_numeric(arr, **kwargs) + + exp_val = float(val) if (coercing and val_is_string) else val + expected = [exp_val] + + if multiple_elts: + if coercing: + expected.append(np.nan) + exp_dtype = float + else: + expected.append(extra_elt) + exp_dtype = object + else: + exp_dtype = float if isinstance(exp_val, (int, float)) else object + + tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype)) + + +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_really_large_in_arr_consistent(large_val, signed, multiple_elts, errors): + # see gh-24910 + # + # Even if we discover that we have to hold float, does not mean + # we should be lenient on subsequent elements that fail to be integer. + kwargs = {"errors": errors} if errors is not None else {} + arr = [str(-large_val if signed else large_val)] + + if multiple_elts: + arr.insert(0, large_val) + + if errors in (None, "raise"): + index = int(multiple_elts) + msg = f"Integer out of range. at position {index}" + + with pytest.raises(ValueError, match=msg): + to_numeric(arr, **kwargs) + else: + result = to_numeric(arr, **kwargs) + + if errors == "coerce": + expected = [float(i) for i in arr] + exp_dtype = float + else: + expected = arr + exp_dtype = object + + tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype)) + + +@pytest.mark.parametrize( + "errors,checker", + [ + ("raise", 'Unable to parse string "fail" at position 0'), + ("ignore", lambda x: x == "fail"), + ("coerce", lambda x: np.isnan(x)), + ], +) +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_scalar_fail(errors, checker): + scalar = "fail" + + if isinstance(checker, str): + with pytest.raises(ValueError, match=checker): + to_numeric(scalar, errors=errors) + else: + assert checker(to_numeric(scalar, errors=errors)) + + +@pytest.mark.parametrize("data", [[1, 2, 3], [1.0, np.nan, 3, np.nan]]) +def test_numeric_dtypes(data, transform_assert_equal): + transform, assert_equal = transform_assert_equal + data = transform(data) + + result = to_numeric(data) + assert_equal(result, data) + + +@pytest.mark.parametrize( + "data,exp", + [ + (["1", "2", "3"], np.array([1, 2, 3], dtype="int64")), + (["1.5", "2.7", "3.4"], np.array([1.5, 2.7, 3.4])), + ], +) +def test_str(data, exp, transform_assert_equal): + transform, assert_equal = transform_assert_equal + result = to_numeric(transform(data)) + + expected = transform(exp) + assert_equal(result, expected) + + +def test_datetime_like(tz_naive_fixture, transform_assert_equal): + transform, assert_equal = transform_assert_equal + idx = pd.date_range("20130101", periods=3, tz=tz_naive_fixture) + + result = to_numeric(transform(idx)) + expected = transform(idx.asi8) + assert_equal(result, expected) + + +def test_timedelta(transform_assert_equal): + transform, assert_equal = transform_assert_equal + idx = pd.timedelta_range("1 days", periods=3, freq="D") + + result = to_numeric(transform(idx)) + expected = transform(idx.asi8) + assert_equal(result, expected) + + +def test_period(request, transform_assert_equal): + transform, assert_equal = transform_assert_equal + + idx = pd.period_range("2011-01", periods=3, freq="M", name="") + inp = transform(idx) + + if not isinstance(inp, Index): + request.applymarker( + pytest.mark.xfail(reason="Missing PeriodDtype support in to_numeric") + ) + 
result = to_numeric(inp) + expected = transform(idx.asi8) + assert_equal(result, expected) + + +@pytest.mark.parametrize( + "errors,expected", + [ + ("raise", "Invalid object type at position 0"), + ("ignore", Series([[10.0, 2], 1.0, "apple"])), + ("coerce", Series([np.nan, 1.0, np.nan])), + ], +) +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_non_hashable(errors, expected): + # see gh-13324 + ser = Series([[10.0, 2], 1.0, "apple"]) + + if isinstance(expected, str): + with pytest.raises(TypeError, match=expected): + to_numeric(ser, errors=errors) + else: + result = to_numeric(ser, errors=errors) + tm.assert_series_equal(result, expected) + + +def test_downcast_invalid_cast(): + # see gh-13352 + data = ["1", 2, 3] + invalid_downcast = "unsigned-integer" + msg = "invalid downcasting method provided" + + with pytest.raises(ValueError, match=msg): + to_numeric(data, downcast=invalid_downcast) + + +def test_errors_invalid_value(): + # see gh-26466 + data = ["1", 2, 3] + invalid_error_value = "invalid" + msg = "invalid error value specified" + + with pytest.raises(ValueError, match=msg): + to_numeric(data, errors=invalid_error_value) + + +@pytest.mark.parametrize( + "data", + [ + ["1", 2, 3], + [1, 2, 3], + np.array(["1970-01-02", "1970-01-03", "1970-01-04"], dtype="datetime64[D]"), + ], +) +@pytest.mark.parametrize( + "kwargs,exp_dtype", + [ + # Basic function tests. + ({}, np.int64), + ({"downcast": None}, np.int64), + # Support below np.float32 is rare and far between. + ({"downcast": "float"}, np.dtype(np.float32).char), + # Basic dtype support. + ({"downcast": "unsigned"}, np.dtype(np.typecodes["UnsignedInteger"][0])), + ], +) +def test_downcast_basic(data, kwargs, exp_dtype): + # see gh-13352 + result = to_numeric(data, **kwargs) + expected = np.array([1, 2, 3], dtype=exp_dtype) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("signed_downcast", ["integer", "signed"]) +@pytest.mark.parametrize( + "data", + [ + ["1", 2, 3], + [1, 2, 3], + np.array(["1970-01-02", "1970-01-03", "1970-01-04"], dtype="datetime64[D]"), + ], +) +def test_signed_downcast(data, signed_downcast): + # see gh-13352 + smallest_int_dtype = np.dtype(np.typecodes["Integer"][0]) + expected = np.array([1, 2, 3], dtype=smallest_int_dtype) + + res = to_numeric(data, downcast=signed_downcast) + tm.assert_numpy_array_equal(res, expected) + + +def test_ignore_downcast_invalid_data(): + # If we can't successfully cast the given + # data to a numeric dtype, do not bother + # with the downcast parameter. + data = ["foo", 2, 3] + expected = np.array(data, dtype=object) + + msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = to_numeric(data, errors="ignore", downcast="unsigned") + tm.assert_numpy_array_equal(res, expected) + + +def test_ignore_downcast_neg_to_unsigned(): + # Cannot cast to an unsigned integer + # because we have a negative number. 
+ data = ["-1", 2, 3] + expected = np.array([-1, 2, 3], dtype=np.int64) + + res = to_numeric(data, downcast="unsigned") + tm.assert_numpy_array_equal(res, expected) + + +# Warning in 32 bit platforms +@pytest.mark.filterwarnings("ignore:invalid value encountered in cast:RuntimeWarning") +@pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"]) +@pytest.mark.parametrize( + "data,expected", + [ + (["1.1", 2, 3], np.array([1.1, 2, 3], dtype=np.float64)), + ( + [10000.0, 20000, 3000, 40000.36, 50000, 50000.00], + np.array( + [10000.0, 20000, 3000, 40000.36, 50000, 50000.00], dtype=np.float64 + ), + ), + ], +) +def test_ignore_downcast_cannot_convert_float(data, expected, downcast): + # Cannot cast to an integer (signed or unsigned) + # because we have a float number. + res = to_numeric(data, downcast=downcast) + tm.assert_numpy_array_equal(res, expected) + + +@pytest.mark.parametrize( + "downcast,expected_dtype", + [("integer", np.int16), ("signed", np.int16), ("unsigned", np.uint16)], +) +def test_downcast_not8bit(downcast, expected_dtype): + # the smallest integer dtype need not be np.(u)int8 + data = ["256", 257, 258] + + expected = np.array([256, 257, 258], dtype=expected_dtype) + res = to_numeric(data, downcast=downcast) + tm.assert_numpy_array_equal(res, expected) + + +@pytest.mark.parametrize( + "dtype,downcast,min_max", + [ + ("int8", "integer", [iinfo(np.int8).min, iinfo(np.int8).max]), + ("int16", "integer", [iinfo(np.int16).min, iinfo(np.int16).max]), + ("int32", "integer", [iinfo(np.int32).min, iinfo(np.int32).max]), + ("int64", "integer", [iinfo(np.int64).min, iinfo(np.int64).max]), + ("uint8", "unsigned", [iinfo(np.uint8).min, iinfo(np.uint8).max]), + ("uint16", "unsigned", [iinfo(np.uint16).min, iinfo(np.uint16).max]), + ("uint32", "unsigned", [iinfo(np.uint32).min, iinfo(np.uint32).max]), + ("uint64", "unsigned", [iinfo(np.uint64).min, iinfo(np.uint64).max]), + ("int16", "integer", [iinfo(np.int8).min, iinfo(np.int8).max + 1]), + ("int32", "integer", [iinfo(np.int16).min, iinfo(np.int16).max + 1]), + ("int64", "integer", [iinfo(np.int32).min, iinfo(np.int32).max + 1]), + ("int16", "integer", [iinfo(np.int8).min - 1, iinfo(np.int16).max]), + ("int32", "integer", [iinfo(np.int16).min - 1, iinfo(np.int32).max]), + ("int64", "integer", [iinfo(np.int32).min - 1, iinfo(np.int64).max]), + ("uint16", "unsigned", [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]), + ("uint32", "unsigned", [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]), + ("uint64", "unsigned", [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]), + ], +) +def test_downcast_limits(dtype, downcast, min_max): + # see gh-14404: test the limits of each downcast. 
+ series = to_numeric(Series(min_max), downcast=downcast) + assert series.dtype == dtype + + +def test_downcast_float64_to_float32(): + # GH-43693: check that float64 is preserved for values >= 16,777,217, + # the first integer (2**24 + 1) that float32 cannot represent exactly + series = Series([16777217.0, np.finfo(np.float64).max, np.nan], dtype=np.float64) + result = to_numeric(series, downcast="float") + + assert series.dtype == result.dtype + + +@pytest.mark.parametrize( + "ser,expected", + [ + ( + Series([0, 9223372036854775808]), + Series([0, 9223372036854775808], dtype=np.uint64), + ) + ], +) +def test_downcast_uint64(ser, expected): + # see gh-14422: + # BUG: to_numeric doesn't work with uint64 numbers + + result = to_numeric(ser, downcast="unsigned") + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "data,exp_data", + [ + ( + [200, 300, "", "NaN", 30000000000000000000], + [200, 300, np.nan, np.nan, 30000000000000000000], + ), + ( + ["12345678901234567890", "1234567890", "ITEM"], + [12345678901234567890, 1234567890, np.nan], + ), + ], +) +def test_coerce_uint64_conflict(data, exp_data): + # see gh-17007 and gh-17125 + # + # Still returns float despite the uint64-nan conflict, + # which would normally force casting to object. + result = to_numeric(Series(data), errors="coerce") + expected = Series(exp_data, dtype=float) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "errors,exp", + [ + ("ignore", Series(["12345678901234567890", "1234567890", "ITEM"])), + ("raise", "Unable to parse string"), + ], +) +@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") +def test_non_coerce_uint64_conflict(errors, exp): + # see gh-17007 and gh-17125 + # + # For completeness. + ser = Series(["12345678901234567890", "1234567890", "ITEM"]) + + if isinstance(exp, str): + with pytest.raises(ValueError, match=exp): + to_numeric(ser, errors=errors) + else: + result = to_numeric(ser, errors=errors) + tm.assert_series_equal(result, ser) + + +@pytest.mark.parametrize("dc1", ["integer", "float", "unsigned"]) +@pytest.mark.parametrize("dc2", ["integer", "float", "unsigned"]) +def test_downcast_empty(dc1, dc2): + # GH32493 + + tm.assert_numpy_array_equal( + to_numeric([], downcast=dc1), + to_numeric([], downcast=dc2), + check_dtype=False, + ) + + +def test_failure_to_convert_uint64_string_to_NaN(): + # GH 32394 + result = to_numeric("uint64", errors="coerce") + assert np.isnan(result) + + ser = Series([32, 64, np.nan]) + result = to_numeric(Series(["32", "64", "uint64"]), errors="coerce") + tm.assert_series_equal(result, ser) + + +@pytest.mark.parametrize( + "strrep", + [ + "243.164", + "245.968", + "249.585", + "259.745", + "265.742", + "272.567", + "279.196", + "280.366", + "275.034", + "271.351", + "272.889", + "270.627", + "280.828", + "290.383", + "308.153", + "319.945", + "336.0", + "344.09", + "351.385", + "356.178", + "359.82", + "361.03", + "367.701", + "380.812", + "387.98", + "391.749", + "391.171", + "385.97", + "385.345", + "386.121", + "390.996", + "399.734", + "413.073", + "421.532", + "430.221", + "437.092", + "439.746", + "446.01", + "451.191", + "460.463", + "469.779", + "472.025", + "479.49", + "474.864", + "467.54", + "471.978", + ], +) +def test_precision_float_conversion(strrep): + # GH 31364 + result = to_numeric(strrep) + + assert result == float(strrep) + + +@pytest.mark.parametrize( + "values, expected", + [ + (["1", "2", None], Series([1, 2, np.nan], dtype="Int64")), + (["1", "2", "3"], Series([1, 2, 3], dtype="Int64")), + (["1", "2", 3], Series([1, 2, 3], dtype="Int64")), + (["1", 
"2", 3.5], Series([1, 2, 3.5], dtype="Float64")), + (["1", None, 3.5], Series([1, np.nan, 3.5], dtype="Float64")), + (["1", "2", "3.5"], Series([1, 2, 3.5], dtype="Float64")), + ], +) +def test_to_numeric_from_nullable_string(values, nullable_string_dtype, expected): + # https://github.com/pandas-dev/pandas/issues/37262 + s = Series(values, dtype=nullable_string_dtype) + result = to_numeric(s) + tm.assert_series_equal(result, expected) + + +def test_to_numeric_from_nullable_string_coerce(nullable_string_dtype): + # GH#52146 + values = ["a", "1"] + ser = Series(values, dtype=nullable_string_dtype) + result = to_numeric(ser, errors="coerce") + expected = Series([pd.NA, 1], dtype="Int64") + tm.assert_series_equal(result, expected) + + +def test_to_numeric_from_nullable_string_ignore(nullable_string_dtype): + # GH#52146 + values = ["a", "1"] + ser = Series(values, dtype=nullable_string_dtype) + expected = ser.copy() + msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_numeric(ser, errors="ignore") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "data, input_dtype, downcast, expected_dtype", + ( + ([1, 1], "Int64", "integer", "Int8"), + ([1.0, pd.NA], "Float64", "integer", "Int8"), + ([1.0, 1.1], "Float64", "integer", "Float64"), + ([1, pd.NA], "Int64", "integer", "Int8"), + ([450, 300], "Int64", "integer", "Int16"), + ([1, 1], "Float64", "integer", "Int8"), + ([np.iinfo(np.int64).max - 1, 1], "Int64", "integer", "Int64"), + ([1, 1], "Int64", "signed", "Int8"), + ([1.0, 1.0], "Float32", "signed", "Int8"), + ([1.0, 1.1], "Float64", "signed", "Float64"), + ([1, pd.NA], "Int64", "signed", "Int8"), + ([450, -300], "Int64", "signed", "Int16"), + ([np.iinfo(np.uint64).max - 1, 1], "UInt64", "signed", "UInt64"), + ([1, 1], "Int64", "unsigned", "UInt8"), + ([1.0, 1.0], "Float32", "unsigned", "UInt8"), + ([1.0, 1.1], "Float64", "unsigned", "Float64"), + ([1, pd.NA], "Int64", "unsigned", "UInt8"), + ([450, -300], "Int64", "unsigned", "Int64"), + ([-1, -1], "Int32", "unsigned", "Int32"), + ([1, 1], "Float64", "float", "Float32"), + ([1, 1.1], "Float64", "float", "Float32"), + ([1, 1], "Float32", "float", "Float32"), + ([1, 1.1], "Float32", "float", "Float32"), + ), +) +def test_downcast_nullable_numeric(data, input_dtype, downcast, expected_dtype): + arr = pd.array(data, dtype=input_dtype) + result = to_numeric(arr, downcast=downcast) + expected = pd.array(data, dtype=expected_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_downcast_nullable_mask_is_copied(): + # GH38974 + + arr = pd.array([1, 2, pd.NA], dtype="Int64") + + result = to_numeric(arr, downcast="integer") + expected = pd.array([1, 2, pd.NA], dtype="Int8") + tm.assert_extension_array_equal(result, expected) + + arr[1] = pd.NA # should not modify result + tm.assert_extension_array_equal(result, expected) + + +def test_to_numeric_scientific_notation(): + # GH 15898 + result = to_numeric("1.7e+308") + expected = np.float64(1.7e308) + assert result == expected + + +@pytest.mark.parametrize("val", [9876543210.0, 2.0**128]) +def test_to_numeric_large_float_not_downcast_to_float_32(val): + # GH 19729 + expected = Series([val]) + result = to_numeric(expected, downcast="float") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "val, dtype", [(1, "Int64"), (1.5, "Float64"), (True, "boolean")] +) +def test_to_numeric_dtype_backend(val, dtype): + # GH#50505 + ser = Series([val], dtype=object) + result = 
to_numeric(ser, dtype_backend="numpy_nullable") + expected = Series([val], dtype=dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "val, dtype", + [ + (1, "Int64"), + (1.5, "Float64"), + (True, "boolean"), + (1, "int64[pyarrow]"), + (1.5, "float64[pyarrow]"), + (True, "bool[pyarrow]"), + ], +) +def test_to_numeric_dtype_backend_na(val, dtype): + # GH#50505 + if "pyarrow" in dtype: + pytest.importorskip("pyarrow") + dtype_backend = "pyarrow" + else: + dtype_backend = "numpy_nullable" + ser = Series([val, None], dtype=object) + result = to_numeric(ser, dtype_backend=dtype_backend) + expected = Series([val, pd.NA], dtype=dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "val, dtype, downcast", + [ + (1, "Int8", "integer"), + (1.5, "Float32", "float"), + (1, "Int8", "signed"), + (1, "int8[pyarrow]", "integer"), + (1.5, "float[pyarrow]", "float"), + (1, "int8[pyarrow]", "signed"), + ], +) +def test_to_numeric_dtype_backend_downcasting(val, dtype, downcast): + # GH#50505 + if "pyarrow" in dtype: + pytest.importorskip("pyarrow") + dtype_backend = "pyarrow" + else: + dtype_backend = "numpy_nullable" + ser = Series([val, None], dtype=object) + result = to_numeric(ser, dtype_backend=dtype_backend, downcast=downcast) + expected = Series([val, pd.NA], dtype=dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "smaller, dtype_backend", + [["UInt8", "numpy_nullable"], ["uint8[pyarrow]", "pyarrow"]], +) +def test_to_numeric_dtype_backend_downcasting_uint(smaller, dtype_backend): + # GH#50505 + if dtype_backend == "pyarrow": + pytest.importorskip("pyarrow") + ser = Series([1, pd.NA], dtype="UInt64") + result = to_numeric(ser, dtype_backend=dtype_backend, downcast="unsigned") + expected = Series([1, pd.NA], dtype=smaller) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "dtype", + [ + "Int64", + "UInt64", + "Float64", + "boolean", + "int64[pyarrow]", + "uint64[pyarrow]", + "float64[pyarrow]", + "bool[pyarrow]", + ], +) +def test_to_numeric_dtype_backend_already_nullable(dtype): + # GH#50505 + if "pyarrow" in dtype: + pytest.importorskip("pyarrow") + ser = Series([1, pd.NA], dtype=dtype) + result = to_numeric(ser, dtype_backend="numpy_nullable") + expected = Series([1, pd.NA], dtype=dtype) + tm.assert_series_equal(result, expected) + + +def test_to_numeric_dtype_backend_error(dtype_backend): + # GH#50505 + ser = Series(["a", "b", ""]) + expected = ser.copy() + with pytest.raises(ValueError, match="Unable to parse string"): + to_numeric(ser, dtype_backend=dtype_backend) + + msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_numeric(ser, dtype_backend=dtype_backend, errors="ignore") + tm.assert_series_equal(result, expected) + + result = to_numeric(ser, dtype_backend=dtype_backend, errors="coerce") + if dtype_backend == "pyarrow": + dtype = "double[pyarrow]" + else: + dtype = "Float64" + expected = Series([np.nan, np.nan, np.nan], dtype=dtype) + tm.assert_series_equal(result, expected) + + +def test_invalid_dtype_backend(): + ser = Series([1, 2, 3]) + msg = ( + "dtype_backend numpy is invalid, only 'numpy_nullable' and " + "'pyarrow' are allowed." 
+ ) + with pytest.raises(ValueError, match=msg): + to_numeric(ser, dtype_backend="numpy") + + +def test_coerce_pyarrow_backend(): + # GH 52588 + pa = pytest.importorskip("pyarrow") + ser = Series(list("12x"), dtype=ArrowDtype(pa.string())) + result = to_numeric(ser, errors="coerce", dtype_backend="pyarrow") + expected = Series([1, 2, None], dtype=ArrowDtype(pa.int64())) + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/test_to_time.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/test_to_time.py new file mode 100644 index 0000000000000000000000000000000000000000..b673bd9c2ec7168971ae0ed802336e4f03ff63a7 --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/test_to_time.py @@ -0,0 +1,72 @@ +from datetime import time +import locale + +import numpy as np +import pytest + +from pandas.compat import PY311 + +from pandas import Series +import pandas._testing as tm +from pandas.core.tools.times import to_time + +# The tests marked with this are locale-dependent. +# They pass, except when the machine locale is zh_CN or it_IT. +fails_on_non_english = pytest.mark.xfail( + locale.getlocale()[0] in ("zh_CN", "it_IT"), + reason="fail on a CI build with LC_ALL=zh_CN.utf8/it_IT.utf8", + strict=False, +) + + +class TestToTime: + @pytest.mark.parametrize( + "time_string", + [ + "14:15", + "1415", + pytest.param("2:15pm", marks=fails_on_non_english), + pytest.param("0215pm", marks=fails_on_non_english), + "14:15:00", + "141500", + pytest.param("2:15:00pm", marks=fails_on_non_english), + pytest.param("021500pm", marks=fails_on_non_english), + time(14, 15), + ], + ) + def test_parsers_time(self, time_string): + # GH#11818 + assert to_time(time_string) == time(14, 15) + + def test_odd_format(self): + new_string = "14.15" + msg = r"Cannot convert arg \['14\.15'\] to a time" + if not PY311: + with pytest.raises(ValueError, match=msg): + to_time(new_string) + assert to_time(new_string, format="%H.%M") == time(14, 15) + + def test_arraylike(self): + arg = ["14:15", "20:20"] + expected_arr = [time(14, 15), time(20, 20)] + assert to_time(arg) == expected_arr + assert to_time(arg, format="%H:%M") == expected_arr + assert to_time(arg, infer_time_format=True) == expected_arr + assert to_time(arg, format="%I:%M%p", errors="coerce") == [None, None] + + msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = to_time(arg, format="%I:%M%p", errors="ignore") + tm.assert_numpy_array_equal(res, np.array(arg, dtype=np.object_)) + + msg = "Cannot convert.+to a time with given format" + with pytest.raises(ValueError, match=msg): + to_time(arg, format="%I:%M%p", errors="raise") + + tm.assert_series_equal( + to_time(Series(arg, name="test")), Series(expected_arr, name="test") + ) + + res = to_time(np.array(arg)) + assert isinstance(res, list) + assert res == expected_arr diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/test_to_timedelta.py b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/test_to_timedelta.py new file mode 100644 index 0000000000000000000000000000000000000000..b67694f1c58c7016221ed629358e8867b2a1534a --- /dev/null +++ b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tools/test_to_timedelta.py @@ -0,0 +1,340 @@ +from datetime import ( + time, + timedelta, +) + +import numpy as np +import pytest + +from pandas.compat import IS64 +from pandas.errors import OutOfBoundsTimedelta + +import pandas as pd +from pandas import 
( + Series, + TimedeltaIndex, + isna, + to_timedelta, +) +import pandas._testing as tm +from pandas.core.arrays import TimedeltaArray + + +class TestTimedeltas: + def test_to_timedelta_dt64_raises(self): + # Passing datetime64-dtype data to TimedeltaIndex is no longer + # supported GH#29794 + msg = r"dtype datetime64\[ns\] cannot be converted to timedelta64\[ns\]" + + ser = Series([pd.NaT]) + with pytest.raises(TypeError, match=msg): + to_timedelta(ser) + with pytest.raises(TypeError, match=msg): + ser.to_frame().apply(to_timedelta) + + @pytest.mark.parametrize("readonly", [True, False]) + def test_to_timedelta_readonly(self, readonly): + # GH#34857 + arr = np.array([], dtype=object) + if readonly: + arr.setflags(write=False) + result = to_timedelta(arr) + expected = to_timedelta([]) + tm.assert_index_equal(result, expected) + + def test_to_timedelta_null(self): + result = to_timedelta(["", ""]) + assert isna(result).all() + + def test_to_timedelta_same_np_timedelta64(self): + # pass thru + result = to_timedelta(np.array([np.timedelta64(1, "s")])) + expected = pd.Index(np.array([np.timedelta64(1, "s")])) + tm.assert_index_equal(result, expected) + + def test_to_timedelta_series(self): + # Series + expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)]) + result = to_timedelta(Series(["1d", "1days 00:00:01"])) + tm.assert_series_equal(result, expected) + + def test_to_timedelta_units(self): + # with units + result = TimedeltaIndex( + [np.timedelta64(0, "ns"), np.timedelta64(10, "s").astype("m8[ns]")] + ) + expected = to_timedelta([0, 10], unit="s") + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize( + "dtype, unit", + [ + ["int64", "s"], + ["int64", "m"], + ["int64", "h"], + ["timedelta64[s]", "s"], + ["timedelta64[D]", "D"], + ], + ) + def test_to_timedelta_units_dtypes(self, dtype, unit): + # arrays of various dtypes + arr = np.array([1] * 5, dtype=dtype) + result = to_timedelta(arr, unit=unit) + exp_dtype = "m8[ns]" if dtype == "int64" else "m8[s]" + expected = TimedeltaIndex([np.timedelta64(1, unit)] * 5, dtype=exp_dtype) + tm.assert_index_equal(result, expected) + + def test_to_timedelta_oob_non_nano(self): + arr = np.array([pd.NaT._value + 1], dtype="timedelta64[m]") + + msg = ( + "Cannot convert -9223372036854775807 minutes to " + r"timedelta64\[s\] without overflow" + ) + with pytest.raises(OutOfBoundsTimedelta, match=msg): + to_timedelta(arr) + + with pytest.raises(OutOfBoundsTimedelta, match=msg): + TimedeltaIndex(arr) + + with pytest.raises(OutOfBoundsTimedelta, match=msg): + TimedeltaArray._from_sequence(arr, dtype="m8[s]") + + @pytest.mark.parametrize( + "arg", [np.arange(10).reshape(2, 5), pd.DataFrame(np.arange(10).reshape(2, 5))] + ) + @pytest.mark.parametrize("errors", ["ignore", "raise", "coerce"]) + @pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning") + def test_to_timedelta_dataframe(self, arg, errors): + # GH 11776 + with pytest.raises(TypeError, match="1-d array"): + to_timedelta(arg, errors=errors) + + def test_to_timedelta_invalid_errors(self): + # bad value for errors parameter + msg = "errors must be one of" + with pytest.raises(ValueError, match=msg): + to_timedelta(["foo"], errors="never") + + @pytest.mark.parametrize("arg", [[1, 2], 1]) + def test_to_timedelta_invalid_unit(self, arg): + # these will error + msg = "invalid unit abbreviation: foo" + with pytest.raises(ValueError, match=msg): + to_timedelta(arg, unit="foo") + + def test_to_timedelta_time(self): + # time not supported ATM + msg = ( + 
"Value must be Timedelta, string, integer, float, timedelta or convertible" + ) + with pytest.raises(ValueError, match=msg): + to_timedelta(time(second=1)) + assert to_timedelta(time(second=1), errors="coerce") is pd.NaT + + def test_to_timedelta_bad_value(self): + msg = "Could not convert 'foo' to NumPy timedelta" + with pytest.raises(ValueError, match=msg): + to_timedelta(["foo", "bar"]) + + def test_to_timedelta_bad_value_coerce(self): + tm.assert_index_equal( + TimedeltaIndex([pd.NaT, pd.NaT]), + to_timedelta(["foo", "bar"], errors="coerce"), + ) + + tm.assert_index_equal( + TimedeltaIndex(["1 day", pd.NaT, "1 min"]), + to_timedelta(["1 day", "bar", "1 min"], errors="coerce"), + ) + + def test_to_timedelta_invalid_errors_ignore(self): + # gh-13613: these should not error because errors='ignore' + msg = "errors='ignore' is deprecated" + invalid_data = "apple" + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_timedelta(invalid_data, errors="ignore") + assert invalid_data == result + + invalid_data = ["apple", "1 days"] + expected = np.array(invalid_data, dtype=object) + + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_timedelta(invalid_data, errors="ignore") + tm.assert_numpy_array_equal(expected, result) + + invalid_data = pd.Index(["apple", "1 days"]) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_timedelta(invalid_data, errors="ignore") + tm.assert_index_equal(invalid_data, result) + + invalid_data = Series(["apple", "1 days"]) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_timedelta(invalid_data, errors="ignore") + tm.assert_series_equal(invalid_data, result) + + @pytest.mark.parametrize( + "val, errors", + [ + ("1M", True), + ("1 M", True), + ("1Y", True), + ("1 Y", True), + ("1y", True), + ("1 y", True), + ("1m", False), + ("1 m", False), + ("1 day", False), + ("2day", False), + ], + ) + def test_unambiguous_timedelta_values(self, val, errors): + # GH36666 Deprecate use of strings denoting units with 'M', 'Y', 'm' or 'y' + # in pd.to_timedelta + msg = "Units 'M', 'Y' and 'y' do not represent unambiguous timedelta" + if errors: + with pytest.raises(ValueError, match=msg): + to_timedelta(val) + else: + # check it doesn't raise + to_timedelta(val) + + def test_to_timedelta_via_apply(self): + # GH 5458 + expected = Series([np.timedelta64(1, "s")]) + result = Series(["00:00:01"]).apply(to_timedelta) + tm.assert_series_equal(result, expected) + + result = Series([to_timedelta("00:00:01")]) + tm.assert_series_equal(result, expected) + + def test_to_timedelta_inference_without_warning(self): + # GH#41731 inference produces a warning in the Series constructor, + # but _not_ in to_timedelta + vals = ["00:00:01", pd.NaT] + with tm.assert_produces_warning(None): + result = to_timedelta(vals) + + expected = TimedeltaIndex([pd.Timedelta(seconds=1), pd.NaT]) + tm.assert_index_equal(result, expected) + + def test_to_timedelta_on_missing_values(self): + # GH5438 + timedelta_NaT = np.timedelta64("NaT") + + actual = to_timedelta(Series(["00:00:01", np.nan])) + expected = Series( + [np.timedelta64(1000000000, "ns"), timedelta_NaT], + dtype=f"{tm.ENDIAN}m8[ns]", + ) + tm.assert_series_equal(actual, expected) + + ser = Series(["00:00:01", pd.NaT], dtype="m8[ns]") + actual = to_timedelta(ser) + tm.assert_series_equal(actual, expected) + + @pytest.mark.parametrize("val", [np.nan, pd.NaT, pd.NA]) + def test_to_timedelta_on_missing_values_scalar(self, val): + actual = to_timedelta(val) + assert 
actual._value == np.timedelta64("NaT").astype("int64") + + @pytest.mark.parametrize("val", [np.nan, pd.NaT, pd.NA]) + def test_to_timedelta_on_missing_values_list(self, val): + actual = to_timedelta([val]) + assert actual[0]._value == np.timedelta64("NaT").astype("int64") + + @pytest.mark.xfail(not IS64, reason="Floating point error") + def test_to_timedelta_float(self): + # https://github.com/pandas-dev/pandas/issues/25077 + arr = np.arange(0, 1, 1e-6)[-10:] + result = to_timedelta(arr, unit="s") + expected_asi8 = np.arange(999990000, 10**9, 1000, dtype="int64") + tm.assert_numpy_array_equal(result.asi8, expected_asi8) + + def test_to_timedelta_coerce_strings_unit(self): + arr = np.array([1, 2, "error"], dtype=object) + result = to_timedelta(arr, unit="ns", errors="coerce") + expected = to_timedelta([1, 2, pd.NaT], unit="ns") + tm.assert_index_equal(result, expected) + + def test_to_timedelta_ignore_strings_unit(self): + arr = np.array([1, 2, "error"], dtype=object) + msg = "errors='ignore' is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = to_timedelta(arr, unit="ns", errors="ignore") + tm.assert_numpy_array_equal(result, arr) + + @pytest.mark.parametrize( + "expected_val, result_val", [[timedelta(days=2), 2], [None, None]] + ) + def test_to_timedelta_nullable_int64_dtype(self, expected_val, result_val): + # GH 35574 + expected = Series([timedelta(days=1), expected_val]) + result = to_timedelta(Series([1, result_val], dtype="Int64"), unit="days") + + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + ("input", "expected"), + [ + ("8:53:08.71800000001", "8:53:08.718"), + ("8:53:08.718001", "8:53:08.718001"), + ("8:53:08.7180000001", "8:53:08.7180000001"), + ("-8:53:08.71800000001", "-8:53:08.718"), + ("8:53:08.7180000089", "8:53:08.718000008"), + ], + ) + @pytest.mark.parametrize("func", [pd.Timedelta, to_timedelta]) + def test_to_timedelta_precision_over_nanos(self, input, expected, func): + # GH: 36738 + expected = pd.Timedelta(expected) + result = func(input) + assert result == expected + + def test_to_timedelta_zerodim(self, fixed_now_ts): + # ndarray.item() incorrectly returns int for dt64[ns] and td64[ns] + dt64 = fixed_now_ts.to_datetime64() + arg = np.array(dt64) + + msg = ( + "Value must be Timedelta, string, integer, float, timedelta " + "or convertible, not datetime64" + ) + with pytest.raises(ValueError, match=msg): + to_timedelta(arg) + + arg2 = arg.view("m8[ns]") + result = to_timedelta(arg2) + assert isinstance(result, pd.Timedelta) + assert result._value == dt64.view("i8") + + def test_to_timedelta_numeric_ea(self, any_numeric_ea_dtype): + # GH#48796 + ser = Series([1, pd.NA], dtype=any_numeric_ea_dtype) + result = to_timedelta(ser) + expected = Series([pd.Timedelta(1, unit="ns"), pd.NaT]) + tm.assert_series_equal(result, expected) + + def test_to_timedelta_fraction(self): + result = to_timedelta(1.0 / 3, unit="h") + expected = pd.Timedelta("0 days 00:19:59.999999998") + assert result == expected + + +def test_from_numeric_arrow_dtype(any_numeric_ea_dtype): + # GH 52425 + pytest.importorskip("pyarrow") + ser = Series([1, 2], dtype=f"{any_numeric_ea_dtype.lower()}[pyarrow]") + result = to_timedelta(ser) + expected = Series([1, 2], dtype="timedelta64[ns]") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("unit", ["ns", "ms"]) +def test_from_timedelta_arrow_dtype(unit): + # GH 54298 + pytest.importorskip("pyarrow") + expected = Series([timedelta(1)], dtype=f"duration[{unit}][pyarrow]") + result 
= to_timedelta(expected) + tm.assert_series_equal(result, expected) diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/holiday/__pycache__/test_observance.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/holiday/__pycache__/test_observance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d231740f1ab91315fd34746bfc8f991d50e42de3 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/holiday/__pycache__/test_observance.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..48c01e77cd0a92201384c63705e705c125443fb2 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_business_month.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_business_month.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..683f5f73bdfdf6836c3c931259753db6005dfe35 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_business_month.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_business_quarter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_business_quarter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3b0f62aaadcff6c7e13524b8403145866f8abaab Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_business_quarter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_common.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64bc3ac05a5dd4d115604a76ee91374b56079eb5 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_common.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_custom_business_day.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_custom_business_day.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..690c59cc43f7289fb0b3df3a161abb6083ad0279 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_custom_business_day.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_custom_business_month.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_custom_business_month.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f5ab2675a5e31bbb32daa23eefa7d52e1e7b43e1 Binary files /dev/null and 
b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_custom_business_month.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_easter.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_easter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b62e009e6ec4852a26fee327367a176eddc2c58 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_easter.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_fiscal.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_fiscal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc4ab49604002f0fbfcd3e35fc6930033bee3eb4 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_fiscal.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_offsets.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_offsets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..66754399ed93f40fb3c2ed7f0ce07a6e10247307 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_offsets.cpython-310.pyc differ diff --git a/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_ticks.cpython-310.pyc b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_ticks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e95e43a1166b6b2d26ac2901e4b49c615459fa39 Binary files /dev/null and b/env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_ticks.cpython-310.pyc differ