diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_astype.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..932e903c0e448174e0b960e5ae504282cef61dc7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_astype.py @@ -0,0 +1,53 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +def test_astype(): + # with missing values + arr = 
pd.array([True, False, None], dtype="boolean") + + with pytest.raises(ValueError, match="cannot convert NA to integer"): + arr.astype("int64") + + with pytest.raises(ValueError, match="cannot convert float NaN to"): + arr.astype("bool") + + result = arr.astype("float64") + expected = np.array([1, 0, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + result = arr.astype("str") + expected = np.array(["True", "False", "<NA>"], dtype=f"{tm.ENDIAN}U5") + tm.assert_numpy_array_equal(result, expected) + + # no missing values + arr = pd.array([True, False, True], dtype="boolean") + result = arr.astype("int64") + expected = np.array([1, 0, 1], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + result = arr.astype("bool") + expected = np.array([True, False, True], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + +def test_astype_to_boolean_array(): + # astype to BooleanArray + arr = pd.array([True, False, None], dtype="boolean") + + result = arr.astype("boolean") + tm.assert_extension_array_equal(result, arr) + result = arr.astype(pd.BooleanDtype()) + tm.assert_extension_array_equal(result, arr) + + +def test_astype_to_integer_array(): + # astype to IntegerArray + arr = pd.array([True, False, None], dtype="boolean") + + result = arr.astype("Int64") + expected = pd.array([1, 0, None], dtype="Int64") + tm.assert_extension_array_equal(result, expected)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_construction.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_construction.py new file mode 100644 index 0000000000000000000000000000000000000000..645e763fbf00cec4f62474fc7d2d2be564a4e4ba --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_construction.py @@ -0,0 +1,325 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.arrays import BooleanArray +from pandas.core.arrays.boolean import coerce_to_array + + +def test_boolean_array_constructor(): + values = np.array([True, False, True, False], dtype="bool") + mask = np.array([False, False, False, True], dtype="bool") + + result = BooleanArray(values, mask) + expected = pd.array([True, False, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + with pytest.raises(TypeError, match="values should be boolean numpy array"): + BooleanArray(values.tolist(), mask) + + with pytest.raises(TypeError, match="mask should be boolean numpy array"): + BooleanArray(values, mask.tolist()) + + with pytest.raises(TypeError, match="values should be boolean numpy array"): + BooleanArray(values.astype(int), mask) + + with pytest.raises(TypeError, match="mask should be boolean numpy array"): + BooleanArray(values, None) + + with pytest.raises(ValueError, match="values.shape must match mask.shape"): + BooleanArray(values.reshape(1, -1), mask) + + with pytest.raises(ValueError, match="values.shape must match mask.shape"): + BooleanArray(values, mask.reshape(1, -1)) + + +def test_boolean_array_constructor_copy(): + values = np.array([True, False, True, False], dtype="bool") + mask = np.array([False, False, False, True], dtype="bool") + + result = BooleanArray(values, mask) + assert result._data is values + assert result._mask is mask + + result = BooleanArray(values, mask, copy=True) + assert result._data is not values + assert result._mask is not mask + + +def test_to_boolean_array(): + expected = BooleanArray( + np.array([True, False, 
True]), np.array([False, False, False]) + ) + + result = pd.array([True, False, True], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + result = pd.array(np.array([True, False, True]), dtype="boolean") + tm.assert_extension_array_equal(result, expected) + result = pd.array(np.array([True, False, True], dtype=object), dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + # with missing values + expected = BooleanArray( + np.array([True, False, True]), np.array([False, False, True]) + ) + + result = pd.array([True, False, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + result = pd.array(np.array([True, False, None], dtype=object), dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +def test_to_boolean_array_all_none(): + expected = BooleanArray(np.array([True, True, True]), np.array([True, True, True])) + + result = pd.array([None, None, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + result = pd.array(np.array([None, None, None], dtype=object), dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "a, b", + [ + ([True, False, None, np.nan, pd.NA], [True, False, None, None, None]), + ([True, np.nan], [True, None]), + ([True, pd.NA], [True, None]), + ([np.nan, np.nan], [None, None]), + (np.array([np.nan, np.nan], dtype=float), [None, None]), + ], +) +def test_to_boolean_array_missing_indicators(a, b): + result = pd.array(a, dtype="boolean") + expected = pd.array(b, dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values", + [ + ["foo", "bar"], + ["1", "2"], + # "foo", + [1, 2], + [1.0, 2.0], + pd.date_range("20130101", periods=2), + np.array(["foo"]), + np.array([1, 2]), + np.array([1.0, 2.0]), + [np.nan, {"a": 1}], + ], +) +def test_to_boolean_array_error(values): + # error in converting existing arrays to BooleanArray + msg = "Need to pass bool-like value" + with pytest.raises(TypeError, match=msg): + pd.array(values, dtype="boolean") + + +def test_to_boolean_array_from_integer_array(): + result = pd.array(np.array([1, 0, 1, 0]), dtype="boolean") + expected = pd.array([True, False, True, False], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + # with missing values + result = pd.array(np.array([1, 0, 1, None]), dtype="boolean") + expected = pd.array([True, False, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +def test_to_boolean_array_from_float_array(): + result = pd.array(np.array([1.0, 0.0, 1.0, 0.0]), dtype="boolean") + expected = pd.array([True, False, True, False], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + # with missing values + result = pd.array(np.array([1.0, 0.0, 1.0, np.nan]), dtype="boolean") + expected = pd.array([True, False, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +def test_to_boolean_array_integer_like(): + # integers of 0's and 1's + result = pd.array([1, 0, 1, 0], dtype="boolean") + expected = pd.array([True, False, True, False], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + # with missing values + result = pd.array([1, 0, 1, None], dtype="boolean") + expected = pd.array([True, False, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + +def test_coerce_to_array(): + # TODO this is currently not public API + values = np.array([True, False, True, False], 
dtype="bool") + mask = np.array([False, False, False, True], dtype="bool") + result = BooleanArray(*coerce_to_array(values, mask=mask)) + expected = BooleanArray(values, mask) + tm.assert_extension_array_equal(result, expected) + assert result._data is values + assert result._mask is mask + result = BooleanArray(*coerce_to_array(values, mask=mask, copy=True)) + expected = BooleanArray(values, mask) + tm.assert_extension_array_equal(result, expected) + assert result._data is not values + assert result._mask is not mask + + # mixed missing from values and mask + values = [True, False, None, False] + mask = np.array([False, False, False, True], dtype="bool") + result = BooleanArray(*coerce_to_array(values, mask=mask)) + expected = BooleanArray( + np.array([True, False, True, True]), np.array([False, False, True, True]) + ) + tm.assert_extension_array_equal(result, expected) + result = BooleanArray(*coerce_to_array(np.array(values, dtype=object), mask=mask)) + tm.assert_extension_array_equal(result, expected) + result = BooleanArray(*coerce_to_array(values, mask=mask.tolist())) + tm.assert_extension_array_equal(result, expected) + + # raise errors for wrong dimension + values = np.array([True, False, True, False], dtype="bool") + mask = np.array([False, False, False, True], dtype="bool") + + # passing 2D values is OK as long as no mask + coerce_to_array(values.reshape(1, -1)) + + with pytest.raises(ValueError, match="values.shape and mask.shape must match"): + coerce_to_array(values.reshape(1, -1), mask=mask) + + with pytest.raises(ValueError, match="values.shape and mask.shape must match"): + coerce_to_array(values, mask=mask.reshape(1, -1)) + + +def test_coerce_to_array_from_boolean_array(): + # passing BooleanArray to coerce_to_array + values = np.array([True, False, True, False], dtype="bool") + mask = np.array([False, False, False, True], dtype="bool") + arr = BooleanArray(values, mask) + result = BooleanArray(*coerce_to_array(arr)) + tm.assert_extension_array_equal(result, arr) + # no copy + assert result._data is arr._data + assert result._mask is arr._mask + + result = BooleanArray(*coerce_to_array(arr), copy=True) + tm.assert_extension_array_equal(result, arr) + assert result._data is not arr._data + assert result._mask is not arr._mask + + with pytest.raises(ValueError, match="cannot pass mask for BooleanArray input"): + coerce_to_array(arr, mask=mask) + + +def test_coerce_to_numpy_array(): + # with missing values -> object dtype + arr = pd.array([True, False, None], dtype="boolean") + result = np.array(arr) + expected = np.array([True, False, pd.NA], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + # also with no missing values -> object dtype + arr = pd.array([True, False, True], dtype="boolean") + result = np.array(arr) + expected = np.array([True, False, True], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + # force bool dtype + result = np.array(arr, dtype="bool") + expected = np.array([True, False, True], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + # with missing values will raise error + arr = pd.array([True, False, None], dtype="boolean") + msg = ( + "cannot convert to 'bool'-dtype NumPy array with missing values. " + "Specify an appropriate 'na_value' for this dtype." 
+ ) + with pytest.raises(ValueError, match=msg): + np.array(arr, dtype="bool") + + +def test_to_boolean_array_from_strings(): + result = BooleanArray._from_sequence_of_strings( + np.array(["True", "False", "1", "1.0", "0", "0.0", np.nan], dtype=object), + dtype="boolean", + ) + expected = BooleanArray( + np.array([True, False, True, True, False, False, False]), + np.array([False, False, False, False, False, False, True]), + ) + + tm.assert_extension_array_equal(result, expected) + + +def test_to_boolean_array_from_strings_invalid_string(): + with pytest.raises(ValueError, match="cannot be cast"): + BooleanArray._from_sequence_of_strings(["donkey"], dtype="boolean") + + +@pytest.mark.parametrize("box", [True, False], ids=["series", "array"]) +def test_to_numpy(box): + con = pd.Series if box else pd.array + # default (with or without missing values) -> object dtype + arr = con([True, False, True], dtype="boolean") + result = arr.to_numpy() + expected = np.array([True, False, True], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + arr = con([True, False, None], dtype="boolean") + result = arr.to_numpy() + expected = np.array([True, False, pd.NA], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + arr = con([True, False, None], dtype="boolean") + result = arr.to_numpy(dtype="str") + expected = np.array([True, False, pd.NA], dtype=f"{tm.ENDIAN}U5") + tm.assert_numpy_array_equal(result, expected) + + # no missing values -> can convert to bool, otherwise raises + arr = con([True, False, True], dtype="boolean") + result = arr.to_numpy(dtype="bool") + expected = np.array([True, False, True], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + arr = con([True, False, None], dtype="boolean") + with pytest.raises(ValueError, match="cannot convert to 'bool'-dtype"): + result = arr.to_numpy(dtype="bool") + + # specify dtype and na_value + arr = con([True, False, None], dtype="boolean") + result = arr.to_numpy(dtype=object, na_value=None) + expected = np.array([True, False, None], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + result = arr.to_numpy(dtype=bool, na_value=False) + expected = np.array([True, False, False], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + result = arr.to_numpy(dtype="int64", na_value=-99) + expected = np.array([1, 0, -99], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + result = arr.to_numpy(dtype="float64", na_value=np.nan) + expected = np.array([1, 0, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + # converting to int or float without specifying na_value raises + with pytest.raises(ValueError, match="cannot convert to 'int64'-dtype"): + arr.to_numpy(dtype="int64") + + +def test_to_numpy_copy(): + # to_numpy can be zero-copy if no missing values + arr = pd.array([True, False, True], dtype="boolean") + result = arr.to_numpy(dtype=bool) + result[0] = False + tm.assert_extension_array_equal( + arr, pd.array([False, False, True], dtype="boolean") + ) + + arr = pd.array([True, False, True], dtype="boolean") + result = arr.to_numpy(dtype=bool, copy=True) + result[0] = False + tm.assert_extension_array_equal(arr, pd.array([True, False, True], dtype="boolean")) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_function.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_function.py new file mode 100644 index 
0000000000000000000000000000000000000000..2b3f3d3d16ac6c49d231ac526fa89570975e4bfb --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_function.py @@ -0,0 +1,126 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize( + "ufunc", [np.add, np.logical_or, np.logical_and, np.logical_xor] +) +def test_ufuncs_binary(ufunc): + # two BooleanArrays + a = pd.array([True, False, None], dtype="boolean") + result = ufunc(a, a) + expected = pd.array(ufunc(a._data, a._data), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + result = ufunc(s, a) + expected = pd.Series(ufunc(a._data, a._data), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_series_equal(result, expected) + + # Boolean with numpy array + arr = np.array([True, True, False]) + result = ufunc(a, arr) + expected = pd.array(ufunc(a._data, arr), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_extension_array_equal(result, expected) + + result = ufunc(arr, a) + expected = pd.array(ufunc(arr, a._data), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_extension_array_equal(result, expected) + + # BooleanArray with scalar + result = ufunc(a, True) + expected = pd.array(ufunc(a._data, True), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_extension_array_equal(result, expected) + + result = ufunc(True, a) + expected = pd.array(ufunc(True, a._data), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_extension_array_equal(result, expected) + + # not handled types + msg = r"operand type\(s\) all returned NotImplemented from __array_ufunc__" + with pytest.raises(TypeError, match=msg): + ufunc(a, "test") + + +@pytest.mark.parametrize("ufunc", [np.logical_not]) +def test_ufuncs_unary(ufunc): + a = pd.array([True, False, None], dtype="boolean") + result = ufunc(a) + expected = pd.array(ufunc(a._data), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_extension_array_equal(result, expected) + + ser = pd.Series(a) + result = ufunc(ser) + expected = pd.Series(ufunc(a._data), dtype="boolean") + expected[a._mask] = np.nan + tm.assert_series_equal(result, expected) + + +def test_ufunc_numeric(): + # np.sqrt on np.bool_ returns float16, which we upcast to Float32 + # bc we do not have Float16 + arr = pd.array([True, False, None], dtype="boolean") + + res = np.sqrt(arr) + + expected = pd.array([1, 0, None], dtype="Float32") + tm.assert_extension_array_equal(res, expected) + + +@pytest.mark.parametrize("values", [[True, False], [True, None]]) +def test_ufunc_reduce_raises(values): + arr = pd.array(values, dtype="boolean") + + res = np.add.reduce(arr) + if arr[-1] is pd.NA: + expected = pd.NA + else: + expected = arr._data.sum() + tm.assert_almost_equal(res, expected) + + +def test_value_counts_na(): + arr = pd.array([True, False, pd.NA], dtype="boolean") + result = arr.value_counts(dropna=False) + expected = pd.Series([1, 1, 1], index=arr, dtype="Int64", name="count") + assert expected.index.dtype == arr.dtype + tm.assert_series_equal(result, expected) + + result = arr.value_counts(dropna=True) + expected = pd.Series([1, 1], index=arr[:-1], dtype="Int64", name="count") + assert expected.index.dtype == arr.dtype + tm.assert_series_equal(result, expected) + + +def test_value_counts_with_normalize(): + ser = pd.Series([True, False, pd.NA], dtype="boolean") + result = ser.value_counts(normalize=True) + expected = pd.Series([1, 1], 
index=ser[:-1], dtype="Float64", name="proportion") / 2 + assert expected.index.dtype == "boolean" + tm.assert_series_equal(result, expected) + + +def test_diff(): + a = pd.array( + [True, True, False, False, True, None, True, None, False], dtype="boolean" + ) + result = pd.core.algorithms.diff(a, 1) + expected = pd.array( + [None, False, True, False, True, None, None, None, None], dtype="boolean" + ) + tm.assert_extension_array_equal(result, expected) + + ser = pd.Series(a) + result = ser.diff() + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_logical.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_logical.py new file mode 100644 index 0000000000000000000000000000000000000000..66c117ea3fc66cbc5f847cc96c23e80a98329c5d --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_logical.py @@ -0,0 +1,254 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.arrays import BooleanArray +from pandas.core.ops.mask_ops import ( + kleene_and, + kleene_or, + kleene_xor, +) +from pandas.tests.extension.base import BaseOpsUtil + + +class TestLogicalOps(BaseOpsUtil): + def test_numpy_scalars_ok(self, all_logical_operators): + a = pd.array([True, False, None], dtype="boolean") + op = getattr(a, all_logical_operators) + + tm.assert_extension_array_equal(op(True), op(np.bool_(True))) + tm.assert_extension_array_equal(op(False), op(np.bool_(False))) + + def get_op_from_name(self, op_name): + short_opname = op_name.strip("_") + short_opname = short_opname if "xor" in short_opname else short_opname + "_" + try: + op = getattr(operator, short_opname) + except AttributeError: + # Assume it is the reverse operator + rop = getattr(operator, short_opname[1:]) + op = lambda x, y: rop(y, x) + + return op + + def test_empty_ok(self, all_logical_operators): + a = pd.array([], dtype="boolean") + op_name = all_logical_operators + result = getattr(a, op_name)(True) + tm.assert_extension_array_equal(a, result) + + result = getattr(a, op_name)(False) + tm.assert_extension_array_equal(a, result) + + result = getattr(a, op_name)(pd.NA) + tm.assert_extension_array_equal(a, result) + + @pytest.mark.parametrize( + "other", ["a", pd.Timestamp(2017, 1, 1, 12), np.timedelta64(4)] + ) + def test_eq_mismatched_type(self, other): + # GH-44499 + arr = pd.array([True, False]) + result = arr == other + expected = pd.array([False, False]) + tm.assert_extension_array_equal(result, expected) + + result = arr != other + expected = pd.array([True, True]) + tm.assert_extension_array_equal(result, expected) + + def test_logical_length_mismatch_raises(self, all_logical_operators): + op_name = all_logical_operators + a = pd.array([True, False, None], dtype="boolean") + msg = "Lengths must match" + + with pytest.raises(ValueError, match=msg): + getattr(a, op_name)([True, False]) + + with pytest.raises(ValueError, match=msg): + getattr(a, op_name)(np.array([True, False])) + + with pytest.raises(ValueError, match=msg): + getattr(a, op_name)(pd.array([True, False], dtype="boolean")) + + def test_logical_nan_raises(self, all_logical_operators): + op_name = all_logical_operators + a = pd.array([True, False, None], dtype="boolean") + msg = "Got float instead" + + with pytest.raises(TypeError, match=msg): + getattr(a, op_name)(np.nan) + + @pytest.mark.parametrize("other", ["a", 1]) + def 
test_non_bool_or_na_other_raises(self, other, all_logical_operators): + a = pd.array([True, False], dtype="boolean") + with pytest.raises(TypeError, match=str(type(other).__name__)): + getattr(a, all_logical_operators)(other) + + def test_kleene_or(self): + # A clear test of behavior. + a = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + b = pd.array([True, False, None] * 3, dtype="boolean") + result = a | b + expected = pd.array( + [True, True, True, True, False, None, True, None, None], dtype="boolean" + ) + tm.assert_extension_array_equal(result, expected) + + result = b | a + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_extension_array_equal( + a, pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + ) + tm.assert_extension_array_equal( + b, pd.array([True, False, None] * 3, dtype="boolean") + ) + + @pytest.mark.parametrize( + "other, expected", + [ + (pd.NA, [True, None, None]), + (True, [True, True, True]), + (np.bool_(True), [True, True, True]), + (False, [True, False, None]), + (np.bool_(False), [True, False, None]), + ], + ) + def test_kleene_or_scalar(self, other, expected): + # TODO: test True & False + a = pd.array([True, False, None], dtype="boolean") + result = a | other + expected = pd.array(expected, dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + result = other | a + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_extension_array_equal( + a, pd.array([True, False, None], dtype="boolean") + ) + + def test_kleene_and(self): + # A clear test of behavior. + a = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + b = pd.array([True, False, None] * 3, dtype="boolean") + result = a & b + expected = pd.array( + [True, False, None, False, False, False, None, False, None], dtype="boolean" + ) + tm.assert_extension_array_equal(result, expected) + + result = b & a + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_extension_array_equal( + a, pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + ) + tm.assert_extension_array_equal( + b, pd.array([True, False, None] * 3, dtype="boolean") + ) + + @pytest.mark.parametrize( + "other, expected", + [ + (pd.NA, [None, False, None]), + (True, [True, False, None]), + (False, [False, False, False]), + (np.bool_(True), [True, False, None]), + (np.bool_(False), [False, False, False]), + ], + ) + def test_kleene_and_scalar(self, other, expected): + a = pd.array([True, False, None], dtype="boolean") + result = a & other + expected = pd.array(expected, dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + result = other & a + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_extension_array_equal( + a, pd.array([True, False, None], dtype="boolean") + ) + + def test_kleene_xor(self): + a = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + b = pd.array([True, False, None] * 3, dtype="boolean") + result = a ^ b + expected = pd.array( + [False, True, None, True, False, None, None, None, None], dtype="boolean" + ) + tm.assert_extension_array_equal(result, expected) + + result = b ^ a + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_extension_array_equal( + a, pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + ) 
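Aside: the Kleene tests in this file pin down pandas' three-valued logic for the nullable "boolean" dtype. A minimal sketch of the truth table they encode, using only public API (the expected values in the comments follow the same rules the scalar assertions in this file check: True | x is always True, False & x is always False, and xor with an unknown operand is always unknown):

import pandas as pd

a = pd.array([True, False, None], dtype="boolean")
print(a | pd.NA)  # [True, <NA>, <NA>]
print(a & pd.NA)  # [<NA>, False, <NA>]
print(a ^ pd.NA)  # [<NA>, <NA>, <NA>]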
+ tm.assert_extension_array_equal( + b, pd.array([True, False, None] * 3, dtype="boolean") + ) + + @pytest.mark.parametrize( + "other, expected", + [ + (pd.NA, [None, None, None]), + (True, [False, True, None]), + (np.bool_(True), [False, True, None]), + (np.bool_(False), [True, False, None]), + ], + ) + def test_kleene_xor_scalar(self, other, expected): + a = pd.array([True, False, None], dtype="boolean") + result = a ^ other + expected = pd.array(expected, dtype="boolean") + tm.assert_extension_array_equal(result, expected) + + result = other ^ a + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + tm.assert_extension_array_equal( + a, pd.array([True, False, None], dtype="boolean") + ) + + @pytest.mark.parametrize("other", [True, False, pd.NA, [True, False, None] * 3]) + def test_no_masked_assumptions(self, other, all_logical_operators): + # The logical operations should not assume that masked values are False! + a = pd.arrays.BooleanArray( + np.array([True, True, True, False, False, False, True, False, True]), + np.array([False] * 6 + [True, True, True]), + ) + b = pd.array([True] * 3 + [False] * 3 + [None] * 3, dtype="boolean") + if isinstance(other, list): + other = pd.array(other, dtype="boolean") + + result = getattr(a, all_logical_operators)(other) + expected = getattr(b, all_logical_operators)(other) + tm.assert_extension_array_equal(result, expected) + + if isinstance(other, BooleanArray): + other._data[other._mask] = True + a._data[a._mask] = False + + result = getattr(a, all_logical_operators)(other) + expected = getattr(b, all_logical_operators)(other) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("operation", [kleene_or, kleene_xor, kleene_and]) +def test_error_both_scalar(operation): + msg = r"Either `left` or `right` need to be a np\.ndarray." 
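Aside: test_error_both_scalar exercises the low-level mask ops (kleene_or, kleene_and, kleene_xor) directly. The sketch below shows the calling convention implied by that test, a data/mask pair in and a values/mask pair out; note these helpers live in the private pandas.core.ops.mask_ops module, so the exact signature is an implementation detail that may change between versions:

import numpy as np
import pandas as pd
from pandas.arrays import BooleanArray
from pandas.core.ops.mask_ops import kleene_or

values = np.array([True, False, False])
mask = np.array([False, False, True])  # third element is missing
# roughly what BooleanArray.__or__ does for `array | pd.NA`
new_values, new_mask = kleene_or(values, pd.NA, mask, None)
print(BooleanArray(new_values, new_mask))  # [True, <NA>, <NA>]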
+ with pytest.raises(TypeError, match=msg): + # masks need to be non-None, otherwise it ends up in an infinite recursion + operation(True, True, np.zeros(1), np.zeros(1))
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_reduction.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..dd8c3eda9ed05b6844c90024631a1e90f755069e --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_reduction.py @@ -0,0 +1,62 @@ +import numpy as np +import pytest + +import pandas as pd + + +@pytest.fixture +def data(): + """Fixture returning boolean array, with valid and missing values.""" + return pd.array( + [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False], + dtype="boolean", + ) + + +@pytest.mark.parametrize( + "values, exp_any, exp_all, exp_any_noskip, exp_all_noskip", + [ + ([True, pd.NA], True, True, True, pd.NA), + ([False, pd.NA], False, False, pd.NA, False), + ([pd.NA], False, True, pd.NA, pd.NA), + ([], False, True, False, True), + # GH-33253: all True / all False values buggy with skipna=False + ([True, True], True, True, True, True), + ([False, False], False, False, False, False), + ], +) +def test_any_all(values, exp_any, exp_all, exp_any_noskip, exp_all_noskip): + # the methods return numpy scalars + exp_any = pd.NA if exp_any is pd.NA else np.bool_(exp_any) + exp_all = pd.NA if exp_all is pd.NA else np.bool_(exp_all) + exp_any_noskip = pd.NA if exp_any_noskip is pd.NA else np.bool_(exp_any_noskip) + exp_all_noskip = pd.NA if exp_all_noskip is pd.NA else np.bool_(exp_all_noskip) + + for con in [pd.array, pd.Series]: + a = con(values, dtype="boolean") + assert a.any() is exp_any + assert a.all() is exp_all + assert a.any(skipna=False) is exp_any_noskip + assert a.all(skipna=False) is exp_all_noskip + + assert np.any(a.any()) is exp_any + assert np.all(a.all()) is exp_all + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_reductions_return_types(dropna, data, all_numeric_reductions): + op = all_numeric_reductions + s = pd.Series(data) + if dropna: + s = s.dropna() + + if op in ("sum", "prod"): + assert isinstance(getattr(s, op)(), np.int_) + elif op == "count": + # Oddly on the 32 bit build (but not Windows), this is intc (!= intp) + assert isinstance(getattr(s, op)(), np.integer) + elif op in ("min", "max"): + assert isinstance(getattr(s, op)(), np.bool_) + else: + # "mean", "std", "var", "median", "kurt", "skew" + assert isinstance(getattr(s, op)(), np.float64)
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_repr.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_repr.py new file mode 100644 index 0000000000000000000000000000000000000000..0ee904b18cc9ec6197ed3ad009fae1da593c5219 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/boolean/test_repr.py @@ -0,0 +1,13 @@ +import pandas as pd + + +def test_repr(): + df = pd.DataFrame({"A": pd.array([True, False, None], dtype="boolean")}) + expected = " A\n0 True\n1 False\n2 <NA>" + assert repr(df) == expected + + expected = "0 True\n1 False\n2 <NA>\nName: A, dtype: boolean" + assert repr(df.A) == expected + + expected = "<BooleanArray>\n[True, False, <NA>]\nLength: 3, dtype: boolean" + assert repr(df.A.array) == expected
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__init__.py 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/test_constructors.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..3652b5fec46bbe7a519dd2c3a196ac87bd74784f --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/test_constructors.py @@ -0,0 +1,284 @@ +import numpy as np +import pytest + +from pandas._libs import iNaT + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import DatetimeArray + + +class TestDatetimeArrayConstructor: + def test_from_sequence_invalid_type(self): + mi = pd.MultiIndex.from_product([np.arange(5), np.arange(5)]) + with pytest.raises(TypeError, match="Cannot create a DatetimeArray"): + DatetimeArray._from_sequence(mi, dtype="M8[ns]") + + def test_only_1dim_accepted(self): + arr = np.array([0, 1, 2, 3], dtype="M8[h]").astype("M8[ns]") + + depr_msg = "DatetimeArray.__init__ is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(ValueError, match="Only 1-dimensional"): + # 3-dim, we allow 2D to sneak in for ops purposes GH#29853 + DatetimeArray(arr.reshape(2, 2, 1)) + + with 
tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(ValueError, match="Only 1-dimensional"): + # 0-dim + DatetimeArray(arr[[0]].squeeze()) + + def test_freq_validation(self): + # GH#24623 check that invalid instances cannot be created with the + # public constructor + arr = np.arange(5, dtype=np.int64) * 3600 * 10**9 + + msg = ( + "Inferred frequency h from passed values does not " + "conform to passed frequency W-SUN" + ) + depr_msg = "DatetimeArray.__init__ is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(ValueError, match=msg): + DatetimeArray(arr, freq="W") + + @pytest.mark.parametrize( + "meth", + [ + DatetimeArray._from_sequence, + pd.to_datetime, + pd.DatetimeIndex, + ], + ) + def test_mixing_naive_tzaware_raises(self, meth): + # GH#24569 + arr = np.array([pd.Timestamp("2000"), pd.Timestamp("2000", tz="CET")]) + + msg = ( + "Cannot mix tz-aware with tz-naive values|" + "Tz-aware datetime.datetime cannot be converted " + "to datetime64 unless utc=True" + ) + + for obj in [arr, arr[::-1]]: + # check that we raise regardless of whether naive is found + # before aware or vice-versa + with pytest.raises(ValueError, match=msg): + meth(obj) + + def test_from_pandas_array(self): + arr = pd.array(np.arange(5, dtype=np.int64)) * 3600 * 10**9 + + result = DatetimeArray._from_sequence(arr, dtype="M8[ns]")._with_freq("infer") + + expected = pd.date_range("1970-01-01", periods=5, freq="h")._data + tm.assert_datetime_array_equal(result, expected) + + def test_mismatched_timezone_raises(self): + depr_msg = "DatetimeArray.__init__ is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + arr = DatetimeArray( + np.array(["2000-01-01T06:00:00"], dtype="M8[ns]"), + dtype=DatetimeTZDtype(tz="US/Central"), + ) + dtype = DatetimeTZDtype(tz="US/Eastern") + msg = r"dtype=datetime64\[ns.*\] does not match data dtype datetime64\[ns.*\]" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(TypeError, match=msg): + DatetimeArray(arr, dtype=dtype) + + # also with mismatched tzawareness + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(TypeError, match=msg): + DatetimeArray(arr, dtype=np.dtype("M8[ns]")) + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(TypeError, match=msg): + DatetimeArray(arr.tz_localize(None), dtype=arr.dtype) + + def test_non_array_raises(self): + depr_msg = "DatetimeArray.__init__ is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(ValueError, match="list"): + DatetimeArray([1, 2, 3]) + + def test_bool_dtype_raises(self): + arr = np.array([1, 2, 3], dtype="bool") + + depr_msg = "DatetimeArray.__init__ is deprecated" + msg = "Unexpected value for 'dtype': 'bool'. 
Must be" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(ValueError, match=msg): + DatetimeArray(arr) + + msg = r"dtype bool cannot be converted to datetime64\[ns\]" + with pytest.raises(TypeError, match=msg): + DatetimeArray._from_sequence(arr, dtype="M8[ns]") + + with pytest.raises(TypeError, match=msg): + pd.DatetimeIndex(arr) + + with pytest.raises(TypeError, match=msg): + pd.to_datetime(arr) + + def test_incorrect_dtype_raises(self): + depr_msg = "DatetimeArray.__init__ is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): + DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="category") + + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): + DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="m8[s]") + + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(ValueError, match="Unexpected value for 'dtype'."): + DatetimeArray(np.array([1, 2, 3], dtype="i8"), dtype="M8[D]") + + def test_mismatched_values_dtype_units(self): + arr = np.array([1, 2, 3], dtype="M8[s]") + dtype = np.dtype("M8[ns]") + msg = "Values resolution does not match dtype." + depr_msg = "DatetimeArray.__init__ is deprecated" + + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(ValueError, match=msg): + DatetimeArray(arr, dtype=dtype) + + dtype2 = DatetimeTZDtype(tz="UTC", unit="ns") + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(ValueError, match=msg): + DatetimeArray(arr, dtype=dtype2) + + def test_freq_infer_raises(self): + depr_msg = "DatetimeArray.__init__ is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + with pytest.raises(ValueError, match="Frequency inference"): + DatetimeArray(np.array([1, 2, 3], dtype="i8"), freq="infer") + + def test_copy(self): + data = np.array([1, 2, 3], dtype="M8[ns]") + arr = DatetimeArray._from_sequence(data, copy=False) + assert arr._ndarray is data + + arr = DatetimeArray._from_sequence(data, copy=True) + assert arr._ndarray is not data + + @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"]) + def test_numpy_datetime_unit(self, unit): + data = np.array([1, 2, 3], dtype=f"M8[{unit}]") + arr = DatetimeArray._from_sequence(data) + assert arr.unit == unit + assert arr[0].unit == unit + + +class TestSequenceToDT64NS: + def test_tz_dtype_mismatch_raises(self): + arr = DatetimeArray._from_sequence( + ["2000"], dtype=DatetimeTZDtype(tz="US/Central") + ) + with pytest.raises(TypeError, match="data is already tz-aware"): + DatetimeArray._from_sequence(arr, dtype=DatetimeTZDtype(tz="UTC")) + + def test_tz_dtype_matches(self): + dtype = DatetimeTZDtype(tz="US/Central") + arr = DatetimeArray._from_sequence(["2000"], dtype=dtype) + result = DatetimeArray._from_sequence(arr, dtype=dtype) + tm.assert_equal(arr, result) + + @pytest.mark.parametrize("order", ["F", "C"]) + def test_2d(self, order): + dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific") + arr = np.array(dti, dtype=object).reshape(3, 2) + if order == "F": + arr = arr.T + + res = DatetimeArray._from_sequence(arr, dtype=dti.dtype) + expected = DatetimeArray._from_sequence(arr.ravel(), dtype=dti.dtype).reshape( + arr.shape + ) + tm.assert_datetime_array_equal(res, expected) + + +# ---------------------------------------------------------------------------- +# 
Arrow interaction + + +EXTREME_VALUES = [0, 123456789, None, iNaT, 2**63 - 1, -(2**63) + 1] +FINE_TO_COARSE_SAFE = [123_000_000_000, None, -123_000_000_000] +COARSE_TO_FINE_SAFE = [123, None, -123] + + +@pytest.mark.parametrize( + ("pa_unit", "pd_unit", "pa_tz", "pd_tz", "data"), + [ + ("s", "s", "UTC", "UTC", EXTREME_VALUES), + ("ms", "ms", "UTC", "Europe/Berlin", EXTREME_VALUES), + ("us", "us", "US/Eastern", "UTC", EXTREME_VALUES), + ("ns", "ns", "US/Central", "Asia/Kolkata", EXTREME_VALUES), + ("ns", "s", "UTC", "UTC", FINE_TO_COARSE_SAFE), + ("us", "ms", "UTC", "Europe/Berlin", FINE_TO_COARSE_SAFE), + ("ms", "us", "US/Eastern", "UTC", COARSE_TO_FINE_SAFE), + ("s", "ns", "US/Central", "Asia/Kolkata", COARSE_TO_FINE_SAFE), + ], +) +def test_from_arrow_with_different_units_and_timezones_with( + pa_unit, pd_unit, pa_tz, pd_tz, data +): + pa = pytest.importorskip("pyarrow") + + pa_type = pa.timestamp(pa_unit, tz=pa_tz) + arr = pa.array(data, type=pa_type) + dtype = DatetimeTZDtype(unit=pd_unit, tz=pd_tz) + + result = dtype.__from_arrow__(arr) + expected = DatetimeArray._from_sequence(data, dtype=f"M8[{pa_unit}, UTC]").astype( + dtype, copy=False + ) + tm.assert_extension_array_equal(result, expected) + + result = dtype.__from_arrow__(pa.chunked_array([arr])) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + ("unit", "tz"), + [ + ("s", "UTC"), + ("ms", "Europe/Berlin"), + ("us", "US/Eastern"), + ("ns", "Asia/Kolkata"), + ("ns", "UTC"), + ], +) +def test_from_arrow_from_empty(unit, tz): + pa = pytest.importorskip("pyarrow") + + data = [] + arr = pa.array(data) + dtype = DatetimeTZDtype(unit=unit, tz=tz) + + result = dtype.__from_arrow__(arr) + expected = DatetimeArray._from_sequence(np.array(data, dtype=f"datetime64[{unit}]")) + expected = expected.tz_localize(tz=tz) + tm.assert_extension_array_equal(result, expected) + + result = dtype.__from_arrow__(pa.chunked_array([arr])) + tm.assert_extension_array_equal(result, expected) + + +def test_from_arrow_from_integers(): + pa = pytest.importorskip("pyarrow") + + data = [0, 123456789, None, 2**63 - 1, iNaT, -123456789] + arr = pa.array(data) + dtype = DatetimeTZDtype(unit="ns", tz="UTC") + + result = dtype.__from_arrow__(arr) + expected = DatetimeArray._from_sequence(np.array(data, dtype="datetime64[ns]")) + expected = expected.tz_localize("UTC") + tm.assert_extension_array_equal(result, expected) + + result = dtype.__from_arrow__(pa.chunked_array([arr])) + tm.assert_extension_array_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/test_cumulative.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/test_cumulative.py new file mode 100644 index 0000000000000000000000000000000000000000..e9d2dfdd0048a42a3f23e41be1d45a89aae11d23 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/test_cumulative.py @@ -0,0 +1,44 @@ +import pytest + +import pandas._testing as tm +from pandas.core.arrays import DatetimeArray + + +class TestAccumulator: + def test_accumulators_freq(self): + # GH#50297 + arr = DatetimeArray._from_sequence( + [ + "2000-01-01", + "2000-01-02", + "2000-01-03", + ], + dtype="M8[ns]", + )._with_freq("infer") + result = arr._accumulate("cummin") + expected = DatetimeArray._from_sequence(["2000-01-01"] * 3, dtype="M8[ns]") + tm.assert_datetime_array_equal(result, expected) + + result = arr._accumulate("cummax") + expected = DatetimeArray._from_sequence( + [ + "2000-01-01", + "2000-01-02", + 
"2000-01-03", + ], + dtype="M8[ns]", + ) + tm.assert_datetime_array_equal(result, expected) + + @pytest.mark.parametrize("func", ["cumsum", "cumprod"]) + def test_accumulators_disallowed(self, func): + # GH#50297 + arr = DatetimeArray._from_sequence( + [ + "2000-01-01", + "2000-01-02", + ], + dtype="M8[ns]", + )._with_freq("infer") + with pytest.raises(TypeError, match=f"Accumulation {func}"): + arr._accumulate(func) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/test_reductions.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/test_reductions.py new file mode 100644 index 0000000000000000000000000000000000000000..a941546b13a567b705f61a3a667119cd55a2f0e4 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/test_reductions.py @@ -0,0 +1,183 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import DatetimeTZDtype + +import pandas as pd +from pandas import NaT +import pandas._testing as tm +from pandas.core.arrays import DatetimeArray + + +class TestReductions: + @pytest.fixture(params=["s", "ms", "us", "ns"]) + def unit(self, request): + return request.param + + @pytest.fixture + def arr1d(self, tz_naive_fixture): + """Fixture returning DatetimeArray with parametrized timezones""" + tz = tz_naive_fixture + dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]") + arr = DatetimeArray._from_sequence( + [ + "2000-01-03", + "2000-01-03", + "NaT", + "2000-01-02", + "2000-01-05", + "2000-01-04", + ], + dtype=dtype, + ) + return arr + + def test_min_max(self, arr1d, unit): + arr = arr1d + arr = arr.as_unit(unit) + tz = arr.tz + + result = arr.min() + expected = pd.Timestamp("2000-01-02", tz=tz).as_unit(unit) + assert result == expected + assert result.unit == expected.unit + + result = arr.max() + expected = pd.Timestamp("2000-01-05", tz=tz).as_unit(unit) + assert result == expected + assert result.unit == expected.unit + + result = arr.min(skipna=False) + assert result is NaT + + result = arr.max(skipna=False) + assert result is NaT + + @pytest.mark.parametrize("tz", [None, "US/Central"]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_min_max_empty(self, skipna, tz): + dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]") + arr = DatetimeArray._from_sequence([], dtype=dtype) + result = arr.min(skipna=skipna) + assert result is NaT + + result = arr.max(skipna=skipna) + assert result is NaT + + @pytest.mark.parametrize("tz", [None, "US/Central"]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_median_empty(self, skipna, tz): + dtype = DatetimeTZDtype(tz=tz) if tz is not None else np.dtype("M8[ns]") + arr = DatetimeArray._from_sequence([], dtype=dtype) + result = arr.median(skipna=skipna) + assert result is NaT + + arr = arr.reshape(0, 3) + result = arr.median(axis=0, skipna=skipna) + expected = type(arr)._from_sequence([NaT, NaT, NaT], dtype=arr.dtype) + tm.assert_equal(result, expected) + + result = arr.median(axis=1, skipna=skipna) + expected = type(arr)._from_sequence([], dtype=arr.dtype) + tm.assert_equal(result, expected) + + def test_median(self, arr1d): + arr = arr1d + + result = arr.median() + assert result == arr[0] + result = arr.median(skipna=False) + assert result is NaT + + result = arr.dropna().median(skipna=False) + assert result == arr[0] + + result = arr.median(axis=0) + assert result == arr[0] + + def test_median_axis(self, arr1d): + arr = arr1d + assert arr.median(axis=0) == arr.median() + 
assert arr.median(axis=0, skipna=False) is NaT + + msg = r"abs\(axis\) must be less than ndim" + with pytest.raises(ValueError, match=msg): + arr.median(axis=1) + + @pytest.mark.filterwarnings("ignore:All-NaN slice encountered:RuntimeWarning") + def test_median_2d(self, arr1d): + arr = arr1d.reshape(1, -1) + + # axis = None + assert arr.median() == arr1d.median() + assert arr.median(skipna=False) is NaT + + # axis = 0 + result = arr.median(axis=0) + expected = arr1d + tm.assert_equal(result, expected) + + # Since column 3 is all-NaT, we get NaT there with or without skipna + result = arr.median(axis=0, skipna=False) + expected = arr1d + tm.assert_equal(result, expected) + + # axis = 1 + result = arr.median(axis=1) + expected = type(arr)._from_sequence([arr1d.median()], dtype=arr.dtype) + tm.assert_equal(result, expected) + + result = arr.median(axis=1, skipna=False) + expected = type(arr)._from_sequence([NaT], dtype=arr.dtype) + tm.assert_equal(result, expected) + + def test_mean(self, arr1d): + arr = arr1d + + # manually verified result + expected = arr[0] + 0.4 * pd.Timedelta(days=1) + + result = arr.mean() + assert result == expected + result = arr.mean(skipna=False) + assert result is NaT + + result = arr.dropna().mean(skipna=False) + assert result == expected + + result = arr.mean(axis=0) + assert result == expected + + def test_mean_2d(self): + dti = pd.date_range("2016-01-01", periods=6, tz="US/Pacific") + dta = dti._data.reshape(3, 2) + + result = dta.mean(axis=0) + expected = dta[1] + tm.assert_datetime_array_equal(result, expected) + + result = dta.mean(axis=1) + expected = dta[:, 0] + pd.Timedelta(hours=12) + tm.assert_datetime_array_equal(result, expected) + + result = dta.mean(axis=None) + expected = dti.mean() + assert result == expected + + @pytest.mark.parametrize("skipna", [True, False]) + def test_mean_empty(self, arr1d, skipna): + arr = arr1d[:0] + + assert arr.mean(skipna=skipna) is NaT + + arr2d = arr.reshape(0, 3) + result = arr2d.mean(axis=0, skipna=skipna) + expected = DatetimeArray._from_sequence([NaT, NaT, NaT], dtype=arr.dtype) + tm.assert_datetime_array_equal(result, expected) + + result = arr2d.mean(axis=1, skipna=skipna) + expected = arr # i.e. 
1D, empty + tm.assert_datetime_array_equal(result, expected) + + result = arr2d.mean(axis=None, skipna=skipna) + assert result is NaT
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/conftest.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..f73400dfe689e91c4c2b457c4be1a0a41380fd6a --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/conftest.py @@ -0,0 +1,68 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas.core.arrays.integer import ( + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, +) + + +@pytest.fixture( + params=[ + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, + ] +) +def dtype(request): + """Parametrized fixture returning integer 'dtype'""" + return request.param() + + +@pytest.fixture +def data(dtype): + """ + Fixture returning 'data' array with valid and missing values according to + parametrized integer 'dtype'. + + Used to test dtype conversion with and without missing values. 
+ """ + return pd.array( + list(range(8)) + [np.nan] + list(range(10, 98)) + [np.nan] + [99, 100], + dtype=dtype, + ) + + +@pytest.fixture +def data_missing(dtype): + """ + Fixture returning array with exactly one NaN and one valid integer, + according to parametrized integer 'dtype'. + + Used to test dtype conversion with and without missing values. + """ + return pd.array([np.nan, 1], dtype=dtype) + + +@pytest.fixture(params=["data", "data_missing"]) +def all_data(request, data, data_missing): + """Parametrized fixture returning 'data' or 'data_missing' integer arrays. + + Used to test dtype conversion with and without missing values. + """ + if request.param == "data": + return data + elif request.param == "data_missing": + return data_missing diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_arithmetic.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_arithmetic.py new file mode 100644 index 0000000000000000000000000000000000000000..8acd298f37a0795851c4c37bceff5a8222f521ed --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_arithmetic.py @@ -0,0 +1,385 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core import ops +from pandas.core.arrays import FloatingArray + +# Basic test for the arithmetic array ops +# ----------------------------------------------------------------------------- + + +@pytest.mark.parametrize( + "opname, exp", + [("add", [1, 3, None, None, 9]), ("mul", [0, 2, None, None, 20])], + ids=["add", "mul"], +) +def test_add_mul(dtype, opname, exp): + a = pd.array([0, 1, None, 3, 4], dtype=dtype) + b = pd.array([1, 2, 3, None, 5], dtype=dtype) + + # array / array + expected = pd.array(exp, dtype=dtype) + + op = getattr(operator, opname) + result = op(a, b) + tm.assert_extension_array_equal(result, expected) + + op = getattr(ops, "r" + opname) + result = op(a, b) + tm.assert_extension_array_equal(result, expected) + + +def test_sub(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a - b + expected = pd.array([1, 1, None, None, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_div(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a / b + expected = pd.array([np.inf, 2, None, None, 1.25], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("zero, negative", [(0, False), (0.0, False), (-0.0, True)]) +def test_divide_by_zero(zero, negative): + # https://github.com/pandas-dev/pandas/issues/27398, GH#22793 + a = pd.array([0, 1, -1, None], dtype="Int64") + result = a / zero + expected = FloatingArray( + np.array([np.nan, np.inf, -np.inf, 1], dtype="float64"), + np.array([False, False, False, True]), + ) + if negative: + expected *= -1 + tm.assert_extension_array_equal(result, expected) + + +def test_floordiv(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a // b + # Series op sets 1//0 to np.inf, which IntegerArray does not do (yet) + expected = pd.array([0, 2, None, None, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_floordiv_by_int_zero_no_mask(any_int_ea_dtype): + # GH 48223: Aligns with non-masked floordiv + # but differs from numpy + # 
https://github.com/pandas-dev/pandas/issues/30188#issuecomment-564452740 + ser = pd.Series([0, 1], dtype=any_int_ea_dtype) + result = 1 // ser + expected = pd.Series([np.inf, 1.0], dtype="Float64") + tm.assert_series_equal(result, expected) + + ser_non_nullable = ser.astype(ser.dtype.numpy_dtype) + result = 1 // ser_non_nullable + expected = expected.astype(np.float64) + tm.assert_series_equal(result, expected) + + +def test_mod(dtype): + a = pd.array([1, 2, 3, None, 5], dtype=dtype) + b = pd.array([0, 1, None, 3, 4], dtype=dtype) + + result = a % b + expected = pd.array([0, 0, None, None, 1], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_pow_scalar(): + a = pd.array([-1, 0, 1, None, 2], dtype="Int64") + result = a**0 + expected = pd.array([1, 1, 1, 1, 1], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = a**1 + expected = pd.array([-1, 0, 1, None, 2], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = a**pd.NA + expected = pd.array([None, None, 1, None, None], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = a**np.nan + expected = FloatingArray( + np.array([np.nan, np.nan, 1, np.nan, np.nan], dtype="float64"), + np.array([False, False, False, True, False]), + ) + tm.assert_extension_array_equal(result, expected) + + # reversed + a = a[1:] # Can't raise integers to negative powers. + + result = 0**a + expected = pd.array([1, 0, None, 0], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = 1**a + expected = pd.array([1, 1, 1, 1], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = pd.NA**a + expected = pd.array([1, None, None, None], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = np.nan**a + expected = FloatingArray( + np.array([1, np.nan, np.nan, np.nan], dtype="float64"), + np.array([False, False, True, False]), + ) + tm.assert_extension_array_equal(result, expected) + + +def test_pow_array(): + a = pd.array([0, 0, 0, 1, 1, 1, None, None, None]) + b = pd.array([0, 1, None, 0, 1, None, 0, 1, None]) + result = a**b + expected = pd.array([1, 0, None, 1, 1, 1, 1, None, None]) + tm.assert_extension_array_equal(result, expected) + + +def test_rpow_one_to_na(): + # https://github.com/pandas-dev/pandas/issues/22022 + # https://github.com/pandas-dev/pandas/issues/29997 + arr = pd.array([np.nan, np.nan], dtype="Int64") + result = np.array([1.0, 2.0]) ** arr + expected = pd.array([1.0, np.nan], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("other", [0, 0.5]) +def test_numpy_zero_dim_ndarray(other): + arr = pd.array([1, None, 2]) + result = arr + np.array(other) + expected = arr + other + tm.assert_equal(result, expected) + + +# Test generic characteristics / errors +# ----------------------------------------------------------------------------- + + +def test_error_invalid_values(data, all_arithmetic_operators, using_infer_string): + op = all_arithmetic_operators + s = pd.Series(data) + ops = getattr(s, op) + + if using_infer_string: + import pyarrow as pa + + errs = (TypeError, pa.lib.ArrowNotImplementedError, NotImplementedError) + else: + errs = TypeError + + # invalid scalars + msg = "|".join( + [ + r"can only perform ops with numeric values", + r"IntegerArray cannot perform the operation mod", + r"unsupported operand type", + r"can only concatenate str \(not \"int\"\) to str", + "not all arguments converted during string", + "ufunc 
'.*' not supported for the input types, and the inputs could not", + "ufunc '.*' did not contain a loop with signature matching types", + "Addition/subtraction of integers and integer-arrays with Timestamp", + "has no kernel", + "not implemented", + "The 'out' kwarg is necessary. Use numpy.strings.multiply without it.", + ] + ) + with pytest.raises(errs, match=msg): + ops("foo") + with pytest.raises(errs, match=msg): + ops(pd.Timestamp("20180101")) + + # invalid array-likes + str_ser = pd.Series("foo", index=s.index) + # with pytest.raises(TypeError, match=msg): + if ( + all_arithmetic_operators + in [ + "__mul__", + "__rmul__", + ] + and not using_infer_string + ): # (data[~data.isna()] >= 0).all(): + res = ops(str_ser) + expected = pd.Series(["foo" * x for x in data], index=s.index) + expected = expected.fillna(np.nan) + # TODO: doing this fillna to keep tests passing as we make + # assert_almost_equal stricter, but the expected with pd.NA seems + # more-correct than np.nan here. + tm.assert_series_equal(res, expected) + else: + with pytest.raises(errs, match=msg): + ops(str_ser) + + msg = "|".join( + [ + "can only perform ops with numeric values", + "cannot perform .* with this index type: DatetimeArray", + "Addition/subtraction of integers and integer-arrays " + "with DatetimeArray is no longer supported. *", + "unsupported operand type", + r"can only concatenate str \(not \"int\"\) to str", + "not all arguments converted during string", + "cannot subtract DatetimeArray from ndarray", + "has no kernel", + "not implemented", + ] + ) + with pytest.raises(errs, match=msg): + ops(pd.Series(pd.date_range("20180101", periods=len(s)))) + + +# Various +# ----------------------------------------------------------------------------- + + +# TODO test unsigned overflow + + +def test_arith_coerce_scalar(data, all_arithmetic_operators): + op = tm.get_op_from_name(all_arithmetic_operators) + s = pd.Series(data) + other = 0.01 + + result = op(s, other) + expected = op(s.astype(float), other) + expected = expected.astype("Float64") + + # rmod results in NaN that wasn't NA in original nullable Series -> unmask it + if all_arithmetic_operators == "__rmod__": + mask = (s == 0).fillna(False).to_numpy(bool) + expected.array._mask[mask] = False + + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("other", [1.0, np.array(1.0)]) +def test_arithmetic_conversion(all_arithmetic_operators, other): + # if we have a float operand we should have a float result + # if that is equal to an integer + op = tm.get_op_from_name(all_arithmetic_operators) + + s = pd.Series([1, 2, 3], dtype="Int64") + result = op(s, other) + assert result.dtype == "Float64" + + +def test_cross_type_arithmetic(): + df = pd.DataFrame( + { + "A": pd.Series([1, 2, np.nan], dtype="Int64"), + "B": pd.Series([1, np.nan, 3], dtype="UInt8"), + "C": [1, 2, 3], + } + ) + + result = df.A + df.C + expected = pd.Series([2, 4, np.nan], dtype="Int64") + tm.assert_series_equal(result, expected) + + result = (df.A + df.C) * 3 == 12 + expected = pd.Series([False, True, None], dtype="boolean") + tm.assert_series_equal(result, expected) + + result = df.A + df.B + expected = pd.Series([2, np.nan, np.nan], dtype="Int64") + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("op", ["mean"]) +def test_reduce_to_float(op): + # some reduce ops always return float, even if the result + # is a rounded number + df = pd.DataFrame( + { + "A": ["a", "b", "b"], + "B": [1, None, 3], + "C": pd.array([1, None, 3], dtype="Int64"), + } + ) + 
+ # op + result = getattr(df.C, op)() + assert isinstance(result, float) + + # groupby + result = getattr(df.groupby("A"), op)() + + expected = pd.DataFrame( + {"B": np.array([1.0, 3.0]), "C": pd.array([1, 3], dtype="Float64")}, + index=pd.Index(["a", "b"], name="A"), + ) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "source, neg_target, abs_target", + [ + ([1, 2, 3], [-1, -2, -3], [1, 2, 3]), + ([1, 2, None], [-1, -2, None], [1, 2, None]), + ([-1, 0, 1], [1, 0, -1], [1, 0, 1]), + ], +) +def test_unary_int_operators(any_signed_int_ea_dtype, source, neg_target, abs_target): + dtype = any_signed_int_ea_dtype + arr = pd.array(source, dtype=dtype) + neg_result, pos_result, abs_result = -arr, +arr, abs(arr) + neg_target = pd.array(neg_target, dtype=dtype) + abs_target = pd.array(abs_target, dtype=dtype) + + tm.assert_extension_array_equal(neg_result, neg_target) + tm.assert_extension_array_equal(pos_result, arr) + assert not tm.shares_memory(pos_result, arr) + tm.assert_extension_array_equal(abs_result, abs_target) + + +def test_values_multiplying_large_series_by_NA(): + # GH#33701 + + result = pd.NA * pd.Series(np.zeros(10001)) + expected = pd.Series([pd.NA] * 10001) + + tm.assert_series_equal(result, expected) + + +def test_bitwise(dtype): + left = pd.array([1, None, 3, 4], dtype=dtype) + right = pd.array([None, 3, 5, 4], dtype=dtype) + + result = left | right + expected = pd.array([None, None, 3 | 5, 4 | 4], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = left & right + expected = pd.array([None, None, 3 & 5, 4 & 4], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = left ^ right + expected = pd.array([None, None, 3 ^ 5, 4 ^ 4], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + # TODO: desired behavior when operating with boolean? defer? 
+ + floats = right.astype("Float64") + with pytest.raises(TypeError, match="unsupported operand type"): + left | floats + with pytest.raises(TypeError, match="unsupported operand type"): + left & floats + with pytest.raises(TypeError, match="unsupported operand type"): + left ^ floats diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_comparison.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_comparison.py new file mode 100644 index 0000000000000000000000000000000000000000..568b0b087bf1db9610960dba12ea2e0bab8f1729 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_comparison.py @@ -0,0 +1,39 @@ +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.tests.arrays.masked_shared import ( + ComparisonOps, + NumericOps, +) + + +class TestComparisonOps(NumericOps, ComparisonOps): + @pytest.mark.parametrize("other", [True, False, pd.NA, -1, 0, 1]) + def test_scalar(self, other, comparison_op, dtype): + ComparisonOps.test_scalar(self, other, comparison_op, dtype) + + def test_compare_to_int(self, dtype, comparison_op): + # GH 28930 + op_name = f"__{comparison_op.__name__}__" + s1 = pd.Series([1, None, 3], dtype=dtype) + s2 = pd.Series([1, None, 3], dtype="float") + + method = getattr(s1, op_name) + result = method(2) + + method = getattr(s2, op_name) + expected = method(2).astype("boolean") + expected[s2.isna()] = pd.NA + + tm.assert_series_equal(result, expected) + + +def test_equals(): + # GH-30652 + # equals is generally tested in /tests/extension/base/methods, but this + # specifically tests that two arrays of the same class but different dtype + # do not evaluate equal + a1 = pd.array([1, 2, None], dtype="Int64") + a2 = pd.array([1, 2, None], dtype="Int32") + assert a1.equals(a2) is False diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_concat.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_concat.py new file mode 100644 index 0000000000000000000000000000000000000000..feba574da548fd597c25103f67821145bccec9ed --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_concat.py @@ -0,0 +1,69 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize( + "to_concat_dtypes, result_dtype", + [ + (["Int64", "Int64"], "Int64"), + (["UInt64", "UInt64"], "UInt64"), + (["Int8", "Int8"], "Int8"), + (["Int8", "Int16"], "Int16"), + (["UInt8", "Int8"], "Int16"), + (["Int32", "UInt32"], "Int64"), + (["Int64", "UInt64"], "Float64"), + (["Int64", "boolean"], "object"), + (["UInt8", "boolean"], "object"), + ], +) +def test_concat_series(to_concat_dtypes, result_dtype): + # we expect the same dtypes as we would get with non-masked inputs, + # just masked where available. 
+ + result = pd.concat([pd.Series([0, 1, pd.NA], dtype=t) for t in to_concat_dtypes]) + expected = pd.concat([pd.Series([0, 1, pd.NA], dtype=object)] * 2).astype( + result_dtype + ) + tm.assert_series_equal(result, expected) + + # order doesn't matter for result + result = pd.concat( + [pd.Series([0, 1, pd.NA], dtype=t) for t in to_concat_dtypes[::-1]] + ) + expected = pd.concat([pd.Series([0, 1, pd.NA], dtype=object)] * 2).astype( + result_dtype + ) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "to_concat_dtypes, result_dtype", + [ + (["Int64", "int64"], "Int64"), + (["UInt64", "uint64"], "UInt64"), + (["Int8", "int8"], "Int8"), + (["Int8", "int16"], "Int16"), + (["UInt8", "int8"], "Int16"), + (["Int32", "uint32"], "Int64"), + (["Int64", "uint64"], "Float64"), + (["Int64", "bool"], "object"), + (["UInt8", "bool"], "object"), + ], +) +def test_concat_series_with_numpy(to_concat_dtypes, result_dtype): + # we expect the same dtypes as we would get with non-masked inputs, + # just masked where available. + + s1 = pd.Series([0, 1, pd.NA], dtype=to_concat_dtypes[0]) + s2 = pd.Series(np.array([0, 1], dtype=to_concat_dtypes[1])) + result = pd.concat([s1, s2], ignore_index=True) + expected = pd.Series([0, 1, pd.NA, 0, 1], dtype=object).astype(result_dtype) + tm.assert_series_equal(result, expected) + + # order doesn't matter for result + result = pd.concat([s2, s1], ignore_index=True) + expected = pd.Series([0, 1, 0, 1, pd.NA], dtype=object).astype(result_dtype) + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_construction.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_construction.py new file mode 100644 index 0000000000000000000000000000000000000000..64fe40e53a9d287b6240a908b7ed9d0cf7ec1396 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_construction.py @@ -0,0 +1,245 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.api.types import is_integer +from pandas.core.arrays import IntegerArray +from pandas.core.arrays.integer import ( + Int8Dtype, + Int32Dtype, + Int64Dtype, +) + + +@pytest.fixture(params=[pd.array, IntegerArray._from_sequence]) +def constructor(request): + """Fixture returning parametrized IntegerArray from given sequence. + + Used to test dtype conversions. 
+ """ + return request.param + + +def test_uses_pandas_na(): + a = pd.array([1, None], dtype=Int64Dtype()) + assert a[1] is pd.NA + + +def test_from_dtype_from_float(data): + # construct from our dtype & string dtype + dtype = data.dtype + + # from float + expected = pd.Series(data) + result = pd.Series(data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)) + tm.assert_series_equal(result, expected) + + # from int / list + expected = pd.Series(data) + result = pd.Series(np.array(data).tolist(), dtype=str(dtype)) + tm.assert_series_equal(result, expected) + + # from int / array + expected = pd.Series(data).dropna().reset_index(drop=True) + dropped = np.array(data.dropna()).astype(np.dtype(dtype.type)) + result = pd.Series(dropped, dtype=str(dtype)) + tm.assert_series_equal(result, expected) + + +def test_conversions(data_missing): + # astype to object series + df = pd.DataFrame({"A": data_missing}) + result = df["A"].astype("object") + expected = pd.Series(np.array([pd.NA, 1], dtype=object), name="A") + tm.assert_series_equal(result, expected) + + # convert to object ndarray + # we assert that we are exactly equal + # including type conversions of scalars + result = df["A"].astype("object").values + expected = np.array([pd.NA, 1], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + for r, e in zip(result, expected): + if pd.isnull(r): + assert pd.isnull(e) + elif is_integer(r): + assert r == e + assert is_integer(e) + else: + assert r == e + assert type(r) == type(e) + + +def test_integer_array_constructor(): + values = np.array([1, 2, 3, 4], dtype="int64") + mask = np.array([False, False, False, True], dtype="bool") + + result = IntegerArray(values, mask) + expected = pd.array([1, 2, 3, np.nan], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + msg = r".* should be .* numpy array. 
Use the 'pd.array' function instead" + with pytest.raises(TypeError, match=msg): + IntegerArray(values.tolist(), mask) + + with pytest.raises(TypeError, match=msg): + IntegerArray(values, mask.tolist()) + + with pytest.raises(TypeError, match=msg): + IntegerArray(values.astype(float), mask) + msg = r"__init__\(\) missing 1 required positional argument: 'mask'" + with pytest.raises(TypeError, match=msg): + IntegerArray(values) + + +def test_integer_array_constructor_copy(): + values = np.array([1, 2, 3, 4], dtype="int64") + mask = np.array([False, False, False, True], dtype="bool") + + result = IntegerArray(values, mask) + assert result._data is values + assert result._mask is mask + + result = IntegerArray(values, mask, copy=True) + assert result._data is not values + assert result._mask is not mask + + +@pytest.mark.parametrize( + "a, b", + [ + ([1, None], [1, np.nan]), + ([None], [np.nan]), + ([None, np.nan], [np.nan, np.nan]), + ([np.nan, np.nan], [np.nan, np.nan]), + ], +) +def test_to_integer_array_none_is_nan(a, b): + result = pd.array(a, dtype="Int64") + expected = pd.array(b, dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values", + [ + ["foo", "bar"], + "foo", + 1, + 1.0, + pd.date_range("20130101", periods=2), + np.array(["foo"]), + [[1, 2], [3, 4]], + [np.nan, {"a": 1}], + ], +) +def test_to_integer_array_error(values): + # error in converting existing arrays to IntegerArrays + msg = "|".join( + [ + r"cannot be converted to IntegerDtype", + r"invalid literal for int\(\) with base 10:", + r"values must be a 1D list-like", + r"Cannot pass scalar", + r"int\(\) argument must be a string", + ] + ) + with pytest.raises((ValueError, TypeError), match=msg): + pd.array(values, dtype="Int64") + + with pytest.raises((ValueError, TypeError), match=msg): + IntegerArray._from_sequence(values) + + +def test_to_integer_array_inferred_dtype(constructor): + # if values has dtype -> respect it + result = constructor(np.array([1, 2], dtype="int8")) + assert result.dtype == Int8Dtype() + result = constructor(np.array([1, 2], dtype="int32")) + assert result.dtype == Int32Dtype() + + # if values have no dtype -> always int64 + result = constructor([1, 2]) + assert result.dtype == Int64Dtype() + + +def test_to_integer_array_dtype_keyword(constructor): + result = constructor([1, 2], dtype="Int8") + assert result.dtype == Int8Dtype() + + # if values has dtype -> override it + result = constructor(np.array([1, 2], dtype="int8"), dtype="Int32") + assert result.dtype == Int32Dtype() + + +def test_to_integer_array_float(): + result = IntegerArray._from_sequence([1.0, 2.0], dtype="Int64") + expected = pd.array([1, 2], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + with pytest.raises(TypeError, match="cannot safely cast non-equivalent"): + IntegerArray._from_sequence([1.5, 2.0], dtype="Int64") + + # for float dtypes, the itemsize is not preserved + result = IntegerArray._from_sequence( + np.array([1.0, 2.0], dtype="float32"), dtype="Int64" + ) + assert result.dtype == Int64Dtype() + + +def test_to_integer_array_str(): + result = IntegerArray._from_sequence(["1", "2", None], dtype="Int64") + expected = pd.array([1, 2, np.nan], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + with pytest.raises( + ValueError, match=r"invalid literal for int\(\) with base 10: .*" + ): + IntegerArray._from_sequence(["1", "2", ""], dtype="Int64") + + with pytest.raises( + ValueError, match=r"invalid literal for int\(\) with base 10: 
.*" + ): + IntegerArray._from_sequence(["1.5", "2.0"], dtype="Int64") + + +@pytest.mark.parametrize( + "bool_values, int_values, target_dtype, expected_dtype", + [ + ([False, True], [0, 1], Int64Dtype(), Int64Dtype()), + ([False, True], [0, 1], "Int64", Int64Dtype()), + ([False, True, np.nan], [0, 1, np.nan], Int64Dtype(), Int64Dtype()), + ], +) +def test_to_integer_array_bool( + constructor, bool_values, int_values, target_dtype, expected_dtype +): + result = constructor(bool_values, dtype=target_dtype) + assert result.dtype == expected_dtype + expected = pd.array(int_values, dtype=target_dtype) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values, to_dtype, result_dtype", + [ + (np.array([1], dtype="int64"), None, Int64Dtype), + (np.array([1, np.nan]), None, Int64Dtype), + (np.array([1, np.nan]), "int8", Int8Dtype), + ], +) +def test_to_integer_array(values, to_dtype, result_dtype): + # convert existing arrays to IntegerArrays + result = IntegerArray._from_sequence(values, dtype=to_dtype) + assert result.dtype == result_dtype() + expected = pd.array(values, dtype=result_dtype()) + tm.assert_extension_array_equal(result, expected) + + +def test_integer_array_from_boolean(): + # GH31104 + expected = pd.array(np.array([True, False]), dtype="Int64") + result = pd.array(np.array([True, False], dtype=object), dtype="Int64") + tm.assert_extension_array_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_dtypes.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..8620763988e0676e00d8e61affa75848fe38dd48 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_dtypes.py @@ -0,0 +1,294 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.generic import ABCIndex + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays.integer import ( + Int8Dtype, + UInt32Dtype, +) + + +def test_dtypes(dtype): + # smoke tests on auto dtype construction + + if dtype.is_signed_integer: + assert np.dtype(dtype.type).kind == "i" + else: + assert np.dtype(dtype.type).kind == "u" + assert dtype.name is not None + + +@pytest.mark.parametrize("op", ["sum", "min", "max", "prod"]) +def test_preserve_dtypes(op): + # for ops that enable (mean would actually work here + # but generally it is a float return value) + df = pd.DataFrame( + { + "A": ["a", "b", "b"], + "B": [1, None, 3], + "C": pd.array([1, None, 3], dtype="Int64"), + } + ) + + # op + result = getattr(df.C, op)() + if op in {"sum", "prod", "min", "max"}: + assert isinstance(result, np.int64) + else: + assert isinstance(result, int) + + # groupby + result = getattr(df.groupby("A"), op)() + + expected = pd.DataFrame( + {"B": np.array([1.0, 3.0]), "C": pd.array([1, 3], dtype="Int64")}, + index=pd.Index(["a", "b"], name="A"), + ) + tm.assert_frame_equal(result, expected) + + +def test_astype_nansafe(): + # see gh-22343 + arr = pd.array([np.nan, 1, 2], dtype="Int8") + msg = "cannot convert NA to integer" + + with pytest.raises(ValueError, match=msg): + arr.astype("uint32") + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_construct_index(all_data, dropna): + # ensure that we do not coerce to different Index dtype or non-index + + all_data = all_data[:10] + if dropna: + other = np.array(all_data[~all_data.isna()]) + else: + other = all_data + + result = pd.Index(pd.array(other, 
dtype=all_data.dtype)) + expected = pd.Index(other, dtype=all_data.dtype) + assert all_data.dtype == expected.dtype # dont coerce to object + + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("dropna", [True, False]) +def test_astype_index(all_data, dropna): + # as an int/uint index to Index + + all_data = all_data[:10] + if dropna: + other = all_data[~all_data.isna()] + else: + other = all_data + + dtype = all_data.dtype + idx = pd.Index(np.array(other)) + assert isinstance(idx, ABCIndex) + + result = idx.astype(dtype) + expected = idx.astype(object).astype(dtype) + tm.assert_index_equal(result, expected) + + +def test_astype(all_data): + all_data = all_data[:10] + + ints = all_data[~all_data.isna()] + mixed = all_data + dtype = Int8Dtype() + + # coerce to same type - ints + s = pd.Series(ints) + result = s.astype(all_data.dtype) + expected = pd.Series(ints) + tm.assert_series_equal(result, expected) + + # coerce to same other - ints + s = pd.Series(ints) + result = s.astype(dtype) + expected = pd.Series(ints, dtype=dtype) + tm.assert_series_equal(result, expected) + + # coerce to same numpy_dtype - ints + s = pd.Series(ints) + result = s.astype(all_data.dtype.numpy_dtype) + expected = pd.Series(ints._data.astype(all_data.dtype.numpy_dtype)) + tm.assert_series_equal(result, expected) + + # coerce to same type - mixed + s = pd.Series(mixed) + result = s.astype(all_data.dtype) + expected = pd.Series(mixed) + tm.assert_series_equal(result, expected) + + # coerce to same other - mixed + s = pd.Series(mixed) + result = s.astype(dtype) + expected = pd.Series(mixed, dtype=dtype) + tm.assert_series_equal(result, expected) + + # coerce to same numpy_dtype - mixed + s = pd.Series(mixed) + msg = "cannot convert NA to integer" + with pytest.raises(ValueError, match=msg): + s.astype(all_data.dtype.numpy_dtype) + + # coerce to object + s = pd.Series(mixed) + result = s.astype("object") + expected = pd.Series(np.asarray(mixed, dtype=object)) + tm.assert_series_equal(result, expected) + + +def test_astype_copy(): + arr = pd.array([1, 2, 3, None], dtype="Int64") + orig = pd.array([1, 2, 3, None], dtype="Int64") + + # copy=True -> ensure both data and mask are actual copies + result = arr.astype("Int64", copy=True) + assert result is not arr + assert not tm.shares_memory(result, arr) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + # copy=False + result = arr.astype("Int64", copy=False) + assert result is arr + assert np.shares_memory(result._data, arr._data) + assert np.shares_memory(result._mask, arr._mask) + result[0] = 10 + assert arr[0] == 10 + result[0] = pd.NA + assert arr[0] is pd.NA + + # astype to different dtype -> always needs a copy -> even with copy=False + # we need to ensure that also the mask is actually copied + arr = pd.array([1, 2, 3, None], dtype="Int64") + orig = pd.array([1, 2, 3, None], dtype="Int64") + + result = arr.astype("Int32", copy=False) + assert not tm.shares_memory(result, arr) + result[0] = 10 + tm.assert_extension_array_equal(arr, orig) + result[0] = pd.NA + tm.assert_extension_array_equal(arr, orig) + + +def test_astype_to_larger_numpy(): + a = pd.array([1, 2], dtype="Int32") + result = a.astype("int64") + expected = np.array([1, 2], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + a = pd.array([1, 2], dtype="UInt32") + result = a.astype("uint64") + expected = np.array([1, 2], dtype="uint64") + tm.assert_numpy_array_equal(result, expected) + + 
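The astype/to_numpy tests above and below all exercise the same casting rules for nullable integers; the short sketch here is illustrative only (it is not part of the diff) and assumes the standard pandas nullable-Int64 semantics these tests rely on:

import numpy as np
import pandas as pd

arr = pd.array([1, 2, None], dtype="Int64")

# Casting between nullable dtypes keeps the mask, so missing values survive.
assert arr.astype("Int32").dtype == "Int32"

# Casting to a plain numpy integer dtype only works when nothing is missing;
# with pd.NA present it raises ValueError("cannot convert NA to integer").
try:
    arr.astype("int64")
except ValueError:
    pass

# to_numpy with an explicit na_value is the supported escape hatch when a
# plain numpy array is needed despite missing values.
dense = arr.to_numpy(dtype="float64", na_value=np.nan)
assert np.isnan(dense[-1])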
+@pytest.mark.parametrize("dtype", [Int8Dtype(), "Int8", UInt32Dtype(), "UInt32"]) +def test_astype_specific_casting(dtype): + s = pd.Series([1, 2, 3], dtype="Int64") + result = s.astype(dtype) + expected = pd.Series([1, 2, 3], dtype=dtype) + tm.assert_series_equal(result, expected) + + s = pd.Series([1, 2, 3, None], dtype="Int64") + result = s.astype(dtype) + expected = pd.Series([1, 2, 3, None], dtype=dtype) + tm.assert_series_equal(result, expected) + + +def test_astype_floating(): + arr = pd.array([1, 2, None], dtype="Int64") + result = arr.astype("Float64") + expected = pd.array([1.0, 2.0, None], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +def test_astype_dt64(): + # GH#32435 + arr = pd.array([1, 2, 3, pd.NA]) * 10**9 + + result = arr.astype("datetime64[ns]") + + expected = np.array([1, 2, 3, "NaT"], dtype="M8[s]").astype("M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + +def test_construct_cast_invalid(dtype): + msg = "cannot safely" + arr = [1.2, 2.3, 3.7] + with pytest.raises(TypeError, match=msg): + pd.array(arr, dtype=dtype) + + with pytest.raises(TypeError, match=msg): + pd.Series(arr).astype(dtype) + + arr = [1.2, 2.3, 3.7, np.nan] + with pytest.raises(TypeError, match=msg): + pd.array(arr, dtype=dtype) + + with pytest.raises(TypeError, match=msg): + pd.Series(arr).astype(dtype) + + +@pytest.mark.parametrize("in_series", [True, False]) +def test_to_numpy_na_nan(in_series): + a = pd.array([0, 1, None], dtype="Int64") + if in_series: + a = pd.Series(a) + + result = a.to_numpy(dtype="float64", na_value=np.nan) + expected = np.array([0.0, 1.0, np.nan], dtype="float64") + tm.assert_numpy_array_equal(result, expected) + + result = a.to_numpy(dtype="int64", na_value=-1) + expected = np.array([0, 1, -1], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + result = a.to_numpy(dtype="bool", na_value=False) + expected = np.array([False, True, False], dtype="bool") + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("in_series", [True, False]) +@pytest.mark.parametrize("dtype", ["int32", "int64", "bool"]) +def test_to_numpy_dtype(dtype, in_series): + a = pd.array([0, 1], dtype="Int64") + if in_series: + a = pd.Series(a) + + result = a.to_numpy(dtype=dtype) + expected = np.array([0, 1], dtype=dtype) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["int64", "bool"]) +def test_to_numpy_na_raises(dtype): + a = pd.array([0, 1, None], dtype="Int64") + with pytest.raises(ValueError, match=dtype): + a.to_numpy(dtype=dtype) + + +def test_astype_str(): + a = pd.array([1, 2, None], dtype="Int64") + expected = np.array(["1", "2", ""], dtype=f"{tm.ENDIAN}U21") + + tm.assert_numpy_array_equal(a.astype(str), expected) + tm.assert_numpy_array_equal(a.astype("str"), expected) + + +def test_astype_boolean(): + # https://github.com/pandas-dev/pandas/issues/31102 + a = pd.array([1, 0, -1, 2, None], dtype="Int64") + result = a.astype("boolean") + expected = pd.array([True, False, True, True, None], dtype="boolean") + tm.assert_extension_array_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_function.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_function.py new file mode 100644 index 0000000000000000000000000000000000000000..d48b636a98feb94c5eaeee9eefbf5730fe9c6f79 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_function.py @@ -0,0 +1,203 @@ +import 
numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import FloatingArray + + +@pytest.mark.parametrize("ufunc", [np.abs, np.sign]) +# np.sign emits a warning with nans, +@pytest.mark.filterwarnings("ignore:invalid value encountered in sign:RuntimeWarning") +def test_ufuncs_single_int(ufunc): + a = pd.array([1, 2, -3, np.nan]) + result = ufunc(a) + expected = pd.array(ufunc(a.astype(float)), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + result = ufunc(s) + expected = pd.Series(pd.array(ufunc(a.astype(float)), dtype="Int64")) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.log, np.exp, np.sin, np.cos, np.sqrt]) +def test_ufuncs_single_float(ufunc): + a = pd.array([1, 2, -3, np.nan]) + with np.errstate(invalid="ignore"): + result = ufunc(a) + expected = FloatingArray(ufunc(a.astype(float)), mask=a._mask) + tm.assert_extension_array_equal(result, expected) + + s = pd.Series(a) + with np.errstate(invalid="ignore"): + result = ufunc(s) + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.add, np.subtract]) +def test_ufuncs_binary_int(ufunc): + # two IntegerArrays + a = pd.array([1, 2, -3, np.nan]) + result = ufunc(a, a) + expected = pd.array(ufunc(a.astype(float), a.astype(float)), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + # IntegerArray with numpy array + arr = np.array([1, 2, 3, 4]) + result = ufunc(a, arr) + expected = pd.array(ufunc(a.astype(float), arr), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = ufunc(arr, a) + expected = pd.array(ufunc(arr, a.astype(float)), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + # IntegerArray with scalar + result = ufunc(a, 1) + expected = pd.array(ufunc(a.astype(float), 1), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + result = ufunc(1, a) + expected = pd.array(ufunc(1, a.astype(float)), dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + +def test_ufunc_binary_output(): + a = pd.array([1, 2, np.nan]) + result = np.modf(a) + expected = np.modf(a.to_numpy(na_value=np.nan, dtype="float")) + expected = (pd.array(expected[0]), pd.array(expected[1])) + + assert isinstance(result, tuple) + assert len(result) == 2 + + for x, y in zip(result, expected): + tm.assert_extension_array_equal(x, y) + + +@pytest.mark.parametrize("values", [[0, 1], [0, None]]) +def test_ufunc_reduce_raises(values): + arr = pd.array(values) + + res = np.add.reduce(arr) + expected = arr.sum(skipna=False) + tm.assert_almost_equal(res, expected) + + +@pytest.mark.parametrize( + "pandasmethname, kwargs", + [ + ("var", {"ddof": 0}), + ("var", {"ddof": 1}), + ("std", {"ddof": 0}), + ("std", {"ddof": 1}), + ("kurtosis", {}), + ("skew", {}), + ("sem", {}), + ], +) +def test_stat_method(pandasmethname, kwargs): + s = pd.Series(data=[1, 2, 3, 4, 5, 6, np.nan, np.nan], dtype="Int64") + pandasmeth = getattr(s, pandasmethname) + result = pandasmeth(**kwargs) + s2 = pd.Series(data=[1, 2, 3, 4, 5, 6], dtype="Int64") + pandasmeth = getattr(s2, pandasmethname) + expected = pandasmeth(**kwargs) + assert expected == result + + +def test_value_counts_na(): + arr = pd.array([1, 2, 1, pd.NA], dtype="Int64") + result = arr.value_counts(dropna=False) + ex_index = pd.Index([1, 2, pd.NA], dtype="Int64") + assert ex_index.dtype == "Int64" + expected = pd.Series([2, 1, 1], index=ex_index, 
dtype="Int64", name="count") + tm.assert_series_equal(result, expected) + + result = arr.value_counts(dropna=True) + expected = pd.Series([2, 1], index=arr[:2], dtype="Int64", name="count") + assert expected.index.dtype == arr.dtype + tm.assert_series_equal(result, expected) + + +def test_value_counts_empty(): + # https://github.com/pandas-dev/pandas/issues/33317 + ser = pd.Series([], dtype="Int64") + result = ser.value_counts() + idx = pd.Index([], dtype=ser.dtype) + assert idx.dtype == ser.dtype + expected = pd.Series([], index=idx, dtype="Int64", name="count") + tm.assert_series_equal(result, expected) + + +def test_value_counts_with_normalize(): + # GH 33172 + ser = pd.Series([1, 2, 1, pd.NA], dtype="Int64") + result = ser.value_counts(normalize=True) + expected = pd.Series([2, 1], index=ser[:2], dtype="Float64", name="proportion") / 3 + assert expected.index.dtype == ser.dtype + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("min_count", [0, 4]) +def test_integer_array_sum(skipna, min_count, any_int_ea_dtype): + dtype = any_int_ea_dtype + arr = pd.array([1, 2, 3, None], dtype=dtype) + result = arr.sum(skipna=skipna, min_count=min_count) + if skipna and min_count == 0: + assert result == 6 + else: + assert result is pd.NA + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("method", ["min", "max"]) +def test_integer_array_min_max(skipna, method, any_int_ea_dtype): + dtype = any_int_ea_dtype + arr = pd.array([0, 1, None], dtype=dtype) + func = getattr(arr, method) + result = func(skipna=skipna) + if skipna: + assert result == (0 if method == "min" else 1) + else: + assert result is pd.NA + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.parametrize("min_count", [0, 9]) +def test_integer_array_prod(skipna, min_count, any_int_ea_dtype): + dtype = any_int_ea_dtype + arr = pd.array([1, 2, None], dtype=dtype) + result = arr.prod(skipna=skipna, min_count=min_count) + if skipna and min_count == 0: + assert result == 2 + else: + assert result is pd.NA + + +@pytest.mark.parametrize( + "values, expected", [([1, 2, 3], 6), ([1, 2, 3, None], 6), ([None], 0)] +) +def test_integer_array_numpy_sum(values, expected): + arr = pd.array(values, dtype="Int64") + result = np.sum(arr) + assert result == expected + + +@pytest.mark.parametrize("op", ["sum", "prod", "min", "max"]) +def test_dataframe_reductions(op): + # https://github.com/pandas-dev/pandas/pull/32867 + # ensure the integers are not cast to float during reductions + df = pd.DataFrame({"a": pd.array([1, 2], dtype="Int64")}) + result = df.max() + assert isinstance(result["a"], np.int64) + + +# TODO(jreback) - these need testing / are broken + +# shift + +# set_index (destroys type) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_indexing.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..4b953d699108b2aed1c992cf3a33f3013b298254 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_indexing.py @@ -0,0 +1,19 @@ +import pandas as pd +import pandas._testing as tm + + +def test_array_setitem_nullable_boolean_mask(): + # GH 31446 + ser = pd.Series([1, 2], dtype="Int64") + result = ser.where(ser > 1) + expected = pd.Series([pd.NA, 2], dtype="Int64") + tm.assert_series_equal(result, expected) + + +def test_array_setitem(): + # GH 31446 + arr = 
pd.Series([1, 2], dtype="Int64").array + arr[arr > 1] = 1 + + expected = pd.array([1, 1], dtype="Int64") + tm.assert_extension_array_equal(arr, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_reduction.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_reduction.py new file mode 100644 index 0000000000000000000000000000000000000000..db04862e4ea0797ffcc1562bbcb603448131fae8 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_reduction.py @@ -0,0 +1,125 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Series, + array, +) +import pandas._testing as tm + + +@pytest.mark.parametrize( + "op, expected", + [ + ["sum", np.int64(3)], + ["prod", np.int64(2)], + ["min", np.int64(1)], + ["max", np.int64(2)], + ["mean", np.float64(1.5)], + ["median", np.float64(1.5)], + ["var", np.float64(0.5)], + ["std", np.float64(0.5**0.5)], + ["skew", pd.NA], + ["kurt", pd.NA], + ["any", True], + ["all", True], + ], +) +def test_series_reductions(op, expected): + ser = Series([1, 2], dtype="Int64") + result = getattr(ser, op)() + tm.assert_equal(result, expected) + + +@pytest.mark.parametrize( + "op, expected", + [ + ["sum", Series([3], index=["a"], dtype="Int64")], + ["prod", Series([2], index=["a"], dtype="Int64")], + ["min", Series([1], index=["a"], dtype="Int64")], + ["max", Series([2], index=["a"], dtype="Int64")], + ["mean", Series([1.5], index=["a"], dtype="Float64")], + ["median", Series([1.5], index=["a"], dtype="Float64")], + ["var", Series([0.5], index=["a"], dtype="Float64")], + ["std", Series([0.5**0.5], index=["a"], dtype="Float64")], + ["skew", Series([pd.NA], index=["a"], dtype="Float64")], + ["kurt", Series([pd.NA], index=["a"], dtype="Float64")], + ["any", Series([True], index=["a"], dtype="boolean")], + ["all", Series([True], index=["a"], dtype="boolean")], + ], +) +def test_dataframe_reductions(op, expected): + df = DataFrame({"a": array([1, 2], dtype="Int64")}) + result = getattr(df, op)() + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "op, expected", + [ + ["sum", array([1, 3], dtype="Int64")], + ["prod", array([1, 3], dtype="Int64")], + ["min", array([1, 3], dtype="Int64")], + ["max", array([1, 3], dtype="Int64")], + ["mean", array([1, 3], dtype="Float64")], + ["median", array([1, 3], dtype="Float64")], + ["var", array([pd.NA], dtype="Float64")], + ["std", array([pd.NA], dtype="Float64")], + ["skew", array([pd.NA], dtype="Float64")], + ["any", array([True, True], dtype="boolean")], + ["all", array([True, True], dtype="boolean")], + ], +) +def test_groupby_reductions(op, expected): + df = DataFrame( + { + "A": ["a", "b", "b"], + "B": array([1, None, 3], dtype="Int64"), + } + ) + result = getattr(df.groupby("A"), op)() + expected = DataFrame(expected, index=pd.Index(["a", "b"], name="A"), columns=["B"]) + + tm.assert_frame_equal(result, expected) + + +@pytest.mark.parametrize( + "op, expected", + [ + ["sum", Series([4, 4], index=["B", "C"], dtype="Float64")], + ["prod", Series([3, 3], index=["B", "C"], dtype="Float64")], + ["min", Series([1, 1], index=["B", "C"], dtype="Float64")], + ["max", Series([3, 3], index=["B", "C"], dtype="Float64")], + ["mean", Series([2, 2], index=["B", "C"], dtype="Float64")], + ["median", Series([2, 2], index=["B", "C"], dtype="Float64")], + ["var", Series([2, 2], index=["B", "C"], dtype="Float64")], + ["std", Series([2**0.5, 2**0.5], index=["B", "C"], dtype="Float64")], + 
["skew", Series([pd.NA, pd.NA], index=["B", "C"], dtype="Float64")], + ["kurt", Series([pd.NA, pd.NA], index=["B", "C"], dtype="Float64")], + ["any", Series([True, True, True], index=["A", "B", "C"], dtype="boolean")], + ["all", Series([True, True, True], index=["A", "B", "C"], dtype="boolean")], + ], +) +def test_mixed_reductions(op, expected, using_infer_string): + if op in ["any", "all"] and using_infer_string: + expected = expected.astype("bool") + df = DataFrame( + { + "A": ["a", "b", "b"], + "B": [1, None, 3], + "C": array([1, None, 3], dtype="Int64"), + } + ) + + # series + result = getattr(df.C, op)() + tm.assert_equal(result, expected["C"]) + + # frame + if op in ["any", "all"]: + result = getattr(df, op)() + else: + result = getattr(df, op)(numeric_only=True) + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_repr.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_repr.py new file mode 100644 index 0000000000000000000000000000000000000000..168210eed5d06a461bbf42dd1e1fae3db0fd851c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_repr.py @@ -0,0 +1,67 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas.core.arrays.integer import ( + Int8Dtype, + Int16Dtype, + Int32Dtype, + Int64Dtype, + UInt8Dtype, + UInt16Dtype, + UInt32Dtype, + UInt64Dtype, +) + + +def test_dtypes(dtype): + # smoke tests on auto dtype construction + + if dtype.is_signed_integer: + assert np.dtype(dtype.type).kind == "i" + else: + assert np.dtype(dtype.type).kind == "u" + assert dtype.name is not None + + +@pytest.mark.parametrize( + "dtype, expected", + [ + (Int8Dtype(), "Int8Dtype()"), + (Int16Dtype(), "Int16Dtype()"), + (Int32Dtype(), "Int32Dtype()"), + (Int64Dtype(), "Int64Dtype()"), + (UInt8Dtype(), "UInt8Dtype()"), + (UInt16Dtype(), "UInt16Dtype()"), + (UInt32Dtype(), "UInt32Dtype()"), + (UInt64Dtype(), "UInt64Dtype()"), + ], +) +def test_repr_dtype(dtype, expected): + assert repr(dtype) == expected + + +def test_repr_array(): + result = repr(pd.array([1, None, 3])) + expected = "\n[1, , 3]\nLength: 3, dtype: Int64" + assert result == expected + + +def test_repr_array_long(): + data = pd.array([1, 2, None] * 1000) + expected = ( + "\n" + "[ 1, 2, , 1, 2, , 1, 2, , 1,\n" + " ...\n" + " , 1, 2, , 1, 2, , 1, 2, ]\n" + "Length: 3000, dtype: Int64" + ) + result = repr(data) + assert result == expected + + +def test_frame_repr(data_missing): + df = pd.DataFrame({"A": data_missing}) + result = repr(df) + expected = " A\n0 \n1 1" + assert result == expected diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/masked_shared.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/masked_shared.py new file mode 100644 index 0000000000000000000000000000000000000000..3e74402263cf9c119ec344c5da48dd8598970f69 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/masked_shared.py @@ -0,0 +1,154 @@ +""" +Tests shared by MaskedArray subclasses. 
+""" +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.tests.extension.base import BaseOpsUtil + + +class ComparisonOps(BaseOpsUtil): + def _compare_other(self, data, op, other): + # array + result = pd.Series(op(data, other)) + expected = pd.Series(op(data._data, other), dtype="boolean") + + # fill the nan locations + expected[data._mask] = pd.NA + + tm.assert_series_equal(result, expected) + + # series + ser = pd.Series(data) + result = op(ser, other) + + # Set nullable dtype here to avoid upcasting when setting to pd.NA below + expected = op(pd.Series(data._data), other).astype("boolean") + + # fill the nan locations + expected[data._mask] = pd.NA + + tm.assert_series_equal(result, expected) + + # subclass will override to parametrize 'other' + def test_scalar(self, other, comparison_op, dtype): + op = comparison_op + left = pd.array([1, 0, None], dtype=dtype) + + result = op(left, other) + + if other is pd.NA: + expected = pd.array([None, None, None], dtype="boolean") + else: + values = op(left._data, other) + expected = pd.arrays.BooleanArray(values, left._mask, copy=True) + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + result[0] = pd.NA + tm.assert_extension_array_equal(left, pd.array([1, 0, None], dtype=dtype)) + + +class NumericOps: + # Shared by IntegerArray and FloatingArray, not BooleanArray + + def test_searchsorted_nan(self, dtype): + # The base class casts to object dtype, for which searchsorted returns + # 0 from the left and 10 from the right. + arr = pd.array(range(10), dtype=dtype) + + assert arr.searchsorted(np.nan, side="left") == 10 + assert arr.searchsorted(np.nan, side="right") == 10 + + def test_no_shared_mask(self, data): + result = data + 1 + assert not tm.shares_memory(result, data) + + def test_array(self, comparison_op, dtype): + op = comparison_op + + left = pd.array([0, 1, 2, None, None, None], dtype=dtype) + right = pd.array([0, 1, None, 0, 1, None], dtype=dtype) + + result = op(left, right) + values = op(left._data, right._data) + mask = left._mask | right._mask + + expected = pd.arrays.BooleanArray(values, mask) + tm.assert_extension_array_equal(result, expected) + + # ensure we haven't mutated anything inplace + result[0] = pd.NA + tm.assert_extension_array_equal( + left, pd.array([0, 1, 2, None, None, None], dtype=dtype) + ) + tm.assert_extension_array_equal( + right, pd.array([0, 1, None, 0, 1, None], dtype=dtype) + ) + + def test_compare_with_booleanarray(self, comparison_op, dtype): + op = comparison_op + + left = pd.array([True, False, None] * 3, dtype="boolean") + right = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype=dtype) + other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean") + + expected = op(left, other) + result = op(left, right) + tm.assert_extension_array_equal(result, expected) + + # reversed op + expected = op(other, left) + result = op(right, left) + tm.assert_extension_array_equal(result, expected) + + def test_compare_to_string(self, dtype): + # GH#28930 + ser = pd.Series([1, None], dtype=dtype) + result = ser == "a" + expected = pd.Series([False, pd.NA], dtype="boolean") + + tm.assert_series_equal(result, expected) + + def test_ufunc_with_out(self, dtype): + arr = pd.array([1, 2, 3], dtype=dtype) + arr2 = pd.array([1, 2, pd.NA], dtype=dtype) + + mask = arr == arr + mask2 = arr2 == arr2 + + result = np.zeros(3, dtype=bool) + result |= mask + # If MaskedArray.__array_ufunc__ handled "out" appropriately, + # 
`result` should still be an ndarray. + assert isinstance(result, np.ndarray) + assert result.all() + + # result |= mask worked because mask could be cast losslessly to + # boolean ndarray. mask2 can't, so this raises + result = np.zeros(3, dtype=bool) + msg = "Specify an appropriate 'na_value' for this dtype" + with pytest.raises(ValueError, match=msg): + result |= mask2 + + # addition + res = np.add(arr, arr2) + expected = pd.array([2, 4, pd.NA], dtype=dtype) + tm.assert_extension_array_equal(res, expected) + + # when passing out=arr, we will modify 'arr' inplace. + res = np.add(arr, arr2, out=arr) + assert res is arr + tm.assert_extension_array_equal(res, expected) + tm.assert_extension_array_equal(arr, expected) + + def test_mul_td64_array(self, dtype): + # GH#45622 + arr = pd.array([1, 2, pd.NA], dtype=dtype) + other = np.arange(3, dtype=np.int64).view("m8[ns]") + + result = arr * other + expected = pd.array([pd.Timedelta(0), pd.Timedelta(2), pd.NaT]) + tm.assert_extension_array_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__init__.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_constructors.cpython-310.pyc b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_constructors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8fcef9755ef78340d8a4e50a0c997e5be61939c5 Binary files /dev/null and b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/__pycache__/test_constructors.cpython-310.pyc differ diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_accessor.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_accessor.py new file mode 100644 index 0000000000000000000000000000000000000000..87eb7bcfa9cee3e92386ad0f148b896c0e682b07 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_accessor.py @@ -0,0 +1,253 @@ +import string + +import numpy as np +import pytest + +import pandas as pd +from pandas import SparseDtype +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +class TestSeriesAccessor: + def test_to_dense(self): + ser = pd.Series([0, 1, 0, 10], dtype="Sparse[int64]") + result = ser.sparse.to_dense() + expected = pd.Series([0, 1, 0, 10]) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize("attr", ["npoints", "density", "fill_value", "sp_values"]) + def test_get_attributes(self, attr): + arr = SparseArray([0, 1]) + ser = pd.Series(arr) + + result = getattr(ser.sparse, attr) + expected = getattr(arr, attr) + assert result == expected + + def test_from_coo(self): + scipy_sparse = pytest.importorskip("scipy.sparse") + + row = [0, 3, 1, 0] + col = [0, 3, 1, 2] + data = [4, 5, 7, 9] + + sp_array = scipy_sparse.coo_matrix((data, (row, col))) + result = pd.Series.sparse.from_coo(sp_array) + + index = pd.MultiIndex.from_arrays( + [ + np.array([0, 0, 1, 3], dtype=np.int32), + np.array([0, 2, 1, 3], dtype=np.int32), + ], + ) + expected = pd.Series([4, 9, 7, 5], index=index, dtype="Sparse[int]") + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "sort_labels, expected_rows, expected_cols, expected_values_pos", + [ + ( + False, + [("b", 2), 
("a", 2), ("b", 1), ("a", 1)], + [("z", 1), ("z", 2), ("x", 2), ("z", 0)], + {1: (1, 0), 3: (3, 3)}, + ), + ( + True, + [("a", 1), ("a", 2), ("b", 1), ("b", 2)], + [("x", 2), ("z", 0), ("z", 1), ("z", 2)], + {1: (1, 2), 3: (0, 1)}, + ), + ], + ) + def test_to_coo( + self, sort_labels, expected_rows, expected_cols, expected_values_pos + ): + sp_sparse = pytest.importorskip("scipy.sparse") + + values = SparseArray([0, np.nan, 1, 0, None, 3], fill_value=0) + index = pd.MultiIndex.from_tuples( + [ + ("b", 2, "z", 1), + ("a", 2, "z", 2), + ("a", 2, "z", 1), + ("a", 2, "x", 2), + ("b", 1, "z", 1), + ("a", 1, "z", 0), + ] + ) + ss = pd.Series(values, index=index) + + expected_A = np.zeros((4, 4)) + for value, (row, col) in expected_values_pos.items(): + expected_A[row, col] = value + + A, rows, cols = ss.sparse.to_coo( + row_levels=(0, 1), column_levels=(2, 3), sort_labels=sort_labels + ) + assert isinstance(A, sp_sparse.coo_matrix) + tm.assert_numpy_array_equal(A.toarray(), expected_A) + assert rows == expected_rows + assert cols == expected_cols + + def test_non_sparse_raises(self): + ser = pd.Series([1, 2, 3]) + with pytest.raises(AttributeError, match=".sparse"): + ser.sparse.density + + +class TestFrameAccessor: + def test_accessor_raises(self): + df = pd.DataFrame({"A": [0, 1]}) + with pytest.raises(AttributeError, match="sparse"): + df.sparse + + @pytest.mark.parametrize("format", ["csc", "csr", "coo"]) + @pytest.mark.parametrize("labels", [None, list(string.ascii_letters[:10])]) + @pytest.mark.parametrize("dtype", ["float64", "int64"]) + def test_from_spmatrix(self, format, labels, dtype): + sp_sparse = pytest.importorskip("scipy.sparse") + + sp_dtype = SparseDtype(dtype, np.array(0, dtype=dtype).item()) + + mat = sp_sparse.eye(10, format=format, dtype=dtype) + result = pd.DataFrame.sparse.from_spmatrix(mat, index=labels, columns=labels) + expected = pd.DataFrame( + np.eye(10, dtype=dtype), index=labels, columns=labels + ).astype(sp_dtype) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("format", ["csc", "csr", "coo"]) + def test_from_spmatrix_including_explicit_zero(self, format): + sp_sparse = pytest.importorskip("scipy.sparse") + + mat = sp_sparse.random(10, 2, density=0.5, format=format) + mat.data[0] = 0 + result = pd.DataFrame.sparse.from_spmatrix(mat) + dtype = SparseDtype("float64", 0.0) + expected = pd.DataFrame(mat.todense()).astype(dtype) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "columns", + [["a", "b"], pd.MultiIndex.from_product([["A"], ["a", "b"]]), ["a", "a"]], + ) + def test_from_spmatrix_columns(self, columns): + sp_sparse = pytest.importorskip("scipy.sparse") + + dtype = SparseDtype("float64", 0.0) + + mat = sp_sparse.random(10, 2, density=0.5) + result = pd.DataFrame.sparse.from_spmatrix(mat, columns=columns) + expected = pd.DataFrame(mat.toarray(), columns=columns).astype(dtype) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "colnames", [("A", "B"), (1, 2), (1, pd.NA), (0.1, 0.2), ("x", "x"), (0, 0)] + ) + def test_to_coo(self, colnames): + sp_sparse = pytest.importorskip("scipy.sparse") + + df = pd.DataFrame( + {colnames[0]: [0, 1, 0], colnames[1]: [1, 0, 0]}, dtype="Sparse[int64, 0]" + ) + result = df.sparse.to_coo() + expected = sp_sparse.coo_matrix(np.asarray(df)) + assert (result != expected).nnz == 0 + + @pytest.mark.parametrize("fill_value", [1, np.nan]) + def test_to_coo_nonzero_fill_val_raises(self, fill_value): + pytest.importorskip("scipy") + df = pd.DataFrame( + { + "A": 
SparseArray( + [fill_value, fill_value, fill_value, 2], fill_value=fill_value + ), + "B": SparseArray( + [fill_value, 2, fill_value, fill_value], fill_value=fill_value + ), + } + ) + with pytest.raises(ValueError, match="fill value must be 0"): + df.sparse.to_coo() + + def test_to_coo_midx_categorical(self): + # GH#50996 + sp_sparse = pytest.importorskip("scipy.sparse") + + midx = pd.MultiIndex.from_arrays( + [ + pd.CategoricalIndex(list("ab"), name="x"), + pd.CategoricalIndex([0, 1], name="y"), + ] + ) + + ser = pd.Series(1, index=midx, dtype="Sparse[int]") + result = ser.sparse.to_coo(row_levels=["x"], column_levels=["y"])[0] + expected = sp_sparse.coo_matrix( + (np.array([1, 1]), (np.array([0, 1]), np.array([0, 1]))), shape=(2, 2) + ) + assert (result != expected).nnz == 0 + + def test_to_dense(self): + df = pd.DataFrame( + { + "A": SparseArray([1, 0], dtype=SparseDtype("int64", 0)), + "B": SparseArray([1, 0], dtype=SparseDtype("int64", 1)), + "C": SparseArray([1.0, 0.0], dtype=SparseDtype("float64", 0.0)), + }, + index=["b", "a"], + ) + result = df.sparse.to_dense() + expected = pd.DataFrame( + {"A": [1, 0], "B": [1, 0], "C": [1.0, 0.0]}, index=["b", "a"] + ) + tm.assert_frame_equal(result, expected) + + def test_density(self): + df = pd.DataFrame( + { + "A": SparseArray([1, 0, 2, 1], fill_value=0), + "B": SparseArray([0, 1, 1, 1], fill_value=0), + } + ) + res = df.sparse.density + expected = 0.75 + assert res == expected + + @pytest.mark.parametrize("dtype", ["int64", "float64"]) + @pytest.mark.parametrize("dense_index", [True, False]) + def test_series_from_coo(self, dtype, dense_index): + sp_sparse = pytest.importorskip("scipy.sparse") + + A = sp_sparse.eye(3, format="coo", dtype=dtype) + result = pd.Series.sparse.from_coo(A, dense_index=dense_index) + + index = pd.MultiIndex.from_tuples( + [ + np.array([0, 0], dtype=np.int32), + np.array([1, 1], dtype=np.int32), + np.array([2, 2], dtype=np.int32), + ], + ) + expected = pd.Series(SparseArray(np.array([1, 1, 1], dtype=dtype)), index=index) + if dense_index: + expected = expected.reindex(pd.MultiIndex.from_product(index.levels)) + + tm.assert_series_equal(result, expected) + + def test_series_from_coo_incorrect_format_raises(self): + # gh-26554 + sp_sparse = pytest.importorskip("scipy.sparse") + + m = sp_sparse.csr_matrix(np.array([[0, 1], [0, 0]])) + with pytest.raises( + TypeError, match="Expected coo_matrix. Got csr_matrix instead." 
+ ): + pd.Series.sparse.from_coo(m) + + def test_with_column_named_sparse(self): + # https://github.com/pandas-dev/pandas/issues/30758 + df = pd.DataFrame({"sparse": pd.arrays.SparseArray([1, 2])}) + assert isinstance(df.sparse, pd.core.arrays.sparse.accessor.SparseFrameAccessor) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_arithmetics.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_arithmetics.py new file mode 100644 index 0000000000000000000000000000000000000000..ffc93b4e4f176385ac7b2b8a0b51027cb0bad9f6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_arithmetics.py @@ -0,0 +1,514 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +from pandas import SparseDtype +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +@pytest.fixture(params=["integer", "block"]) +def kind(request): + """kind kwarg to pass to SparseArray""" + return request.param + + +@pytest.fixture(params=[True, False]) +def mix(request): + """ + Fixture returning True or False, determining whether to operate + op(sparse, dense) instead of op(sparse, sparse) + """ + return request.param + + +class TestSparseArrayArithmetics: + def _assert(self, a, b): + # We have to use tm.assert_sp_array_equal. See GH #45126 + tm.assert_numpy_array_equal(a, b) + + def _check_numeric_ops(self, a, b, a_dense, b_dense, mix: bool, op): + # Check that arithmetic behavior matches non-Sparse Series arithmetic + + if isinstance(a_dense, np.ndarray): + expected = op(pd.Series(a_dense), b_dense).values + elif isinstance(b_dense, np.ndarray): + expected = op(a_dense, pd.Series(b_dense)).values + else: + raise NotImplementedError + + with np.errstate(invalid="ignore", divide="ignore"): + if mix: + result = op(a, b_dense).to_dense() + else: + result = op(a, b).to_dense() + + self._assert(result, expected) + + def _check_bool_result(self, res): + assert isinstance(res, SparseArray) + assert isinstance(res.dtype, SparseDtype) + assert res.dtype.subtype == np.bool_ + assert isinstance(res.fill_value, bool) + + def _check_comparison_ops(self, a, b, a_dense, b_dense): + with np.errstate(invalid="ignore"): + # Unfortunately, trying to wrap the computation of each expected + # value is with np.errstate() is too tedious. 
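# Illustrative aside (a minimal sketch of the behaviour exercised by the checks
# below, not an assertion of this test): each comparison is expected to return
# a boolean SparseArray whose fill_value is the comparison of the operands'
# fill_values, which is what _check_bool_result verifies, e.g.
#   left = SparseArray([0.0, 1.0, 2.0], fill_value=0)
#   right = SparseArray([0.0, 1.0, 3.0], fill_value=0)
#   (left == right).dtype.subtype   # expected: np.bool_
#   (left == right).fill_value      # expected: True, since 0 == 0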
+ # + # sparse & sparse + self._check_bool_result(a == b) + self._assert((a == b).to_dense(), a_dense == b_dense) + + self._check_bool_result(a != b) + self._assert((a != b).to_dense(), a_dense != b_dense) + + self._check_bool_result(a >= b) + self._assert((a >= b).to_dense(), a_dense >= b_dense) + + self._check_bool_result(a <= b) + self._assert((a <= b).to_dense(), a_dense <= b_dense) + + self._check_bool_result(a > b) + self._assert((a > b).to_dense(), a_dense > b_dense) + + self._check_bool_result(a < b) + self._assert((a < b).to_dense(), a_dense < b_dense) + + # sparse & dense + self._check_bool_result(a == b_dense) + self._assert((a == b_dense).to_dense(), a_dense == b_dense) + + self._check_bool_result(a != b_dense) + self._assert((a != b_dense).to_dense(), a_dense != b_dense) + + self._check_bool_result(a >= b_dense) + self._assert((a >= b_dense).to_dense(), a_dense >= b_dense) + + self._check_bool_result(a <= b_dense) + self._assert((a <= b_dense).to_dense(), a_dense <= b_dense) + + self._check_bool_result(a > b_dense) + self._assert((a > b_dense).to_dense(), a_dense > b_dense) + + self._check_bool_result(a < b_dense) + self._assert((a < b_dense).to_dense(), a_dense < b_dense) + + def _check_logical_ops(self, a, b, a_dense, b_dense): + # sparse & sparse + self._check_bool_result(a & b) + self._assert((a & b).to_dense(), a_dense & b_dense) + + self._check_bool_result(a | b) + self._assert((a | b).to_dense(), a_dense | b_dense) + # sparse & dense + self._check_bool_result(a & b_dense) + self._assert((a & b_dense).to_dense(), a_dense & b_dense) + + self._check_bool_result(a | b_dense) + self._assert((a | b_dense).to_dense(), a_dense | b_dense) + + @pytest.mark.parametrize("scalar", [0, 1, 3]) + @pytest.mark.parametrize("fill_value", [None, 0, 2]) + def test_float_scalar( + self, kind, mix, all_arithmetic_functions, fill_value, scalar, request + ): + op = all_arithmetic_functions + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + a = SparseArray(values, kind=kind, fill_value=fill_value) + self._check_numeric_ops(a, scalar, values, scalar, mix, op) + + def test_float_scalar_comparison(self, kind): + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + + a = SparseArray(values, kind=kind) + self._check_comparison_ops(a, 1, values, 1) + self._check_comparison_ops(a, 0, values, 0) + self._check_comparison_ops(a, 3, values, 3) + + a = SparseArray(values, kind=kind, fill_value=0) + self._check_comparison_ops(a, 1, values, 1) + self._check_comparison_ops(a, 0, values, 0) + self._check_comparison_ops(a, 3, values, 3) + + a = SparseArray(values, kind=kind, fill_value=2) + self._check_comparison_ops(a, 1, values, 1) + self._check_comparison_ops(a, 0, values, 0) + self._check_comparison_ops(a, 3, values, 3) + + def test_float_same_index_without_nans(self, kind, mix, all_arithmetic_functions): + # when sp_index are the same + op = all_arithmetic_functions + + values = np.array([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0]) + rvalues = np.array([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0]) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_float_same_index_with_nans( + self, kind, mix, all_arithmetic_functions, request + ): + # when sp_index are the same + op = all_arithmetic_functions + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan]) + + a = 
SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_float_same_index_comparison(self, kind): + # when sp_index are the same + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([np.nan, 2, 3, 4, np.nan, 0, 1, 3, 2, np.nan]) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + + values = np.array([0.0, 1.0, 2.0, 6.0, 0.0, 0.0, 1.0, 2.0, 1.0, 0.0]) + rvalues = np.array([0.0, 2.0, 3.0, 4.0, 0.0, 0.0, 1.0, 3.0, 2.0, 0.0]) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + self._check_comparison_ops(a, b, values, rvalues) + + def test_float_array(self, kind, mix, all_arithmetic_functions): + op = all_arithmetic_functions + + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan]) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind=kind, fill_value=1) + b = SparseArray(rvalues, kind=kind, fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_float_array_different_kind(self, mix, all_arithmetic_functions): + op = all_arithmetic_functions + + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan]) + + a = SparseArray(values, kind="integer") + b = SparseArray(rvalues, kind="block") + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) + + a = SparseArray(values, kind="integer", fill_value=0) + b = SparseArray(rvalues, kind="block") + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind="integer", fill_value=0) + b = SparseArray(rvalues, kind="block", fill_value=0) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind="integer", fill_value=1) + b = SparseArray(rvalues, kind="block", fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_float_array_comparison(self, kind): + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([2, np.nan, 2, 3, np.nan, 0, 1, 5, 2, np.nan]) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + self._check_comparison_ops(a, b * 0, values, rvalues * 0) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, kind=kind, fill_value=1) + b = SparseArray(rvalues, kind=kind, fill_value=2) + self._check_comparison_ops(a, b, values, rvalues) + + def test_int_array(self, kind, mix, all_arithmetic_functions): + op = 
all_arithmetic_functions + + # have to specify dtype explicitly until fixing GH 667 + dtype = np.int64 + + values = np.array([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype) + rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype) + + a = SparseArray(values, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype) + b = SparseArray(rvalues, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype) + + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) + + a = SparseArray(values, fill_value=0, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype) + b = SparseArray(rvalues, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype) + + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, fill_value=0, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype) + b = SparseArray(rvalues, fill_value=0, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, fill_value=1, dtype=dtype, kind=kind) + assert a.dtype == SparseDtype(dtype, fill_value=1) + b = SparseArray(rvalues, fill_value=2, dtype=dtype, kind=kind) + assert b.dtype == SparseDtype(dtype, fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_int_array_comparison(self, kind): + dtype = "int64" + # int32 NI ATM + + values = np.array([0, 1, 2, 0, 0, 0, 1, 2, 1, 0], dtype=dtype) + rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=dtype) + + a = SparseArray(values, dtype=dtype, kind=kind) + b = SparseArray(rvalues, dtype=dtype, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + self._check_comparison_ops(a, b * 0, values, rvalues * 0) + + a = SparseArray(values, dtype=dtype, kind=kind, fill_value=0) + b = SparseArray(rvalues, dtype=dtype, kind=kind) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, dtype=dtype, kind=kind, fill_value=0) + b = SparseArray(rvalues, dtype=dtype, kind=kind, fill_value=0) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, dtype=dtype, kind=kind, fill_value=1) + b = SparseArray(rvalues, dtype=dtype, kind=kind, fill_value=2) + self._check_comparison_ops(a, b, values, rvalues) + + @pytest.mark.parametrize("fill_value", [True, False, np.nan]) + def test_bool_same_index(self, kind, fill_value): + # GH 14000 + # when sp_index are the same + values = np.array([True, False, True, True], dtype=np.bool_) + rvalues = np.array([True, False, True, True], dtype=np.bool_) + + a = SparseArray(values, kind=kind, dtype=np.bool_, fill_value=fill_value) + b = SparseArray(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value) + self._check_logical_ops(a, b, values, rvalues) + + @pytest.mark.parametrize("fill_value", [True, False, np.nan]) + def test_bool_array_logical(self, kind, fill_value): + # GH 14000 + # when sp_index are the same + values = np.array([True, False, True, False, True, True], dtype=np.bool_) + rvalues = np.array([True, False, False, True, False, True], dtype=np.bool_) + + a = SparseArray(values, kind=kind, dtype=np.bool_, fill_value=fill_value) + b = SparseArray(rvalues, kind=kind, dtype=np.bool_, fill_value=fill_value) + self._check_logical_ops(a, b, values, rvalues) + + def test_mixed_array_float_int(self, kind, mix, all_arithmetic_functions, request): + op = all_arithmetic_functions + rdtype = "int64" + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + 
rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + + self._check_numeric_ops(a, b, values, rvalues, mix, op) + self._check_numeric_ops(a, b * 0, values, rvalues * 0, mix, op) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + assert b.dtype == SparseDtype(rdtype) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + a = SparseArray(values, kind=kind, fill_value=1) + b = SparseArray(rvalues, kind=kind, fill_value=2) + assert b.dtype == SparseDtype(rdtype, fill_value=2) + self._check_numeric_ops(a, b, values, rvalues, mix, op) + + def test_mixed_array_comparison(self, kind): + rdtype = "int64" + # int32 NI ATM + + values = np.array([np.nan, 1, 2, 0, np.nan, 0, 1, 2, 1, np.nan]) + rvalues = np.array([2, 0, 2, 3, 0, 0, 1, 5, 2, 0], dtype=rdtype) + + a = SparseArray(values, kind=kind) + b = SparseArray(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + + self._check_comparison_ops(a, b, values, rvalues) + self._check_comparison_ops(a, b * 0, values, rvalues * 0) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind) + assert b.dtype == SparseDtype(rdtype) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, kind=kind, fill_value=0) + b = SparseArray(rvalues, kind=kind, fill_value=0) + assert b.dtype == SparseDtype(rdtype) + self._check_comparison_ops(a, b, values, rvalues) + + a = SparseArray(values, kind=kind, fill_value=1) + b = SparseArray(rvalues, kind=kind, fill_value=2) + assert b.dtype == SparseDtype(rdtype, fill_value=2) + self._check_comparison_ops(a, b, values, rvalues) + + def test_xor(self): + s = SparseArray([True, True, False, False]) + t = SparseArray([True, False, True, False]) + result = s ^ t + sp_index = pd.core.arrays.sparse.IntIndex(4, np.array([0, 1, 2], dtype="int32")) + expected = SparseArray([False, True, True], sparse_index=sp_index) + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize("op", [operator.eq, operator.add]) +def test_with_list(op): + arr = SparseArray([0, 1], fill_value=0) + result = op(arr, [0, 1]) + expected = op(arr, SparseArray([0, 1])) + tm.assert_sp_array_equal(result, expected) + + +def test_with_dataframe(): + # GH#27910 + arr = SparseArray([0, 1], fill_value=0) + df = pd.DataFrame([[1, 2], [3, 4]]) + result = arr.__add__(df) + assert result is NotImplemented + + +def test_with_zerodim_ndarray(): + # GH#27910 + arr = SparseArray([0, 1], fill_value=0) + + result = arr * np.array(2) + expected = arr * 2 + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize("ufunc", [np.abs, np.exp]) +@pytest.mark.parametrize( + "arr", [SparseArray([0, 0, -1, 1]), SparseArray([None, None, -1, 1])] +) +def test_ufuncs(ufunc, arr): + result = ufunc(arr) + fill_value = ufunc(arr.fill_value) + expected = SparseArray(ufunc(np.asarray(arr)), fill_value=fill_value) + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize( + "a, b", + [ + (SparseArray([0, 0, 0]), np.array([0, 1, 2])), + (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])), + (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])), + (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 
2])), + (SparseArray([0, 0, 0], fill_value=1), np.array([0, 1, 2])), + ], +) +@pytest.mark.parametrize("ufunc", [np.add, np.greater]) +def test_binary_ufuncs(ufunc, a, b): + # can't say anything about fill value here. + result = ufunc(a, b) + expected = ufunc(np.asarray(a), np.asarray(b)) + assert isinstance(result, SparseArray) + tm.assert_numpy_array_equal(np.asarray(result), expected) + + +def test_ndarray_inplace(): + sparray = SparseArray([0, 2, 0, 0]) + ndarray = np.array([0, 1, 2, 3]) + ndarray += sparray + expected = np.array([0, 3, 2, 3]) + tm.assert_numpy_array_equal(ndarray, expected) + + +def test_sparray_inplace(): + sparray = SparseArray([0, 2, 0, 0]) + ndarray = np.array([0, 1, 2, 3]) + sparray += ndarray + expected = SparseArray([0, 3, 2, 3], fill_value=0) + tm.assert_sp_array_equal(sparray, expected) + + +@pytest.mark.parametrize("cons", [list, np.array, SparseArray]) +def test_mismatched_length_cmp_op(cons): + left = SparseArray([True, True]) + right = cons([True, True, True]) + with pytest.raises(ValueError, match="operands have mismatched length"): + left & right + + +@pytest.mark.parametrize("op", ["add", "sub", "mul", "truediv", "floordiv", "pow"]) +@pytest.mark.parametrize("fill_value", [np.nan, 3]) +def test_binary_operators(op, fill_value): + op = getattr(operator, op) + data1 = np.random.default_rng(2).standard_normal(20) + data2 = np.random.default_rng(2).standard_normal(20) + + data1[::2] = fill_value + data2[::3] = fill_value + + first = SparseArray(data1, fill_value=fill_value) + second = SparseArray(data2, fill_value=fill_value) + + with np.errstate(all="ignore"): + res = op(first, second) + exp = SparseArray( + op(first.to_dense(), second.to_dense()), fill_value=first.fill_value + ) + assert isinstance(res, SparseArray) + tm.assert_almost_equal(res.to_dense(), exp.to_dense()) + + res2 = op(first, second.to_dense()) + assert isinstance(res2, SparseArray) + tm.assert_sp_array_equal(res, res2) + + res3 = op(first.to_dense(), second) + assert isinstance(res3, SparseArray) + tm.assert_sp_array_equal(res, res3) + + res4 = op(first, 4) + assert isinstance(res4, SparseArray) + + # Ignore this if the actual op raises (e.g. pow). 
+ try: + exp = op(first.to_dense(), 4) + exp_fv = op(first.fill_value, 4) + except ValueError: + pass + else: + tm.assert_almost_equal(res4.fill_value, exp_fv) + tm.assert_almost_equal(res4.to_dense(), exp) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_array.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_array.py new file mode 100644 index 0000000000000000000000000000000000000000..883d6ea3959ff6c12659c55762b889be18349ef7 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_array.py @@ -0,0 +1,480 @@ +import re + +import numpy as np +import pytest + +from pandas._libs.sparse import IntIndex + +import pandas as pd +from pandas import ( + SparseDtype, + isna, +) +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +@pytest.fixture +def arr_data(): + """Fixture returning numpy array with valid and missing entries""" + return np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6]) + + +@pytest.fixture +def arr(arr_data): + """Fixture returning SparseArray from 'arr_data'""" + return SparseArray(arr_data) + + +@pytest.fixture +def zarr(): + """Fixture returning SparseArray with integer entries and 'fill_value=0'""" + return SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) + + +class TestSparseArray: + @pytest.mark.parametrize("fill_value", [0, None, np.nan]) + def test_shift_fill_value(self, fill_value): + # GH #24128 + sparse = SparseArray(np.array([1, 0, 0, 3, 0]), fill_value=8.0) + res = sparse.shift(1, fill_value=fill_value) + if isna(fill_value): + fill_value = res.dtype.na_value + exp = SparseArray(np.array([fill_value, 1, 0, 0, 3]), fill_value=8.0) + tm.assert_sp_array_equal(res, exp) + + def test_set_fill_value(self): + arr = SparseArray([1.0, np.nan, 2.0], fill_value=np.nan) + arr.fill_value = 2 + assert arr.fill_value == 2 + + arr = SparseArray([1, 0, 2], fill_value=0, dtype=np.int64) + arr.fill_value = 2 + assert arr.fill_value == 2 + + msg = "Allowing arbitrary scalar fill_value in SparseDtype is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + arr.fill_value = 3.1 + assert arr.fill_value == 3.1 + + arr.fill_value = np.nan + assert np.isnan(arr.fill_value) + + arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool_) + arr.fill_value = True + assert arr.fill_value is True + + with tm.assert_produces_warning(FutureWarning, match=msg): + arr.fill_value = 0 + + arr.fill_value = np.nan + assert np.isnan(arr.fill_value) + + @pytest.mark.parametrize("val", [[1, 2, 3], np.array([1, 2]), (1, 2, 3)]) + def test_set_fill_invalid_non_scalar(self, val): + arr = SparseArray([True, False, True], fill_value=False, dtype=np.bool_) + msg = "fill_value must be a scalar" + + with pytest.raises(ValueError, match=msg): + arr.fill_value = val + + def test_copy(self, arr): + arr2 = arr.copy() + assert arr2.sp_values is not arr.sp_values + assert arr2.sp_index is arr.sp_index + + def test_values_asarray(self, arr_data, arr): + tm.assert_almost_equal(arr.to_dense(), arr_data) + + @pytest.mark.parametrize( + "data,shape,dtype", + [ + ([0, 0, 0, 0, 0], (5,), None), + ([], (0,), None), + ([0], (1,), None), + (["A", "A", np.nan, "B"], (4,), object), + ], + ) + def test_shape(self, data, shape, dtype): + # GH 21126 + out = SparseArray(data, dtype=dtype) + assert out.shape == shape + + @pytest.mark.parametrize( + "vals", + [ + [np.nan, np.nan, np.nan, np.nan, np.nan], + [1, np.nan, np.nan, 3, np.nan], + [1, np.nan, 0, 
3, 0], + ], + ) + @pytest.mark.parametrize("fill_value", [None, 0]) + def test_dense_repr(self, vals, fill_value): + vals = np.array(vals) + arr = SparseArray(vals, fill_value=fill_value) + + res = arr.to_dense() + tm.assert_numpy_array_equal(res, vals) + + @pytest.mark.parametrize("fix", ["arr", "zarr"]) + def test_pickle(self, fix, request): + obj = request.getfixturevalue(fix) + unpickled = tm.round_trip_pickle(obj) + tm.assert_sp_array_equal(unpickled, obj) + + def test_generator_warnings(self): + sp_arr = SparseArray([1, 2, 3]) + with tm.assert_produces_warning(None): + for _ in sp_arr: + pass + + def test_where_retain_fill_value(self): + # GH#45691 don't lose fill_value on _where + arr = SparseArray([np.nan, 1.0], fill_value=0) + + mask = np.array([True, False]) + + res = arr._where(~mask, 1) + exp = SparseArray([1, 1.0], fill_value=0) + tm.assert_sp_array_equal(res, exp) + + ser = pd.Series(arr) + res = ser.where(~mask, 1) + tm.assert_series_equal(res, pd.Series(exp)) + + def test_fillna(self): + s = SparseArray([1, np.nan, np.nan, 3, np.nan]) + res = s.fillna(-1) + exp = SparseArray([1, -1, -1, 3, -1], fill_value=-1, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0) + res = s.fillna(-1) + exp = SparseArray([1, -1, -1, 3, -1], fill_value=0, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([1, np.nan, 0, 3, 0]) + res = s.fillna(-1) + exp = SparseArray([1, -1, 0, 3, 0], fill_value=-1, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([1, np.nan, 0, 3, 0], fill_value=0) + res = s.fillna(-1) + exp = SparseArray([1, -1, 0, 3, 0], fill_value=0, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([np.nan, np.nan, np.nan, np.nan]) + res = s.fillna(-1) + exp = SparseArray([-1, -1, -1, -1], fill_value=-1, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + s = SparseArray([np.nan, np.nan, np.nan, np.nan], fill_value=0) + res = s.fillna(-1) + exp = SparseArray([-1, -1, -1, -1], fill_value=0, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + # float dtype's fill_value is np.nan, replaced by -1 + s = SparseArray([0.0, 0.0, 0.0, 0.0]) + res = s.fillna(-1) + exp = SparseArray([0.0, 0.0, 0.0, 0.0], fill_value=-1) + tm.assert_sp_array_equal(res, exp) + + # int dtype shouldn't have missing. No changes. + s = SparseArray([0, 0, 0, 0]) + assert s.dtype == SparseDtype(np.int64) + assert s.fill_value == 0 + res = s.fillna(-1) + tm.assert_sp_array_equal(res, s) + + s = SparseArray([0, 0, 0, 0], fill_value=0) + assert s.dtype == SparseDtype(np.int64) + assert s.fill_value == 0 + res = s.fillna(-1) + exp = SparseArray([0, 0, 0, 0], fill_value=0) + tm.assert_sp_array_equal(res, exp) + + # fill_value can be nan if there is no missing hole. + # only fill_value will be changed + s = SparseArray([0, 0, 0, 0], fill_value=np.nan) + assert s.dtype == SparseDtype(np.int64, fill_value=np.nan) + assert np.isnan(s.fill_value) + res = s.fillna(-1) + exp = SparseArray([0, 0, 0, 0], fill_value=-1) + tm.assert_sp_array_equal(res, exp) + + def test_fillna_overlap(self): + s = SparseArray([1, np.nan, np.nan, 3, np.nan]) + # filling with existing value doesn't replace existing value with + # fill_value, i.e. 
existing 3 remains in sp_values + res = s.fillna(3) + exp = np.array([1, 3, 3, 3, 3], dtype=np.float64) + tm.assert_numpy_array_equal(res.to_dense(), exp) + + s = SparseArray([1, np.nan, np.nan, 3, np.nan], fill_value=0) + res = s.fillna(3) + exp = SparseArray([1, 3, 3, 3, 3], fill_value=0, dtype=np.float64) + tm.assert_sp_array_equal(res, exp) + + def test_nonzero(self): + # Tests regression #21172. + sa = SparseArray([float("nan"), float("nan"), 1, 0, 0, 2, 0, 0, 0, 3, 0, 0]) + expected = np.array([2, 5, 9], dtype=np.int32) + (result,) = sa.nonzero() + tm.assert_numpy_array_equal(expected, result) + + sa = SparseArray([0, 0, 1, 0, 0, 2, 0, 0, 0, 3, 0, 0]) + (result,) = sa.nonzero() + tm.assert_numpy_array_equal(expected, result) + + +class TestSparseArrayAnalytics: + @pytest.mark.parametrize( + "data,expected", + [ + ( + np.array([1, 2, 3, 4, 5], dtype=float), # non-null data + SparseArray(np.array([1.0, 3.0, 6.0, 10.0, 15.0])), + ), + ( + np.array([1, 2, np.nan, 4, 5], dtype=float), # null data + SparseArray(np.array([1.0, 3.0, np.nan, 7.0, 12.0])), + ), + ], + ) + @pytest.mark.parametrize("numpy", [True, False]) + def test_cumsum(self, data, expected, numpy): + cumsum = np.cumsum if numpy else lambda s: s.cumsum() + + out = cumsum(SparseArray(data)) + tm.assert_sp_array_equal(out, expected) + + out = cumsum(SparseArray(data, fill_value=np.nan)) + tm.assert_sp_array_equal(out, expected) + + out = cumsum(SparseArray(data, fill_value=2)) + tm.assert_sp_array_equal(out, expected) + + if numpy: # numpy compatibility checks. + msg = "the 'dtype' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.cumsum(SparseArray(data), dtype=np.int64) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.cumsum(SparseArray(data), out=out) + else: + axis = 1 # SparseArray currently 1-D, so only axis = 0 is valid. 
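# Illustrative sketch, mirroring the assertion just below: any non-zero axis
# is rejected for the 1-D array, e.g.
#   SparseArray(np.array([1.0, 2.0, 3.0])).cumsum(axis=1)
#   # raises ValueError: axis(=1) out of bounds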
+ msg = re.escape(f"axis(={axis}) out of bounds") + with pytest.raises(ValueError, match=msg): + SparseArray(data).cumsum(axis=axis) + + def test_ufunc(self): + # GH 13853 make sure ufunc is applied to fill_value + sparse = SparseArray([1, np.nan, 2, np.nan, -2]) + result = SparseArray([1, np.nan, 2, np.nan, 2]) + tm.assert_sp_array_equal(abs(sparse), result) + tm.assert_sp_array_equal(np.abs(sparse), result) + + sparse = SparseArray([1, -1, 2, -2], fill_value=1) + result = SparseArray([1, 2, 2], sparse_index=sparse.sp_index, fill_value=1) + tm.assert_sp_array_equal(abs(sparse), result) + tm.assert_sp_array_equal(np.abs(sparse), result) + + sparse = SparseArray([1, -1, 2, -2], fill_value=-1) + exp = SparseArray([1, 1, 2, 2], fill_value=1) + tm.assert_sp_array_equal(abs(sparse), exp) + tm.assert_sp_array_equal(np.abs(sparse), exp) + + sparse = SparseArray([1, np.nan, 2, np.nan, -2]) + result = SparseArray(np.sin([1, np.nan, 2, np.nan, -2])) + tm.assert_sp_array_equal(np.sin(sparse), result) + + sparse = SparseArray([1, -1, 2, -2], fill_value=1) + result = SparseArray(np.sin([1, -1, 2, -2]), fill_value=np.sin(1)) + tm.assert_sp_array_equal(np.sin(sparse), result) + + sparse = SparseArray([1, -1, 0, -2], fill_value=0) + result = SparseArray(np.sin([1, -1, 0, -2]), fill_value=np.sin(0)) + tm.assert_sp_array_equal(np.sin(sparse), result) + + def test_ufunc_args(self): + # GH 13853 make sure ufunc is applied to fill_value, including its arg + sparse = SparseArray([1, np.nan, 2, np.nan, -2]) + result = SparseArray([2, np.nan, 3, np.nan, -1]) + tm.assert_sp_array_equal(np.add(sparse, 1), result) + + sparse = SparseArray([1, -1, 2, -2], fill_value=1) + result = SparseArray([2, 0, 3, -1], fill_value=2) + tm.assert_sp_array_equal(np.add(sparse, 1), result) + + sparse = SparseArray([1, -1, 0, -2], fill_value=0) + result = SparseArray([2, 0, 1, -1], fill_value=1) + tm.assert_sp_array_equal(np.add(sparse, 1), result) + + @pytest.mark.parametrize("fill_value", [0.0, np.nan]) + def test_modf(self, fill_value): + # https://github.com/pandas-dev/pandas/issues/26946 + sparse = SparseArray([fill_value] * 10 + [1.1, 2.2], fill_value=fill_value) + r1, r2 = np.modf(sparse) + e1, e2 = np.modf(np.asarray(sparse)) + tm.assert_sp_array_equal(r1, SparseArray(e1, fill_value=fill_value)) + tm.assert_sp_array_equal(r2, SparseArray(e2, fill_value=fill_value)) + + def test_nbytes_integer(self): + arr = SparseArray([1, 0, 0, 0, 2], kind="integer") + result = arr.nbytes + # (2 * 8) + 2 * 4 + assert result == 24 + + def test_nbytes_block(self): + arr = SparseArray([1, 2, 0, 0, 0], kind="block") + result = arr.nbytes + # (2 * 8) + 4 + 4 + # sp_values, blocs, blengths + assert result == 24 + + def test_asarray_datetime64(self): + s = SparseArray(pd.to_datetime(["2012", None, None, "2013"])) + np.asarray(s) + + def test_density(self): + arr = SparseArray([0, 1]) + assert arr.density == 0.5 + + def test_npoints(self): + arr = SparseArray([0, 1]) + assert arr.npoints == 1 + + +def test_setting_fill_value_fillna_still_works(): + # This is why letting users update fill_value / dtype is bad + # astype has the same problem. + arr = SparseArray([1.0, np.nan, 1.0], fill_value=0.0) + arr.fill_value = np.nan + result = arr.isna() + # Can't do direct comparison, since the sp_index will be different + # So let's convert to ndarray and check there. 
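# (Illustrative note: with fill_value switched to nan, isna() flags the stored
# nan, so np.asarray(result) should equal np.array([False, True, False]),
# i.e. the `expected` defined below; only the sparse index layout differs.)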
+ result = np.asarray(result) + + expected = np.array([False, True, False]) + tm.assert_numpy_array_equal(result, expected) + + +def test_setting_fill_value_updates(): + arr = SparseArray([0.0, np.nan], fill_value=0) + arr.fill_value = np.nan + # use private constructor to get the index right + # otherwise both nans would be un-stored. + expected = SparseArray._simple_new( + sparse_array=np.array([np.nan]), + sparse_index=IntIndex(2, [1]), + dtype=SparseDtype(float, np.nan), + ) + tm.assert_sp_array_equal(arr, expected) + + +@pytest.mark.parametrize( + "arr,fill_value,loc", + [ + ([None, 1, 2], None, 0), + ([0, None, 2], None, 1), + ([0, 1, None], None, 2), + ([0, 1, 1, None, None], None, 3), + ([1, 1, 1, 2], None, -1), + ([], None, -1), + ([None, 1, 0, 0, None, 2], None, 0), + ([None, 1, 0, 0, None, 2], 1, 1), + ([None, 1, 0, 0, None, 2], 2, 5), + ([None, 1, 0, 0, None, 2], 3, -1), + ([None, 0, 0, 1, 2, 1], 0, 1), + ([None, 0, 0, 1, 2, 1], 1, 3), + ], +) +def test_first_fill_value_loc(arr, fill_value, loc): + result = SparseArray(arr, fill_value=fill_value)._first_fill_value_loc() + assert result == loc + + +@pytest.mark.parametrize( + "arr", + [ + [1, 2, np.nan, np.nan], + [1, np.nan, 2, np.nan], + [1, 2, np.nan], + [np.nan, 1, 0, 0, np.nan, 2], + [np.nan, 0, 0, 1, 2, 1], + ], +) +@pytest.mark.parametrize("fill_value", [np.nan, 0, 1]) +def test_unique_na_fill(arr, fill_value): + a = SparseArray(arr, fill_value=fill_value).unique() + b = pd.Series(arr).unique() + assert isinstance(a, SparseArray) + a = np.asarray(a) + tm.assert_numpy_array_equal(a, b) + + +def test_unique_all_sparse(): + # https://github.com/pandas-dev/pandas/issues/23168 + arr = SparseArray([0, 0]) + result = arr.unique() + expected = SparseArray([0]) + tm.assert_sp_array_equal(result, expected) + + +def test_map(): + arr = SparseArray([0, 1, 2]) + expected = SparseArray([10, 11, 12], fill_value=10) + + # dict + result = arr.map({0: 10, 1: 11, 2: 12}) + tm.assert_sp_array_equal(result, expected) + + # series + result = arr.map(pd.Series({0: 10, 1: 11, 2: 12})) + tm.assert_sp_array_equal(result, expected) + + # function + result = arr.map(pd.Series({0: 10, 1: 11, 2: 12})) + expected = SparseArray([10, 11, 12], fill_value=10) + tm.assert_sp_array_equal(result, expected) + + +def test_map_missing(): + arr = SparseArray([0, 1, 2]) + expected = SparseArray([10, 11, None], fill_value=10) + + result = arr.map({0: 10, 1: 11}) + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize("fill_value", [np.nan, 1]) +def test_dropna(fill_value): + # GH-28287 + arr = SparseArray([np.nan, 1], fill_value=fill_value) + exp = SparseArray([1.0], fill_value=fill_value) + tm.assert_sp_array_equal(arr.dropna(), exp) + + df = pd.DataFrame({"a": [0, 1], "b": arr}) + expected_df = pd.DataFrame({"a": [1], "b": exp}, index=pd.Index([1])) + tm.assert_equal(df.dropna(), expected_df) + + +def test_drop_duplicates_fill_value(): + # GH 11726 + df = pd.DataFrame(np.zeros((5, 5))).apply(lambda x: SparseArray(x, fill_value=0)) + result = df.drop_duplicates() + expected = pd.DataFrame({i: SparseArray([0.0], fill_value=0) for i in range(5)}) + tm.assert_frame_equal(result, expected) + + +def test_zero_sparse_column(): + # GH 27781 + df1 = pd.DataFrame({"A": SparseArray([0, 0, 0]), "B": [1, 2, 3]}) + df2 = pd.DataFrame({"A": SparseArray([0, 1, 0]), "B": [1, 2, 3]}) + result = df1.loc[df1["B"] != 2] + expected = df2.loc[df2["B"] != 2] + tm.assert_frame_equal(result, expected) + + expected = pd.DataFrame({"A": SparseArray([0, 0]), "B": [1, 3]}, 
index=[0, 2]) + tm.assert_frame_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_astype.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..83a507e679d460d687b27d09a4bed8321e1199e2 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_astype.py @@ -0,0 +1,133 @@ +import numpy as np +import pytest + +from pandas._libs.sparse import IntIndex + +from pandas import ( + SparseDtype, + Timestamp, +) +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +class TestAstype: + def test_astype(self): + # float -> float + arr = SparseArray([None, None, 0, 2]) + result = arr.astype("Sparse[float32]") + expected = SparseArray([None, None, 0, 2], dtype=np.dtype("float32")) + tm.assert_sp_array_equal(result, expected) + + dtype = SparseDtype("float64", fill_value=0) + result = arr.astype(dtype) + expected = SparseArray._simple_new( + np.array([0.0, 2.0], dtype=dtype.subtype), IntIndex(4, [2, 3]), dtype + ) + tm.assert_sp_array_equal(result, expected) + + dtype = SparseDtype("int64", 0) + result = arr.astype(dtype) + expected = SparseArray._simple_new( + np.array([0, 2], dtype=np.int64), IntIndex(4, [2, 3]), dtype + ) + tm.assert_sp_array_equal(result, expected) + + arr = SparseArray([0, np.nan, 0, 1], fill_value=0) + with pytest.raises(ValueError, match="NA"): + arr.astype("Sparse[i8]") + + def test_astype_bool(self): + a = SparseArray([1, 0, 0, 1], dtype=SparseDtype(int, 0)) + result = a.astype(bool) + expected = np.array([1, 0, 0, 1], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + # update fill value + result = a.astype(SparseDtype(bool, False)) + expected = SparseArray( + [True, False, False, True], dtype=SparseDtype(bool, False) + ) + tm.assert_sp_array_equal(result, expected) + + def test_astype_all(self, any_real_numpy_dtype): + vals = np.array([1, 2, 3]) + arr = SparseArray(vals, fill_value=1) + typ = np.dtype(any_real_numpy_dtype) + res = arr.astype(typ) + tm.assert_numpy_array_equal(res, vals.astype(any_real_numpy_dtype)) + + @pytest.mark.parametrize( + "arr, dtype, expected", + [ + ( + SparseArray([0, 1]), + "float", + SparseArray([0.0, 1.0], dtype=SparseDtype(float, 0.0)), + ), + (SparseArray([0, 1]), bool, SparseArray([False, True])), + ( + SparseArray([0, 1], fill_value=1), + bool, + SparseArray([False, True], dtype=SparseDtype(bool, True)), + ), + pytest.param( + SparseArray([0, 1]), + "datetime64[ns]", + SparseArray( + np.array([0, 1], dtype="datetime64[ns]"), + dtype=SparseDtype("datetime64[ns]", Timestamp("1970")), + ), + ), + ( + SparseArray([0, 1, 10]), + str, + SparseArray(["0", "1", "10"], dtype=SparseDtype(str, "0")), + ), + (SparseArray(["10", "20"]), float, SparseArray([10.0, 20.0])), + ( + SparseArray([0, 1, 0]), + object, + SparseArray([0, 1, 0], dtype=SparseDtype(object, 0)), + ), + ], + ) + def test_astype_more(self, arr, dtype, expected): + result = arr.astype(arr.dtype.update_dtype(dtype)) + tm.assert_sp_array_equal(result, expected) + + def test_astype_nan_raises(self): + arr = SparseArray([1.0, np.nan]) + with pytest.raises(ValueError, match="Cannot convert non-finite"): + arr.astype(int) + + def test_astype_copy_false(self): + # GH#34456 bug caused by using .view instead of .astype in astype_nansafe + arr = SparseArray([1, 2, 3]) + + dtype = SparseDtype(float, 0) + + result = arr.astype(dtype, copy=False) + expected = 
SparseArray([1.0, 2.0, 3.0], fill_value=0.0) + tm.assert_sp_array_equal(result, expected) + + def test_astype_dt64_to_int64(self): + # GH#49631 match non-sparse behavior + values = np.array(["NaT", "2016-01-02", "2016-01-03"], dtype="M8[ns]") + + arr = SparseArray(values) + result = arr.astype("int64") + expected = values.astype("int64") + tm.assert_numpy_array_equal(result, expected) + + # we should also be able to cast to equivalent Sparse[int64] + dtype_int64 = SparseDtype("int64", np.iinfo(np.int64).min) + result2 = arr.astype(dtype_int64) + tm.assert_numpy_array_equal(result2.to_numpy(), expected) + + # GH#50087 we should match the non-sparse behavior regardless of + # if we have a fill_value other than NaT + dtype = SparseDtype("datetime64[ns]", values[1]) + arr3 = SparseArray(values, dtype=dtype) + result3 = arr3.astype("int64") + tm.assert_numpy_array_equal(result3, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_combine_concat.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_combine_concat.py new file mode 100644 index 0000000000000000000000000000000000000000..0f09af269148bc6fec712b9b1df63cca6f44d248 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_combine_concat.py @@ -0,0 +1,62 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +class TestSparseArrayConcat: + @pytest.mark.parametrize("kind", ["integer", "block"]) + def test_basic(self, kind): + a = SparseArray([1, 0, 0, 2], kind=kind) + b = SparseArray([1, 0, 2, 2], kind=kind) + + result = SparseArray._concat_same_type([a, b]) + # Can't make any assertions about the sparse index itself + # since we aren't don't merge sparse blocs across arrays + # in to_concat + expected = np.array([1, 2, 1, 2, 2], dtype="int64") + tm.assert_numpy_array_equal(result.sp_values, expected) + assert result.kind == kind + + @pytest.mark.parametrize("kind", ["integer", "block"]) + def test_uses_first_kind(self, kind): + other = "integer" if kind == "block" else "block" + a = SparseArray([1, 0, 0, 2], kind=kind) + b = SparseArray([1, 0, 2, 2], kind=other) + + result = SparseArray._concat_same_type([a, b]) + expected = np.array([1, 2, 1, 2, 2], dtype="int64") + tm.assert_numpy_array_equal(result.sp_values, expected) + assert result.kind == kind + + +@pytest.mark.parametrize( + "other, expected_dtype", + [ + # compatible dtype -> preserve sparse + (pd.Series([3, 4, 5], dtype="int64"), pd.SparseDtype("int64", 0)), + # (pd.Series([3, 4, 5], dtype="Int64"), pd.SparseDtype("int64", 0)), + # incompatible dtype -> Sparse[common dtype] + (pd.Series([1.5, 2.5, 3.5], dtype="float64"), pd.SparseDtype("float64", 0)), + # incompatible dtype -> Sparse[object] dtype + (pd.Series(["a", "b", "c"], dtype=object), pd.SparseDtype(object, 0)), + # categorical with compatible categories -> dtype of the categories + (pd.Series([3, 4, 5], dtype="category"), np.dtype("int64")), + (pd.Series([1.5, 2.5, 3.5], dtype="category"), np.dtype("float64")), + # categorical with incompatible categories -> object dtype + (pd.Series(["a", "b", "c"], dtype="category"), np.dtype(object)), + ], +) +def test_concat_with_non_sparse(other, expected_dtype): + # https://github.com/pandas-dev/pandas/issues/34336 + s_sparse = pd.Series([1, 0, 2], dtype=pd.SparseDtype("int64", 0)) + + result = pd.concat([s_sparse, other], ignore_index=True) + expected = pd.Series(list(s_sparse) + 
list(other)).astype(expected_dtype) + tm.assert_series_equal(result, expected) + + result = pd.concat([other, s_sparse], ignore_index=True) + expected = pd.Series(list(other) + list(s_sparse)).astype(expected_dtype) + tm.assert_series_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_constructors.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..2831c8abdaf137b6454ea8f73bff7e94a3ec1b2b --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_constructors.py @@ -0,0 +1,285 @@ +import numpy as np +import pytest + +from pandas._libs.sparse import IntIndex + +import pandas as pd +from pandas import ( + SparseDtype, + isna, +) +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +class TestConstructors: + def test_constructor_dtype(self): + arr = SparseArray([np.nan, 1, 2, np.nan]) + assert arr.dtype == SparseDtype(np.float64, np.nan) + assert arr.dtype.subtype == np.float64 + assert np.isnan(arr.fill_value) + + arr = SparseArray([np.nan, 1, 2, np.nan], fill_value=0) + assert arr.dtype == SparseDtype(np.float64, 0) + assert arr.fill_value == 0 + + arr = SparseArray([0, 1, 2, 4], dtype=np.float64) + assert arr.dtype == SparseDtype(np.float64, np.nan) + assert np.isnan(arr.fill_value) + + arr = SparseArray([0, 1, 2, 4], dtype=np.int64) + assert arr.dtype == SparseDtype(np.int64, 0) + assert arr.fill_value == 0 + + arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=np.int64) + assert arr.dtype == SparseDtype(np.int64, 0) + assert arr.fill_value == 0 + + arr = SparseArray([0, 1, 2, 4], dtype=None) + assert arr.dtype == SparseDtype(np.int64, 0) + assert arr.fill_value == 0 + + arr = SparseArray([0, 1, 2, 4], fill_value=0, dtype=None) + assert arr.dtype == SparseDtype(np.int64, 0) + assert arr.fill_value == 0 + + def test_constructor_dtype_str(self): + result = SparseArray([1, 2, 3], dtype="int") + expected = SparseArray([1, 2, 3], dtype=int) + tm.assert_sp_array_equal(result, expected) + + def test_constructor_sparse_dtype(self): + result = SparseArray([1, 0, 0, 1], dtype=SparseDtype("int64", -1)) + expected = SparseArray([1, 0, 0, 1], fill_value=-1, dtype=np.int64) + tm.assert_sp_array_equal(result, expected) + assert result.sp_values.dtype == np.dtype("int64") + + def test_constructor_sparse_dtype_str(self): + result = SparseArray([1, 0, 0, 1], dtype="Sparse[int32]") + expected = SparseArray([1, 0, 0, 1], dtype=np.int32) + tm.assert_sp_array_equal(result, expected) + assert result.sp_values.dtype == np.dtype("int32") + + def test_constructor_object_dtype(self): + # GH#11856 + arr = SparseArray(["A", "A", np.nan, "B"], dtype=object) + assert arr.dtype == SparseDtype(object) + assert np.isnan(arr.fill_value) + + arr = SparseArray(["A", "A", np.nan, "B"], dtype=object, fill_value="A") + assert arr.dtype == SparseDtype(object, "A") + assert arr.fill_value == "A" + + def test_constructor_object_dtype_bool_fill(self): + # GH#17574 + data = [False, 0, 100.0, 0.0] + arr = SparseArray(data, dtype=object, fill_value=False) + assert arr.dtype == SparseDtype(object, False) + assert arr.fill_value is False + arr_expected = np.array(data, dtype=object) + it = (type(x) == type(y) and x == y for x, y in zip(arr, arr_expected)) + assert np.fromiter(it, dtype=np.bool_).all() + + @pytest.mark.parametrize("dtype", [SparseDtype(int, 0), int]) + def test_constructor_na_dtype(self, 
dtype): + with pytest.raises(ValueError, match="Cannot convert"): + SparseArray([0, 1, np.nan], dtype=dtype) + + def test_constructor_warns_when_losing_timezone(self): + # GH#32501 warn when losing timezone information + dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific") + + expected = SparseArray(np.asarray(dti, dtype="datetime64[ns]")) + + with tm.assert_produces_warning(UserWarning): + result = SparseArray(dti) + + tm.assert_sp_array_equal(result, expected) + + with tm.assert_produces_warning(UserWarning): + result = SparseArray(pd.Series(dti)) + + tm.assert_sp_array_equal(result, expected) + + def test_constructor_spindex_dtype(self): + arr = SparseArray(data=[1, 2], sparse_index=IntIndex(4, [1, 2])) + # TODO: actionable? + # XXX: Behavior change: specifying SparseIndex no longer changes the + # fill_value + expected = SparseArray([0, 1, 2, 0], kind="integer") + tm.assert_sp_array_equal(arr, expected) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + arr = SparseArray( + data=[1, 2, 3], + sparse_index=IntIndex(4, [1, 2, 3]), + dtype=np.int64, + fill_value=0, + ) + exp = SparseArray([0, 1, 2, 3], dtype=np.int64, fill_value=0) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + arr = SparseArray( + data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=np.int64 + ) + exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=np.int64) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + arr = SparseArray( + data=[1, 2, 3], + sparse_index=IntIndex(4, [1, 2, 3]), + dtype=None, + fill_value=0, + ) + exp = SparseArray([0, 1, 2, 3], dtype=None) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + @pytest.mark.parametrize("sparse_index", [None, IntIndex(1, [0])]) + def test_constructor_spindex_dtype_scalar(self, sparse_index): + # scalar input + msg = "Constructing SparseArray with scalar data is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + arr = SparseArray(data=1, sparse_index=sparse_index, dtype=None) + exp = SparseArray([1], dtype=None) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + with tm.assert_produces_warning(FutureWarning, match=msg): + arr = SparseArray(data=1, sparse_index=IntIndex(1, [0]), dtype=None) + exp = SparseArray([1], dtype=None) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + def test_constructor_spindex_dtype_scalar_broadcasts(self): + arr = SparseArray( + data=[1, 2], sparse_index=IntIndex(4, [1, 2]), fill_value=0, dtype=None + ) + exp = SparseArray([0, 1, 2, 0], fill_value=0, dtype=None) + tm.assert_sp_array_equal(arr, exp) + assert arr.dtype == SparseDtype(np.int64) + assert arr.fill_value == 0 + + @pytest.mark.parametrize( + "data, fill_value", + [ + (np.array([1, 2]), 0), + (np.array([1.0, 2.0]), np.nan), + ([True, False], False), + ([pd.Timestamp("2017-01-01")], pd.NaT), + ], + ) + def test_constructor_inferred_fill_value(self, data, fill_value): + result = SparseArray(data).fill_value + + if isna(fill_value): + assert isna(result) + else: + assert result == fill_value + + @pytest.mark.parametrize("format", ["coo", "csc", "csr"]) + @pytest.mark.parametrize("size", [0, 10]) + def test_from_spmatrix(self, size, format): + sp_sparse = pytest.importorskip("scipy.sparse") + + mat = 
sp_sparse.random(size, 1, density=0.5, format=format) + result = SparseArray.from_spmatrix(mat) + + result = np.asarray(result) + expected = mat.toarray().ravel() + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("format", ["coo", "csc", "csr"]) + def test_from_spmatrix_including_explicit_zero(self, format): + sp_sparse = pytest.importorskip("scipy.sparse") + + mat = sp_sparse.random(10, 1, density=0.5, format=format) + mat.data[0] = 0 + result = SparseArray.from_spmatrix(mat) + + result = np.asarray(result) + expected = mat.toarray().ravel() + tm.assert_numpy_array_equal(result, expected) + + def test_from_spmatrix_raises(self): + sp_sparse = pytest.importorskip("scipy.sparse") + + mat = sp_sparse.eye(5, 4, format="csc") + + with pytest.raises(ValueError, match="not '4'"): + SparseArray.from_spmatrix(mat) + + def test_constructor_from_too_large_array(self): + with pytest.raises(TypeError, match="expected dimension <= 1 data"): + SparseArray(np.arange(10).reshape((2, 5))) + + def test_constructor_from_sparse(self): + zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) + res = SparseArray(zarr) + assert res.fill_value == 0 + tm.assert_almost_equal(res.sp_values, zarr.sp_values) + + def test_constructor_copy(self): + arr_data = np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6]) + arr = SparseArray(arr_data) + + cp = SparseArray(arr, copy=True) + cp.sp_values[:3] = 0 + assert not (arr.sp_values[:3] == 0).any() + + not_copy = SparseArray(arr) + not_copy.sp_values[:3] = 0 + assert (arr.sp_values[:3] == 0).all() + + def test_constructor_bool(self): + # GH#10648 + data = np.array([False, False, True, True, False, False]) + arr = SparseArray(data, fill_value=False, dtype=bool) + + assert arr.dtype == SparseDtype(bool) + tm.assert_numpy_array_equal(arr.sp_values, np.array([True, True])) + # Behavior change: np.asarray densifies. + # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr)) + tm.assert_numpy_array_equal(arr.sp_index.indices, np.array([2, 3], np.int32)) + + dense = arr.to_dense() + assert dense.dtype == bool + tm.assert_numpy_array_equal(dense, data) + + def test_constructor_bool_fill_value(self): + arr = SparseArray([True, False, True], dtype=None) + assert arr.dtype == SparseDtype(np.bool_) + assert not arr.fill_value + + arr = SparseArray([True, False, True], dtype=np.bool_) + assert arr.dtype == SparseDtype(np.bool_) + assert not arr.fill_value + + arr = SparseArray([True, False, True], dtype=np.bool_, fill_value=True) + assert arr.dtype == SparseDtype(np.bool_, True) + assert arr.fill_value + + def test_constructor_float32(self): + # GH#10648 + data = np.array([1.0, np.nan, 3], dtype=np.float32) + arr = SparseArray(data, dtype=np.float32) + + assert arr.dtype == SparseDtype(np.float32) + tm.assert_numpy_array_equal(arr.sp_values, np.array([1, 3], dtype=np.float32)) + # Behavior change: np.asarray densifies. 
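# (Illustrative: np.asarray(arr) now returns the dense float32 values
# [1.0, nan, 3.0], while arr.sp_values holds only the stored points
# [1.0, 3.0], so the old check below no longer holds:)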
+ # tm.assert_numpy_array_equal(arr.sp_values, np.asarray(arr)) + tm.assert_numpy_array_equal( + arr.sp_index.indices, np.array([0, 2], dtype=np.int32) + ) + + dense = arr.to_dense() + assert dense.dtype == np.float32 + tm.assert_numpy_array_equal(dense, data) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_dtype.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_dtype.py new file mode 100644 index 0000000000000000000000000000000000000000..234f4092421e592b9c11b668e25f29fa108f13f0 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_dtype.py @@ -0,0 +1,224 @@ +import re +import warnings + +import numpy as np +import pytest + +import pandas as pd +from pandas import SparseDtype + + +@pytest.mark.parametrize( + "dtype, fill_value", + [ + ("int", 0), + ("float", np.nan), + ("bool", False), + ("object", np.nan), + ("datetime64[ns]", np.datetime64("NaT", "ns")), + ("timedelta64[ns]", np.timedelta64("NaT", "ns")), + ], +) +def test_inferred_dtype(dtype, fill_value): + sparse_dtype = SparseDtype(dtype) + result = sparse_dtype.fill_value + if pd.isna(fill_value): + assert pd.isna(result) and type(result) == type(fill_value) + else: + assert result == fill_value + + +def test_from_sparse_dtype(): + dtype = SparseDtype("float", 0) + result = SparseDtype(dtype) + assert result.fill_value == 0 + + +def test_from_sparse_dtype_fill_value(): + dtype = SparseDtype("int", 1) + result = SparseDtype(dtype, fill_value=2) + expected = SparseDtype("int", 2) + assert result == expected + + +@pytest.mark.parametrize( + "dtype, fill_value", + [ + ("int", None), + ("float", None), + ("bool", None), + ("object", None), + ("datetime64[ns]", None), + ("timedelta64[ns]", None), + ("int", np.nan), + ("float", 0), + ], +) +def test_equal(dtype, fill_value): + a = SparseDtype(dtype, fill_value) + b = SparseDtype(dtype, fill_value) + assert a == b + assert b == a + + +def test_nans_equal(): + a = SparseDtype(float, float("nan")) + b = SparseDtype(float, np.nan) + assert a == b + assert b == a + + +with warnings.catch_warnings(): + msg = "Allowing arbitrary scalar fill_value in SparseDtype is deprecated" + warnings.filterwarnings("ignore", msg, category=FutureWarning) + + tups = [ + (SparseDtype("float64"), SparseDtype("float32")), + (SparseDtype("float64"), SparseDtype("float64", 0)), + (SparseDtype("float64"), SparseDtype("datetime64[ns]", np.nan)), + (SparseDtype(int, pd.NaT), SparseDtype(float, pd.NaT)), + (SparseDtype("float64"), np.dtype("float64")), + ] + + +@pytest.mark.parametrize( + "a, b", + tups, +) +def test_not_equal(a, b): + assert a != b + + +def test_construct_from_string_raises(): + with pytest.raises( + TypeError, match="Cannot construct a 'SparseDtype' from 'not a dtype'" + ): + SparseDtype.construct_from_string("not a dtype") + + +@pytest.mark.parametrize( + "dtype, expected", + [ + (SparseDtype(int), True), + (SparseDtype(float), True), + (SparseDtype(bool), True), + (SparseDtype(object), False), + (SparseDtype(str), False), + ], +) +def test_is_numeric(dtype, expected): + assert dtype._is_numeric is expected + + +def test_str_uses_object(): + result = SparseDtype(str).subtype + assert result == np.dtype("object") + + +@pytest.mark.parametrize( + "string, expected", + [ + ("Sparse[float64]", SparseDtype(np.dtype("float64"))), + ("Sparse[float32]", SparseDtype(np.dtype("float32"))), + ("Sparse[int]", SparseDtype(np.dtype("int"))), + ("Sparse[str]", SparseDtype(np.dtype("str"))), + 
("Sparse[datetime64[ns]]", SparseDtype(np.dtype("datetime64[ns]"))), + ("Sparse", SparseDtype(np.dtype("float"), np.nan)), + ], +) +def test_construct_from_string(string, expected): + result = SparseDtype.construct_from_string(string) + assert result == expected + + +@pytest.mark.parametrize( + "a, b, expected", + [ + (SparseDtype(float, 0.0), SparseDtype(np.dtype("float"), 0.0), True), + (SparseDtype(int, 0), SparseDtype(int, 0), True), + (SparseDtype(float, float("nan")), SparseDtype(float, np.nan), True), + (SparseDtype(float, 0), SparseDtype(float, np.nan), False), + (SparseDtype(int, 0.0), SparseDtype(float, 0.0), False), + ], +) +def test_hash_equal(a, b, expected): + result = a == b + assert result is expected + + result = hash(a) == hash(b) + assert result is expected + + +@pytest.mark.parametrize( + "string, expected", + [ + ("Sparse[int]", "int"), + ("Sparse[int, 0]", "int"), + ("Sparse[int64]", "int64"), + ("Sparse[int64, 0]", "int64"), + ("Sparse[datetime64[ns], 0]", "datetime64[ns]"), + ], +) +def test_parse_subtype(string, expected): + subtype, _ = SparseDtype._parse_subtype(string) + assert subtype == expected + + +@pytest.mark.parametrize( + "string", ["Sparse[int, 1]", "Sparse[float, 0.0]", "Sparse[bool, True]"] +) +def test_construct_from_string_fill_value_raises(string): + with pytest.raises(TypeError, match="fill_value in the string is not"): + SparseDtype.construct_from_string(string) + + +@pytest.mark.parametrize( + "original, dtype, expected", + [ + (SparseDtype(int, 0), float, SparseDtype(float, 0.0)), + (SparseDtype(int, 1), float, SparseDtype(float, 1.0)), + (SparseDtype(int, 1), str, SparseDtype(object, "1")), + (SparseDtype(float, 1.5), int, SparseDtype(int, 1)), + ], +) +def test_update_dtype(original, dtype, expected): + result = original.update_dtype(dtype) + assert result == expected + + +@pytest.mark.parametrize( + "original, dtype, expected_error_msg", + [ + ( + SparseDtype(float, np.nan), + int, + re.escape("Cannot convert non-finite values (NA or inf) to integer"), + ), + ( + SparseDtype(str, "abc"), + int, + r"invalid literal for int\(\) with base 10: ('abc'|np\.str_\('abc'\))", + ), + ], +) +def test_update_dtype_raises(original, dtype, expected_error_msg): + with pytest.raises(ValueError, match=expected_error_msg): + original.update_dtype(dtype) + + +def test_repr(): + # GH-34352 + result = str(SparseDtype("int64", fill_value=0)) + expected = "Sparse[int64, 0]" + assert result == expected + + result = str(SparseDtype(object, fill_value="0")) + expected = "Sparse[object, '0']" + assert result == expected + + +def test_sparse_dtype_subtype_must_be_numpy_dtype(): + # GH#53160 + msg = "SparseDtype subtype must be a numpy dtype" + with pytest.raises(TypeError, match=msg): + SparseDtype("category", fill_value="c") diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_indexing.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..60029ac06ddb47ef0ad4ee35a75fd09ca12f7f53 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_indexing.py @@ -0,0 +1,302 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import SparseDtype +import pandas._testing as tm +from pandas.core.arrays.sparse import SparseArray + + +@pytest.fixture +def arr_data(): + return np.array([np.nan, np.nan, 1, 2, 3, np.nan, 4, 5, np.nan, 6]) + + +@pytest.fixture +def arr(arr_data): + return 
SparseArray(arr_data) + + +class TestGetitem: + def test_getitem(self, arr): + dense = arr.to_dense() + for i, value in enumerate(arr): + tm.assert_almost_equal(value, dense[i]) + tm.assert_almost_equal(arr[-i], dense[-i]) + + def test_getitem_arraylike_mask(self, arr): + arr = SparseArray([0, 1, 2]) + result = arr[[True, False, True]] + expected = SparseArray([0, 2]) + tm.assert_sp_array_equal(result, expected) + + @pytest.mark.parametrize( + "slc", + [ + np.s_[:], + np.s_[1:10], + np.s_[1:100], + np.s_[10:1], + np.s_[:-3], + np.s_[-5:-4], + np.s_[:-12], + np.s_[-12:], + np.s_[2:], + np.s_[2::3], + np.s_[::2], + np.s_[::-1], + np.s_[::-2], + np.s_[1:6:2], + np.s_[:-6:-2], + ], + ) + @pytest.mark.parametrize( + "as_dense", [[np.nan] * 10, [1] * 10, [np.nan] * 5 + [1] * 5, []] + ) + def test_getslice(self, slc, as_dense): + as_dense = np.array(as_dense) + arr = SparseArray(as_dense) + + result = arr[slc] + expected = SparseArray(as_dense[slc]) + + tm.assert_sp_array_equal(result, expected) + + def test_getslice_tuple(self): + dense = np.array([np.nan, 0, 3, 4, 0, 5, np.nan, np.nan, 0]) + + sparse = SparseArray(dense) + res = sparse[(slice(4, None),)] + exp = SparseArray(dense[4:]) + tm.assert_sp_array_equal(res, exp) + + sparse = SparseArray(dense, fill_value=0) + res = sparse[(slice(4, None),)] + exp = SparseArray(dense[4:], fill_value=0) + tm.assert_sp_array_equal(res, exp) + + msg = "too many indices for array" + with pytest.raises(IndexError, match=msg): + sparse[4:, :] + + with pytest.raises(IndexError, match=msg): + # check numpy compat + dense[4:, :] + + def test_boolean_slice_empty(self): + arr = SparseArray([0, 1, 2]) + res = arr[[False, False, False]] + assert res.dtype == arr.dtype + + def test_getitem_bool_sparse_array(self, arr): + # GH 23122 + spar_bool = SparseArray([False, True] * 5, dtype=np.bool_, fill_value=True) + exp = SparseArray([np.nan, 2, np.nan, 5, 6]) + tm.assert_sp_array_equal(arr[spar_bool], exp) + + spar_bool = ~spar_bool + res = arr[spar_bool] + exp = SparseArray([np.nan, 1, 3, 4, np.nan]) + tm.assert_sp_array_equal(res, exp) + + spar_bool = SparseArray( + [False, True, np.nan] * 3, dtype=np.bool_, fill_value=np.nan + ) + res = arr[spar_bool] + exp = SparseArray([np.nan, 3, 5]) + tm.assert_sp_array_equal(res, exp) + + def test_getitem_bool_sparse_array_as_comparison(self): + # GH 45110 + arr = SparseArray([1, 2, 3, 4, np.nan, np.nan], fill_value=np.nan) + res = arr[arr > 2] + exp = SparseArray([3.0, 4.0], fill_value=np.nan) + tm.assert_sp_array_equal(res, exp) + + def test_get_item(self, arr): + zarr = SparseArray([0, 0, 1, 2, 3, 0, 4, 5, 0, 6], fill_value=0) + + assert np.isnan(arr[1]) + assert arr[2] == 1 + assert arr[7] == 5 + + assert zarr[0] == 0 + assert zarr[2] == 1 + assert zarr[7] == 5 + + errmsg = "must be an integer between -10 and 10" + + with pytest.raises(IndexError, match=errmsg): + arr[11] + + with pytest.raises(IndexError, match=errmsg): + arr[-11] + + assert arr[-1] == arr[len(arr) - 1] + + +class TestSetitem: + def test_set_item(self, arr_data): + arr = SparseArray(arr_data).copy() + + def setitem(): + arr[5] = 3 + + def setslice(): + arr[1:5] = 2 + + with pytest.raises(TypeError, match="assignment via setitem"): + setitem() + + with pytest.raises(TypeError, match="assignment via setitem"): + setslice() + + +class TestTake: + def test_take_scalar_raises(self, arr): + msg = "'indices' must be an array, not a scalar '2'." 
+ with pytest.raises(ValueError, match=msg): + arr.take(2) + + def test_take(self, arr_data, arr): + exp = SparseArray(np.take(arr_data, [2, 3])) + tm.assert_sp_array_equal(arr.take([2, 3]), exp) + + exp = SparseArray(np.take(arr_data, [0, 1, 2])) + tm.assert_sp_array_equal(arr.take([0, 1, 2]), exp) + + def test_take_all_empty(self): + sparse = pd.array([0, 0], dtype=SparseDtype("int64")) + result = sparse.take([0, 1], allow_fill=True, fill_value=np.nan) + tm.assert_sp_array_equal(sparse, result) + + def test_take_different_fill_value(self): + # Take with a different fill value shouldn't overwrite the original + sparse = pd.array([0.0], dtype=SparseDtype("float64", fill_value=0.0)) + result = sparse.take([0, -1], allow_fill=True, fill_value=np.nan) + expected = pd.array([0, np.nan], dtype=sparse.dtype) + tm.assert_sp_array_equal(expected, result) + + def test_take_fill_value(self): + data = np.array([1, np.nan, 0, 3, 0]) + sparse = SparseArray(data, fill_value=0) + + exp = SparseArray(np.take(data, [0]), fill_value=0) + tm.assert_sp_array_equal(sparse.take([0]), exp) + + exp = SparseArray(np.take(data, [1, 3, 4]), fill_value=0) + tm.assert_sp_array_equal(sparse.take([1, 3, 4]), exp) + + def test_take_negative(self, arr_data, arr): + exp = SparseArray(np.take(arr_data, [-1])) + tm.assert_sp_array_equal(arr.take([-1]), exp) + + exp = SparseArray(np.take(arr_data, [-4, -3, -2])) + tm.assert_sp_array_equal(arr.take([-4, -3, -2]), exp) + + def test_bad_take(self, arr): + with pytest.raises(IndexError, match="bounds"): + arr.take([11]) + + def test_take_filling(self): + # similar tests as GH 12631 + sparse = SparseArray([np.nan, np.nan, 1, np.nan, 4]) + result = sparse.take(np.array([1, 0, -1])) + expected = SparseArray([np.nan, np.nan, 4]) + tm.assert_sp_array_equal(result, expected) + + # TODO: actionable? + # XXX: test change: fill_value=True -> allow_fill=True + result = sparse.take(np.array([1, 0, -1]), allow_fill=True) + expected = SparseArray([np.nan, np.nan, np.nan]) + tm.assert_sp_array_equal(result, expected) + + # allow_fill=False + result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = SparseArray([np.nan, np.nan, 4]) + tm.assert_sp_array_equal(result, expected) + + msg = "Invalid value in 'indices'" + with pytest.raises(ValueError, match=msg): + sparse.take(np.array([1, 0, -2]), allow_fill=True) + + with pytest.raises(ValueError, match=msg): + sparse.take(np.array([1, 0, -5]), allow_fill=True) + + msg = "out of bounds value in 'indices'" + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, -6])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5]), allow_fill=True) + + def test_take_filling_fill_value(self): + # same tests as GH#12631 + sparse = SparseArray([np.nan, 0, 1, 0, 4], fill_value=0) + result = sparse.take(np.array([1, 0, -1])) + expected = SparseArray([0, np.nan, 4], fill_value=0) + tm.assert_sp_array_equal(result, expected) + + # fill_value + result = sparse.take(np.array([1, 0, -1]), allow_fill=True) + # TODO: actionable? + # XXX: behavior change. + # the old way of filling self.fill_value doesn't follow EA rules. 
+ # It's supposed to be self.dtype.na_value (nan in this case) + expected = SparseArray([0, np.nan, np.nan], fill_value=0) + tm.assert_sp_array_equal(result, expected) + + # allow_fill=False + result = sparse.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True) + expected = SparseArray([0, np.nan, 4], fill_value=0) + tm.assert_sp_array_equal(result, expected) + + msg = "Invalid value in 'indices'." + with pytest.raises(ValueError, match=msg): + sparse.take(np.array([1, 0, -2]), allow_fill=True) + with pytest.raises(ValueError, match=msg): + sparse.take(np.array([1, 0, -5]), allow_fill=True) + + msg = "out of bounds value in 'indices'" + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, -6])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5]), fill_value=True) + + @pytest.mark.parametrize("kind", ["block", "integer"]) + def test_take_filling_all_nan(self, kind): + sparse = SparseArray([np.nan, np.nan, np.nan, np.nan, np.nan], kind=kind) + result = sparse.take(np.array([1, 0, -1])) + expected = SparseArray([np.nan, np.nan, np.nan], kind=kind) + tm.assert_sp_array_equal(result, expected) + + result = sparse.take(np.array([1, 0, -1]), fill_value=True) + expected = SparseArray([np.nan, np.nan, np.nan], kind=kind) + tm.assert_sp_array_equal(result, expected) + + msg = "out of bounds value in 'indices'" + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, -6])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5])) + with pytest.raises(IndexError, match=msg): + sparse.take(np.array([1, 5]), fill_value=True) + + +class TestWhere: + def test_where_retain_fill_value(self): + # GH#45691 don't lose fill_value on _where + arr = SparseArray([np.nan, 1.0], fill_value=0) + + mask = np.array([True, False]) + + res = arr._where(~mask, 1) + exp = SparseArray([1, 1.0], fill_value=0) + tm.assert_sp_array_equal(res, exp) + + ser = pd.Series(arr) + res = ser.where(~mask, 1) + tm.assert_series_equal(res, pd.Series(exp)) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_libsparse.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_libsparse.py new file mode 100644 index 0000000000000000000000000000000000000000..7a77a2064e7e097f924a3901994d131d98164ad6 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_libsparse.py @@ -0,0 +1,551 @@ +import operator + +import numpy as np +import pytest + +import pandas._libs.sparse as splib +import pandas.util._test_decorators as td + +from pandas import Series +import pandas._testing as tm +from pandas.core.arrays.sparse import ( + BlockIndex, + IntIndex, + make_sparse_index, +) + + +@pytest.fixture +def test_length(): + return 20 + + +@pytest.fixture( + params=[ + [ + [0, 7, 15], + [3, 5, 5], + [2, 9, 14], + [2, 3, 5], + [2, 9, 15], + [1, 3, 4], + ], + [ + [0, 5], + [4, 4], + [1], + [4], + [1], + [3], + ], + [ + [0], + [10], + [0, 5], + [3, 7], + [0, 5], + [3, 5], + ], + [ + [10], + [5], + [0, 12], + [5, 3], + [12], + [3], + ], + [ + [0, 10], + [4, 6], + [5, 17], + [4, 2], + [], + [], + ], + [ + [0], + [5], + [], + [], + [], + [], + ], + ], + ids=[ + "plain_case", + "delete_blocks", + "split_blocks", + "skip_block", + "no_intersect", + "one_empty", + ], +) +def cases(request): + return request.param + + +class TestSparseIndexUnion: + @pytest.mark.parametrize( + "xloc, xlen, yloc, ylen, eloc, elen", + [ + 
[[0], [5], [5], [4], [0], [9]], + [[0, 10], [5, 5], [2, 17], [5, 2], [0, 10, 17], [7, 5, 2]], + [[1], [5], [3], [5], [1], [7]], + [[2, 10], [4, 4], [4], [8], [2], [12]], + [[0, 5], [3, 5], [0], [7], [0], [10]], + [[2, 10], [4, 4], [4, 13], [8, 4], [2], [15]], + [[2], [15], [4, 9, 14], [3, 2, 2], [2], [15]], + [[0, 10], [3, 3], [5, 15], [2, 2], [0, 5, 10, 15], [3, 2, 3, 2]], + ], + ) + def test_index_make_union(self, xloc, xlen, yloc, ylen, eloc, elen, test_length): + # Case 1 + # x: ---- + # y: ---- + # r: -------- + # Case 2 + # x: ----- ----- + # y: ----- -- + # Case 3 + # x: ------ + # y: ------- + # r: ---------- + # Case 4 + # x: ------ ----- + # y: ------- + # r: ------------- + # Case 5 + # x: --- ----- + # y: ------- + # r: ------------- + # Case 6 + # x: ------ ----- + # y: ------- --- + # r: ------------- + # Case 7 + # x: ---------------------- + # y: ---- ---- --- + # r: ---------------------- + # Case 8 + # x: ---- --- + # y: --- --- + xindex = BlockIndex(test_length, xloc, xlen) + yindex = BlockIndex(test_length, yloc, ylen) + bresult = xindex.make_union(yindex) + assert isinstance(bresult, BlockIndex) + tm.assert_numpy_array_equal(bresult.blocs, np.array(eloc, dtype=np.int32)) + tm.assert_numpy_array_equal(bresult.blengths, np.array(elen, dtype=np.int32)) + + ixindex = xindex.to_int_index() + iyindex = yindex.to_int_index() + iresult = ixindex.make_union(iyindex) + assert isinstance(iresult, IntIndex) + tm.assert_numpy_array_equal(iresult.indices, bresult.to_int_index().indices) + + def test_int_index_make_union(self): + a = IntIndex(5, np.array([0, 3, 4], dtype=np.int32)) + b = IntIndex(5, np.array([0, 2], dtype=np.int32)) + res = a.make_union(b) + exp = IntIndex(5, np.array([0, 2, 3, 4], np.int32)) + assert res.equals(exp) + + a = IntIndex(5, np.array([], dtype=np.int32)) + b = IntIndex(5, np.array([0, 2], dtype=np.int32)) + res = a.make_union(b) + exp = IntIndex(5, np.array([0, 2], np.int32)) + assert res.equals(exp) + + a = IntIndex(5, np.array([], dtype=np.int32)) + b = IntIndex(5, np.array([], dtype=np.int32)) + res = a.make_union(b) + exp = IntIndex(5, np.array([], np.int32)) + assert res.equals(exp) + + a = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32)) + b = IntIndex(5, np.array([0, 1, 2, 3, 4], dtype=np.int32)) + res = a.make_union(b) + exp = IntIndex(5, np.array([0, 1, 2, 3, 4], np.int32)) + assert res.equals(exp) + + a = IntIndex(5, np.array([0, 1], dtype=np.int32)) + b = IntIndex(4, np.array([0, 1], dtype=np.int32)) + + msg = "Indices must reference same underlying length" + with pytest.raises(ValueError, match=msg): + a.make_union(b) + + +class TestSparseIndexIntersect: + @td.skip_if_windows + def test_intersect(self, cases, test_length): + xloc, xlen, yloc, ylen, eloc, elen = cases + xindex = BlockIndex(test_length, xloc, xlen) + yindex = BlockIndex(test_length, yloc, ylen) + expected = BlockIndex(test_length, eloc, elen) + longer_index = BlockIndex(test_length + 1, yloc, ylen) + + result = xindex.intersect(yindex) + assert result.equals(expected) + result = xindex.to_int_index().intersect(yindex.to_int_index()) + assert result.equals(expected.to_int_index()) + + msg = "Indices must reference same underlying length" + with pytest.raises(Exception, match=msg): + xindex.intersect(longer_index) + with pytest.raises(Exception, match=msg): + xindex.to_int_index().intersect(longer_index.to_int_index()) + + def test_intersect_empty(self): + xindex = IntIndex(4, np.array([], dtype=np.int32)) + yindex = IntIndex(4, np.array([2, 3], dtype=np.int32)) + assert 
xindex.intersect(yindex).equals(xindex) + assert yindex.intersect(xindex).equals(xindex) + + xindex = xindex.to_block_index() + yindex = yindex.to_block_index() + assert xindex.intersect(yindex).equals(xindex) + assert yindex.intersect(xindex).equals(xindex) + + @pytest.mark.parametrize( + "case", + [ + # Argument 2 to "IntIndex" has incompatible type "ndarray[Any, + # dtype[signedinteger[_32Bit]]]"; expected "Sequence[int]" + IntIndex(5, np.array([1, 2], dtype=np.int32)), # type: ignore[arg-type] + IntIndex(5, np.array([0, 2, 4], dtype=np.int32)), # type: ignore[arg-type] + IntIndex(0, np.array([], dtype=np.int32)), # type: ignore[arg-type] + IntIndex(5, np.array([], dtype=np.int32)), # type: ignore[arg-type] + ], + ) + def test_intersect_identical(self, case): + assert case.intersect(case).equals(case) + case = case.to_block_index() + assert case.intersect(case).equals(case) + + +class TestSparseIndexCommon: + def test_int_internal(self): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="integer") + assert isinstance(idx, IntIndex) + assert idx.npoints == 2 + tm.assert_numpy_array_equal(idx.indices, np.array([2, 3], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="integer") + assert isinstance(idx, IntIndex) + assert idx.npoints == 0 + tm.assert_numpy_array_equal(idx.indices, np.array([], dtype=np.int32)) + + idx = make_sparse_index( + 4, np.array([0, 1, 2, 3], dtype=np.int32), kind="integer" + ) + assert isinstance(idx, IntIndex) + assert idx.npoints == 4 + tm.assert_numpy_array_equal(idx.indices, np.array([0, 1, 2, 3], dtype=np.int32)) + + def test_block_internal(self): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 2 + tm.assert_numpy_array_equal(idx.blocs, np.array([2], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([2], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 0 + tm.assert_numpy_array_equal(idx.blocs, np.array([], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 4 + tm.assert_numpy_array_equal(idx.blocs, np.array([0], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([4], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 3 + tm.assert_numpy_array_equal(idx.blocs, np.array([0, 2], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([1, 2], dtype=np.int32)) + + @pytest.mark.parametrize("kind", ["integer", "block"]) + def test_lookup(self, kind): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind=kind) + assert idx.lookup(-1) == -1 + assert idx.lookup(0) == -1 + assert idx.lookup(1) == -1 + assert idx.lookup(2) == 0 + assert idx.lookup(3) == 1 + assert idx.lookup(4) == -1 + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind=kind) + + for i in range(-1, 5): + assert idx.lookup(i) == -1 + + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind) + assert idx.lookup(-1) == -1 + assert idx.lookup(0) == 0 + assert idx.lookup(1) == 1 + assert idx.lookup(2) == 2 + assert idx.lookup(3) == 3 + assert idx.lookup(4) == -1 + + idx = 
make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind) + assert idx.lookup(-1) == -1 + assert idx.lookup(0) == 0 + assert idx.lookup(1) == -1 + assert idx.lookup(2) == 1 + assert idx.lookup(3) == 2 + assert idx.lookup(4) == -1 + + @pytest.mark.parametrize("kind", ["integer", "block"]) + def test_lookup_array(self, kind): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind=kind) + + res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32)) + exp = np.array([-1, -1, 0], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + res = idx.lookup_array(np.array([4, 2, 1, 3], dtype=np.int32)) + exp = np.array([-1, 0, -1, 1], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind=kind) + res = idx.lookup_array(np.array([-1, 0, 2, 4], dtype=np.int32)) + exp = np.array([-1, -1, -1, -1], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind=kind) + res = idx.lookup_array(np.array([-1, 0, 2], dtype=np.int32)) + exp = np.array([-1, 0, 2], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + res = idx.lookup_array(np.array([4, 2, 1, 3], dtype=np.int32)) + exp = np.array([-1, 2, 1, 3], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind=kind) + res = idx.lookup_array(np.array([2, 1, 3, 0], dtype=np.int32)) + exp = np.array([1, -1, 2, 0], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + res = idx.lookup_array(np.array([1, 4, 2, 5], dtype=np.int32)) + exp = np.array([-1, -1, 1, -1], dtype=np.int32) + tm.assert_numpy_array_equal(res, exp) + + @pytest.mark.parametrize( + "idx, expected", + [ + [0, -1], + [5, 0], + [7, 2], + [8, -1], + [9, -1], + [10, -1], + [11, -1], + [12, 3], + [17, 8], + [18, -1], + ], + ) + def test_lookup_basics(self, idx, expected): + bindex = BlockIndex(20, [5, 12], [3, 6]) + assert bindex.lookup(idx) == expected + + iindex = bindex.to_int_index() + assert iindex.lookup(idx) == expected + + +class TestBlockIndex: + def test_block_internal(self): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 2 + tm.assert_numpy_array_equal(idx.blocs, np.array([2], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([2], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 0 + tm.assert_numpy_array_equal(idx.blocs, np.array([], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([0, 1, 2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 4 + tm.assert_numpy_array_equal(idx.blocs, np.array([0], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([4], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([0, 2, 3], dtype=np.int32), kind="block") + assert isinstance(idx, BlockIndex) + assert idx.npoints == 3 + tm.assert_numpy_array_equal(idx.blocs, np.array([0, 2], dtype=np.int32)) + tm.assert_numpy_array_equal(idx.blengths, np.array([1, 2], dtype=np.int32)) + + @pytest.mark.parametrize("i", [5, 10, 100, 101]) + def test_make_block_boundary(self, i): + idx = make_sparse_index(i, np.arange(0, i, 2, dtype=np.int32), kind="block") + + exp = np.arange(0, i, 2, dtype=np.int32) + 
tm.assert_numpy_array_equal(idx.blocs, exp) + tm.assert_numpy_array_equal(idx.blengths, np.ones(len(exp), dtype=np.int32)) + + def test_equals(self): + index = BlockIndex(10, [0, 4], [2, 5]) + + assert index.equals(index) + assert not index.equals(BlockIndex(10, [0, 4], [2, 6])) + + def test_check_integrity(self): + locs = [] + lengths = [] + + # 0-length OK + BlockIndex(0, locs, lengths) + + # also OK even though empty + BlockIndex(1, locs, lengths) + + msg = "Block 0 extends beyond end" + with pytest.raises(ValueError, match=msg): + BlockIndex(10, [5], [10]) + + msg = "Block 0 overlaps" + with pytest.raises(ValueError, match=msg): + BlockIndex(10, [2, 5], [5, 3]) + + def test_to_int_index(self): + locs = [0, 10] + lengths = [4, 6] + exp_inds = [0, 1, 2, 3, 10, 11, 12, 13, 14, 15] + + block = BlockIndex(20, locs, lengths) + dense = block.to_int_index() + + tm.assert_numpy_array_equal(dense.indices, np.array(exp_inds, dtype=np.int32)) + + def test_to_block_index(self): + index = BlockIndex(10, [0, 5], [4, 5]) + assert index.to_block_index() is index + + +class TestIntIndex: + def test_check_integrity(self): + # Too many indices than specified in self.length + msg = "Too many indices" + + with pytest.raises(ValueError, match=msg): + IntIndex(length=1, indices=[1, 2, 3]) + + # No index can be negative. + msg = "No index can be less than zero" + + with pytest.raises(ValueError, match=msg): + IntIndex(length=5, indices=[1, -2, 3]) + + # No index can be negative. + msg = "No index can be less than zero" + + with pytest.raises(ValueError, match=msg): + IntIndex(length=5, indices=[1, -2, 3]) + + # All indices must be less than the length. + msg = "All indices must be less than the length" + + with pytest.raises(ValueError, match=msg): + IntIndex(length=5, indices=[1, 2, 5]) + + with pytest.raises(ValueError, match=msg): + IntIndex(length=5, indices=[1, 2, 6]) + + # Indices must be strictly ascending. 
+ msg = "Indices must be strictly increasing" + + with pytest.raises(ValueError, match=msg): + IntIndex(length=5, indices=[1, 3, 2]) + + with pytest.raises(ValueError, match=msg): + IntIndex(length=5, indices=[1, 3, 3]) + + def test_int_internal(self): + idx = make_sparse_index(4, np.array([2, 3], dtype=np.int32), kind="integer") + assert isinstance(idx, IntIndex) + assert idx.npoints == 2 + tm.assert_numpy_array_equal(idx.indices, np.array([2, 3], dtype=np.int32)) + + idx = make_sparse_index(4, np.array([], dtype=np.int32), kind="integer") + assert isinstance(idx, IntIndex) + assert idx.npoints == 0 + tm.assert_numpy_array_equal(idx.indices, np.array([], dtype=np.int32)) + + idx = make_sparse_index( + 4, np.array([0, 1, 2, 3], dtype=np.int32), kind="integer" + ) + assert isinstance(idx, IntIndex) + assert idx.npoints == 4 + tm.assert_numpy_array_equal(idx.indices, np.array([0, 1, 2, 3], dtype=np.int32)) + + def test_equals(self): + index = IntIndex(10, [0, 1, 2, 3, 4]) + assert index.equals(index) + assert not index.equals(IntIndex(10, [0, 1, 2, 3])) + + def test_to_block_index(self, cases, test_length): + xloc, xlen, yloc, ylen, _, _ = cases + xindex = BlockIndex(test_length, xloc, xlen) + yindex = BlockIndex(test_length, yloc, ylen) + + # see if survive the round trip + xbindex = xindex.to_int_index().to_block_index() + ybindex = yindex.to_int_index().to_block_index() + assert isinstance(xbindex, BlockIndex) + assert xbindex.equals(xindex) + assert ybindex.equals(yindex) + + def test_to_int_index(self): + index = IntIndex(10, [2, 3, 4, 5, 6]) + assert index.to_int_index() is index + + +class TestSparseOperators: + @pytest.mark.parametrize("opname", ["add", "sub", "mul", "truediv", "floordiv"]) + def test_op(self, opname, cases, test_length): + xloc, xlen, yloc, ylen, _, _ = cases + sparse_op = getattr(splib, f"sparse_{opname}_float64") + python_op = getattr(operator, opname) + + xindex = BlockIndex(test_length, xloc, xlen) + yindex = BlockIndex(test_length, yloc, ylen) + + xdindex = xindex.to_int_index() + ydindex = yindex.to_int_index() + + x = np.arange(xindex.npoints) * 10.0 + 1 + y = np.arange(yindex.npoints) * 100.0 + 1 + + xfill = 0 + yfill = 2 + + result_block_vals, rb_index, bfill = sparse_op( + x, xindex, xfill, y, yindex, yfill + ) + result_int_vals, ri_index, ifill = sparse_op( + x, xdindex, xfill, y, ydindex, yfill + ) + + assert rb_index.to_int_index().equals(ri_index) + tm.assert_numpy_array_equal(result_block_vals, result_int_vals) + assert bfill == ifill + + # check versus Series... 
+ xseries = Series(x, xdindex.indices) + xseries = xseries.reindex(np.arange(test_length)).fillna(xfill) + + yseries = Series(y, ydindex.indices) + yseries = yseries.reindex(np.arange(test_length)).fillna(yfill) + + series_result = python_op(xseries, yseries) + series_result = series_result.reindex(ri_index.indices) + + tm.assert_numpy_array_equal(result_block_vals, series_result.values) + tm.assert_numpy_array_equal(result_int_vals, series_result.values) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_reductions.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_reductions.py new file mode 100644 index 0000000000000000000000000000000000000000..f44423d5e635c3f74725db219d48fcaca27c4d53 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_reductions.py @@ -0,0 +1,306 @@ +import numpy as np +import pytest + +from pandas import ( + NaT, + SparseDtype, + Timestamp, + isna, +) +from pandas.core.arrays.sparse import SparseArray + + +class TestReductions: + @pytest.mark.parametrize( + "data,pos,neg", + [ + ([True, True, True], True, False), + ([1, 2, 1], 1, 0), + ([1.0, 2.0, 1.0], 1.0, 0.0), + ], + ) + def test_all(self, data, pos, neg): + # GH#17570 + out = SparseArray(data).all() + assert out + + out = SparseArray(data, fill_value=pos).all() + assert out + + data[1] = neg + out = SparseArray(data).all() + assert not out + + out = SparseArray(data, fill_value=pos).all() + assert not out + + @pytest.mark.parametrize( + "data,pos,neg", + [ + ([True, True, True], True, False), + ([1, 2, 1], 1, 0), + ([1.0, 2.0, 1.0], 1.0, 0.0), + ], + ) + def test_numpy_all(self, data, pos, neg): + # GH#17570 + out = np.all(SparseArray(data)) + assert out + + out = np.all(SparseArray(data, fill_value=pos)) + assert out + + data[1] = neg + out = np.all(SparseArray(data)) + assert not out + + out = np.all(SparseArray(data, fill_value=pos)) + assert not out + + # raises with a different message on py2. 
+ msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.all(SparseArray(data), out=np.array([])) + + @pytest.mark.parametrize( + "data,pos,neg", + [ + ([False, True, False], True, False), + ([0, 2, 0], 2, 0), + ([0.0, 2.0, 0.0], 2.0, 0.0), + ], + ) + def test_any(self, data, pos, neg): + # GH#17570 + out = SparseArray(data).any() + assert out + + out = SparseArray(data, fill_value=pos).any() + assert out + + data[1] = neg + out = SparseArray(data).any() + assert not out + + out = SparseArray(data, fill_value=pos).any() + assert not out + + @pytest.mark.parametrize( + "data,pos,neg", + [ + ([False, True, False], True, False), + ([0, 2, 0], 2, 0), + ([0.0, 2.0, 0.0], 2.0, 0.0), + ], + ) + def test_numpy_any(self, data, pos, neg): + # GH#17570 + out = np.any(SparseArray(data)) + assert out + + out = np.any(SparseArray(data, fill_value=pos)) + assert out + + data[1] = neg + out = np.any(SparseArray(data)) + assert not out + + out = np.any(SparseArray(data, fill_value=pos)) + assert not out + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.any(SparseArray(data), out=out) + + def test_sum(self): + data = np.arange(10).astype(float) + out = SparseArray(data).sum() + assert out == 45.0 + + data[5] = np.nan + out = SparseArray(data, fill_value=2).sum() + assert out == 40.0 + + out = SparseArray(data, fill_value=np.nan).sum() + assert out == 40.0 + + @pytest.mark.parametrize( + "arr", + [np.array([0, 1, np.nan, 1]), np.array([0, 1, 1])], + ) + @pytest.mark.parametrize("fill_value", [0, 1, np.nan]) + @pytest.mark.parametrize("min_count, expected", [(3, 2), (4, np.nan)]) + def test_sum_min_count(self, arr, fill_value, min_count, expected): + # GH#25777 + sparray = SparseArray(arr, fill_value=fill_value) + result = sparray.sum(min_count=min_count) + if np.isnan(expected): + assert np.isnan(result) + else: + assert result == expected + + def test_bool_sum_min_count(self): + spar_bool = SparseArray([False, True] * 5, dtype=np.bool_, fill_value=True) + res = spar_bool.sum(min_count=1) + assert res == 5 + res = spar_bool.sum(min_count=11) + assert isna(res) + + def test_numpy_sum(self): + data = np.arange(10).astype(float) + out = np.sum(SparseArray(data)) + assert out == 45.0 + + data[5] = np.nan + out = np.sum(SparseArray(data, fill_value=2)) + assert out == 40.0 + + out = np.sum(SparseArray(data, fill_value=np.nan)) + assert out == 40.0 + + msg = "the 'dtype' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.sum(SparseArray(data), dtype=np.int64) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.sum(SparseArray(data), out=out) + + def test_mean(self): + data = np.arange(10).astype(float) + out = SparseArray(data).mean() + assert out == 4.5 + + data[5] = np.nan + out = SparseArray(data).mean() + assert out == 40.0 / 9 + + def test_numpy_mean(self): + data = np.arange(10).astype(float) + out = np.mean(SparseArray(data)) + assert out == 4.5 + + data[5] = np.nan + out = np.mean(SparseArray(data)) + assert out == 40.0 / 9 + + msg = "the 'dtype' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.mean(SparseArray(data), dtype=np.int64) + + msg = "the 'out' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.mean(SparseArray(data), out=out) + + +class TestMinMax: + @pytest.mark.parametrize( + "raw_data,max_expected,min_expected", + [ + (np.arange(5.0), [4], [0]), + (-np.arange(5.0), [0], [-4]), 
+ (np.array([0, 1, 2, np.nan, 4]), [4], [0]), + (np.array([np.nan] * 5), [np.nan], [np.nan]), + (np.array([]), [np.nan], [np.nan]), + ], + ) + def test_nan_fill_value(self, raw_data, max_expected, min_expected): + arr = SparseArray(raw_data) + max_result = arr.max() + min_result = arr.min() + assert max_result in max_expected + assert min_result in min_expected + + max_result = arr.max(skipna=False) + min_result = arr.min(skipna=False) + if np.isnan(raw_data).any(): + assert np.isnan(max_result) + assert np.isnan(min_result) + else: + assert max_result in max_expected + assert min_result in min_expected + + @pytest.mark.parametrize( + "fill_value,max_expected,min_expected", + [ + (100, 100, 0), + (-100, 1, -100), + ], + ) + def test_fill_value(self, fill_value, max_expected, min_expected): + arr = SparseArray( + np.array([fill_value, 0, 1]), dtype=SparseDtype("int", fill_value) + ) + max_result = arr.max() + assert max_result == max_expected + + min_result = arr.min() + assert min_result == min_expected + + def test_only_fill_value(self): + fv = 100 + arr = SparseArray(np.array([fv, fv, fv]), dtype=SparseDtype("int", fv)) + assert len(arr._valid_sp_values) == 0 + + assert arr.max() == fv + assert arr.min() == fv + assert arr.max(skipna=False) == fv + assert arr.min(skipna=False) == fv + + @pytest.mark.parametrize("func", ["min", "max"]) + @pytest.mark.parametrize("data", [np.array([]), np.array([np.nan, np.nan])]) + @pytest.mark.parametrize( + "dtype,expected", + [ + (SparseDtype(np.float64, np.nan), np.nan), + (SparseDtype(np.float64, 5.0), np.nan), + (SparseDtype("datetime64[ns]", NaT), NaT), + (SparseDtype("datetime64[ns]", Timestamp("2018-05-05")), NaT), + ], + ) + def test_na_value_if_no_valid_values(self, func, data, dtype, expected): + arr = SparseArray(data, dtype=dtype) + result = getattr(arr, func)() + if expected is NaT: + # TODO: pin down whether we wrap datetime64("NaT") + assert result is NaT or np.isnat(result) + else: + assert np.isnan(result) + + +class TestArgmaxArgmin: + @pytest.mark.parametrize( + "arr,argmax_expected,argmin_expected", + [ + (SparseArray([1, 2, 0, 1, 2]), 1, 2), + (SparseArray([-1, -2, 0, -1, -2]), 2, 1), + (SparseArray([np.nan, 1, 0, 0, np.nan, -1]), 1, 5), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2]), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=-1), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=0), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=1), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=2), 5, 2), + (SparseArray([np.nan, 1, 0, 0, np.nan, 2], fill_value=3), 5, 2), + (SparseArray([0] * 10 + [-1], fill_value=0), 0, 10), + (SparseArray([0] * 10 + [-1], fill_value=-1), 0, 10), + (SparseArray([0] * 10 + [-1], fill_value=1), 0, 10), + (SparseArray([-1] + [0] * 10, fill_value=0), 1, 0), + (SparseArray([1] + [0] * 10, fill_value=0), 0, 1), + (SparseArray([-1] + [0] * 10, fill_value=-1), 1, 0), + (SparseArray([1] + [0] * 10, fill_value=1), 0, 1), + ], + ) + def test_argmax_argmin(self, arr, argmax_expected, argmin_expected): + argmax_result = arr.argmax() + argmin_result = arr.argmin() + assert argmax_result == argmax_expected + assert argmin_result == argmin_expected + + @pytest.mark.parametrize( + "arr,method", + [(SparseArray([]), "argmax"), (SparseArray([]), "argmin")], + ) + def test_empty_array(self, arr, method): + msg = f"attempt to get {method} of an empty sequence" + with pytest.raises(ValueError, match=msg): + arr.argmax() if method == "argmax" else arr.argmin() diff --git 
a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_unary.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_unary.py new file mode 100644 index 0000000000000000000000000000000000000000..c00a73773fdd4795e3d5d7f030a591a060dc3bfc --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/sparse/test_unary.py @@ -0,0 +1,79 @@ +import operator + +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import SparseArray + + +@pytest.mark.filterwarnings("ignore:invalid value encountered in cast:RuntimeWarning") +@pytest.mark.parametrize("fill_value", [0, np.nan]) +@pytest.mark.parametrize("op", [operator.pos, operator.neg]) +def test_unary_op(op, fill_value): + arr = np.array([0, 1, np.nan, 2]) + sparray = SparseArray(arr, fill_value=fill_value) + result = op(sparray) + expected = SparseArray(op(arr), fill_value=op(fill_value)) + tm.assert_sp_array_equal(result, expected) + + +@pytest.mark.parametrize("fill_value", [True, False]) +def test_invert(fill_value): + arr = np.array([True, False, False, True]) + sparray = SparseArray(arr, fill_value=fill_value) + result = ~sparray + expected = SparseArray(~arr, fill_value=not fill_value) + tm.assert_sp_array_equal(result, expected) + + result = ~pd.Series(sparray) + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + result = ~pd.DataFrame({"A": sparray}) + expected = pd.DataFrame({"A": expected}) + tm.assert_frame_equal(result, expected) + + +class TestUnaryMethods: + @pytest.mark.filterwarnings( + "ignore:invalid value encountered in cast:RuntimeWarning" + ) + def test_neg_operator(self): + arr = SparseArray([-1, -2, np.nan, 3], fill_value=np.nan, dtype=np.int8) + res = -arr + exp = SparseArray([1, 2, np.nan, -3], fill_value=np.nan, dtype=np.int8) + tm.assert_sp_array_equal(exp, res) + + arr = SparseArray([-1, -2, 1, 3], fill_value=-1, dtype=np.int8) + res = -arr + exp = SparseArray([1, 2, -1, -3], fill_value=1, dtype=np.int8) + tm.assert_sp_array_equal(exp, res) + + @pytest.mark.filterwarnings( + "ignore:invalid value encountered in cast:RuntimeWarning" + ) + def test_abs_operator(self): + arr = SparseArray([-1, -2, np.nan, 3], fill_value=np.nan, dtype=np.int8) + res = abs(arr) + exp = SparseArray([1, 2, np.nan, 3], fill_value=np.nan, dtype=np.int8) + tm.assert_sp_array_equal(exp, res) + + arr = SparseArray([-1, -2, 1, 3], fill_value=-1, dtype=np.int8) + res = abs(arr) + exp = SparseArray([1, 2, 1, 3], fill_value=1, dtype=np.int8) + tm.assert_sp_array_equal(exp, res) + + def test_invert_operator(self): + arr = SparseArray([False, True, False, True], fill_value=False, dtype=np.bool_) + exp = SparseArray( + np.invert([False, True, False, True]), fill_value=True, dtype=np.bool_ + ) + res = ~arr + tm.assert_sp_array_equal(exp, res) + + arr = SparseArray([0, 1, 0, 2, 3, 0], fill_value=0, dtype=np.int32) + res = ~arr + exp = SparseArray([-1, -2, -1, -3, -4, -1], fill_value=-1, dtype=np.int32) + tm.assert_sp_array_equal(exp, res) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/test_datetimelike.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/test_datetimelike.py new file mode 100644 index 0000000000000000000000000000000000000000..7f85c891afeedac6c0122d884f706fac6fdac42c --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/arrays/test_datetimelike.py @@ -0,0 +1,1340 @@ +from __future__ import annotations + +import re +import warnings + 
+import numpy as np +import pytest + +from pandas._libs import ( + NaT, + OutOfBoundsDatetime, + Timestamp, +) +from pandas._libs.tslibs.dtypes import freq_to_period_freqstr +from pandas.compat.numpy import np_version_gt2 + +import pandas as pd +from pandas import ( + DatetimeIndex, + Period, + PeriodIndex, + TimedeltaIndex, +) +import pandas._testing as tm +from pandas.core.arrays import ( + DatetimeArray, + NumpyExtensionArray, + PeriodArray, + TimedeltaArray, +) + + +# TODO: more freq variants +@pytest.fixture(params=["D", "B", "W", "ME", "QE", "YE"]) +def freqstr(request): + """Fixture returning parametrized frequency in string format.""" + return request.param + + +@pytest.fixture +def period_index(freqstr): + """ + A fixture to provide PeriodIndex objects with different frequencies. + + Most PeriodArray behavior is already tested in PeriodIndex tests, + so here we just test that the PeriodArray behavior matches + the PeriodIndex behavior. + """ + # TODO: non-monotone indexes; NaTs, different start dates + with warnings.catch_warnings(): + # suppress deprecation of Period[B] + warnings.filterwarnings( + "ignore", message="Period with BDay freq", category=FutureWarning + ) + freqstr = freq_to_period_freqstr(1, freqstr) + pi = pd.period_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr) + return pi + + +@pytest.fixture +def datetime_index(freqstr): + """ + A fixture to provide DatetimeIndex objects with different frequencies. + + Most DatetimeArray behavior is already tested in DatetimeIndex tests, + so here we just test that the DatetimeArray behavior matches + the DatetimeIndex behavior. + """ + # TODO: non-monotone indexes; NaTs, different start dates, timezones + dti = pd.date_range(start=Timestamp("2000-01-01"), periods=100, freq=freqstr) + return dti + + +@pytest.fixture +def timedelta_index(): + """ + A fixture to provide TimedeltaIndex objects with different frequencies. + Most TimedeltaArray behavior is already tested in TimedeltaIndex tests, + so here we just test that the TimedeltaArray behavior matches + the TimedeltaIndex behavior. 
+ """ + # TODO: flesh this out + return TimedeltaIndex(["1 Day", "3 Hours", "NaT"]) + + +class SharedTests: + index_cls: type[DatetimeIndex | PeriodIndex | TimedeltaIndex] + + @pytest.fixture + def arr1d(self): + """Fixture returning DatetimeArray with daily frequency.""" + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, freq="D") + else: + arr = self.index_cls(data, freq="D")._data + return arr + + def test_compare_len1_raises(self, arr1d): + # make sure we raise when comparing with different lengths, specific + # to the case where one has length-1, which numpy would broadcast + arr = arr1d + idx = self.index_cls(arr) + + with pytest.raises(ValueError, match="Lengths must match"): + arr == arr[:1] + + # test the index classes while we're at it, GH#23078 + with pytest.raises(ValueError, match="Lengths must match"): + idx <= idx[[0]] + + @pytest.mark.parametrize( + "result", + [ + pd.date_range("2020", periods=3), + pd.date_range("2020", periods=3, tz="UTC"), + pd.timedelta_range("0 days", periods=3), + pd.period_range("2020Q1", periods=3, freq="Q"), + ], + ) + def test_compare_with_Categorical(self, result): + expected = pd.Categorical(result) + assert all(result == expected) + assert not any(result != expected) + + @pytest.mark.parametrize("reverse", [True, False]) + @pytest.mark.parametrize("as_index", [True, False]) + def test_compare_categorical_dtype(self, arr1d, as_index, reverse, ordered): + other = pd.Categorical(arr1d, ordered=ordered) + if as_index: + other = pd.CategoricalIndex(other) + + left, right = arr1d, other + if reverse: + left, right = right, left + + ones = np.ones(arr1d.shape, dtype=bool) + zeros = ~ones + + result = left == right + tm.assert_numpy_array_equal(result, ones) + + result = left != right + tm.assert_numpy_array_equal(result, zeros) + + if not reverse and not as_index: + # Otherwise Categorical raises TypeError bc it is not ordered + # TODO: we should probably get the same behavior regardless? + result = left < right + tm.assert_numpy_array_equal(result, zeros) + + result = left <= right + tm.assert_numpy_array_equal(result, ones) + + result = left > right + tm.assert_numpy_array_equal(result, zeros) + + result = left >= right + tm.assert_numpy_array_equal(result, ones) + + def test_take(self): + data = np.arange(100, dtype="i8") * 24 * 3600 * 10**9 + np.random.default_rng(2).shuffle(data) + + if self.array_cls is PeriodArray: + arr = PeriodArray(data, dtype="period[D]") + else: + arr = self.index_cls(data)._data + idx = self.index_cls._simple_new(arr) + + takers = [1, 4, 94] + result = arr.take(takers) + expected = idx.take(takers) + + tm.assert_index_equal(self.index_cls(result), expected) + + takers = np.array([1, 4, 94]) + result = arr.take(takers) + expected = idx.take(takers) + + tm.assert_index_equal(self.index_cls(result), expected) + + @pytest.mark.parametrize("fill_value", [2, 2.0, Timestamp(2021, 1, 1, 12).time]) + def test_take_fill_raises(self, fill_value, arr1d): + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. 
Got" + with pytest.raises(TypeError, match=msg): + arr1d.take([0, 1], allow_fill=True, fill_value=fill_value) + + def test_take_fill(self, arr1d): + arr = arr1d + + result = arr.take([-1, 1], allow_fill=True, fill_value=None) + assert result[0] is NaT + + result = arr.take([-1, 1], allow_fill=True, fill_value=np.nan) + assert result[0] is NaT + + result = arr.take([-1, 1], allow_fill=True, fill_value=NaT) + assert result[0] is NaT + + @pytest.mark.filterwarnings( + "ignore:Period with BDay freq is deprecated:FutureWarning" + ) + def test_take_fill_str(self, arr1d): + # Cast str fill_value matching other fill_value-taking methods + result = arr1d.take([-1, 1], allow_fill=True, fill_value=str(arr1d[-1])) + expected = arr1d[[-1, 1]] + tm.assert_equal(result, expected) + + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got" + with pytest.raises(TypeError, match=msg): + arr1d.take([-1, 1], allow_fill=True, fill_value="foo") + + def test_concat_same_type(self, arr1d): + arr = arr1d + idx = self.index_cls(arr) + idx = idx.insert(0, NaT) + arr = arr1d + + result = arr._concat_same_type([arr[:-1], arr[1:], arr]) + arr2 = arr.astype(object) + expected = self.index_cls(np.concatenate([arr2[:-1], arr2[1:], arr2])) + + tm.assert_index_equal(self.index_cls(result), expected) + + def test_unbox_scalar(self, arr1d): + result = arr1d._unbox_scalar(arr1d[0]) + expected = arr1d._ndarray.dtype.type + assert isinstance(result, expected) + + result = arr1d._unbox_scalar(NaT) + assert isinstance(result, expected) + + msg = f"'value' should be a {self.scalar_type.__name__}." + with pytest.raises(ValueError, match=msg): + arr1d._unbox_scalar("foo") + + def test_check_compatible_with(self, arr1d): + arr1d._check_compatible_with(arr1d[0]) + arr1d._check_compatible_with(arr1d[:1]) + arr1d._check_compatible_with(NaT) + + def test_scalar_from_string(self, arr1d): + result = arr1d._scalar_from_string(str(arr1d[0])) + assert result == arr1d[0] + + def test_reduce_invalid(self, arr1d): + msg = "does not support reduction 'not a method'" + with pytest.raises(TypeError, match=msg): + arr1d._reduce("not a method") + + @pytest.mark.parametrize("method", ["pad", "backfill"]) + def test_fillna_method_doesnt_change_orig(self, method): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, dtype="period[D]") + else: + arr = self.array_cls._from_sequence(data) + arr[4] = NaT + + fill_value = arr[3] if method == "pad" else arr[5] + + result = arr._pad_or_backfill(method=method) + assert result[4] == fill_value + + # check that the original was not changed + assert arr[4] is NaT + + def test_searchsorted(self): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, dtype="period[D]") + else: + arr = self.array_cls._from_sequence(data) + + # scalar + result = arr.searchsorted(arr[1]) + assert result == 1 + + result = arr.searchsorted(arr[2], side="right") + assert result == 3 + + # own-type + result = arr.searchsorted(arr[1:3]) + expected = np.array([1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + result = arr.searchsorted(arr[1:3], side="right") + expected = np.array([2, 3], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + # GH#29884 match numpy convention on whether NaT goes + # at the end or the beginning + result = arr.searchsorted(NaT) + assert result == 10 + + @pytest.mark.parametrize("box", [None, "index", "series"]) + def 
test_searchsorted_castable_strings(self, arr1d, box, string_storage): + arr = arr1d + if box is None: + pass + elif box == "index": + # Test the equivalent Index.searchsorted method while we're here + arr = self.index_cls(arr) + else: + # Test the equivalent Series.searchsorted method while we're here + arr = pd.Series(arr) + + # scalar + result = arr.searchsorted(str(arr[1])) + assert result == 1 + + result = arr.searchsorted(str(arr[2]), side="right") + assert result == 3 + + result = arr.searchsorted([str(x) for x in arr[1:3]]) + expected = np.array([1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + with pytest.raises( + TypeError, + match=re.escape( + f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', " + "or array of those. Got 'str' instead." + ), + ): + arr.searchsorted("foo") + + with pd.option_context("string_storage", string_storage): + with pytest.raises( + TypeError, + match=re.escape( + f"value should be a '{arr1d._scalar_type.__name__}', 'NaT', " + "or array of those. Got string array instead." + ), + ): + arr.searchsorted([str(arr[1]), "baz"]) + + def test_getitem_near_implementation_bounds(self): + # We only check tz-naive for DTA bc the bounds are slightly different + # for other tzs + i8vals = np.asarray([NaT._value + n for n in range(1, 5)], dtype="i8") + if self.array_cls is PeriodArray: + arr = self.array_cls(i8vals, dtype="period[ns]") + else: + arr = self.index_cls(i8vals, freq="ns")._data + arr[0] # should not raise OutOfBoundsDatetime + + index = pd.Index(arr) + index[0] # should not raise OutOfBoundsDatetime + + ser = pd.Series(arr) + ser[0] # should not raise OutOfBoundsDatetime + + def test_getitem_2d(self, arr1d): + # 2d slicing on a 1D array + expected = type(arr1d)._simple_new( + arr1d._ndarray[:, np.newaxis], dtype=arr1d.dtype + ) + result = arr1d[:, np.newaxis] + tm.assert_equal(result, expected) + + # Lookup on a 2D array + arr2d = expected + expected = type(arr2d)._simple_new(arr2d._ndarray[:3, 0], dtype=arr2d.dtype) + result = arr2d[:3, 0] + tm.assert_equal(result, expected) + + # Scalar lookup + result = arr2d[-1, 0] + expected = arr1d[-1] + assert result == expected + + def test_iter_2d(self, arr1d): + data2d = arr1d._ndarray[:3, np.newaxis] + arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype) + result = list(arr2d) + assert len(result) == 3 + for x in result: + assert isinstance(x, type(arr1d)) + assert x.ndim == 1 + assert x.dtype == arr1d.dtype + + def test_repr_2d(self, arr1d): + data2d = arr1d._ndarray[:3, np.newaxis] + arr2d = type(arr1d)._simple_new(data2d, dtype=arr1d.dtype) + + result = repr(arr2d) + + if isinstance(arr2d, TimedeltaArray): + expected = ( + f"<{type(arr2d).__name__}>\n" + "[\n" + f"['{arr1d[0]._repr_base()}'],\n" + f"['{arr1d[1]._repr_base()}'],\n" + f"['{arr1d[2]._repr_base()}']\n" + "]\n" + f"Shape: (3, 1), dtype: {arr1d.dtype}" + ) + else: + expected = ( + f"<{type(arr2d).__name__}>\n" + "[\n" + f"['{arr1d[0]}'],\n" + f"['{arr1d[1]}'],\n" + f"['{arr1d[2]}']\n" + "]\n" + f"Shape: (3, 1), dtype: {arr1d.dtype}" + ) + + assert result == expected + + def test_setitem(self): + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, dtype="period[D]") + else: + arr = self.index_cls(data, freq="D")._data + + arr[0] = arr[1] + expected = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + expected[0] = expected[1] + + tm.assert_numpy_array_equal(arr.asi8, expected) + + arr[:2] = arr[-2:] + expected[:2] = expected[-2:] + 
tm.assert_numpy_array_equal(arr.asi8, expected) + + @pytest.mark.parametrize( + "box", + [ + pd.Index, + pd.Series, + np.array, + list, + NumpyExtensionArray, + ], + ) + def test_setitem_object_dtype(self, box, arr1d): + expected = arr1d.copy()[::-1] + if expected.dtype.kind in ["m", "M"]: + expected = expected._with_freq(None) + + vals = expected + if box is list: + vals = list(vals) + elif box is np.array: + # if we do np.array(x).astype(object) then dt64 and td64 cast to ints + vals = np.array(vals.astype(object)) + elif box is NumpyExtensionArray: + vals = box(np.asarray(vals, dtype=object)) + else: + vals = box(vals).astype(object) + + arr1d[:] = vals + + tm.assert_equal(arr1d, expected) + + def test_setitem_strs(self, arr1d): + # Check that we parse strs in both scalar and listlike + + # Setting list-like of strs + expected = arr1d.copy() + expected[[0, 1]] = arr1d[-2:] + + result = arr1d.copy() + result[:2] = [str(x) for x in arr1d[-2:]] + tm.assert_equal(result, expected) + + # Same thing but now for just a scalar str + expected = arr1d.copy() + expected[0] = arr1d[-1] + + result = arr1d.copy() + result[0] = str(arr1d[-1]) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("as_index", [True, False]) + def test_setitem_categorical(self, arr1d, as_index): + expected = arr1d.copy()[::-1] + if not isinstance(expected, PeriodArray): + expected = expected._with_freq(None) + + cat = pd.Categorical(arr1d) + if as_index: + cat = pd.CategoricalIndex(cat) + + arr1d[:] = cat[::-1] + + tm.assert_equal(arr1d, expected) + + def test_setitem_raises(self, arr1d): + arr = arr1d[:10] + val = arr[0] + + with pytest.raises(IndexError, match="index 12 is out of bounds"): + arr[12] = val + + with pytest.raises(TypeError, match="value should be a.* 'object'"): + arr[0] = object() + + msg = "cannot set using a list-like indexer with a different length" + with pytest.raises(ValueError, match=msg): + # GH#36339 + arr[[]] = [arr[1]] + + msg = "cannot set using a slice indexer with a different length than" + with pytest.raises(ValueError, match=msg): + # GH#36339 + arr[1:1] = arr[:3] + + @pytest.mark.parametrize("box", [list, np.array, pd.Index, pd.Series]) + def test_setitem_numeric_raises(self, arr1d, box): + # We don't cast e.g. int64 to our own dtype for setitem + + msg = ( + f"value should be a '{arr1d._scalar_type.__name__}', " + "'NaT', or array of those. 
Got" + ) + with pytest.raises(TypeError, match=msg): + arr1d[:2] = box([0, 1]) + + with pytest.raises(TypeError, match=msg): + arr1d[:2] = box([0.0, 1.0]) + + def test_inplace_arithmetic(self): + # GH#24115 check that iadd and isub are actually in-place + data = np.arange(10, dtype="i8") * 24 * 3600 * 10**9 + if self.array_cls is PeriodArray: + arr = self.array_cls(data, dtype="period[D]") + else: + arr = self.index_cls(data, freq="D")._data + + expected = arr + pd.Timedelta(days=1) + arr += pd.Timedelta(days=1) + tm.assert_equal(arr, expected) + + expected = arr - pd.Timedelta(days=1) + arr -= pd.Timedelta(days=1) + tm.assert_equal(arr, expected) + + def test_shift_fill_int_deprecated(self, arr1d): + # GH#31971, enforced in 2.0 + with pytest.raises(TypeError, match="value should be a"): + arr1d.shift(1, fill_value=1) + + def test_median(self, arr1d): + arr = arr1d + if len(arr) % 2 == 0: + # make it easier to define `expected` + arr = arr[:-1] + + expected = arr[len(arr) // 2] + + result = arr.median() + assert type(result) is type(expected) + assert result == expected + + arr[len(arr) // 2] = NaT + if not isinstance(expected, Period): + expected = arr[len(arr) // 2 - 1 : len(arr) // 2 + 2].mean() + + assert arr.median(skipna=False) is NaT + + result = arr.median() + assert type(result) is type(expected) + assert result == expected + + assert arr[:0].median() is NaT + assert arr[:0].median(skipna=False) is NaT + + # 2d Case + arr2 = arr.reshape(-1, 1) + + result = arr2.median(axis=None) + assert type(result) is type(expected) + assert result == expected + + assert arr2.median(axis=None, skipna=False) is NaT + + result = arr2.median(axis=0) + expected2 = type(arr)._from_sequence([expected], dtype=arr.dtype) + tm.assert_equal(result, expected2) + + result = arr2.median(axis=0, skipna=False) + expected2 = type(arr)._from_sequence([NaT], dtype=arr.dtype) + tm.assert_equal(result, expected2) + + result = arr2.median(axis=1) + tm.assert_equal(result, arr) + + result = arr2.median(axis=1, skipna=False) + tm.assert_equal(result, arr) + + def test_from_integer_array(self): + arr = np.array([1, 2, 3], dtype=np.int64) + data = pd.array(arr, dtype="Int64") + if self.array_cls is PeriodArray: + expected = self.array_cls(arr, dtype=self.example_dtype) + result = self.array_cls(data, dtype=self.example_dtype) + else: + expected = self.array_cls._from_sequence(arr, dtype=self.example_dtype) + result = self.array_cls._from_sequence(data, dtype=self.example_dtype) + + tm.assert_extension_array_equal(result, expected) + + +class TestDatetimeArray(SharedTests): + index_cls = DatetimeIndex + array_cls = DatetimeArray + scalar_type = Timestamp + example_dtype = "M8[ns]" + + @pytest.fixture + def arr1d(self, tz_naive_fixture, freqstr): + """ + Fixture returning DatetimeArray with parametrized frequency and + timezones + """ + tz = tz_naive_fixture + dti = pd.date_range("2016-01-01 01:01:00", periods=5, freq=freqstr, tz=tz) + dta = dti._data + return dta + + def test_round(self, arr1d): + # GH#24064 + dti = self.index_cls(arr1d) + + result = dti.round(freq="2min") + expected = dti - pd.Timedelta(minutes=1) + expected = expected._with_freq(None) + tm.assert_index_equal(result, expected) + + dta = dti._data + result = dta.round(freq="2min") + expected = expected._data._with_freq(None) + tm.assert_datetime_array_equal(result, expected) + + def test_array_interface(self, datetime_index): + arr = datetime_index._data + copy_false = None if np_version_gt2 else False + + # default asarray gives the same underlying data 
(for tz naive) + result = np.asarray(arr) + expected = arr._ndarray + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, copy=copy_false) + assert result is expected + tm.assert_numpy_array_equal(result, expected) + + # specifying M8[ns] gives the same result as default + result = np.asarray(arr, dtype="datetime64[ns]") + expected = arr._ndarray + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, dtype="datetime64[ns]", copy=copy_false) + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, dtype="datetime64[ns]") + assert result is not expected + tm.assert_numpy_array_equal(result, expected) + + # to object dtype + result = np.asarray(arr, dtype=object) + expected = np.array(list(arr), dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # to other dtype always copies + result = np.asarray(arr, dtype="int64") + assert result is not arr.asi8 + assert not np.may_share_memory(arr, result) + expected = arr.asi8.copy() + tm.assert_numpy_array_equal(result, expected) + + # other dtypes handled by numpy + for dtype in ["float64", str]: + result = np.asarray(arr, dtype=dtype) + expected = np.asarray(arr).astype(dtype) + tm.assert_numpy_array_equal(result, expected) + + def test_array_object_dtype(self, arr1d): + # GH#23524 + arr = arr1d + dti = self.index_cls(arr1d) + + expected = np.array(list(dti)) + + result = np.array(arr, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # also test the DatetimeIndex method while we're at it + result = np.array(dti, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_array_tz(self, arr1d): + # GH#23524 + arr = arr1d + dti = self.index_cls(arr1d) + copy_false = None if np_version_gt2 else False + + expected = dti.asi8.view("M8[ns]") + result = np.array(arr, dtype="M8[ns]") + tm.assert_numpy_array_equal(result, expected) + + result = np.array(arr, dtype="datetime64[ns]") + tm.assert_numpy_array_equal(result, expected) + + # check that we are not making copies when setting copy=copy_false + result = np.array(arr, dtype="M8[ns]", copy=copy_false) + assert result.base is expected.base + assert result.base is not None + result = np.array(arr, dtype="datetime64[ns]", copy=copy_false) + assert result.base is expected.base + assert result.base is not None + + def test_array_i8_dtype(self, arr1d): + arr = arr1d + dti = self.index_cls(arr1d) + copy_false = None if np_version_gt2 else False + + expected = dti.asi8 + result = np.array(arr, dtype="i8") + tm.assert_numpy_array_equal(result, expected) + + result = np.array(arr, dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) + + # check that we are still making copies when setting copy=copy_false + result = np.array(arr, dtype="i8", copy=copy_false) + assert result.base is not expected.base + assert result.base is None + + def test_from_array_keeps_base(self): + # Ensure that DatetimeArray._ndarray.base isn't lost. 
+ arr = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]") + dta = DatetimeArray._from_sequence(arr) + + assert dta._ndarray is arr + dta = DatetimeArray._from_sequence(arr[:0]) + assert dta._ndarray.base is arr + + def test_from_dti(self, arr1d): + arr = arr1d + dti = self.index_cls(arr1d) + assert list(dti) == list(arr) + + # Check that Index.__new__ knows what to do with DatetimeArray + dti2 = pd.Index(arr) + assert isinstance(dti2, DatetimeIndex) + assert list(dti2) == list(arr) + + def test_astype_object(self, arr1d): + arr = arr1d + dti = self.index_cls(arr1d) + + asobj = arr.astype("O") + assert isinstance(asobj, np.ndarray) + assert asobj.dtype == "O" + assert list(asobj) == list(dti) + + @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") + def test_to_period(self, datetime_index, freqstr): + dti = datetime_index + arr = dti._data + + freqstr = freq_to_period_freqstr(1, freqstr) + expected = dti.to_period(freq=freqstr) + result = arr.to_period(freq=freqstr) + assert isinstance(result, PeriodArray) + + tm.assert_equal(result, expected._data) + + def test_to_period_2d(self, arr1d): + arr2d = arr1d.reshape(1, -1) + + warn = None if arr1d.tz is None else UserWarning + with tm.assert_produces_warning(warn): + result = arr2d.to_period("D") + expected = arr1d.to_period("D").reshape(1, -1) + tm.assert_period_array_equal(result, expected) + + @pytest.mark.parametrize("propname", DatetimeArray._bool_ops) + def test_bool_properties(self, arr1d, propname): + # in this case _bool_ops is just `is_leap_year` + dti = self.index_cls(arr1d) + arr = arr1d + assert dti.freq == arr.freq + + result = getattr(arr, propname) + expected = np.array(getattr(dti, propname), dtype=result.dtype) + + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("propname", DatetimeArray._field_ops) + def test_int_properties(self, arr1d, propname): + dti = self.index_cls(arr1d) + arr = arr1d + + result = getattr(arr, propname) + expected = np.array(getattr(dti, propname), dtype=result.dtype) + + tm.assert_numpy_array_equal(result, expected) + + def test_take_fill_valid(self, arr1d, fixed_now_ts): + arr = arr1d + dti = self.index_cls(arr1d) + + now = fixed_now_ts.tz_localize(dti.tz) + result = arr.take([-1, 1], allow_fill=True, fill_value=now) + assert result[0] == now + + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. Got" + with pytest.raises(TypeError, match=msg): + # fill_value Timedelta invalid + arr.take([-1, 1], allow_fill=True, fill_value=now - now) + + with pytest.raises(TypeError, match=msg): + # fill_value Period invalid + arr.take([-1, 1], allow_fill=True, fill_value=Period("2014Q1")) + + tz = None if dti.tz is not None else "US/Eastern" + now = fixed_now_ts.tz_localize(tz) + msg = "Cannot compare tz-naive and tz-aware datetime-like objects" + with pytest.raises(TypeError, match=msg): + # Timestamp with mismatched tz-awareness + arr.take([-1, 1], allow_fill=True, fill_value=now) + + value = NaT._value + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. 
Got" + with pytest.raises(TypeError, match=msg): + # require NaT, not iNaT, as it could be confused with an integer + arr.take([-1, 1], allow_fill=True, fill_value=value) + + value = np.timedelta64("NaT", "ns") + with pytest.raises(TypeError, match=msg): + # require appropriate-dtype if we have a NA value + arr.take([-1, 1], allow_fill=True, fill_value=value) + + if arr.tz is not None: + # GH#37356 + # Assuming here that arr1d fixture does not include Australia/Melbourne + value = fixed_now_ts.tz_localize("Australia/Melbourne") + result = arr.take([-1, 1], allow_fill=True, fill_value=value) + + expected = arr.take( + [-1, 1], + allow_fill=True, + fill_value=value.tz_convert(arr.dtype.tz), + ) + tm.assert_equal(result, expected) + + def test_concat_same_type_invalid(self, arr1d): + # different timezones + arr = arr1d + + if arr.tz is None: + other = arr.tz_localize("UTC") + else: + other = arr.tz_localize(None) + + with pytest.raises(ValueError, match="to_concat must have the same"): + arr._concat_same_type([arr, other]) + + def test_concat_same_type_different_freq(self, unit): + # we *can* concatenate DTI with different freqs. + a = pd.date_range("2000", periods=2, freq="D", tz="US/Central", unit=unit)._data + b = pd.date_range("2000", periods=2, freq="h", tz="US/Central", unit=unit)._data + result = DatetimeArray._concat_same_type([a, b]) + expected = ( + pd.to_datetime( + [ + "2000-01-01 00:00:00", + "2000-01-02 00:00:00", + "2000-01-01 00:00:00", + "2000-01-01 01:00:00", + ] + ) + .tz_localize("US/Central") + .as_unit(unit) + ._data + ) + + tm.assert_datetime_array_equal(result, expected) + + def test_strftime(self, arr1d): + arr = arr1d + + result = arr.strftime("%Y %b") + expected = np.array([ts.strftime("%Y %b") for ts in arr], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_strftime_nat(self): + # GH 29578 + arr = DatetimeIndex(["2019-01-01", NaT])._data + + result = arr.strftime("%Y-%m-%d") + expected = np.array(["2019-01-01", np.nan], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +class TestTimedeltaArray(SharedTests): + index_cls = TimedeltaIndex + array_cls = TimedeltaArray + scalar_type = pd.Timedelta + example_dtype = "m8[ns]" + + def test_from_tdi(self): + tdi = TimedeltaIndex(["1 Day", "3 Hours"]) + arr = tdi._data + assert list(arr) == list(tdi) + + # Check that Index.__new__ knows what to do with TimedeltaArray + tdi2 = pd.Index(arr) + assert isinstance(tdi2, TimedeltaIndex) + assert list(tdi2) == list(arr) + + def test_astype_object(self): + tdi = TimedeltaIndex(["1 Day", "3 Hours"]) + arr = tdi._data + asobj = arr.astype("O") + assert isinstance(asobj, np.ndarray) + assert asobj.dtype == "O" + assert list(asobj) == list(tdi) + + def test_to_pytimedelta(self, timedelta_index): + tdi = timedelta_index + arr = tdi._data + + expected = tdi.to_pytimedelta() + result = arr.to_pytimedelta() + + tm.assert_numpy_array_equal(result, expected) + + def test_total_seconds(self, timedelta_index): + tdi = timedelta_index + arr = tdi._data + + expected = tdi.total_seconds() + result = arr.total_seconds() + + tm.assert_numpy_array_equal(result, expected.values) + + @pytest.mark.parametrize("propname", TimedeltaArray._field_ops) + def test_int_properties(self, timedelta_index, propname): + tdi = timedelta_index + arr = tdi._data + + result = getattr(arr, propname) + expected = np.array(getattr(tdi, propname), dtype=result.dtype) + + tm.assert_numpy_array_equal(result, expected) + + def test_array_interface(self, timedelta_index): + arr = 
timedelta_index._data + copy_false = None if np_version_gt2 else False + + # default asarray gives the same underlying data + result = np.asarray(arr) + expected = arr._ndarray + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, copy=copy_false) + assert result is expected + tm.assert_numpy_array_equal(result, expected) + + # specifying m8[ns] gives the same result as default + result = np.asarray(arr, dtype="timedelta64[ns]") + expected = arr._ndarray + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, dtype="timedelta64[ns]", copy=copy_false) + assert result is expected + tm.assert_numpy_array_equal(result, expected) + result = np.array(arr, dtype="timedelta64[ns]") + assert result is not expected + tm.assert_numpy_array_equal(result, expected) + + # to object dtype + result = np.asarray(arr, dtype=object) + expected = np.array(list(arr), dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # to other dtype always copies + result = np.asarray(arr, dtype="int64") + assert result is not arr.asi8 + assert not np.may_share_memory(arr, result) + expected = arr.asi8.copy() + tm.assert_numpy_array_equal(result, expected) + + # other dtypes handled by numpy + for dtype in ["float64", str]: + result = np.asarray(arr, dtype=dtype) + expected = np.asarray(arr).astype(dtype) + tm.assert_numpy_array_equal(result, expected) + + def test_take_fill_valid(self, timedelta_index, fixed_now_ts): + tdi = timedelta_index + arr = tdi._data + + td1 = pd.Timedelta(days=1) + result = arr.take([-1, 1], allow_fill=True, fill_value=td1) + assert result[0] == td1 + + value = fixed_now_ts + msg = f"value should be a '{arr._scalar_type.__name__}' or 'NaT'. Got" + with pytest.raises(TypeError, match=msg): + # fill_value Timestamp invalid + arr.take([0, 1], allow_fill=True, fill_value=value) + + value = fixed_now_ts.to_period("D") + with pytest.raises(TypeError, match=msg): + # fill_value Period invalid + arr.take([0, 1], allow_fill=True, fill_value=value) + + value = np.datetime64("NaT", "ns") + with pytest.raises(TypeError, match=msg): + # require appropriate-dtype if we have a NA value + arr.take([-1, 1], allow_fill=True, fill_value=value) + + +@pytest.mark.filterwarnings(r"ignore:Period with BDay freq is deprecated:FutureWarning") +@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning") +class TestPeriodArray(SharedTests): + index_cls = PeriodIndex + array_cls = PeriodArray + scalar_type = Period + example_dtype = PeriodIndex([], freq="W").dtype + + @pytest.fixture + def arr1d(self, period_index): + """ + Fixture returning DatetimeArray from parametrized PeriodIndex objects + """ + return period_index._data + + def test_from_pi(self, arr1d): + pi = self.index_cls(arr1d) + arr = arr1d + assert list(arr) == list(pi) + + # Check that Index.__new__ knows what to do with PeriodArray + pi2 = pd.Index(arr) + assert isinstance(pi2, PeriodIndex) + assert list(pi2) == list(arr) + + def test_astype_object(self, arr1d): + pi = self.index_cls(arr1d) + arr = arr1d + asobj = arr.astype("O") + assert isinstance(asobj, np.ndarray) + assert asobj.dtype == "O" + assert list(asobj) == list(pi) + + def test_take_fill_valid(self, arr1d): + arr = arr1d + + value = NaT._value + msg = f"value should be a '{arr1d._scalar_type.__name__}' or 'NaT'. 
Got" + with pytest.raises(TypeError, match=msg): + # require NaT, not iNaT, as it could be confused with an integer + arr.take([-1, 1], allow_fill=True, fill_value=value) + + value = np.timedelta64("NaT", "ns") + with pytest.raises(TypeError, match=msg): + # require appropriate-dtype if we have a NA value + arr.take([-1, 1], allow_fill=True, fill_value=value) + + @pytest.mark.parametrize("how", ["S", "E"]) + def test_to_timestamp(self, how, arr1d): + pi = self.index_cls(arr1d) + arr = arr1d + + expected = DatetimeIndex(pi.to_timestamp(how=how))._data + result = arr.to_timestamp(how=how) + assert isinstance(result, DatetimeArray) + + tm.assert_equal(result, expected) + + def test_to_timestamp_roundtrip_bday(self): + # Case where infer_freq inside would choose "D" instead of "B" + dta = pd.date_range("2021-10-18", periods=3, freq="B")._data + parr = dta.to_period() + result = parr.to_timestamp() + assert result.freq == "B" + tm.assert_extension_array_equal(result, dta) + + dta2 = dta[::2] + parr2 = dta2.to_period() + result2 = parr2.to_timestamp() + assert result2.freq == "2B" + tm.assert_extension_array_equal(result2, dta2) + + parr3 = dta.to_period("2B") + result3 = parr3.to_timestamp() + assert result3.freq == "B" + tm.assert_extension_array_equal(result3, dta) + + def test_to_timestamp_out_of_bounds(self): + # GH#19643 previously overflowed silently + pi = pd.period_range("1500", freq="Y", periods=3) + msg = "Out of bounds nanosecond timestamp: 1500-01-01 00:00:00" + with pytest.raises(OutOfBoundsDatetime, match=msg): + pi.to_timestamp() + + with pytest.raises(OutOfBoundsDatetime, match=msg): + pi._data.to_timestamp() + + @pytest.mark.parametrize("propname", PeriodArray._bool_ops) + def test_bool_properties(self, arr1d, propname): + # in this case _bool_ops is just `is_leap_year` + pi = self.index_cls(arr1d) + arr = arr1d + + result = getattr(arr, propname) + expected = np.array(getattr(pi, propname)) + + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("propname", PeriodArray._field_ops) + def test_int_properties(self, arr1d, propname): + pi = self.index_cls(arr1d) + arr = arr1d + + result = getattr(arr, propname) + expected = np.array(getattr(pi, propname)) + + tm.assert_numpy_array_equal(result, expected) + + def test_array_interface(self, arr1d): + arr = arr1d + + # default asarray gives objects + result = np.asarray(arr) + expected = np.array(list(arr), dtype=object) + tm.assert_numpy_array_equal(result, expected) + + # to object dtype (same as default) + result = np.asarray(arr, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = np.asarray(arr, dtype="int64") + tm.assert_numpy_array_equal(result, arr.asi8) + + # to other dtypes + msg = r"float\(\) argument must be a string or a( real)? 
number, not 'Period'" + with pytest.raises(TypeError, match=msg): + np.asarray(arr, dtype="float64") + + result = np.asarray(arr, dtype="S20") + expected = np.asarray(arr).astype("S20") + tm.assert_numpy_array_equal(result, expected) + + def test_strftime(self, arr1d): + arr = arr1d + + result = arr.strftime("%Y") + expected = np.array([per.strftime("%Y") for per in arr], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_strftime_nat(self): + # GH 29578 + arr = PeriodArray(PeriodIndex(["2019-01-01", NaT], dtype="period[D]")) + + result = arr.strftime("%Y-%m-%d") + expected = np.array(["2019-01-01", np.nan], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "arr,casting_nats", + [ + ( + TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data, + (NaT, np.timedelta64("NaT", "ns")), + ), + ( + pd.date_range("2000-01-01", periods=3, freq="D")._data, + (NaT, np.datetime64("NaT", "ns")), + ), + (pd.period_range("2000-01-01", periods=3, freq="D")._data, (NaT,)), + ], + ids=lambda x: type(x).__name__, +) +def test_casting_nat_setitem_array(arr, casting_nats): + expected = type(arr)._from_sequence([NaT, arr[1], arr[2]], dtype=arr.dtype) + + for nat in casting_nats: + arr = arr.copy() + arr[0] = nat + tm.assert_equal(arr, expected) + + +@pytest.mark.parametrize( + "arr,non_casting_nats", + [ + ( + TimedeltaIndex(["1 Day", "3 Hours", "NaT"])._data, + (np.datetime64("NaT", "ns"), NaT._value), + ), + ( + pd.date_range("2000-01-01", periods=3, freq="D")._data, + (np.timedelta64("NaT", "ns"), NaT._value), + ), + ( + pd.period_range("2000-01-01", periods=3, freq="D")._data, + (np.datetime64("NaT", "ns"), np.timedelta64("NaT", "ns"), NaT._value), + ), + ], + ids=lambda x: type(x).__name__, +) +def test_invalid_nat_setitem_array(arr, non_casting_nats): + msg = ( + "value should be a '(Timestamp|Timedelta|Period)', 'NaT', or array of those. " + "Got '(timedelta64|datetime64|int)' instead." 
+ ) + + for nat in non_casting_nats: + with pytest.raises(TypeError, match=msg): + arr[0] = nat + + +@pytest.mark.parametrize( + "arr", + [ + pd.date_range("2000", periods=4).array, + pd.timedelta_range("2000", periods=4).array, + ], +) +def test_to_numpy_extra(arr): + arr[0] = NaT + original = arr.copy() + + result = arr.to_numpy() + assert np.isnan(result[0]) + + result = arr.to_numpy(dtype="int64") + assert result[0] == -9223372036854775808 + + result = arr.to_numpy(dtype="int64", na_value=0) + assert result[0] == 0 + + result = arr.to_numpy(na_value=arr[1].to_numpy()) + assert result[0] == result[1] + + result = arr.to_numpy(na_value=arr[1].to_numpy(copy=False)) + assert result[0] == result[1] + + tm.assert_equal(arr, original) + + +@pytest.mark.parametrize("as_index", [True, False]) +@pytest.mark.parametrize( + "values", + [ + pd.to_datetime(["2020-01-01", "2020-02-01"]), + pd.to_timedelta([1, 2], unit="D"), + PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"), + ], +) +@pytest.mark.parametrize( + "klass", + [ + list, + np.array, + pd.array, + pd.Series, + pd.Index, + pd.Categorical, + pd.CategoricalIndex, + ], +) +def test_searchsorted_datetimelike_with_listlike(values, klass, as_index): + # https://github.com/pandas-dev/pandas/issues/32762 + if not as_index: + values = values._data + + result = values.searchsorted(klass(values)) + expected = np.array([0, 1], dtype=result.dtype) + + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values", + [ + pd.to_datetime(["2020-01-01", "2020-02-01"]), + pd.to_timedelta([1, 2], unit="D"), + PeriodIndex(["2020-01-01", "2020-02-01"], freq="D"), + ], +) +@pytest.mark.parametrize( + "arg", [[1, 2], ["a", "b"], [Timestamp("2020-01-01", tz="Europe/London")] * 2] +) +def test_searchsorted_datetimelike_with_listlike_invalid_dtype(values, arg): + # https://github.com/pandas-dev/pandas/issues/32762 + msg = "[Unexpected type|Cannot compare]" + with pytest.raises(TypeError, match=msg): + values.searchsorted(arg) + + +@pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series]) +def test_period_index_construction_from_strings(klass): + # https://github.com/pandas-dev/pandas/issues/26109 + strings = ["2020Q1", "2020Q2"] * 2 + data = klass(strings) + result = PeriodIndex(data, freq="Q") + expected = PeriodIndex([Period(s) for s in strings]) + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"]) +def test_from_pandas_array(dtype): + # GH#24615 + data = np.array([1, 2, 3], dtype=dtype) + arr = NumpyExtensionArray(data) + + cls = {"M8[ns]": DatetimeArray, "m8[ns]": TimedeltaArray}[dtype] + + depr_msg = f"{cls.__name__}.__init__ is deprecated" + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + result = cls(arr) + expected = cls(data) + tm.assert_extension_array_equal(result, expected) + + result = cls._from_sequence(arr, dtype=dtype) + expected = cls._from_sequence(data, dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + func = {"M8[ns]": pd.to_datetime, "m8[ns]": pd.to_timedelta}[dtype] + result = func(arr).array + expected = func(data).array + tm.assert_equal(result, expected) + + # Let's check the Indexes while we're here + idx_cls = {"M8[ns]": DatetimeIndex, "m8[ns]": TimedeltaIndex}[dtype] + result = idx_cls(arr) + expected = idx_cls(data) + tm.assert_index_equal(result, expected) diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__init__.py 
b/llmeval-env/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/llmeval-env/lib/python3.10/site-packages/pandas/tests/tseries/offsets/test_ticks.py b/llmeval-env/lib/python3.10/site-packages/pandas/tests/tseries/offsets/test_ticks.py new file mode 100644 index 0000000000000000000000000000000000000000..399b7038d3426a9f3e4916927bd7e38ac3996531 --- /dev/null +++ b/llmeval-env/lib/python3.10/site-packages/pandas/tests/tseries/offsets/test_ticks.py @@ -0,0 +1,405 @@ +""" +Tests for offsets.Tick and subclasses +""" +from datetime import ( + datetime, + timedelta, +) + +from hypothesis import ( + assume, + example, + given, +) +import numpy as np +import pytest + +from pandas._libs.tslibs.offsets import delta_to_tick +from pandas.errors import OutOfBoundsTimedelta + +from pandas import ( + Timedelta, + Timestamp, +) +import pandas._testing as tm +from pandas._testing._hypothesis import INT_NEG_999_TO_POS_999 +from pandas.tests.tseries.offsets.common import assert_offset_equal + +from pandas.tseries import offsets +from pandas.tseries.offsets import ( + Hour, + Micro, + Milli, + Minute, + Nano, + Second, +) + +# --------------------------------------------------------------------- +# Test Helpers + +tick_classes = [Hour, Minute, Second, Milli, Micro, Nano] + + +# --------------------------------------------------------------------- + + +def test_apply_ticks(): + result = offsets.Hour(3) + offsets.Hour(4) + exp = offsets.Hour(7) + assert result == exp + + +def test_delta_to_tick(): + delta = timedelta(3) + + tick = delta_to_tick(delta) + assert tick == offsets.Day(3) + + td = Timedelta(nanoseconds=5) + tick = delta_to_tick(td) + assert tick == Nano(5) + + +@pytest.mark.parametrize("cls", tick_classes) +@example(n=2, m=3) +@example(n=800, m=300) +@example(n=1000, m=5) +@given(n=INT_NEG_999_TO_POS_999, m=INT_NEG_999_TO_POS_999) +def test_tick_add_sub(cls, n, m): + # For all Tick subclasses and all integers n, m, we should have + # tick(n) + tick(m) == tick(n+m) + # tick(n) - tick(m) == tick(n-m) + left = cls(n) + right = cls(m) + expected = cls(n + m) + + assert left + right == expected + + expected = cls(n - m) + assert left - right == expected + + +@pytest.mark.arm_slow +@pytest.mark.parametrize("cls", tick_classes) +@example(n=2, m=3) +@given(n=INT_NEG_999_TO_POS_999, m=INT_NEG_999_TO_POS_999) +def test_tick_equality(cls, n, m): + assume(m != n) + # tick == tock iff tick.n == tock.n + left = cls(n) + right = cls(m) + assert left != right + + right = cls(n) + assert left == right + assert not left != right + + if n != 0: + assert cls(n) != cls(-n) + + +# --------------------------------------------------------------------- + + +def test_Hour(): + assert_offset_equal(Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 1)) + assert_offset_equal(Hour(-1), datetime(2010, 1, 1, 1), datetime(2010, 1, 1)) + assert_offset_equal(2 * Hour(), datetime(2010, 1, 
1), datetime(2010, 1, 1, 2)) + assert_offset_equal(-1 * Hour(), datetime(2010, 1, 1, 1), datetime(2010, 1, 1)) + + assert Hour(3) + Hour(2) == Hour(5) + assert Hour(3) - Hour(2) == Hour() + + assert Hour(4) != Hour(1) + + +def test_Minute(): + assert_offset_equal(Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 1)) + assert_offset_equal(Minute(-1), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1)) + assert_offset_equal(2 * Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 2)) + assert_offset_equal(-1 * Minute(), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1)) + + assert Minute(3) + Minute(2) == Minute(5) + assert Minute(3) - Minute(2) == Minute() + assert Minute(5) != Minute() + + +def test_Second(): + assert_offset_equal(Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 1)) + assert_offset_equal(Second(-1), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1)) + assert_offset_equal( + 2 * Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 2) + ) + assert_offset_equal( + -1 * Second(), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1) + ) + + assert Second(3) + Second(2) == Second(5) + assert Second(3) - Second(2) == Second() + + +def test_Millisecond(): + assert_offset_equal( + Milli(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1000) + ) + assert_offset_equal( + Milli(-1), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1) + ) + assert_offset_equal( + Milli(2), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2000) + ) + assert_offset_equal( + 2 * Milli(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2000) + ) + assert_offset_equal( + -1 * Milli(), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1) + ) + + assert Milli(3) + Milli(2) == Milli(5) + assert Milli(3) - Milli(2) == Milli() + + +def test_MillisecondTimestampArithmetic(): + assert_offset_equal( + Milli(), Timestamp("2010-01-01"), Timestamp("2010-01-01 00:00:00.001") + ) + assert_offset_equal( + Milli(-1), Timestamp("2010-01-01 00:00:00.001"), Timestamp("2010-01-01") + ) + + +def test_Microsecond(): + assert_offset_equal(Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1)) + assert_offset_equal( + Micro(-1), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1) + ) + + assert_offset_equal( + 2 * Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2) + ) + assert_offset_equal( + -1 * Micro(), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1) + ) + + assert Micro(3) + Micro(2) == Micro(5) + assert Micro(3) - Micro(2) == Micro() + + +def test_NanosecondGeneric(): + timestamp = Timestamp(datetime(2010, 1, 1)) + assert timestamp.nanosecond == 0 + + result = timestamp + Nano(10) + assert result.nanosecond == 10 + + reverse_result = Nano(10) + timestamp + assert reverse_result.nanosecond == 10 + + +def test_Nanosecond(): + timestamp = Timestamp(datetime(2010, 1, 1)) + assert_offset_equal(Nano(), timestamp, timestamp + np.timedelta64(1, "ns")) + assert_offset_equal(Nano(-1), timestamp + np.timedelta64(1, "ns"), timestamp) + assert_offset_equal(2 * Nano(), timestamp, timestamp + np.timedelta64(2, "ns")) + assert_offset_equal(-1 * Nano(), timestamp + np.timedelta64(1, "ns"), timestamp) + + assert Nano(3) + Nano(2) == Nano(5) + assert Nano(3) - Nano(2) == Nano() + + # GH9284 + assert Nano(1) + Nano(10) == Nano(11) + assert Nano(5) + Micro(1) == Nano(1005) + assert Micro(5) + Nano(1) == Nano(5001) + + +@pytest.mark.parametrize( + "kls, expected", + [ + (Hour, Timedelta(hours=5)), + (Minute, Timedelta(hours=2, minutes=3)), + (Second, 
Timedelta(hours=2, seconds=3)), + (Milli, Timedelta(hours=2, milliseconds=3)), + (Micro, Timedelta(hours=2, microseconds=3)), + (Nano, Timedelta(hours=2, nanoseconds=3)), + ], +) +def test_tick_addition(kls, expected): + offset = kls(3) + td = Timedelta(hours=2) + + for other in [td, td.to_pytimedelta(), td.to_timedelta64()]: + result = offset + other + assert isinstance(result, Timedelta) + assert result == expected + + result = other + offset + assert isinstance(result, Timedelta) + assert result == expected + + +def test_tick_delta_overflow(): + # GH#55503 raise OutOfBoundsTimedelta, not OverflowError + tick = offsets.Day(10**9) + msg = "Cannot cast 1000000000 days 00:00:00 to unit='ns' without overflow" + depr_msg = "Day.delta is deprecated" + with pytest.raises(OutOfBoundsTimedelta, match=msg): + with tm.assert_produces_warning(FutureWarning, match=depr_msg): + tick.delta + + +@pytest.mark.parametrize("cls", tick_classes) +def test_tick_division(cls): + off = cls(10) + + assert off / cls(5) == 2 + assert off / 2 == cls(5) + assert off / 2.0 == cls(5) + + assert off / off._as_pd_timedelta == 1 + assert off / off._as_pd_timedelta.to_timedelta64() == 1 + + assert off / Nano(1) == off._as_pd_timedelta / Nano(1)._as_pd_timedelta + + if cls is not Nano: + # A case where we end up with a smaller class + result = off / 1000 + assert isinstance(result, offsets.Tick) + assert not isinstance(result, cls) + assert result._as_pd_timedelta == off._as_pd_timedelta / 1000 + + if cls._nanos_inc < Timedelta(seconds=1)._value: + # Case where we end up with a bigger class + result = off / 0.001 + assert isinstance(result, offsets.Tick) + assert not isinstance(result, cls) + assert result._as_pd_timedelta == off._as_pd_timedelta / 0.001 + + +def test_tick_mul_float(): + off = Micro(2) + + # Case where we retain type + result = off * 1.5 + expected = Micro(3) + assert result == expected + assert isinstance(result, Micro) + + # Case where we bump up to the next type + result = off * 1.25 + expected = Nano(2500) + assert result == expected + assert isinstance(result, Nano) + + +@pytest.mark.parametrize("cls", tick_classes) +def test_tick_rdiv(cls): + off = cls(10) + delta = off._as_pd_timedelta + td64 = delta.to_timedelta64() + instance__type = ".".join([cls.__module__, cls.__name__]) + msg = ( + "unsupported operand type\\(s\\) for \\/: 'int'|'float' and " + f"'{instance__type}'" + ) + + with pytest.raises(TypeError, match=msg): + 2 / off + with pytest.raises(TypeError, match=msg): + 2.0 / off + + assert (td64 * 2.5) / off == 2.5 + + if cls is not Nano: + # skip pytimedelta for Nano since it gets dropped + assert (delta.to_pytimedelta() * 2) / off == 2 + + result = np.array([2 * td64, td64]) / off + expected = np.array([2.0, 1.0]) + tm.assert_numpy_array_equal(result, expected) + + +@pytest.mark.parametrize("cls1", tick_classes) +@pytest.mark.parametrize("cls2", tick_classes) +def test_tick_zero(cls1, cls2): + assert cls1(0) == cls2(0) + assert cls1(0) + cls2(0) == cls1(0) + + if cls1 is not Nano: + assert cls1(2) + cls2(0) == cls1(2) + + if cls1 is Nano: + assert cls1(2) + Nano(0) == cls1(2) + + +@pytest.mark.parametrize("cls", tick_classes) +def test_tick_equalities(cls): + assert cls() == cls(1) + + +@pytest.mark.parametrize("cls", tick_classes) +def test_tick_offset(cls): + msg = f"{cls.__name__}.is_anchored is deprecated " + + with tm.assert_produces_warning(FutureWarning, match=msg): + assert not cls().is_anchored() + + +@pytest.mark.parametrize("cls", tick_classes) +def test_compare_ticks(cls): + 
three = cls(3) + four = cls(4) + + assert three < cls(4) + assert cls(3) < four + assert four > cls(3) + assert cls(4) > three + assert cls(3) == cls(3) + assert cls(3) != cls(4) + + +@pytest.mark.parametrize("cls", tick_classes) +def test_compare_ticks_to_strs(cls): + # GH#23524 + off = cls(19) + + # These tests should work with any strings, but we particularly are + # interested in "infer" as that comparison is convenient to make in + # Datetime/Timedelta Array/Index constructors + assert not off == "infer" + assert not "foo" == off + + instance_type = ".".join([cls.__module__, cls.__name__]) + msg = ( + "'<'|'<='|'>'|'>=' not supported between instances of " + f"'str' and '{instance_type}'|'{instance_type}' and 'str'" + ) + + for left, right in [("infer", off), (off, "infer")]: + with pytest.raises(TypeError, match=msg): + left < right + with pytest.raises(TypeError, match=msg): + left <= right + with pytest.raises(TypeError, match=msg): + left > right + with pytest.raises(TypeError, match=msg): + left >= right + + +@pytest.mark.parametrize("cls", tick_classes) +def test_compare_ticks_to_timedeltalike(cls): + off = cls(19) + + td = off._as_pd_timedelta + + others = [td, td.to_timedelta64()] + if cls is not Nano: + others.append(td.to_pytimedelta()) + + for other in others: + assert off == other + assert not off != other + assert not off < other + assert not off > other + assert off <= other + assert off >= other