diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_12_mp_rank_00_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_12_mp_rank_00_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..da7c72d4d50b375a1ffc9fbb1c1617017bc89c4e
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_12_mp_rank_00_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:849e8e7212d65890ce21feb9e7b6c5b12145c46d0c9665b1e9189479b01a464c
+size 41830138
diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_218_mp_rank_03_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_218_mp_rank_03_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..6ea85882e307565db838eb519c080f81f47ad430
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_218_mp_rank_03_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a113d81ba309fea78b7e6977263f8d0ae6fbaee2c0750e4dd5c52dbaf7b56ff5
+size 41830404
diff --git a/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_38_mp_rank_02_optim_states.pt b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_38_mp_rank_02_optim_states.pt
new file mode 100644
index 0000000000000000000000000000000000000000..63ba5fa381b571c21b800b5f16c75f45bc260d03
--- /dev/null
+++ b/ckpts/llama-3b/global_step100/bf16_zero_pp_rank_38_mp_rank_02_optim_states.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f7c23480d5db9e207172e95b7d615f7d36a3254fb1892741bbc6acda377a877e
+size 41830330
diff --git a/ckpts/universal/global_step80/zero/12.mlp.dense_4h_to_h.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/12.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..a01a4fd9eb0c83ba77914b2b472244b0b377c07d
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/12.mlp.dense_4h_to_h.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:df139606a9a17d26ed7b8b6e86e887aaad905ffa7e61202beb656025099f2264
+size 33555627
diff --git a/ckpts/universal/global_step80/zero/29.vocab_parallel_projection.weight/exp_avg_sq.pt b/ckpts/universal/global_step80/zero/29.vocab_parallel_projection.weight/exp_avg_sq.pt
new file mode 100644
index 0000000000000000000000000000000000000000..5f852e05051a5f8bd395126cf41ebdb4587b43ac
--- /dev/null
+++ b/ckpts/universal/global_step80/zero/29.vocab_parallel_projection.weight/exp_avg_sq.pt
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5e7c3c9d66ead814ad35c98a202ae788615f0ae3e104ca0d579cc09687025e84
+size 415237291
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b027b0264d6bb9f68b0580ae515cf3e5ed162713
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/__init__.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_astype.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_astype.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6d1aca2417f2dc1599902c941a563f967105bce3
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_astype.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_constructors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_constructors.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..397010205f55d9045375ff3989a06dddcce37ce8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_constructors.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_dtypes.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_dtypes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ee8f2f17d846a906c8cda506fc9408434fdfa1d
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_dtypes.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_replace.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_replace.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f9af6089c67d71eb0dd394831d48bdf654e48447
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_replace.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_repr.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_repr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0d5eec2d4e28d4a0c46baea5a81435bc27bc5f54
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_repr.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_subclass.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_subclass.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..916c7ac690da212feffdcba705f52236950db52e
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_subclass.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_warnings.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_warnings.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5a6f718c30ace58de3ba6f35d3cc34ed4a10a187
Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/__pycache__/test_warnings.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_algos.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_algos.py
new file mode 100644
index 0000000000000000000000000000000000000000..d4c19a4970135cfb1865eaa0fae0845dc7d17971
---
/dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_algos.py @@ -0,0 +1,89 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm + + +@pytest.mark.parametrize("ordered", [True, False]) +@pytest.mark.parametrize("categories", [["b", "a", "c"], ["a", "b", "c", "d"]]) +def test_factorize(categories, ordered): + cat = pd.Categorical( + ["b", "b", "a", "c", None], categories=categories, ordered=ordered + ) + codes, uniques = pd.factorize(cat) + expected_codes = np.array([0, 0, 1, 2, -1], dtype=np.intp) + expected_uniques = pd.Categorical( + ["b", "a", "c"], categories=categories, ordered=ordered + ) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_categorical_equal(uniques, expected_uniques) + + +def test_factorized_sort(): + cat = pd.Categorical(["b", "b", None, "a"]) + codes, uniques = pd.factorize(cat, sort=True) + expected_codes = np.array([1, 1, -1, 0], dtype=np.intp) + expected_uniques = pd.Categorical(["a", "b"]) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_categorical_equal(uniques, expected_uniques) + + +def test_factorized_sort_ordered(): + cat = pd.Categorical( + ["b", "b", None, "a"], categories=["c", "b", "a"], ordered=True + ) + + codes, uniques = pd.factorize(cat, sort=True) + expected_codes = np.array([0, 0, -1, 1], dtype=np.intp) + expected_uniques = pd.Categorical( + ["b", "a"], categories=["c", "b", "a"], ordered=True + ) + + tm.assert_numpy_array_equal(codes, expected_codes) + tm.assert_categorical_equal(uniques, expected_uniques) + + +def test_isin_cats(): + # GH2003 + cat = pd.Categorical(["a", "b", np.nan]) + + result = cat.isin(["a", np.nan]) + expected = np.array([True, False, True], dtype=bool) + tm.assert_numpy_array_equal(expected, result) + + result = cat.isin(["a", "c"]) + expected = np.array([True, False, False], dtype=bool) + tm.assert_numpy_array_equal(expected, result) + + +@pytest.mark.parametrize("value", [[""], [None, ""], [pd.NaT, ""]]) +def test_isin_cats_corner_cases(value): + # GH36550 + cat = pd.Categorical([""]) + result = cat.isin(value) + expected = np.array([True], dtype=bool) + tm.assert_numpy_array_equal(expected, result) + + +@pytest.mark.parametrize("empty", [[], pd.Series(dtype=object), np.array([])]) +def test_isin_empty(empty): + s = pd.Categorical(["a", "b"]) + expected = np.array([False, False], dtype=bool) + + result = s.isin(empty) + tm.assert_numpy_array_equal(expected, result) + + +def test_diff(): + ser = pd.Series([1, 2, 3], dtype="category") + + msg = "Convert to a suitable dtype" + with pytest.raises(TypeError, match=msg): + ser.diff() + + df = ser.to_frame(name="A") + with pytest.raises(TypeError, match=msg): + df.diff() diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_analytics.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_analytics.py new file mode 100644 index 0000000000000000000000000000000000000000..c2c53fbc4637ed60dc92914f6e2ca74d5e0bdfe9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_analytics.py @@ -0,0 +1,349 @@ +import re +import sys + +import numpy as np +import pytest + +from pandas.compat import PYPY + +from pandas import ( + Categorical, + CategoricalDtype, + DataFrame, + Index, + NaT, + Series, + date_range, +) +import pandas._testing as tm +from pandas.api.types import is_scalar + + +class TestCategoricalAnalytics: + @pytest.mark.parametrize("aggregation", ["min", "max"]) + def 
test_min_max_not_ordered_raises(self, aggregation): + # unordered cats have no min/max + cat = Categorical(["a", "b", "c", "d"], ordered=False) + msg = f"Categorical is not ordered for operation {aggregation}" + agg_func = getattr(cat, aggregation) + + with pytest.raises(TypeError, match=msg): + agg_func() + + ufunc = np.minimum if aggregation == "min" else np.maximum + with pytest.raises(TypeError, match=msg): + ufunc.reduce(cat) + + def test_min_max_ordered(self, index_or_series_or_array): + cat = Categorical(["a", "b", "c", "d"], ordered=True) + obj = index_or_series_or_array(cat) + _min = obj.min() + _max = obj.max() + assert _min == "a" + assert _max == "d" + + assert np.minimum.reduce(obj) == "a" + assert np.maximum.reduce(obj) == "d" + # TODO: raises if we pass axis=0 (on Index and Categorical, not Series) + + cat = Categorical( + ["a", "b", "c", "d"], categories=["d", "c", "b", "a"], ordered=True + ) + obj = index_or_series_or_array(cat) + _min = obj.min() + _max = obj.max() + assert _min == "d" + assert _max == "a" + assert np.minimum.reduce(obj) == "d" + assert np.maximum.reduce(obj) == "a" + + def test_min_max_reduce(self): + # GH52788 + cat = Categorical(["a", "b", "c", "d"], ordered=True) + df = DataFrame(cat) + + result_max = df.agg("max") + expected_max = Series(Categorical(["d"], dtype=cat.dtype)) + tm.assert_series_equal(result_max, expected_max) + + result_min = df.agg("min") + expected_min = Series(Categorical(["a"], dtype=cat.dtype)) + tm.assert_series_equal(result_min, expected_min) + + @pytest.mark.parametrize( + "categories,expected", + [ + (list("ABC"), np.nan), + ([1, 2, 3], np.nan), + pytest.param( + Series(date_range("2020-01-01", periods=3), dtype="category"), + NaT, + marks=pytest.mark.xfail( + reason="https://github.com/pandas-dev/pandas/issues/29962" + ), + ), + ], + ) + @pytest.mark.parametrize("aggregation", ["min", "max"]) + def test_min_max_ordered_empty(self, categories, expected, aggregation): + # GH 30227 + cat = Categorical([], categories=categories, ordered=True) + + agg_func = getattr(cat, aggregation) + result = agg_func() + assert result is expected + + @pytest.mark.parametrize( + "values, categories", + [(["a", "b", "c", np.nan], list("cba")), ([1, 2, 3, np.nan], [3, 2, 1])], + ) + @pytest.mark.parametrize("skipna", [True, False]) + @pytest.mark.parametrize("function", ["min", "max"]) + def test_min_max_with_nan(self, values, categories, function, skipna): + # GH 25303 + cat = Categorical(values, categories=categories, ordered=True) + result = getattr(cat, function)(skipna=skipna) + + if skipna is False: + assert result is np.nan + else: + expected = categories[0] if function == "min" else categories[2] + assert result == expected + + @pytest.mark.parametrize("function", ["min", "max"]) + @pytest.mark.parametrize("skipna", [True, False]) + def test_min_max_only_nan(self, function, skipna): + # https://github.com/pandas-dev/pandas/issues/33450 + cat = Categorical([np.nan], categories=[1, 2], ordered=True) + result = getattr(cat, function)(skipna=skipna) + assert result is np.nan + + @pytest.mark.parametrize("method", ["min", "max"]) + def test_numeric_only_min_max_raises(self, method): + # GH 25303 + cat = Categorical( + [np.nan, 1, 2, np.nan], categories=[5, 4, 3, 2, 1], ordered=True + ) + with pytest.raises(TypeError, match=".* got an unexpected keyword"): + getattr(cat, method)(numeric_only=True) + + @pytest.mark.parametrize("method", ["min", "max"]) + def test_numpy_min_max_raises(self, method): + cat = Categorical(["a", "b", "c", "b"], 
ordered=False) + msg = ( + f"Categorical is not ordered for operation {method}\n" + "you can use .as_ordered() to change the Categorical to an ordered one" + ) + method = getattr(np, method) + with pytest.raises(TypeError, match=re.escape(msg)): + method(cat) + + @pytest.mark.parametrize("kwarg", ["axis", "out", "keepdims"]) + @pytest.mark.parametrize("method", ["min", "max"]) + def test_numpy_min_max_unsupported_kwargs_raises(self, method, kwarg): + cat = Categorical(["a", "b", "c", "b"], ordered=True) + msg = ( + f"the '{kwarg}' parameter is not supported in the pandas implementation " + f"of {method}" + ) + if kwarg == "axis": + msg = r"`axis` must be fewer than the number of dimensions \(1\)" + kwargs = {kwarg: 42} + method = getattr(np, method) + with pytest.raises(ValueError, match=msg): + method(cat, **kwargs) + + @pytest.mark.parametrize("method, expected", [("min", "a"), ("max", "c")]) + def test_numpy_min_max_axis_equals_none(self, method, expected): + cat = Categorical(["a", "b", "c", "b"], ordered=True) + method = getattr(np, method) + result = method(cat, axis=None) + assert result == expected + + @pytest.mark.parametrize( + "values,categories,exp_mode", + [ + ([1, 1, 2, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5]), + ([1, 1, 1, 4, 5, 5, 5], [5, 4, 3, 2, 1], [5, 1]), + ([1, 2, 3, 4, 5], [5, 4, 3, 2, 1], [5, 4, 3, 2, 1]), + ([np.nan, np.nan, np.nan, 4, 5], [5, 4, 3, 2, 1], [5, 4]), + ([np.nan, np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]), + ([np.nan, np.nan, 4, 5, 4], [5, 4, 3, 2, 1], [4]), + ], + ) + def test_mode(self, values, categories, exp_mode): + cat = Categorical(values, categories=categories, ordered=True) + res = Series(cat).mode()._values + exp = Categorical(exp_mode, categories=categories, ordered=True) + tm.assert_categorical_equal(res, exp) + + def test_searchsorted(self, ordered): + # https://github.com/pandas-dev/pandas/issues/8420 + # https://github.com/pandas-dev/pandas/issues/14522 + + cat = Categorical( + ["cheese", "milk", "apple", "bread", "bread"], + categories=["cheese", "milk", "apple", "bread"], + ordered=ordered, + ) + ser = Series(cat) + + # Searching for single item argument, side='left' (default) + res_cat = cat.searchsorted("apple") + assert res_cat == 2 + assert is_scalar(res_cat) + + res_ser = ser.searchsorted("apple") + assert res_ser == 2 + assert is_scalar(res_ser) + + # Searching for single item array, side='left' (default) + res_cat = cat.searchsorted(["bread"]) + res_ser = ser.searchsorted(["bread"]) + exp = np.array([3], dtype=np.intp) + tm.assert_numpy_array_equal(res_cat, exp) + tm.assert_numpy_array_equal(res_ser, exp) + + # Searching for several items array, side='right' + res_cat = cat.searchsorted(["apple", "bread"], side="right") + res_ser = ser.searchsorted(["apple", "bread"], side="right") + exp = np.array([3, 5], dtype=np.intp) + tm.assert_numpy_array_equal(res_cat, exp) + tm.assert_numpy_array_equal(res_ser, exp) + + # Searching for a single value that is not from the Categorical + with pytest.raises(TypeError, match="cucumber"): + cat.searchsorted("cucumber") + with pytest.raises(TypeError, match="cucumber"): + ser.searchsorted("cucumber") + + # Searching for multiple values one of each is not from the Categorical + msg = ( + "Cannot setitem on a Categorical with a new category, " + "set the categories first" + ) + with pytest.raises(TypeError, match=msg): + cat.searchsorted(["bread", "cucumber"]) + with pytest.raises(TypeError, match=msg): + ser.searchsorted(["bread", "cucumber"]) + + def test_unique(self, ordered): + # GH38140 + 
dtype = CategoricalDtype(["a", "b", "c"], ordered=ordered) + + # categories are reordered based on value when ordered=False + cat = Categorical(["a", "b", "c"], dtype=dtype) + res = cat.unique() + tm.assert_categorical_equal(res, cat) + + cat = Categorical(["a", "b", "a", "a"], dtype=dtype) + res = cat.unique() + tm.assert_categorical_equal(res, Categorical(["a", "b"], dtype=dtype)) + + cat = Categorical(["c", "a", "b", "a", "a"], dtype=dtype) + res = cat.unique() + exp_cat = Categorical(["c", "a", "b"], dtype=dtype) + tm.assert_categorical_equal(res, exp_cat) + + # nan must be removed + cat = Categorical(["b", np.nan, "b", np.nan, "a"], dtype=dtype) + res = cat.unique() + exp_cat = Categorical(["b", np.nan, "a"], dtype=dtype) + tm.assert_categorical_equal(res, exp_cat) + + def test_unique_index_series(self, ordered): + # GH38140 + dtype = CategoricalDtype([3, 2, 1], ordered=ordered) + + c = Categorical([3, 1, 2, 2, 1], dtype=dtype) + # Categorical.unique sorts categories by appearance order + # if ordered=False + exp = Categorical([3, 1, 2], dtype=dtype) + tm.assert_categorical_equal(c.unique(), exp) + + tm.assert_index_equal(Index(c).unique(), Index(exp)) + tm.assert_categorical_equal(Series(c).unique(), exp) + + c = Categorical([1, 1, 2, 2], dtype=dtype) + exp = Categorical([1, 2], dtype=dtype) + tm.assert_categorical_equal(c.unique(), exp) + tm.assert_index_equal(Index(c).unique(), Index(exp)) + tm.assert_categorical_equal(Series(c).unique(), exp) + + def test_shift(self): + # GH 9416 + cat = Categorical(["a", "b", "c", "d", "a"]) + + # shift forward + sp1 = cat.shift(1) + xp1 = Categorical([np.nan, "a", "b", "c", "d"]) + tm.assert_categorical_equal(sp1, xp1) + tm.assert_categorical_equal(cat[:-1], sp1[1:]) + + # shift back + sn2 = cat.shift(-2) + xp2 = Categorical( + ["c", "d", "a", np.nan, np.nan], categories=["a", "b", "c", "d"] + ) + tm.assert_categorical_equal(sn2, xp2) + tm.assert_categorical_equal(cat[2:], sn2[:-2]) + + # shift by zero + tm.assert_categorical_equal(cat, cat.shift(0)) + + def test_nbytes(self): + cat = Categorical([1, 2, 3]) + exp = 3 + 3 * 8 # 3 int8s for values + 3 int64s for categories + assert cat.nbytes == exp + + def test_memory_usage(self): + cat = Categorical([1, 2, 3]) + + # .categories is an index, so we include the hashtable + assert 0 < cat.nbytes <= cat.memory_usage() + assert 0 < cat.nbytes <= cat.memory_usage(deep=True) + + cat = Categorical(["foo", "foo", "bar"]) + assert cat.memory_usage(deep=True) > cat.nbytes + + if not PYPY: + # sys.getsizeof will call the .memory_usage with + # deep=True, and add on some GC overhead + diff = cat.memory_usage(deep=True) - sys.getsizeof(cat) + assert abs(diff) < 100 + + def test_map(self): + c = Categorical(list("ABABC"), categories=list("CBA"), ordered=True) + result = c.map(lambda x: x.lower(), na_action=None) + exp = Categorical(list("ababc"), categories=list("cba"), ordered=True) + tm.assert_categorical_equal(result, exp) + + c = Categorical(list("ABABC"), categories=list("ABC"), ordered=False) + result = c.map(lambda x: x.lower(), na_action=None) + exp = Categorical(list("ababc"), categories=list("abc"), ordered=False) + tm.assert_categorical_equal(result, exp) + + result = c.map(lambda x: 1, na_action=None) + # GH 12766: Return an index not an array + tm.assert_index_equal(result, Index(np.array([1] * 5, dtype=np.int64))) + + @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0]) + def test_validate_inplace_raises(self, value): + cat = Categorical(["A", "B", "B", "C", "A"]) + msg = ( + 'For 
argument "inplace" expected type bool, ' + f"received type {type(value).__name__}" + ) + + with pytest.raises(ValueError, match=msg): + cat.sort_values(inplace=value) + + def test_quantile_empty(self): + # make sure we have correct itemsize on resulting codes + cat = Categorical(["A", "B"]) + idx = Index([0.0, 0.5]) + result = cat[:0]._quantile(idx, interpolation="linear") + assert result._codes.dtype == np.int8 + + expected = cat.take([-1, -1], allow_fill=True) + tm.assert_extension_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_api.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_api.py new file mode 100644 index 0000000000000000000000000000000000000000..a939ee5f6f53f805211d46773c625c8361203991 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_api.py @@ -0,0 +1,501 @@ +import re + +import numpy as np +import pytest + +from pandas.compat import PY311 + +from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + Index, + Series, + StringDtype, +) +import pandas._testing as tm +from pandas.core.arrays.categorical import recode_for_categories + + +class TestCategoricalAPI: + def test_to_list_deprecated(self): + # GH#51254 + cat1 = Categorical(list("acb"), ordered=False) + msg = "Categorical.to_list is deprecated and will be removed" + with tm.assert_produces_warning(FutureWarning, match=msg): + cat1.to_list() + + def test_ordered_api(self): + # GH 9347 + cat1 = Categorical(list("acb"), ordered=False) + tm.assert_index_equal(cat1.categories, Index(["a", "b", "c"])) + assert not cat1.ordered + + cat2 = Categorical(list("acb"), categories=list("bca"), ordered=False) + tm.assert_index_equal(cat2.categories, Index(["b", "c", "a"])) + assert not cat2.ordered + + cat3 = Categorical(list("acb"), ordered=True) + tm.assert_index_equal(cat3.categories, Index(["a", "b", "c"])) + assert cat3.ordered + + cat4 = Categorical(list("acb"), categories=list("bca"), ordered=True) + tm.assert_index_equal(cat4.categories, Index(["b", "c", "a"])) + assert cat4.ordered + + def test_set_ordered(self): + cat = Categorical(["a", "b", "c", "a"], ordered=True) + cat2 = cat.as_unordered() + assert not cat2.ordered + cat2 = cat.as_ordered() + assert cat2.ordered + + assert cat2.set_ordered(True).ordered + assert not cat2.set_ordered(False).ordered + + # removed in 0.19.0 + msg = ( + "property 'ordered' of 'Categorical' object has no setter" + if PY311 + else "can't set attribute" + ) + with pytest.raises(AttributeError, match=msg): + cat.ordered = True + with pytest.raises(AttributeError, match=msg): + cat.ordered = False + + def test_rename_categories(self): + cat = Categorical(["a", "b", "c", "a"]) + + # inplace=False: the old one must not be changed + res = cat.rename_categories([1, 2, 3]) + tm.assert_numpy_array_equal( + res.__array__(), np.array([1, 2, 3, 1], dtype=np.int64) + ) + tm.assert_index_equal(res.categories, Index([1, 2, 3])) + + exp_cat = np.array(["a", "b", "c", "a"], dtype=np.object_) + tm.assert_numpy_array_equal(cat.__array__(), exp_cat) + + exp_cat = Index(["a", "b", "c"]) + tm.assert_index_equal(cat.categories, exp_cat) + + # GH18862 (let rename_categories take callables) + result = cat.rename_categories(lambda x: x.upper()) + expected = Categorical(["A", "B", "C", "A"]) + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]]) + def test_rename_categories_wrong_length_raises(self, 
new_categories): + cat = Categorical(["a", "b", "c", "a"]) + msg = ( + "new categories need to have the same number of items as the " + "old categories!" + ) + with pytest.raises(ValueError, match=msg): + cat.rename_categories(new_categories) + + def test_rename_categories_series(self): + # https://github.com/pandas-dev/pandas/issues/17981 + c = Categorical(["a", "b"]) + result = c.rename_categories(Series([0, 1], index=["a", "b"])) + expected = Categorical([0, 1]) + tm.assert_categorical_equal(result, expected) + + def test_rename_categories_dict(self): + # GH 17336 + cat = Categorical(["a", "b", "c", "d"]) + res = cat.rename_categories({"a": 4, "b": 3, "c": 2, "d": 1}) + expected = Index([4, 3, 2, 1]) + tm.assert_index_equal(res.categories, expected) + + # Test for dicts of smaller length + cat = Categorical(["a", "b", "c", "d"]) + res = cat.rename_categories({"a": 1, "c": 3}) + + expected = Index([1, "b", 3, "d"]) + tm.assert_index_equal(res.categories, expected) + + # Test for dicts with bigger length + cat = Categorical(["a", "b", "c", "d"]) + res = cat.rename_categories({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "f": 6}) + expected = Index([1, 2, 3, 4]) + tm.assert_index_equal(res.categories, expected) + + # Test for dicts with no items from old categories + cat = Categorical(["a", "b", "c", "d"]) + res = cat.rename_categories({"f": 1, "g": 3}) + + expected = Index(["a", "b", "c", "d"]) + tm.assert_index_equal(res.categories, expected) + + def test_reorder_categories(self): + cat = Categorical(["a", "b", "c", "a"], ordered=True) + old = cat.copy() + new = Categorical( + ["a", "b", "c", "a"], categories=["c", "b", "a"], ordered=True + ) + + res = cat.reorder_categories(["c", "b", "a"]) + # cat must be the same as before + tm.assert_categorical_equal(cat, old) + # only res is changed + tm.assert_categorical_equal(res, new) + + @pytest.mark.parametrize( + "new_categories", + [ + ["a"], # not all "old" included in "new" + ["a", "b", "d"], # still not all "old" in "new" + ["a", "b", "c", "d"], # all "old" included in "new", but too long + ], + ) + def test_reorder_categories_raises(self, new_categories): + cat = Categorical(["a", "b", "c", "a"], ordered=True) + msg = "items in new_categories are not the same as in old categories" + with pytest.raises(ValueError, match=msg): + cat.reorder_categories(new_categories) + + def test_add_categories(self): + cat = Categorical(["a", "b", "c", "a"], ordered=True) + old = cat.copy() + new = Categorical( + ["a", "b", "c", "a"], categories=["a", "b", "c", "d"], ordered=True + ) + + res = cat.add_categories("d") + tm.assert_categorical_equal(cat, old) + tm.assert_categorical_equal(res, new) + + res = cat.add_categories(["d"]) + tm.assert_categorical_equal(cat, old) + tm.assert_categorical_equal(res, new) + + # GH 9927 + cat = Categorical(list("abc"), ordered=True) + expected = Categorical(list("abc"), categories=list("abcde"), ordered=True) + # test with Series, np.array, index, list + res = cat.add_categories(Series(["d", "e"])) + tm.assert_categorical_equal(res, expected) + res = cat.add_categories(np.array(["d", "e"])) + tm.assert_categorical_equal(res, expected) + res = cat.add_categories(Index(["d", "e"])) + tm.assert_categorical_equal(res, expected) + res = cat.add_categories(["d", "e"]) + tm.assert_categorical_equal(res, expected) + + def test_add_categories_existing_raises(self): + # new is in old categories + cat = Categorical(["a", "b", "c", "d"], ordered=True) + msg = re.escape("new categories must not include old categories: {'d'}") + with 
pytest.raises(ValueError, match=msg): + cat.add_categories(["d"]) + + def test_add_categories_losing_dtype_information(self): + # GH#48812 + cat = Categorical(Series([1, 2], dtype="Int64")) + ser = Series([4], dtype="Int64") + result = cat.add_categories(ser) + expected = Categorical( + Series([1, 2], dtype="Int64"), categories=Series([1, 2, 4], dtype="Int64") + ) + tm.assert_categorical_equal(result, expected) + + cat = Categorical(Series(["a", "b", "a"], dtype=StringDtype())) + ser = Series(["d"], dtype=StringDtype()) + result = cat.add_categories(ser) + expected = Categorical( + Series(["a", "b", "a"], dtype=StringDtype()), + categories=Series(["a", "b", "d"], dtype=StringDtype()), + ) + tm.assert_categorical_equal(result, expected) + + def test_set_categories(self): + cat = Categorical(["a", "b", "c", "a"], ordered=True) + exp_categories = Index(["c", "b", "a"]) + exp_values = np.array(["a", "b", "c", "a"], dtype=np.object_) + + cat = cat.set_categories(["c", "b", "a"]) + res = cat.set_categories(["a", "b", "c"]) + # cat must be the same as before + tm.assert_index_equal(cat.categories, exp_categories) + tm.assert_numpy_array_equal(cat.__array__(), exp_values) + # only res is changed + exp_categories_back = Index(["a", "b", "c"]) + tm.assert_index_equal(res.categories, exp_categories_back) + tm.assert_numpy_array_equal(res.__array__(), exp_values) + + # not all "old" included in "new" -> all not included ones are now + # np.nan + cat = Categorical(["a", "b", "c", "a"], ordered=True) + res = cat.set_categories(["a"]) + tm.assert_numpy_array_equal(res.codes, np.array([0, -1, -1, 0], dtype=np.int8)) + + # still not all "old" in "new" + res = cat.set_categories(["a", "b", "d"]) + tm.assert_numpy_array_equal(res.codes, np.array([0, 1, -1, 0], dtype=np.int8)) + tm.assert_index_equal(res.categories, Index(["a", "b", "d"])) + + # all "old" included in "new" + cat = cat.set_categories(["a", "b", "c", "d"]) + exp_categories = Index(["a", "b", "c", "d"]) + tm.assert_index_equal(cat.categories, exp_categories) + + # internals... + c = Categorical([1, 2, 3, 4, 1], categories=[1, 2, 3, 4], ordered=True) + tm.assert_numpy_array_equal(c._codes, np.array([0, 1, 2, 3, 0], dtype=np.int8)) + tm.assert_index_equal(c.categories, Index([1, 2, 3, 4])) + + exp = np.array([1, 2, 3, 4, 1], dtype=np.int64) + tm.assert_numpy_array_equal(np.asarray(c), exp) + + # all "pointers" to '4' must be changed from 3 to 0,... 
+ c = c.set_categories([4, 3, 2, 1]) + + # positions are changed + tm.assert_numpy_array_equal(c._codes, np.array([3, 2, 1, 0, 3], dtype=np.int8)) + + # categories are now in new order + tm.assert_index_equal(c.categories, Index([4, 3, 2, 1])) + + # output is the same + exp = np.array([1, 2, 3, 4, 1], dtype=np.int64) + tm.assert_numpy_array_equal(np.asarray(c), exp) + assert c.min() == 4 + assert c.max() == 1 + + # set_categories should set the ordering if specified + c2 = c.set_categories([4, 3, 2, 1], ordered=False) + assert not c2.ordered + + tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2)) + + # set_categories should pass thru the ordering + c2 = c.set_ordered(False).set_categories([4, 3, 2, 1]) + assert not c2.ordered + + tm.assert_numpy_array_equal(np.asarray(c), np.asarray(c2)) + + @pytest.mark.parametrize( + "values, categories, new_categories", + [ + # No NaNs, same cats, same order + (["a", "b", "a"], ["a", "b"], ["a", "b"]), + # No NaNs, same cats, different order + (["a", "b", "a"], ["a", "b"], ["b", "a"]), + # Same, unsorted + (["b", "a", "a"], ["a", "b"], ["a", "b"]), + # No NaNs, same cats, different order + (["b", "a", "a"], ["a", "b"], ["b", "a"]), + # NaNs + (["a", "b", "c"], ["a", "b"], ["a", "b"]), + (["a", "b", "c"], ["a", "b"], ["b", "a"]), + (["b", "a", "c"], ["a", "b"], ["a", "b"]), + (["b", "a", "c"], ["a", "b"], ["a", "b"]), + # Introduce NaNs + (["a", "b", "c"], ["a", "b"], ["a"]), + (["a", "b", "c"], ["a", "b"], ["b"]), + (["b", "a", "c"], ["a", "b"], ["a"]), + (["b", "a", "c"], ["a", "b"], ["a"]), + # No overlap + (["a", "b", "c"], ["a", "b"], ["d", "e"]), + ], + ) + @pytest.mark.parametrize("ordered", [True, False]) + def test_set_categories_many(self, values, categories, new_categories, ordered): + c = Categorical(values, categories) + expected = Categorical(values, new_categories, ordered) + result = c.set_categories(new_categories, ordered=ordered) + tm.assert_categorical_equal(result, expected) + + def test_set_categories_rename_less(self): + # GH 24675 + cat = Categorical(["A", "B"]) + result = cat.set_categories(["A"], rename=True) + expected = Categorical(["A", np.nan]) + tm.assert_categorical_equal(result, expected) + + def test_set_categories_private(self): + cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"]) + cat._set_categories(["a", "c", "d", "e"]) + expected = Categorical(["a", "c", "d"], categories=list("acde")) + tm.assert_categorical_equal(cat, expected) + + # fastpath + cat = Categorical(["a", "b", "c"], categories=["a", "b", "c", "d"]) + cat._set_categories(["a", "c", "d", "e"], fastpath=True) + expected = Categorical(["a", "c", "d"], categories=list("acde")) + tm.assert_categorical_equal(cat, expected) + + def test_remove_categories(self): + cat = Categorical(["a", "b", "c", "a"], ordered=True) + old = cat.copy() + new = Categorical(["a", "b", np.nan, "a"], categories=["a", "b"], ordered=True) + + res = cat.remove_categories("c") + tm.assert_categorical_equal(cat, old) + tm.assert_categorical_equal(res, new) + + res = cat.remove_categories(["c"]) + tm.assert_categorical_equal(cat, old) + tm.assert_categorical_equal(res, new) + + @pytest.mark.parametrize("removals", [["c"], ["c", np.nan], "c", ["c", "c"]]) + def test_remove_categories_raises(self, removals): + cat = Categorical(["a", "b", "a"]) + message = re.escape("removals must all be in old categories: {'c'}") + + with pytest.raises(ValueError, match=message): + cat.remove_categories(removals) + + def test_remove_unused_categories(self): + c = Categorical(["a", 
"b", "c", "d", "a"], categories=["a", "b", "c", "d", "e"]) + exp_categories_all = Index(["a", "b", "c", "d", "e"]) + exp_categories_dropped = Index(["a", "b", "c", "d"]) + + tm.assert_index_equal(c.categories, exp_categories_all) + + res = c.remove_unused_categories() + tm.assert_index_equal(res.categories, exp_categories_dropped) + tm.assert_index_equal(c.categories, exp_categories_all) + + # with NaN values (GH11599) + c = Categorical(["a", "b", "c", np.nan], categories=["a", "b", "c", "d", "e"]) + res = c.remove_unused_categories() + tm.assert_index_equal(res.categories, Index(np.array(["a", "b", "c"]))) + exp_codes = np.array([0, 1, 2, -1], dtype=np.int8) + tm.assert_numpy_array_equal(res.codes, exp_codes) + tm.assert_index_equal(c.categories, exp_categories_all) + + val = ["F", np.nan, "D", "B", "D", "F", np.nan] + cat = Categorical(values=val, categories=list("ABCDEFG")) + out = cat.remove_unused_categories() + tm.assert_index_equal(out.categories, Index(["B", "D", "F"])) + exp_codes = np.array([2, -1, 1, 0, 1, 2, -1], dtype=np.int8) + tm.assert_numpy_array_equal(out.codes, exp_codes) + assert out.tolist() == val + + alpha = list("abcdefghijklmnopqrstuvwxyz") + val = np.random.default_rng(2).choice(alpha[::2], 10000).astype("object") + val[np.random.default_rng(2).choice(len(val), 100)] = np.nan + + cat = Categorical(values=val, categories=alpha) + out = cat.remove_unused_categories() + assert out.tolist() == val.tolist() + + +class TestCategoricalAPIWithFactor: + def test_describe(self): + factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True) + # string type + desc = factor.describe() + assert factor.ordered + exp_index = CategoricalIndex( + ["a", "b", "c"], name="categories", ordered=factor.ordered + ) + expected = DataFrame( + {"counts": [3, 2, 3], "freqs": [3 / 8.0, 2 / 8.0, 3 / 8.0]}, index=exp_index + ) + tm.assert_frame_equal(desc, expected) + + # check unused categories + cat = factor.copy() + cat = cat.set_categories(["a", "b", "c", "d"]) + desc = cat.describe() + + exp_index = CategoricalIndex( + list("abcd"), ordered=factor.ordered, name="categories" + ) + expected = DataFrame( + {"counts": [3, 2, 3, 0], "freqs": [3 / 8.0, 2 / 8.0, 3 / 8.0, 0]}, + index=exp_index, + ) + tm.assert_frame_equal(desc, expected) + + # check an integer one + cat = Categorical([1, 2, 3, 1, 2, 3, 3, 2, 1, 1, 1]) + desc = cat.describe() + exp_index = CategoricalIndex([1, 2, 3], ordered=cat.ordered, name="categories") + expected = DataFrame( + {"counts": [5, 3, 3], "freqs": [5 / 11.0, 3 / 11.0, 3 / 11.0]}, + index=exp_index, + ) + tm.assert_frame_equal(desc, expected) + + # https://github.com/pandas-dev/pandas/issues/3678 + # describe should work with NaN + cat = Categorical([np.nan, 1, 2, 2]) + desc = cat.describe() + expected = DataFrame( + {"counts": [1, 2, 1], "freqs": [1 / 4.0, 2 / 4.0, 1 / 4.0]}, + index=CategoricalIndex( + [1, 2, np.nan], categories=[1, 2], name="categories" + ), + ) + tm.assert_frame_equal(desc, expected) + + +class TestPrivateCategoricalAPI: + def test_codes_immutable(self): + # Codes should be read only + c = Categorical(["a", "b", "c", "a", np.nan]) + exp = np.array([0, 1, 2, 0, -1], dtype="int8") + tm.assert_numpy_array_equal(c.codes, exp) + + # Assignments to codes should raise + msg = ( + "property 'codes' of 'Categorical' object has no setter" + if PY311 + else "can't set attribute" + ) + with pytest.raises(AttributeError, match=msg): + c.codes = np.array([0, 1, 2, 0, 1], dtype="int8") + + # changes in the codes array should raise + codes = 
c.codes + + with pytest.raises(ValueError, match="assignment destination is read-only"): + codes[4] = 1 + + # But even after getting the codes, the original array should still be + # writeable! + c[4] = "a" + exp = np.array([0, 1, 2, 0, 0], dtype="int8") + tm.assert_numpy_array_equal(c.codes, exp) + c._codes[4] = 2 + exp = np.array([0, 1, 2, 0, 2], dtype="int8") + tm.assert_numpy_array_equal(c.codes, exp) + + @pytest.mark.parametrize( + "codes, old, new, expected", + [ + ([0, 1], ["a", "b"], ["a", "b"], [0, 1]), + ([0, 1], ["b", "a"], ["b", "a"], [0, 1]), + ([0, 1], ["a", "b"], ["b", "a"], [1, 0]), + ([0, 1], ["b", "a"], ["a", "b"], [1, 0]), + ([0, 1, 0, 1], ["a", "b"], ["a", "b", "c"], [0, 1, 0, 1]), + ([0, 1, 2, 2], ["a", "b", "c"], ["a", "b"], [0, 1, -1, -1]), + ([0, 1, -1], ["a", "b", "c"], ["a", "b", "c"], [0, 1, -1]), + ([0, 1, -1], ["a", "b", "c"], ["b"], [-1, 0, -1]), + ([0, 1, -1], ["a", "b", "c"], ["d"], [-1, -1, -1]), + ([0, 1, -1], ["a", "b", "c"], [], [-1, -1, -1]), + ([-1, -1], [], ["a", "b"], [-1, -1]), + ([1, 0], ["b", "a"], ["a", "b"], [0, 1]), + ], + ) + def test_recode_to_categories(self, codes, old, new, expected): + codes = np.asanyarray(codes, dtype=np.int8) + expected = np.asanyarray(expected, dtype=np.int8) + old = Index(old) + new = Index(new) + result = recode_for_categories(codes, old, new) + tm.assert_numpy_array_equal(result, expected) + + def test_recode_to_categories_large(self): + N = 1000 + codes = np.arange(N) + old = Index(codes) + expected = np.arange(N - 1, -1, -1, dtype=np.int16) + new = Index(expected) + result = recode_for_categories(codes, old, new) + tm.assert_numpy_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_astype.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..a2a53af6ab1ad3701a58bcc6d00929ad2629d36b --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_astype.py @@ -0,0 +1,155 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + DatetimeIndex, + Interval, + NaT, + Period, + Timestamp, + array, + to_datetime, +) +import pandas._testing as tm + + +class TestAstype: + @pytest.mark.parametrize("cls", [Categorical, CategoricalIndex]) + @pytest.mark.parametrize("values", [[1, np.nan], [Timestamp("2000"), NaT]]) + def test_astype_nan_to_int(self, cls, values): + # GH#28406 + obj = cls(values) + + msg = "Cannot (cast|convert)" + with pytest.raises((ValueError, TypeError), match=msg): + obj.astype(int) + + @pytest.mark.parametrize( + "expected", + [ + array(["2019", "2020"], dtype="datetime64[ns, UTC]"), + array([0, 0], dtype="timedelta64[ns]"), + array([Period("2019"), Period("2020")], dtype="period[Y-DEC]"), + array([Interval(0, 1), Interval(1, 2)], dtype="interval"), + array([1, np.nan], dtype="Int64"), + ], + ) + def test_astype_category_to_extension_dtype(self, expected): + # GH#28668 + result = expected.astype("category").astype(expected.dtype) + + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize( + "dtype, expected", + [ + ( + "datetime64[ns]", + np.array(["2015-01-01T00:00:00.000000000"], dtype="datetime64[ns]"), + ), + ( + "datetime64[ns, MET]", + DatetimeIndex([Timestamp("2015-01-01 00:00:00+0100", tz="MET")]).array, + ), + ], + ) + def test_astype_to_datetime64(self, dtype, expected): + # GH#28448 + result = 
Categorical(["2015-01-01"]).astype(dtype) + assert result == expected + + def test_astype_str_int_categories_to_nullable_int(self): + # GH#39616 + dtype = CategoricalDtype([str(i) for i in range(5)]) + codes = np.random.default_rng(2).integers(5, size=20) + arr = Categorical.from_codes(codes, dtype=dtype) + + res = arr.astype("Int64") + expected = array(codes, dtype="Int64") + tm.assert_extension_array_equal(res, expected) + + def test_astype_str_int_categories_to_nullable_float(self): + # GH#39616 + dtype = CategoricalDtype([str(i / 2) for i in range(5)]) + codes = np.random.default_rng(2).integers(5, size=20) + arr = Categorical.from_codes(codes, dtype=dtype) + + res = arr.astype("Float64") + expected = array(codes, dtype="Float64") / 2 + tm.assert_extension_array_equal(res, expected) + + @pytest.mark.parametrize("ordered", [True, False]) + def test_astype(self, ordered): + # string + cat = Categorical(list("abbaaccc"), ordered=ordered) + result = cat.astype(object) + expected = np.array(cat) + tm.assert_numpy_array_equal(result, expected) + + msg = r"Cannot cast object|string dtype to float64" + with pytest.raises(ValueError, match=msg): + cat.astype(float) + + # numeric + cat = Categorical([0, 1, 2, 2, 1, 0, 1, 0, 2], ordered=ordered) + result = cat.astype(object) + expected = np.array(cat, dtype=object) + tm.assert_numpy_array_equal(result, expected) + + result = cat.astype(int) + expected = np.array(cat, dtype="int") + tm.assert_numpy_array_equal(result, expected) + + result = cat.astype(float) + expected = np.array(cat, dtype=float) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("dtype_ordered", [True, False]) + @pytest.mark.parametrize("cat_ordered", [True, False]) + def test_astype_category(self, dtype_ordered, cat_ordered): + # GH#10696/GH#18593 + data = list("abcaacbab") + cat = Categorical(data, categories=list("bac"), ordered=cat_ordered) + + # standard categories + dtype = CategoricalDtype(ordered=dtype_ordered) + result = cat.astype(dtype) + expected = Categorical(data, categories=cat.categories, ordered=dtype_ordered) + tm.assert_categorical_equal(result, expected) + + # non-standard categories + dtype = CategoricalDtype(list("adc"), dtype_ordered) + result = cat.astype(dtype) + expected = Categorical(data, dtype=dtype) + tm.assert_categorical_equal(result, expected) + + if dtype_ordered is False: + # dtype='category' can't specify ordered, so only test once + result = cat.astype("category") + expected = cat + tm.assert_categorical_equal(result, expected) + + def test_astype_object_datetime_categories(self): + # GH#40754 + cat = Categorical(to_datetime(["2021-03-27", NaT])) + result = cat.astype(object) + expected = np.array([Timestamp("2021-03-27 00:00:00"), NaT], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + def test_astype_object_timestamp_categories(self): + # GH#18024 + cat = Categorical([Timestamp("2014-01-01")]) + result = cat.astype(object) + expected = np.array([Timestamp("2014-01-01 00:00:00")], dtype="object") + tm.assert_numpy_array_equal(result, expected) + + def test_astype_category_readonly_mask_values(self): + # GH#53658 + arr = array([0, 1, 2], dtype="Int64") + arr._mask.flags["WRITEABLE"] = False + result = arr.astype("category") + expected = array([0, 1, 2], dtype="Int64").astype("category") + tm.assert_extension_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_constructors.py 
b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..373f1c95463fc43feaaf50d9d984e539628a6b5e --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_constructors.py @@ -0,0 +1,783 @@ +from datetime import ( + date, + datetime, +) + +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas.core.dtypes.common import ( + is_float_dtype, + is_integer_dtype, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + Categorical, + CategoricalIndex, + DatetimeIndex, + Index, + Interval, + IntervalIndex, + MultiIndex, + NaT, + Series, + Timestamp, + date_range, + period_range, + timedelta_range, +) +import pandas._testing as tm + + +class TestCategoricalConstructors: + def test_fastpath_deprecated(self): + codes = np.array([1, 2, 3]) + dtype = CategoricalDtype(categories=["a", "b", "c", "d"], ordered=False) + msg = "The 'fastpath' keyword in Categorical is deprecated" + with tm.assert_produces_warning(DeprecationWarning, match=msg): + Categorical(codes, dtype=dtype, fastpath=True) + + def test_categorical_from_cat_and_dtype_str_preserve_ordered(self): + # GH#49309 we should preserve orderedness in `res` + cat = Categorical([3, 1], categories=[3, 2, 1], ordered=True) + + res = Categorical(cat, dtype="category") + assert res.dtype.ordered + + def test_categorical_disallows_scalar(self): + # GH#38433 + with pytest.raises(TypeError, match="Categorical input must be list-like"): + Categorical("A", categories=["A", "B"]) + + def test_categorical_1d_only(self): + # ndim > 1 + msg = "> 1 ndim Categorical are not supported at this time" + with pytest.raises(NotImplementedError, match=msg): + Categorical(np.array([list("abcd")])) + + def test_validate_ordered(self): + # see gh-14058 + exp_msg = "'ordered' must either be 'True' or 'False'" + exp_err = TypeError + + # This should be a boolean. 
+ ordered = np.array([0, 1, 2]) + + with pytest.raises(exp_err, match=exp_msg): + Categorical([1, 2, 3], ordered=ordered) + + with pytest.raises(exp_err, match=exp_msg): + Categorical.from_codes( + [0, 0, 1], categories=["a", "b", "c"], ordered=ordered + ) + + def test_constructor_empty(self): + # GH 17248 + c = Categorical([]) + expected = Index([]) + tm.assert_index_equal(c.categories, expected) + + c = Categorical([], categories=[1, 2, 3]) + expected = Index([1, 2, 3], dtype=np.int64) + tm.assert_index_equal(c.categories, expected) + + def test_constructor_empty_boolean(self): + # see gh-22702 + cat = Categorical([], categories=[True, False]) + categories = sorted(cat.categories.tolist()) + assert categories == [False, True] + + def test_constructor_tuples(self): + values = np.array([(1,), (1, 2), (1,), (1, 2)], dtype=object) + result = Categorical(values) + expected = Index([(1,), (1, 2)], tupleize_cols=False) + tm.assert_index_equal(result.categories, expected) + assert result.ordered is False + + def test_constructor_tuples_datetimes(self): + # numpy will auto reshape when all of the tuples are the + # same len, so add an extra one with 2 items and slice it off + values = np.array( + [ + (Timestamp("2010-01-01"),), + (Timestamp("2010-01-02"),), + (Timestamp("2010-01-01"),), + (Timestamp("2010-01-02"),), + ("a", "b"), + ], + dtype=object, + )[:-1] + result = Categorical(values) + expected = Index( + [(Timestamp("2010-01-01"),), (Timestamp("2010-01-02"),)], + tupleize_cols=False, + ) + tm.assert_index_equal(result.categories, expected) + + def test_constructor_unsortable(self): + # it works! + arr = np.array([1, 2, 3, datetime.now()], dtype="O") + factor = Categorical(arr, ordered=False) + assert not factor.ordered + + # this however will raise as cannot be sorted + msg = ( + "'values' is not ordered, please explicitly specify the " + "categories order by passing in a categories argument." 
+ ) + with pytest.raises(TypeError, match=msg): + Categorical(arr, ordered=True) + + def test_constructor_interval(self): + result = Categorical( + [Interval(1, 2), Interval(2, 3), Interval(3, 6)], ordered=True + ) + ii = IntervalIndex([Interval(1, 2), Interval(2, 3), Interval(3, 6)]) + exp = Categorical(ii, ordered=True) + tm.assert_categorical_equal(result, exp) + tm.assert_index_equal(result.categories, ii) + + def test_constructor(self): + exp_arr = np.array(["a", "b", "c", "a", "b", "c"], dtype=np.object_) + c1 = Categorical(exp_arr) + tm.assert_numpy_array_equal(c1.__array__(), exp_arr) + c2 = Categorical(exp_arr, categories=["a", "b", "c"]) + tm.assert_numpy_array_equal(c2.__array__(), exp_arr) + c2 = Categorical(exp_arr, categories=["c", "b", "a"]) + tm.assert_numpy_array_equal(c2.__array__(), exp_arr) + + # categories must be unique + msg = "Categorical categories must be unique" + with pytest.raises(ValueError, match=msg): + Categorical([1, 2], [1, 2, 2]) + + with pytest.raises(ValueError, match=msg): + Categorical(["a", "b"], ["a", "b", "b"]) + + # The default should be unordered + c1 = Categorical(["a", "b", "c", "a"]) + assert not c1.ordered + + # Categorical as input + c1 = Categorical(["a", "b", "c", "a"]) + c2 = Categorical(c1) + tm.assert_categorical_equal(c1, c2) + + c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + c2 = Categorical(c1) + tm.assert_categorical_equal(c1, c2) + + c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"]) + c2 = Categorical(c1) + tm.assert_categorical_equal(c1, c2) + + c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"]) + c2 = Categorical(c1, categories=["a", "b", "c"]) + tm.assert_numpy_array_equal(c1.__array__(), c2.__array__()) + tm.assert_index_equal(c2.categories, Index(["a", "b", "c"])) + + # Series of dtype category + c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + c2 = Categorical(Series(c1)) + tm.assert_categorical_equal(c1, c2) + + c1 = Categorical(["a", "b", "c", "a"], categories=["a", "c", "b"]) + c2 = Categorical(Series(c1)) + tm.assert_categorical_equal(c1, c2) + + # Series + c1 = Categorical(["a", "b", "c", "a"]) + c2 = Categorical(Series(["a", "b", "c", "a"])) + tm.assert_categorical_equal(c1, c2) + + c1 = Categorical(["a", "b", "c", "a"], categories=["a", "b", "c", "d"]) + c2 = Categorical(Series(["a", "b", "c", "a"]), categories=["a", "b", "c", "d"]) + tm.assert_categorical_equal(c1, c2) + + # This should result in integer categories, not float! + cat = Categorical([1, 2, 3, np.nan], categories=[1, 2, 3]) + assert is_integer_dtype(cat.categories) + + # https://github.com/pandas-dev/pandas/issues/3678 + cat = Categorical([np.nan, 1, 2, 3]) + assert is_integer_dtype(cat.categories) + + # this should result in floats + cat = Categorical([np.nan, 1, 2.0, 3]) + assert is_float_dtype(cat.categories) + + cat = Categorical([np.nan, 1.0, 2.0, 3.0]) + assert is_float_dtype(cat.categories) + + # This doesn't work -> this would probably need some kind of "remember + # the original type" feature to try to cast the array interface result + # to... 
+ + # vals = np.asarray(cat[cat.notna()]) + # assert is_integer_dtype(vals) + + # corner cases + cat = Categorical([1]) + assert len(cat.categories) == 1 + assert cat.categories[0] == 1 + assert len(cat.codes) == 1 + assert cat.codes[0] == 0 + + cat = Categorical(["a"]) + assert len(cat.categories) == 1 + assert cat.categories[0] == "a" + assert len(cat.codes) == 1 + assert cat.codes[0] == 0 + + # two arrays + # - when the first is an integer dtype and the second is not + # - when the resulting codes are all -1/NaN + with tm.assert_produces_warning(None): + Categorical([0, 1, 2, 0, 1, 2], categories=["a", "b", "c"]) + + with tm.assert_produces_warning(None): + Categorical([0, 1, 2, 0, 1, 2], categories=[3, 4, 5]) + + # the next one are from the old docs + with tm.assert_produces_warning(None): + Categorical([0, 1, 2, 0, 1, 2], [1, 2, 3]) + cat = Categorical([1, 2], categories=[1, 2, 3]) + + # this is a legitimate constructor + with tm.assert_produces_warning(None): + Categorical(np.array([], dtype="int64"), categories=[3, 2, 1], ordered=True) + + def test_constructor_with_existing_categories(self): + # GH25318: constructing with pd.Series used to bogusly skip recoding + # categories + c0 = Categorical(["a", "b", "c", "a"]) + c1 = Categorical(["a", "b", "c", "a"], categories=["b", "c"]) + + c2 = Categorical(c0, categories=c1.categories) + tm.assert_categorical_equal(c1, c2) + + c3 = Categorical(Series(c0), categories=c1.categories) + tm.assert_categorical_equal(c1, c3) + + def test_constructor_not_sequence(self): + # https://github.com/pandas-dev/pandas/issues/16022 + msg = r"^Parameter 'categories' must be list-like, was" + with pytest.raises(TypeError, match=msg): + Categorical(["a", "b"], categories="a") + + def test_constructor_with_null(self): + # Cannot have NaN in categories + msg = "Categorical categories cannot be null" + with pytest.raises(ValueError, match=msg): + Categorical([np.nan, "a", "b", "c"], categories=[np.nan, "a", "b", "c"]) + + with pytest.raises(ValueError, match=msg): + Categorical([None, "a", "b", "c"], categories=[None, "a", "b", "c"]) + + with pytest.raises(ValueError, match=msg): + Categorical( + DatetimeIndex(["nat", "20160101"]), + categories=[NaT, Timestamp("20160101")], + ) + + def test_constructor_with_index(self): + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + tm.assert_categorical_equal(ci.values, Categorical(ci)) + + ci = CategoricalIndex(list("aabbca"), categories=list("cab")) + tm.assert_categorical_equal( + ci.values, Categorical(ci.astype(object), categories=ci.categories) + ) + + def test_constructor_with_generator(self): + # This was raising an Error in isna(single_val).any() because isna + # returned a scalar for a generator + + exp = Categorical([0, 1, 2]) + cat = Categorical(x for x in [0, 1, 2]) + tm.assert_categorical_equal(cat, exp) + cat = Categorical(range(3)) + tm.assert_categorical_equal(cat, exp) + + MultiIndex.from_product([range(5), ["a", "b", "c"]]) + + # check that categories accept generators and sequences + cat = Categorical([0, 1, 2], categories=(x for x in [0, 1, 2])) + tm.assert_categorical_equal(cat, exp) + cat = Categorical([0, 1, 2], categories=range(3)) + tm.assert_categorical_equal(cat, exp) + + def test_constructor_with_rangeindex(self): + # RangeIndex is preserved in Categories + rng = Index(range(3)) + + cat = Categorical(rng) + tm.assert_index_equal(cat.categories, rng, exact=True) + + cat = Categorical([1, 2, 0], categories=rng) + tm.assert_index_equal(cat.categories, rng, exact=True) + + 
@pytest.mark.parametrize( + "dtl", + [ + date_range("1995-01-01 00:00:00", periods=5, freq="s"), + date_range("1995-01-01 00:00:00", periods=5, freq="s", tz="US/Eastern"), + timedelta_range("1 day", periods=5, freq="s"), + ], + ) + def test_constructor_with_datetimelike(self, dtl): + # see gh-12077 + # constructor with a datetimelike and NaT + + s = Series(dtl) + c = Categorical(s) + + expected = type(dtl)(s) + expected._data.freq = None + + tm.assert_index_equal(c.categories, expected) + tm.assert_numpy_array_equal(c.codes, np.arange(5, dtype="int8")) + + # with NaT + s2 = s.copy() + s2.iloc[-1] = NaT + c = Categorical(s2) + + expected = type(dtl)(s2.dropna()) + expected._data.freq = None + + tm.assert_index_equal(c.categories, expected) + + exp = np.array([0, 1, 2, 3, -1], dtype=np.int8) + tm.assert_numpy_array_equal(c.codes, exp) + + result = repr(c) + assert "NaT" in result + + def test_constructor_from_index_series_datetimetz(self): + idx = date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern") + idx = idx._with_freq(None) # freq not preserved in result.categories + result = Categorical(idx) + tm.assert_index_equal(result.categories, idx) + + result = Categorical(Series(idx)) + tm.assert_index_equal(result.categories, idx) + + def test_constructor_date_objects(self): + # we dont cast date objects to timestamps, matching Index constructor + v = date.today() + + cat = Categorical([v, v]) + assert cat.categories.dtype == object + assert type(cat.categories[0]) is date + + def test_constructor_from_index_series_timedelta(self): + idx = timedelta_range("1 days", freq="D", periods=3) + idx = idx._with_freq(None) # freq not preserved in result.categories + result = Categorical(idx) + tm.assert_index_equal(result.categories, idx) + + result = Categorical(Series(idx)) + tm.assert_index_equal(result.categories, idx) + + def test_constructor_from_index_series_period(self): + idx = period_range("2015-01-01", freq="D", periods=3) + result = Categorical(idx) + tm.assert_index_equal(result.categories, idx) + + result = Categorical(Series(idx)) + tm.assert_index_equal(result.categories, idx) + + @pytest.mark.parametrize( + "values", + [ + np.array([1.0, 1.2, 1.8, np.nan]), + np.array([1, 2, 3], dtype="int64"), + ["a", "b", "c", np.nan], + [pd.Period("2014-01"), pd.Period("2014-02"), NaT], + [Timestamp("2014-01-01"), Timestamp("2014-01-02"), NaT], + [ + Timestamp("2014-01-01", tz="US/Eastern"), + Timestamp("2014-01-02", tz="US/Eastern"), + NaT, + ], + ], + ) + def test_constructor_invariant(self, values): + # GH 14190 + c = Categorical(values) + c2 = Categorical(c) + tm.assert_categorical_equal(c, c2) + + @pytest.mark.parametrize("ordered", [True, False]) + def test_constructor_with_dtype(self, ordered): + categories = ["b", "a", "c"] + dtype = CategoricalDtype(categories, ordered=ordered) + result = Categorical(["a", "b", "a", "c"], dtype=dtype) + expected = Categorical( + ["a", "b", "a", "c"], categories=categories, ordered=ordered + ) + tm.assert_categorical_equal(result, expected) + assert result.ordered is ordered + + def test_constructor_dtype_and_others_raises(self): + dtype = CategoricalDtype(["a", "b"], ordered=True) + msg = "Cannot specify `categories` or `ordered` together with `dtype`." 
+ with pytest.raises(ValueError, match=msg): + Categorical(["a", "b"], categories=["a", "b"], dtype=dtype) + + with pytest.raises(ValueError, match=msg): + Categorical(["a", "b"], ordered=True, dtype=dtype) + + with pytest.raises(ValueError, match=msg): + Categorical(["a", "b"], ordered=False, dtype=dtype) + + @pytest.mark.parametrize("categories", [None, ["a", "b"], ["a", "c"]]) + @pytest.mark.parametrize("ordered", [True, False]) + def test_constructor_str_category(self, categories, ordered): + result = Categorical( + ["a", "b"], categories=categories, ordered=ordered, dtype="category" + ) + expected = Categorical(["a", "b"], categories=categories, ordered=ordered) + tm.assert_categorical_equal(result, expected) + + def test_constructor_str_unknown(self): + with pytest.raises(ValueError, match="Unknown dtype"): + Categorical([1, 2], dtype="foo") + + @pytest.mark.xfail(using_pyarrow_string_dtype(), reason="Can't be NumPy strings") + def test_constructor_np_strs(self): + # GH#31499 Hashtable.map_locations needs to work on np.str_ objects + cat = Categorical(["1", "0", "1"], [np.str_("0"), np.str_("1")]) + assert all(isinstance(x, np.str_) for x in cat.categories) + + def test_constructor_from_categorical_with_dtype(self): + dtype = CategoricalDtype(["a", "b", "c"], ordered=True) + values = Categorical(["a", "b", "d"]) + result = Categorical(values, dtype=dtype) + # We use dtype.categories, not values.categories + expected = Categorical( + ["a", "b", "d"], categories=["a", "b", "c"], ordered=True + ) + tm.assert_categorical_equal(result, expected) + + def test_constructor_from_categorical_with_unknown_dtype(self): + dtype = CategoricalDtype(None, ordered=True) + values = Categorical(["a", "b", "d"]) + result = Categorical(values, dtype=dtype) + # We use values.categories, not dtype.categories + expected = Categorical( + ["a", "b", "d"], categories=["a", "b", "d"], ordered=True + ) + tm.assert_categorical_equal(result, expected) + + def test_constructor_from_categorical_string(self): + values = Categorical(["a", "b", "d"]) + # use categories, ordered + result = Categorical( + values, categories=["a", "b", "c"], ordered=True, dtype="category" + ) + expected = Categorical( + ["a", "b", "d"], categories=["a", "b", "c"], ordered=True + ) + tm.assert_categorical_equal(result, expected) + + # No string + result = Categorical(values, categories=["a", "b", "c"], ordered=True) + tm.assert_categorical_equal(result, expected) + + def test_constructor_with_categorical_categories(self): + # GH17884 + expected = Categorical(["a", "b"], categories=["a", "b", "c"]) + + result = Categorical(["a", "b"], categories=Categorical(["a", "b", "c"])) + tm.assert_categorical_equal(result, expected) + + result = Categorical(["a", "b"], categories=CategoricalIndex(["a", "b", "c"])) + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("klass", [lambda x: np.array(x, dtype=object), list]) + def test_construction_with_null(self, klass, nulls_fixture): + # https://github.com/pandas-dev/pandas/issues/31927 + values = klass(["a", nulls_fixture, "b"]) + result = Categorical(values) + + dtype = CategoricalDtype(["a", "b"]) + codes = [0, -1, 1] + expected = Categorical.from_codes(codes=codes, dtype=dtype) + + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("validate", [True, False]) + def test_from_codes_nullable_int_categories(self, any_numeric_ea_dtype, validate): + # GH#39649 + cats = pd.array(range(5), dtype=any_numeric_ea_dtype) + codes = 
np.random.default_rng(2).integers(5, size=3) + dtype = CategoricalDtype(cats) + arr = Categorical.from_codes(codes, dtype=dtype, validate=validate) + assert arr.categories.dtype == cats.dtype + tm.assert_index_equal(arr.categories, Index(cats)) + + def test_from_codes_empty(self): + cat = ["a", "b", "c"] + result = Categorical.from_codes([], categories=cat) + expected = Categorical([], categories=cat) + + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("validate", [True, False]) + def test_from_codes_validate(self, validate): + # GH53122 + dtype = CategoricalDtype(["a", "b"]) + if validate: + with pytest.raises(ValueError, match="codes need to be between "): + Categorical.from_codes([4, 5], dtype=dtype, validate=validate) + else: + # passes, though has incorrect codes, but that's the user responsibility + Categorical.from_codes([4, 5], dtype=dtype, validate=validate) + + def test_from_codes_too_few_categories(self): + dtype = CategoricalDtype(categories=[1, 2]) + msg = "codes need to be between " + with pytest.raises(ValueError, match=msg): + Categorical.from_codes([1, 2], categories=dtype.categories) + with pytest.raises(ValueError, match=msg): + Categorical.from_codes([1, 2], dtype=dtype) + + def test_from_codes_non_int_codes(self): + dtype = CategoricalDtype(categories=[1, 2]) + msg = "codes need to be array-like integers" + with pytest.raises(ValueError, match=msg): + Categorical.from_codes(["a"], categories=dtype.categories) + with pytest.raises(ValueError, match=msg): + Categorical.from_codes(["a"], dtype=dtype) + + def test_from_codes_non_unique_categories(self): + with pytest.raises(ValueError, match="Categorical categories must be unique"): + Categorical.from_codes([0, 1, 2], categories=["a", "a", "b"]) + + def test_from_codes_nan_cat_included(self): + with pytest.raises(ValueError, match="Categorical categories cannot be null"): + Categorical.from_codes([0, 1, 2], categories=["a", "b", np.nan]) + + def test_from_codes_too_negative(self): + dtype = CategoricalDtype(categories=["a", "b", "c"]) + msg = r"codes need to be between -1 and len\(categories\)-1" + with pytest.raises(ValueError, match=msg): + Categorical.from_codes([-2, 1, 2], categories=dtype.categories) + with pytest.raises(ValueError, match=msg): + Categorical.from_codes([-2, 1, 2], dtype=dtype) + + def test_from_codes(self): + dtype = CategoricalDtype(categories=["a", "b", "c"]) + exp = Categorical(["a", "b", "c"], ordered=False) + res = Categorical.from_codes([0, 1, 2], categories=dtype.categories) + tm.assert_categorical_equal(exp, res) + + res = Categorical.from_codes([0, 1, 2], dtype=dtype) + tm.assert_categorical_equal(exp, res) + + @pytest.mark.parametrize("klass", [Categorical, CategoricalIndex]) + def test_from_codes_with_categorical_categories(self, klass): + # GH17884 + expected = Categorical(["a", "b"], categories=["a", "b", "c"]) + + result = Categorical.from_codes([0, 1], categories=klass(["a", "b", "c"])) + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("klass", [Categorical, CategoricalIndex]) + def test_from_codes_with_non_unique_categorical_categories(self, klass): + with pytest.raises(ValueError, match="Categorical categories must be unique"): + Categorical.from_codes([0, 1], klass(["a", "b", "a"])) + + def test_from_codes_with_nan_code(self): + # GH21767 + codes = [1, 2, np.nan] + dtype = CategoricalDtype(categories=["a", "b", "c"]) + with pytest.raises(ValueError, match="codes need to be array-like integers"): + Categorical.from_codes(codes, 
categories=dtype.categories) + with pytest.raises(ValueError, match="codes need to be array-like integers"): + Categorical.from_codes(codes, dtype=dtype) + + @pytest.mark.parametrize("codes", [[1.0, 2.0, 0], [1.1, 2.0, 0]]) + def test_from_codes_with_float(self, codes): + # GH21767 + # float codes should raise even if values are equal to integers + dtype = CategoricalDtype(categories=["a", "b", "c"]) + + msg = "codes need to be array-like integers" + with pytest.raises(ValueError, match=msg): + Categorical.from_codes(codes, dtype.categories) + with pytest.raises(ValueError, match=msg): + Categorical.from_codes(codes, dtype=dtype) + + def test_from_codes_with_dtype_raises(self): + msg = "Cannot specify" + with pytest.raises(ValueError, match=msg): + Categorical.from_codes( + [0, 1], categories=["a", "b"], dtype=CategoricalDtype(["a", "b"]) + ) + + with pytest.raises(ValueError, match=msg): + Categorical.from_codes( + [0, 1], ordered=True, dtype=CategoricalDtype(["a", "b"]) + ) + + def test_from_codes_neither(self): + msg = "Both were None" + with pytest.raises(ValueError, match=msg): + Categorical.from_codes([0, 1]) + + def test_from_codes_with_nullable_int(self): + codes = pd.array([0, 1], dtype="Int64") + categories = ["a", "b"] + + result = Categorical.from_codes(codes, categories=categories) + expected = Categorical.from_codes(codes.to_numpy(int), categories=categories) + + tm.assert_categorical_equal(result, expected) + + def test_from_codes_with_nullable_int_na_raises(self): + codes = pd.array([0, None], dtype="Int64") + categories = ["a", "b"] + + msg = "codes cannot contain NA values" + with pytest.raises(ValueError, match=msg): + Categorical.from_codes(codes, categories=categories) + + @pytest.mark.parametrize("dtype", [None, "category"]) + def test_from_inferred_categories(self, dtype): + cats = ["a", "b"] + codes = np.array([0, 0, 1, 1], dtype="i8") + result = Categorical._from_inferred_categories(cats, codes, dtype) + expected = Categorical.from_codes(codes, cats) + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("dtype", [None, "category"]) + def test_from_inferred_categories_sorts(self, dtype): + cats = ["b", "a"] + codes = np.array([0, 1, 1, 1], dtype="i8") + result = Categorical._from_inferred_categories(cats, codes, dtype) + expected = Categorical.from_codes([1, 0, 0, 0], ["a", "b"]) + tm.assert_categorical_equal(result, expected) + + def test_from_inferred_categories_dtype(self): + cats = ["a", "b", "d"] + codes = np.array([0, 1, 0, 2], dtype="i8") + dtype = CategoricalDtype(["c", "b", "a"], ordered=True) + result = Categorical._from_inferred_categories(cats, codes, dtype) + expected = Categorical( + ["a", "b", "a", "d"], categories=["c", "b", "a"], ordered=True + ) + tm.assert_categorical_equal(result, expected) + + def test_from_inferred_categories_coerces(self): + cats = ["1", "2", "bad"] + codes = np.array([0, 0, 1, 2], dtype="i8") + dtype = CategoricalDtype([1, 2]) + result = Categorical._from_inferred_categories(cats, codes, dtype) + expected = Categorical([1, 1, 2, np.nan]) + tm.assert_categorical_equal(result, expected) + + @pytest.mark.parametrize("ordered", [None, True, False]) + def test_construction_with_ordered(self, ordered): + # GH 9347, 9190 + cat = Categorical([0, 1, 2], ordered=ordered) + assert cat.ordered == bool(ordered) + + def test_constructor_imaginary(self): + values = [1, 2, 3 + 1j] + c1 = Categorical(values) + tm.assert_index_equal(c1.categories, Index(values)) + tm.assert_numpy_array_equal(np.array(c1), 
np.array(values)) + + def test_constructor_string_and_tuples(self): + # GH 21416 + c = Categorical(np.array(["c", ("a", "b"), ("b", "a"), "c"], dtype=object)) + expected_index = Index([("a", "b"), ("b", "a"), "c"]) + assert c.categories.equals(expected_index) + + def test_interval(self): + idx = pd.interval_range(0, 10, periods=10) + cat = Categorical(idx, categories=idx) + expected_codes = np.arange(10, dtype="int8") + tm.assert_numpy_array_equal(cat.codes, expected_codes) + tm.assert_index_equal(cat.categories, idx) + + # infer categories + cat = Categorical(idx) + tm.assert_numpy_array_equal(cat.codes, expected_codes) + tm.assert_index_equal(cat.categories, idx) + + # list values + cat = Categorical(list(idx)) + tm.assert_numpy_array_equal(cat.codes, expected_codes) + tm.assert_index_equal(cat.categories, idx) + + # list values, categories + cat = Categorical(list(idx), categories=list(idx)) + tm.assert_numpy_array_equal(cat.codes, expected_codes) + tm.assert_index_equal(cat.categories, idx) + + # shuffled + values = idx.take([1, 2, 0]) + cat = Categorical(values, categories=idx) + tm.assert_numpy_array_equal(cat.codes, np.array([1, 2, 0], dtype="int8")) + tm.assert_index_equal(cat.categories, idx) + + # extra + values = pd.interval_range(8, 11, periods=3) + cat = Categorical(values, categories=idx) + expected_codes = np.array([8, 9, -1], dtype="int8") + tm.assert_numpy_array_equal(cat.codes, expected_codes) + tm.assert_index_equal(cat.categories, idx) + + # overlapping + idx = IntervalIndex([Interval(0, 2), Interval(0, 1)]) + cat = Categorical(idx, categories=idx) + expected_codes = np.array([0, 1], dtype="int8") + tm.assert_numpy_array_equal(cat.codes, expected_codes) + tm.assert_index_equal(cat.categories, idx) + + def test_categorical_extension_array_nullable(self, nulls_fixture): + # GH: + arr = pd.arrays.StringArray._from_sequence( + [nulls_fixture] * 2, dtype=pd.StringDtype() + ) + result = Categorical(arr) + assert arr.dtype == result.categories.dtype + expected = Categorical(Series([pd.NA, pd.NA], dtype=arr.dtype)) + tm.assert_categorical_equal(result, expected) + + def test_from_sequence_copy(self): + cat = Categorical(np.arange(5).repeat(2)) + result = Categorical._from_sequence(cat, dtype=cat.dtype, copy=False) + + # more generally, we'd be OK with a view + assert result._codes is cat._codes + + result = Categorical._from_sequence(cat, dtype=cat.dtype, copy=True) + + assert not tm.shares_memory(result, cat) + + def test_constructor_datetime64_non_nano(self): + categories = np.arange(10).view("M8[D]") + values = categories[::2].copy() + + cat = Categorical(values, categories=categories) + assert (cat == values).all() + + def test_constructor_preserves_freq(self): + # GH33830 freq retention in categorical + dti = date_range("2016-01-01", periods=5) + + expected = dti.freq + + cat = Categorical(dti) + result = cat.categories.freq + + assert expected == result diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_dtypes.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_dtypes.py new file mode 100644 index 0000000000000000000000000000000000000000..525663cad1745880bc5e683e7302afdc2c06a527 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_dtypes.py @@ -0,0 +1,139 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import CategoricalDtype + +from pandas import ( + Categorical, + CategoricalIndex, + Index, + IntervalIndex, + Series, + Timestamp, +) +import 
pandas._testing as tm + + +class TestCategoricalDtypes: + def test_categories_match_up_to_permutation(self): + # test dtype comparisons between cats + + c1 = Categorical(list("aabca"), categories=list("abc"), ordered=False) + c2 = Categorical(list("aabca"), categories=list("cab"), ordered=False) + c3 = Categorical(list("aabca"), categories=list("cab"), ordered=True) + assert c1._categories_match_up_to_permutation(c1) + assert c2._categories_match_up_to_permutation(c2) + assert c3._categories_match_up_to_permutation(c3) + assert c1._categories_match_up_to_permutation(c2) + assert not c1._categories_match_up_to_permutation(c3) + assert not c1._categories_match_up_to_permutation(Index(list("aabca"))) + assert not c1._categories_match_up_to_permutation(c1.astype(object)) + assert c1._categories_match_up_to_permutation(CategoricalIndex(c1)) + assert c1._categories_match_up_to_permutation( + CategoricalIndex(c1, categories=list("cab")) + ) + assert not c1._categories_match_up_to_permutation( + CategoricalIndex(c1, ordered=True) + ) + + # GH 16659 + s1 = Series(c1) + s2 = Series(c2) + s3 = Series(c3) + assert c1._categories_match_up_to_permutation(s1) + assert c2._categories_match_up_to_permutation(s2) + assert c3._categories_match_up_to_permutation(s3) + assert c1._categories_match_up_to_permutation(s2) + assert not c1._categories_match_up_to_permutation(s3) + assert not c1._categories_match_up_to_permutation(s1.astype(object)) + + def test_set_dtype_same(self): + c = Categorical(["a", "b", "c"]) + result = c._set_dtype(CategoricalDtype(["a", "b", "c"])) + tm.assert_categorical_equal(result, c) + + def test_set_dtype_new_categories(self): + c = Categorical(["a", "b", "c"]) + result = c._set_dtype(CategoricalDtype(list("abcd"))) + tm.assert_numpy_array_equal(result.codes, c.codes) + tm.assert_index_equal(result.dtype.categories, Index(list("abcd"))) + + @pytest.mark.parametrize( + "values, categories, new_categories", + [ + # No NaNs, same cats, same order + (["a", "b", "a"], ["a", "b"], ["a", "b"]), + # No NaNs, same cats, different order + (["a", "b", "a"], ["a", "b"], ["b", "a"]), + # Same, unsorted + (["b", "a", "a"], ["a", "b"], ["a", "b"]), + # No NaNs, same cats, different order + (["b", "a", "a"], ["a", "b"], ["b", "a"]), + # NaNs + (["a", "b", "c"], ["a", "b"], ["a", "b"]), + (["a", "b", "c"], ["a", "b"], ["b", "a"]), + (["b", "a", "c"], ["a", "b"], ["a", "b"]), + (["b", "a", "c"], ["a", "b"], ["a", "b"]), + # Introduce NaNs + (["a", "b", "c"], ["a", "b"], ["a"]), + (["a", "b", "c"], ["a", "b"], ["b"]), + (["b", "a", "c"], ["a", "b"], ["a"]), + (["b", "a", "c"], ["a", "b"], ["a"]), + # No overlap + (["a", "b", "c"], ["a", "b"], ["d", "e"]), + ], + ) + @pytest.mark.parametrize("ordered", [True, False]) + def test_set_dtype_many(self, values, categories, new_categories, ordered): + c = Categorical(values, categories) + expected = Categorical(values, new_categories, ordered) + result = c._set_dtype(expected.dtype) + tm.assert_categorical_equal(result, expected) + + def test_set_dtype_no_overlap(self): + c = Categorical(["a", "b", "c"], ["d", "e"]) + result = c._set_dtype(CategoricalDtype(["a", "b"])) + expected = Categorical([None, None, None], categories=["a", "b"]) + tm.assert_categorical_equal(result, expected) + + def test_codes_dtypes(self): + # GH 8453 + result = Categorical(["foo", "bar", "baz"]) + assert result.codes.dtype == "int8" + + result = Categorical([f"foo{i:05d}" for i in range(400)]) + assert result.codes.dtype == "int16" + + result = Categorical([f"foo{i:05d}" for i in 
range(40000)]) + assert result.codes.dtype == "int32" + + # adding cats + result = Categorical(["foo", "bar", "baz"]) + assert result.codes.dtype == "int8" + result = result.add_categories([f"foo{i:05d}" for i in range(400)]) + assert result.codes.dtype == "int16" + + # removing cats + result = result.remove_categories([f"foo{i:05d}" for i in range(300)]) + assert result.codes.dtype == "int8" + + def test_iter_python_types(self): + # GH-19909 + cat = Categorical([1, 2]) + assert isinstance(next(iter(cat)), int) + assert isinstance(cat.tolist()[0], int) + + def test_iter_python_types_datetime(self): + cat = Categorical([Timestamp("2017-01-01"), Timestamp("2017-01-02")]) + assert isinstance(next(iter(cat)), Timestamp) + assert isinstance(cat.tolist()[0], Timestamp) + + def test_interval_index_category(self): + # GH 38316 + index = IntervalIndex.from_breaks(np.arange(3, dtype="uint64")) + + result = CategoricalIndex(index).dtype.categories + expected = IntervalIndex.from_arrays( + [0, 1], [1, 2], dtype="interval[uint64, right]" + ) + tm.assert_index_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_indexing.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..5e1c5c64fa660f501d2b9d77c9181f47e013267f --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_indexing.py @@ -0,0 +1,388 @@ +import math + +import numpy as np +import pytest + +from pandas import ( + NA, + Categorical, + CategoricalIndex, + Index, + Interval, + IntervalIndex, + NaT, + PeriodIndex, + Series, + Timedelta, + Timestamp, +) +import pandas._testing as tm +import pandas.core.common as com + + +class TestCategoricalIndexingWithFactor: + def test_getitem(self): + factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True) + assert factor[0] == "a" + assert factor[-1] == "c" + + subf = factor[[0, 1, 2]] + tm.assert_numpy_array_equal(subf._codes, np.array([0, 1, 1], dtype=np.int8)) + + subf = factor[np.asarray(factor) == "c"] + tm.assert_numpy_array_equal(subf._codes, np.array([2, 2, 2], dtype=np.int8)) + + def test_setitem(self): + factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True) + # int/positional + c = factor.copy() + c[0] = "b" + assert c[0] == "b" + c[-1] = "a" + assert c[-1] == "a" + + # boolean + c = factor.copy() + indexer = np.zeros(len(c), dtype="bool") + indexer[0] = True + indexer[-1] = True + c[indexer] = "c" + expected = Categorical(["c", "b", "b", "a", "a", "c", "c", "c"], ordered=True) + + tm.assert_categorical_equal(c, expected) + + @pytest.mark.parametrize( + "other", + [Categorical(["b", "a"]), Categorical(["b", "a"], categories=["b", "a"])], + ) + def test_setitem_same_but_unordered(self, other): + # GH-24142 + target = Categorical(["a", "b"], categories=["a", "b"]) + mask = np.array([True, False]) + target[mask] = other[mask] + expected = Categorical(["b", "b"], categories=["a", "b"]) + tm.assert_categorical_equal(target, expected) + + @pytest.mark.parametrize( + "other", + [ + Categorical(["b", "a"], categories=["b", "a", "c"]), + Categorical(["b", "a"], categories=["a", "b", "c"]), + Categorical(["a", "a"], categories=["a"]), + Categorical(["b", "b"], categories=["b"]), + ], + ) + def test_setitem_different_unordered_raises(self, other): + # GH-24142 + target = Categorical(["a", "b"], categories=["a", "b"]) + mask = np.array([True, False]) + msg = "Cannot set a 
Categorical with another, without identical categories" + with pytest.raises(TypeError, match=msg): + target[mask] = other[mask] + + @pytest.mark.parametrize( + "other", + [ + Categorical(["b", "a"]), + Categorical(["b", "a"], categories=["b", "a"], ordered=True), + Categorical(["b", "a"], categories=["a", "b", "c"], ordered=True), + ], + ) + def test_setitem_same_ordered_raises(self, other): + # Gh-24142 + target = Categorical(["a", "b"], categories=["a", "b"], ordered=True) + mask = np.array([True, False]) + msg = "Cannot set a Categorical with another, without identical categories" + with pytest.raises(TypeError, match=msg): + target[mask] = other[mask] + + def test_setitem_tuple(self): + # GH#20439 + cat = Categorical([(0, 1), (0, 2), (0, 1)]) + + # This should not raise + cat[1] = cat[0] + assert cat[1] == (0, 1) + + def test_setitem_listlike(self): + # GH#9469 + # properly coerce the input indexers + + cat = Categorical( + np.random.default_rng(2).integers(0, 5, size=150000).astype(np.int8) + ).add_categories([-1000]) + indexer = np.array([100000]).astype(np.int64) + cat[indexer] = -1000 + + # we are asserting the code result here + # which maps to the -1000 category + result = cat.codes[np.array([100000]).astype(np.int64)] + tm.assert_numpy_array_equal(result, np.array([5], dtype="int8")) + + +class TestCategoricalIndexing: + def test_getitem_slice(self): + cat = Categorical(["a", "b", "c", "d", "a", "b", "c"]) + sliced = cat[3] + assert sliced == "d" + + sliced = cat[3:5] + expected = Categorical(["d", "a"], categories=["a", "b", "c", "d"]) + tm.assert_categorical_equal(sliced, expected) + + def test_getitem_listlike(self): + # GH 9469 + # properly coerce the input indexers + + c = Categorical( + np.random.default_rng(2).integers(0, 5, size=150000).astype(np.int8) + ) + result = c.codes[np.array([100000]).astype(np.int64)] + expected = c[np.array([100000]).astype(np.int64)].codes + tm.assert_numpy_array_equal(result, expected) + + def test_periodindex(self): + idx1 = PeriodIndex( + ["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"], + freq="M", + ) + + cat1 = Categorical(idx1) + str(cat1) + exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.int8) + exp_idx = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M") + tm.assert_numpy_array_equal(cat1._codes, exp_arr) + tm.assert_index_equal(cat1.categories, exp_idx) + + idx2 = PeriodIndex( + ["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"], + freq="M", + ) + cat2 = Categorical(idx2, ordered=True) + str(cat2) + exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.int8) + exp_idx2 = PeriodIndex(["2014-01", "2014-02", "2014-03"], freq="M") + tm.assert_numpy_array_equal(cat2._codes, exp_arr) + tm.assert_index_equal(cat2.categories, exp_idx2) + + idx3 = PeriodIndex( + [ + "2013-12", + "2013-11", + "2013-10", + "2013-09", + "2013-08", + "2013-07", + "2013-05", + ], + freq="M", + ) + cat3 = Categorical(idx3, ordered=True) + exp_arr = np.array([6, 5, 4, 3, 2, 1, 0], dtype=np.int8) + exp_idx = PeriodIndex( + [ + "2013-05", + "2013-07", + "2013-08", + "2013-09", + "2013-10", + "2013-11", + "2013-12", + ], + freq="M", + ) + tm.assert_numpy_array_equal(cat3._codes, exp_arr) + tm.assert_index_equal(cat3.categories, exp_idx) + + @pytest.mark.parametrize( + "null_val", + [None, np.nan, NaT, NA, math.nan, "NaT", "nat", "NAT", "nan", "NaN", "NAN"], + ) + def test_periodindex_on_null_types(self, null_val): + # GH 46673 + result = PeriodIndex(["2022-04-06", "2022-04-07", null_val], freq="D") + expected = 
PeriodIndex(["2022-04-06", "2022-04-07", "NaT"], dtype="period[D]") + assert result[2] is NaT + tm.assert_index_equal(result, expected) + + @pytest.mark.parametrize("new_categories", [[1, 2, 3, 4], [1, 2]]) + def test_categories_assignments_wrong_length_raises(self, new_categories): + cat = Categorical(["a", "b", "c", "a"]) + msg = ( + "new categories need to have the same number of items " + "as the old categories!" + ) + with pytest.raises(ValueError, match=msg): + cat.rename_categories(new_categories) + + # Combinations of sorted/unique: + @pytest.mark.parametrize( + "idx_values", [[1, 2, 3, 4], [1, 3, 2, 4], [1, 3, 3, 4], [1, 2, 2, 4]] + ) + # Combinations of missing/unique + @pytest.mark.parametrize("key_values", [[1, 2], [1, 5], [1, 1], [5, 5]]) + @pytest.mark.parametrize("key_class", [Categorical, CategoricalIndex]) + @pytest.mark.parametrize("dtype", [None, "category", "key"]) + def test_get_indexer_non_unique(self, idx_values, key_values, key_class, dtype): + # GH 21448 + key = key_class(key_values, categories=range(1, 5)) + + if dtype == "key": + dtype = key.dtype + + # Test for flat index and CategoricalIndex with same/different cats: + idx = Index(idx_values, dtype=dtype) + expected, exp_miss = idx.get_indexer_non_unique(key_values) + result, res_miss = idx.get_indexer_non_unique(key) + + tm.assert_numpy_array_equal(expected, result) + tm.assert_numpy_array_equal(exp_miss, res_miss) + + exp_unique = idx.unique().get_indexer(key_values) + res_unique = idx.unique().get_indexer(key) + tm.assert_numpy_array_equal(res_unique, exp_unique) + + def test_where_unobserved_nan(self): + ser = Series(Categorical(["a", "b"])) + result = ser.where([True, False]) + expected = Series(Categorical(["a", None], categories=["a", "b"])) + tm.assert_series_equal(result, expected) + + # all NA + ser = Series(Categorical(["a", "b"])) + result = ser.where([False, False]) + expected = Series(Categorical([None, None], categories=["a", "b"])) + tm.assert_series_equal(result, expected) + + def test_where_unobserved_categories(self): + ser = Series(Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"])) + result = ser.where([True, True, False], other="b") + expected = Series(Categorical(["a", "b", "b"], categories=ser.cat.categories)) + tm.assert_series_equal(result, expected) + + def test_where_other_categorical(self): + ser = Series(Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"])) + other = Categorical(["b", "c", "a"], categories=["a", "c", "b", "d"]) + result = ser.where([True, False, True], other) + expected = Series(Categorical(["a", "c", "c"], dtype=ser.dtype)) + tm.assert_series_equal(result, expected) + + def test_where_new_category_raises(self): + ser = Series(Categorical(["a", "b", "c"])) + msg = "Cannot setitem on a Categorical with a new category" + with pytest.raises(TypeError, match=msg): + ser.where([True, False, True], "d") + + def test_where_ordered_differs_rasies(self): + ser = Series( + Categorical(["a", "b", "c"], categories=["d", "c", "b", "a"], ordered=True) + ) + other = Categorical( + ["b", "c", "a"], categories=["a", "c", "b", "d"], ordered=True + ) + with pytest.raises(TypeError, match="without identical categories"): + ser.where([True, False, True], other) + + +class TestContains: + def test_contains(self): + # GH#21508 + cat = Categorical(list("aabbca"), categories=list("cab")) + + assert "b" in cat + assert "z" not in cat + assert np.nan not in cat + with pytest.raises(TypeError, match="unhashable type: 'list'"): + assert [1] in cat + + # assert codes NOT in 
index + assert 0 not in cat + assert 1 not in cat + + cat = Categorical(list("aabbca") + [np.nan], categories=list("cab")) + assert np.nan in cat + + @pytest.mark.parametrize( + "item, expected", + [ + (Interval(0, 1), True), + (1.5, True), + (Interval(0.5, 1.5), False), + ("a", False), + (Timestamp(1), False), + (Timedelta(1), False), + ], + ids=str, + ) + def test_contains_interval(self, item, expected): + # GH#23705 + cat = Categorical(IntervalIndex.from_breaks(range(3))) + result = item in cat + assert result is expected + + def test_contains_list(self): + # GH#21729 + cat = Categorical([1, 2, 3]) + + assert "a" not in cat + + with pytest.raises(TypeError, match="unhashable type"): + ["a"] in cat + + with pytest.raises(TypeError, match="unhashable type"): + ["a", "b"] in cat + + +@pytest.mark.parametrize("index", [True, False]) +def test_mask_with_boolean(index): + ser = Series(range(3)) + idx = Categorical([True, False, True]) + if index: + idx = CategoricalIndex(idx) + + assert com.is_bool_indexer(idx) + result = ser[idx] + expected = ser[idx.astype("object")] + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("index", [True, False]) +def test_mask_with_boolean_na_treated_as_false(index): + # https://github.com/pandas-dev/pandas/issues/31503 + ser = Series(range(3)) + idx = Categorical([True, False, None]) + if index: + idx = CategoricalIndex(idx) + + result = ser[idx] + expected = ser[idx.fillna(False)] + + tm.assert_series_equal(result, expected) + + +@pytest.fixture +def non_coercible_categorical(monkeypatch): + """ + Monkeypatch Categorical.__array__ to ensure no implicit conversion. + + Raises + ------ + ValueError + When Categorical.__array__ is called. + """ + + # TODO(Categorical): identify other places where this may be + # useful and move to a conftest.py + def array(self, dtype=None): + raise ValueError("I cannot be converted.") + + with monkeypatch.context() as m: + m.setattr(Categorical, "__array__", array) + yield + + +def test_series_at(): + arr = Categorical(["a", "b", "c"]) + ser = Series(arr) + result = ser.at[0] + assert result == "a" diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_map.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_map.py new file mode 100644 index 0000000000000000000000000000000000000000..3d41b7cc7094d237fa8d31501ce90a99b04fe4e6 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_map.py @@ -0,0 +1,154 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Categorical, + Index, + Series, +) +import pandas._testing as tm + + +@pytest.fixture(params=[None, "ignore"]) +def na_action(request): + return request.param + + +@pytest.mark.parametrize( + "data, categories", + [ + (list("abcbca"), list("cab")), + (pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)), + ], + ids=["string", "interval"], +) +def test_map_str(data, categories, ordered, na_action): + # GH 31202 - override base class since we want to maintain categorical/ordered + cat = Categorical(data, categories=categories, ordered=ordered) + result = cat.map(str, na_action=na_action) + expected = Categorical( + map(str, data), categories=map(str, categories), ordered=ordered + ) + tm.assert_categorical_equal(result, expected) + + +def test_map(na_action): + cat = Categorical(list("ABABC"), categories=list("CBA"), ordered=True) + result = cat.map(lambda x: x.lower(), na_action=na_action) + exp = Categorical(list("ababc"), 
categories=list("cba"), ordered=True) + tm.assert_categorical_equal(result, exp) + + cat = Categorical(list("ABABC"), categories=list("BAC"), ordered=False) + result = cat.map(lambda x: x.lower(), na_action=na_action) + exp = Categorical(list("ababc"), categories=list("bac"), ordered=False) + tm.assert_categorical_equal(result, exp) + + # GH 12766: Return an index not an array + result = cat.map(lambda x: 1, na_action=na_action) + exp = Index(np.array([1] * 5, dtype=np.int64)) + tm.assert_index_equal(result, exp) + + # change categories dtype + cat = Categorical(list("ABABC"), categories=list("BAC"), ordered=False) + + def f(x): + return {"A": 10, "B": 20, "C": 30}.get(x) + + result = cat.map(f, na_action=na_action) + exp = Categorical([10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False) + tm.assert_categorical_equal(result, exp) + + mapper = Series([10, 20, 30], index=["A", "B", "C"]) + result = cat.map(mapper, na_action=na_action) + tm.assert_categorical_equal(result, exp) + + result = cat.map({"A": 10, "B": 20, "C": 30}, na_action=na_action) + tm.assert_categorical_equal(result, exp) + + +@pytest.mark.parametrize( + ("data", "f", "expected"), + ( + ([1, 1, np.nan], pd.isna, Index([False, False, True])), + ([1, 2, np.nan], pd.isna, Index([False, False, True])), + ([1, 1, np.nan], {1: False}, Categorical([False, False, np.nan])), + ([1, 2, np.nan], {1: False, 2: False}, Index([False, False, np.nan])), + ( + [1, 1, np.nan], + Series([False, False]), + Categorical([False, False, np.nan]), + ), + ( + [1, 2, np.nan], + Series([False] * 3), + Index([False, False, np.nan]), + ), + ), +) +def test_map_with_nan_none(data, f, expected): # GH 24241 + values = Categorical(data) + result = values.map(f, na_action=None) + if isinstance(expected, Categorical): + tm.assert_categorical_equal(result, expected) + else: + tm.assert_index_equal(result, expected) + + +@pytest.mark.parametrize( + ("data", "f", "expected"), + ( + ([1, 1, np.nan], pd.isna, Categorical([False, False, np.nan])), + ([1, 2, np.nan], pd.isna, Index([False, False, np.nan])), + ([1, 1, np.nan], {1: False}, Categorical([False, False, np.nan])), + ([1, 2, np.nan], {1: False, 2: False}, Index([False, False, np.nan])), + ( + [1, 1, np.nan], + Series([False, False]), + Categorical([False, False, np.nan]), + ), + ( + [1, 2, np.nan], + Series([False, False, False]), + Index([False, False, np.nan]), + ), + ), +) +def test_map_with_nan_ignore(data, f, expected): # GH 24241 + values = Categorical(data) + result = values.map(f, na_action="ignore") + if data[1] == 1: + tm.assert_categorical_equal(result, expected) + else: + tm.assert_index_equal(result, expected) + + +def test_map_with_dict_or_series(na_action): + orig_values = ["a", "B", 1, "a"] + new_values = ["one", 2, 3.0, "one"] + cat = Categorical(orig_values) + + mapper = Series(new_values[:-1], index=orig_values[:-1]) + result = cat.map(mapper, na_action=na_action) + + # Order of categories in result can be different + expected = Categorical(new_values, categories=[3.0, 2, "one"]) + tm.assert_categorical_equal(result, expected) + + mapper = dict(zip(orig_values[:-1], new_values[:-1])) + result = cat.map(mapper, na_action=na_action) + # Order of categories in result can be different + tm.assert_categorical_equal(result, expected) + + +def test_map_na_action_no_default_deprecated(): + # GH51645 + cat = Categorical(["a", "b", "c"]) + msg = ( + "The default value of 'ignore' for the `na_action` parameter in " + "pandas.Categorical.map is deprecated and will be " + "changed to 'None' 
in a future version. Please set na_action to the " + "desired value to avoid seeing this warning" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + cat.map(lambda x: x) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_missing.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_missing.py new file mode 100644 index 0000000000000000000000000000000000000000..0eeb01b74608890daf81fef083adb29e797e57ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_missing.py @@ -0,0 +1,216 @@ +import collections + +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import CategoricalDtype + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Index, + Series, + isna, +) +import pandas._testing as tm + + +class TestCategoricalMissing: + def test_isna(self): + exp = np.array([False, False, True]) + cat = Categorical(["a", "b", np.nan]) + res = cat.isna() + + tm.assert_numpy_array_equal(res, exp) + + def test_na_flags_int_categories(self): + # #1457 + + categories = list(range(10)) + labels = np.random.default_rng(2).integers(0, 10, 20) + labels[::5] = -1 + + cat = Categorical(labels, categories) + repr(cat) + + tm.assert_numpy_array_equal(isna(cat), labels == -1) + + def test_nan_handling(self): + # Nans are represented as -1 in codes + c = Categorical(["a", "b", np.nan, "a"]) + tm.assert_index_equal(c.categories, Index(["a", "b"])) + tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8)) + c[1] = np.nan + tm.assert_index_equal(c.categories, Index(["a", "b"])) + tm.assert_numpy_array_equal(c._codes, np.array([0, -1, -1, 0], dtype=np.int8)) + + # Adding nan to categories should make assigned nan point to the + # category! + c = Categorical(["a", "b", np.nan, "a"]) + tm.assert_index_equal(c.categories, Index(["a", "b"])) + tm.assert_numpy_array_equal(c._codes, np.array([0, 1, -1, 0], dtype=np.int8)) + + def test_set_dtype_nans(self): + c = Categorical(["a", "b", np.nan]) + result = c._set_dtype(CategoricalDtype(["a", "c"])) + tm.assert_numpy_array_equal(result.codes, np.array([0, -1, -1], dtype="int8")) + + def test_set_item_nan(self): + cat = Categorical([1, 2, 3]) + cat[1] = np.nan + + exp = Categorical([1, np.nan, 3], categories=[1, 2, 3]) + tm.assert_categorical_equal(cat, exp) + + @pytest.mark.parametrize( + "fillna_kwargs, msg", + [ + ( + {"value": 1, "method": "ffill"}, + "Cannot specify both 'value' and 'method'.", + ), + ({}, "Must specify a fill 'value' or 'method'."), + ({"method": "bad"}, "Invalid fill method. 
Expecting .* bad"), + ( + {"value": Series([1, 2, 3, 4, "a"])}, + "Cannot setitem on a Categorical with a new category", + ), + ], + ) + def test_fillna_raises(self, fillna_kwargs, msg): + # https://github.com/pandas-dev/pandas/issues/19682 + # https://github.com/pandas-dev/pandas/issues/13628 + cat = Categorical([1, 2, 3, None, None]) + + if len(fillna_kwargs) == 1 and "value" in fillna_kwargs: + err = TypeError + else: + err = ValueError + + with pytest.raises(err, match=msg): + cat.fillna(**fillna_kwargs) + + @pytest.mark.parametrize("named", [True, False]) + def test_fillna_iterable_category(self, named): + # https://github.com/pandas-dev/pandas/issues/21097 + if named: + Point = collections.namedtuple("Point", "x y") + else: + Point = lambda *args: args # tuple + cat = Categorical(np.array([Point(0, 0), Point(0, 1), None], dtype=object)) + result = cat.fillna(Point(0, 0)) + expected = Categorical([Point(0, 0), Point(0, 1), Point(0, 0)]) + + tm.assert_categorical_equal(result, expected) + + # Case where the Point is not among our categories; we want ValueError, + # not NotImplementedError GH#41914 + cat = Categorical(np.array([Point(1, 0), Point(0, 1), None], dtype=object)) + msg = "Cannot setitem on a Categorical with a new category" + with pytest.raises(TypeError, match=msg): + cat.fillna(Point(0, 0)) + + def test_fillna_array(self): + # accept Categorical or ndarray value if it holds appropriate values + cat = Categorical(["A", "B", "C", None, None]) + + other = cat.fillna("C") + result = cat.fillna(other) + tm.assert_categorical_equal(result, other) + assert isna(cat[-1]) # didn't modify original inplace + + other = np.array(["A", "B", "C", "B", "A"]) + result = cat.fillna(other) + expected = Categorical(["A", "B", "C", "B", "A"], dtype=cat.dtype) + tm.assert_categorical_equal(result, expected) + assert isna(cat[-1]) # didn't modify original inplace + + @pytest.mark.parametrize( + "values, expected", + [ + ([1, 2, 3], np.array([False, False, False])), + ([1, 2, np.nan], np.array([False, False, True])), + ([1, 2, np.inf], np.array([False, False, True])), + ([1, 2, pd.NA], np.array([False, False, True])), + ], + ) + def test_use_inf_as_na(self, values, expected): + # https://github.com/pandas-dev/pandas/issues/33594 + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.use_inf_as_na", True): + cat = Categorical(values) + result = cat.isna() + tm.assert_numpy_array_equal(result, expected) + + result = Series(cat).isna() + expected = Series(expected) + tm.assert_series_equal(result, expected) + + result = DataFrame(cat).isna() + expected = DataFrame(expected) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "values, expected", + [ + ([1, 2, 3], np.array([False, False, False])), + ([1, 2, np.nan], np.array([False, False, True])), + ([1, 2, np.inf], np.array([False, False, True])), + ([1, 2, pd.NA], np.array([False, False, True])), + ], + ) + def test_use_inf_as_na_outside_context(self, values, expected): + # https://github.com/pandas-dev/pandas/issues/33594 + # Using isna directly for Categorical will fail in general here + cat = Categorical(values) + + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.use_inf_as_na", True): + result = isna(cat) + tm.assert_numpy_array_equal(result, expected) + + result = isna(Series(cat)) + expected = Series(expected) + tm.assert_series_equal(result, expected) + 
+ result = isna(DataFrame(cat)) + expected = DataFrame(expected) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "a1, a2, categories", + [ + (["a", "b", "c"], [np.nan, "a", "b"], ["a", "b", "c"]), + ([1, 2, 3], [np.nan, 1, 2], [1, 2, 3]), + ], + ) + def test_compare_categorical_with_missing(self, a1, a2, categories): + # GH 28384 + cat_type = CategoricalDtype(categories) + + # != + result = Series(a1, dtype=cat_type) != Series(a2, dtype=cat_type) + expected = Series(a1) != Series(a2) + tm.assert_series_equal(result, expected) + + # == + result = Series(a1, dtype=cat_type) == Series(a2, dtype=cat_type) + expected = Series(a1) == Series(a2) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "na_value, dtype", + [ + (pd.NaT, "datetime64[ns]"), + (None, "float64"), + (np.nan, "float64"), + (pd.NA, "float64"), + ], + ) + def test_categorical_only_missing_values_no_cast(self, na_value, dtype): + # GH#44900 + result = Categorical([na_value, na_value]) + tm.assert_index_equal(result.categories, Index([], dtype=dtype)) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_operators.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_operators.py new file mode 100644 index 0000000000000000000000000000000000000000..4174d2adc810b872e7ec0b1e3ca820e3d2c3920d --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_operators.py @@ -0,0 +1,414 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Categorical, + DataFrame, + Series, + Timestamp, + date_range, +) +import pandas._testing as tm + + +class TestCategoricalOpsWithFactor: + def test_categories_none_comparisons(self): + factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True) + tm.assert_categorical_equal(factor, factor) + + def test_comparisons(self): + factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True) + result = factor[factor == "a"] + expected = factor[np.asarray(factor) == "a"] + tm.assert_categorical_equal(result, expected) + + result = factor[factor != "a"] + expected = factor[np.asarray(factor) != "a"] + tm.assert_categorical_equal(result, expected) + + result = factor[factor < "c"] + expected = factor[np.asarray(factor) < "c"] + tm.assert_categorical_equal(result, expected) + + result = factor[factor > "a"] + expected = factor[np.asarray(factor) > "a"] + tm.assert_categorical_equal(result, expected) + + result = factor[factor >= "b"] + expected = factor[np.asarray(factor) >= "b"] + tm.assert_categorical_equal(result, expected) + + result = factor[factor <= "b"] + expected = factor[np.asarray(factor) <= "b"] + tm.assert_categorical_equal(result, expected) + + n = len(factor) + + other = factor[np.random.default_rng(2).permutation(n)] + result = factor == other + expected = np.asarray(factor) == np.asarray(other) + tm.assert_numpy_array_equal(result, expected) + + result = factor == "d" + expected = np.zeros(len(factor), dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + # comparisons with categoricals + cat_rev = Categorical(["a", "b", "c"], categories=["c", "b", "a"], ordered=True) + cat_rev_base = Categorical( + ["b", "b", "b"], categories=["c", "b", "a"], ordered=True + ) + cat = Categorical(["a", "b", "c"], ordered=True) + cat_base = Categorical(["b", "b", "b"], categories=cat.categories, ordered=True) + + # comparisons need to take categories ordering into account + res_rev = cat_rev > cat_rev_base + exp_rev = 
np.array([True, False, False]) + tm.assert_numpy_array_equal(res_rev, exp_rev) + + res_rev = cat_rev < cat_rev_base + exp_rev = np.array([False, False, True]) + tm.assert_numpy_array_equal(res_rev, exp_rev) + + res = cat > cat_base + exp = np.array([False, False, True]) + tm.assert_numpy_array_equal(res, exp) + + # Only categories with same categories can be compared + msg = "Categoricals can only be compared if 'categories' are the same" + with pytest.raises(TypeError, match=msg): + cat > cat_rev + + cat_rev_base2 = Categorical(["b", "b", "b"], categories=["c", "b", "a", "d"]) + + with pytest.raises(TypeError, match=msg): + cat_rev > cat_rev_base2 + + # Only categories with same ordering information can be compared + cat_unordered = cat.set_ordered(False) + assert not (cat > cat).any() + + with pytest.raises(TypeError, match=msg): + cat > cat_unordered + + # comparison (in both directions) with Series will raise + s = Series(["b", "b", "b"], dtype=object) + msg = ( + "Cannot compare a Categorical for op __gt__ with type " + r"<class 'numpy\.ndarray'>" + ) + with pytest.raises(TypeError, match=msg): + cat > s + with pytest.raises(TypeError, match=msg): + cat_rev > s + with pytest.raises(TypeError, match=msg): + s < cat + with pytest.raises(TypeError, match=msg): + s < cat_rev + + # comparison with numpy.array will raise in both direction, but only on + # newer numpy versions + a = np.array(["b", "b", "b"], dtype=object) + with pytest.raises(TypeError, match=msg): + cat > a + with pytest.raises(TypeError, match=msg): + cat_rev > a + + # Make sure that unequal comparison take the categories order in + # account + cat_rev = Categorical(list("abc"), categories=list("cba"), ordered=True) + exp = np.array([True, False, False]) + res = cat_rev > "b" + tm.assert_numpy_array_equal(res, exp) + + # check that zero-dim array gets unboxed + res = cat_rev > np.array("b") + tm.assert_numpy_array_equal(res, exp) + + +class TestCategoricalOps: + @pytest.mark.parametrize( + "categories", + [["a", "b"], [0, 1], [Timestamp("2019"), Timestamp("2020")]], + ) + def test_not_equal_with_na(self, categories): + # https://github.com/pandas-dev/pandas/issues/32276 + c1 = Categorical.from_codes([-1, 0], categories=categories) + c2 = Categorical.from_codes([0, 1], categories=categories) + + result = c1 != c2 + + assert result.all() + + def test_compare_frame(self): + # GH#24282 check that Categorical.__cmp__(DataFrame) defers to frame + data = ["a", "b", 2, "a"] + cat = Categorical(data) + + df = DataFrame(cat) + + result = cat == df.T + expected = DataFrame([[True, True, True, True]]) + tm.assert_frame_equal(result, expected) + + result = cat[::-1] != df.T + expected = DataFrame([[False, True, True, False]]) + tm.assert_frame_equal(result, expected) + + def test_compare_frame_raises(self, comparison_op): + # alignment raises unless we transpose + op = comparison_op + cat = Categorical(["a", "b", 2, "a"]) + df = DataFrame(cat) + msg = "Unable to coerce to Series, length must be 1: given 4" + with pytest.raises(ValueError, match=msg): + op(cat, df) + + def test_datetime_categorical_comparison(self): + dt_cat = Categorical(date_range("2014-01-01", periods=3), ordered=True) + tm.assert_numpy_array_equal(dt_cat > dt_cat[0], np.array([False, True, True])) + tm.assert_numpy_array_equal(dt_cat[0] < dt_cat, np.array([False, True, True])) + + def test_reflected_comparison_with_scalars(self): + # GH8658 + cat = Categorical([1, 2, 3], ordered=True) + tm.assert_numpy_array_equal(cat > cat[0], np.array([False, True, True])) +
tm.assert_numpy_array_equal(cat[0] < cat, np.array([False, True, True])) + + def test_comparison_with_unknown_scalars(self): + # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057 + # and following comparisons with scalars not in categories should raise + # for unequal comps, but not for equal/not equal + cat = Categorical([1, 2, 3], ordered=True) + + msg = "Invalid comparison between dtype=category and int" + with pytest.raises(TypeError, match=msg): + cat < 4 + with pytest.raises(TypeError, match=msg): + cat > 4 + with pytest.raises(TypeError, match=msg): + 4 < cat + with pytest.raises(TypeError, match=msg): + 4 > cat + + tm.assert_numpy_array_equal(cat == 4, np.array([False, False, False])) + tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True])) + + def test_comparison_with_tuple(self): + cat = Categorical(np.array(["foo", (0, 1), 3, (0, 1)], dtype=object)) + + result = cat == "foo" + expected = np.array([True, False, False, False], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + result = cat == (0, 1) + expected = np.array([False, True, False, True], dtype=bool) + tm.assert_numpy_array_equal(result, expected) + + result = cat != (0, 1) + tm.assert_numpy_array_equal(result, ~expected) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_comparison_of_ordered_categorical_with_nan_to_scalar( + self, compare_operators_no_eq_ne + ): + # https://github.com/pandas-dev/pandas/issues/26504 + # BUG: fix ordered categorical comparison with missing values (#26504 ) + # and following comparisons with scalars in categories with missing + # values should be evaluated as False + + cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True) + scalar = 2 + expected = getattr(np.array(cat), compare_operators_no_eq_ne)(scalar) + actual = getattr(cat, compare_operators_no_eq_ne)(scalar) + tm.assert_numpy_array_equal(actual, expected) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_comparison_of_ordered_categorical_with_nan_to_listlike( + self, compare_operators_no_eq_ne + ): + # https://github.com/pandas-dev/pandas/issues/26504 + # and following comparisons of missing values in ordered Categorical + # with listlike should be evaluated as False + + cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True) + other = Categorical([2, 2, 2, 2], categories=[1, 2, 3], ordered=True) + expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2) + actual = getattr(cat, compare_operators_no_eq_ne)(other) + tm.assert_numpy_array_equal(actual, expected) + + @pytest.mark.parametrize( + "data,reverse,base", + [(list("abc"), list("cba"), list("bbb")), ([1, 2, 3], [3, 2, 1], [2, 2, 2])], + ) + def test_comparisons(self, data, reverse, base): + cat_rev = Series(Categorical(data, categories=reverse, ordered=True)) + cat_rev_base = Series(Categorical(base, categories=reverse, ordered=True)) + cat = Series(Categorical(data, ordered=True)) + cat_base = Series( + Categorical(base, categories=cat.cat.categories, ordered=True) + ) + s = Series(base, dtype=object if base == list("bbb") else None) + a = np.array(base) + + # comparisons need to take categories ordering into account + res_rev = cat_rev > cat_rev_base + exp_rev = Series([True, False, False]) + tm.assert_series_equal(res_rev, exp_rev) + + res_rev = cat_rev < cat_rev_base + exp_rev = Series([False, False, True]) + tm.assert_series_equal(res_rev, exp_rev) + + res = cat > cat_base + exp = Series([False, False, True]) + tm.assert_series_equal(res, exp) + + 
scalar = base[1] + res = cat > scalar + exp = Series([False, False, True]) + exp2 = cat.values > scalar + tm.assert_series_equal(res, exp) + tm.assert_numpy_array_equal(res.values, exp2) + res_rev = cat_rev > scalar + exp_rev = Series([True, False, False]) + exp_rev2 = cat_rev.values > scalar + tm.assert_series_equal(res_rev, exp_rev) + tm.assert_numpy_array_equal(res_rev.values, exp_rev2) + + # Only categories with same categories can be compared + msg = "Categoricals can only be compared if 'categories' are the same" + with pytest.raises(TypeError, match=msg): + cat > cat_rev + + # categorical cannot be compared to Series or numpy array, and also + # not the other way around + msg = ( + "Cannot compare a Categorical for op __gt__ with type " + r"<class 'numpy\.ndarray'>" + ) + with pytest.raises(TypeError, match=msg): + cat > s + with pytest.raises(TypeError, match=msg): + cat_rev > s + with pytest.raises(TypeError, match=msg): + cat > a + with pytest.raises(TypeError, match=msg): + cat_rev > a + + with pytest.raises(TypeError, match=msg): + s < cat + with pytest.raises(TypeError, match=msg): + s < cat_rev + + with pytest.raises(TypeError, match=msg): + a < cat + with pytest.raises(TypeError, match=msg): + a < cat_rev + + @pytest.mark.parametrize( + "ctor", + [ + lambda *args, **kwargs: Categorical(*args, **kwargs), + lambda *args, **kwargs: Series(Categorical(*args, **kwargs)), + ], + ) + def test_unordered_different_order_equal(self, ctor): + # https://github.com/pandas-dev/pandas/issues/16014 + c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False) + c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False) + assert (c1 == c2).all() + + c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False) + c2 = ctor(["b", "a"], categories=["b", "a"], ordered=False) + assert (c1 != c2).all() + + c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False) + c2 = ctor(["b", "b"], categories=["b", "a"], ordered=False) + assert (c1 != c2).all() + + c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False) + c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False) + result = c1 == c2 + tm.assert_numpy_array_equal(np.array(result), np.array([True, False])) + + def test_unordered_different_categories_raises(self): + c1 = Categorical(["a", "b"], categories=["a", "b"], ordered=False) + c2 = Categorical(["a", "c"], categories=["c", "a"], ordered=False) + + with pytest.raises(TypeError, match=("Categoricals can only be compared")): + c1 == c2 + + def test_compare_different_lengths(self): + c1 = Categorical([], categories=["a", "b"]) + c2 = Categorical([], categories=["a"]) + + msg = "Categoricals can only be compared if 'categories' are the same."
+ with pytest.raises(TypeError, match=msg): + c1 == c2 + + def test_compare_unordered_different_order(self): + # https://github.com/pandas-dev/pandas/issues/16603#issuecomment- + # 349290078 + a = Categorical(["a"], categories=["a", "b"]) + b = Categorical(["b"], categories=["b", "a"]) + assert not a.equals(b) + + def test_numeric_like_ops(self): + df = DataFrame({"value": np.random.default_rng(2).integers(0, 10000, 100)}) + labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)] + cat_labels = Categorical(labels, labels) + + df = df.sort_values(by=["value"], ascending=True) + df["value_group"] = pd.cut( + df.value, range(0, 10500, 500), right=False, labels=cat_labels + ) + + # numeric ops should not succeed + for op, str_rep in [ + ("__add__", r"\+"), + ("__sub__", "-"), + ("__mul__", r"\*"), + ("__truediv__", "/"), + ]: + msg = f"Series cannot perform the operation {str_rep}|unsupported operand" + with pytest.raises(TypeError, match=msg): + getattr(df, op)(df) + + # reduction ops should not succeed (unless specifically defined, e.g. + # min/max) + s = df["value_group"] + for op in ["kurt", "skew", "var", "std", "mean", "sum", "median"]: + msg = f"does not support reduction '{op}'" + with pytest.raises(TypeError, match=msg): + getattr(s, op)(numeric_only=False) + + def test_numeric_like_ops_series(self): + # numpy ops + s = Series(Categorical([1, 2, 3, 4])) + with pytest.raises(TypeError, match="does not support reduction 'sum'"): + np.sum(s) + + @pytest.mark.parametrize( + "op, str_rep", + [ + ("__add__", r"\+"), + ("__sub__", "-"), + ("__mul__", r"\*"), + ("__truediv__", "/"), + ], + ) + def test_numeric_like_ops_series_arith(self, op, str_rep): + # numeric ops on a Series + s = Series(Categorical([1, 2, 3, 4])) + msg = f"Series cannot perform the operation {str_rep}|unsupported operand" + with pytest.raises(TypeError, match=msg): + getattr(s, op)(2) + + def test_numeric_like_ops_series_invalid(self): + # invalid ufunc + s = Series(Categorical([1, 2, 3, 4])) + msg = "Object with dtype category cannot perform the numpy op log" + with pytest.raises(TypeError, match=msg): + np.log(s) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_replace.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_replace.py new file mode 100644 index 0000000000000000000000000000000000000000..3c677142846d73f7cfd08c6681ff0d7814b55bd1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_replace.py @@ -0,0 +1,111 @@ +import pytest + +import pandas as pd +from pandas import Categorical +import pandas._testing as tm + + +@pytest.mark.parametrize( + "to_replace,value,expected,flip_categories", + [ + # one-to-one + (1, 2, [2, 2, 3], False), + (1, 4, [4, 2, 3], False), + (4, 1, [1, 2, 3], False), + (5, 6, [1, 2, 3], False), + # many-to-one + ([1], 2, [2, 2, 3], False), + ([1, 2], 3, [3, 3, 3], False), + ([1, 2], 4, [4, 4, 3], False), + ((1, 2, 4), 5, [5, 5, 3], False), + ((5, 6), 2, [1, 2, 3], False), + ([1], [2], [2, 2, 3], False), + ([1, 4], [5, 2], [5, 2, 3], False), + # GH49404: overlap between to_replace and value + ([1, 2, 3], [2, 3, 4], [2, 3, 4], False), + # GH50872, GH46884: replace with null + (1, None, [None, 2, 3], False), + (1, pd.NA, [None, 2, 3], False), + # check_categorical sorts categories, which crashes on mixed dtypes + (3, "4", [1, 2, "4"], False), + ([1, 2, "3"], "5", ["5", "5", 3], True), + ], +) +@pytest.mark.filterwarnings( + "ignore:.*with CategoricalDtype is deprecated:FutureWarning" +) +def 
test_replace_categorical_series(to_replace, value, expected, flip_categories): + # GH 31720 + + ser = pd.Series([1, 2, 3], dtype="category") + result = ser.replace(to_replace, value) + expected = pd.Series(expected, dtype="category") + ser.replace(to_replace, value, inplace=True) + + if flip_categories: + expected = expected.cat.set_categories(expected.cat.categories[::-1]) + + tm.assert_series_equal(expected, result, check_category_order=False) + tm.assert_series_equal(expected, ser, check_category_order=False) + + +@pytest.mark.parametrize( + "to_replace, value, result, expected_error_msg", + [ + ("b", "c", ["a", "c"], "Categorical.categories are different"), + ("c", "d", ["a", "b"], None), + # https://github.com/pandas-dev/pandas/issues/33288 + ("a", "a", ["a", "b"], None), + ("b", None, ["a", None], "Categorical.categories length are different"), + ], +) +def test_replace_categorical(to_replace, value, result, expected_error_msg): + # GH#26988 + cat = Categorical(["a", "b"]) + expected = Categorical(result) + msg = ( + r"The behavior of Series\.replace \(and DataFrame.replace\) " + "with CategoricalDtype" + ) + warn = FutureWarning if expected_error_msg is not None else None + with tm.assert_produces_warning(warn, match=msg): + result = pd.Series(cat, copy=False).replace(to_replace, value)._values + + tm.assert_categorical_equal(result, expected) + if to_replace == "b": # the "c" test is supposed to be unchanged + with pytest.raises(AssertionError, match=expected_error_msg): + # ensure non-inplace call does not affect original + tm.assert_categorical_equal(cat, expected) + + ser = pd.Series(cat, copy=False) + with tm.assert_produces_warning(warn, match=msg): + ser.replace(to_replace, value, inplace=True) + tm.assert_categorical_equal(cat, expected) + + +def test_replace_categorical_ea_dtype(): + # GH49404 + cat = Categorical(pd.array(["a", "b"], dtype="string")) + msg = ( + r"The behavior of Series\.replace \(and DataFrame.replace\) " + "with CategoricalDtype" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = pd.Series(cat).replace(["a", "b"], ["c", pd.NA])._values + expected = Categorical(pd.array(["c", pd.NA], dtype="string")) + tm.assert_categorical_equal(result, expected) + + +def test_replace_maintain_ordering(): + # GH51016 + dtype = pd.CategoricalDtype([0, 1, 2], ordered=True) + ser = pd.Series([0, 1, 2], dtype=dtype) + msg = ( + r"The behavior of Series\.replace \(and DataFrame.replace\) " + "with CategoricalDtype" + ) + with tm.assert_produces_warning(FutureWarning, match=msg): + result = ser.replace(0, 2) + expected_dtype = pd.CategoricalDtype([1, 2], ordered=True) + expected = pd.Series([2, 1, 2], dtype=expected_dtype) + tm.assert_series_equal(expected, result, check_category_order=True) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_repr.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_repr.py new file mode 100644 index 0000000000000000000000000000000000000000..ef0315130215cc762e2fad6fc07a97c9f4b94eb8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_repr.py @@ -0,0 +1,550 @@ +import numpy as np +import pytest + +from pandas._config import using_pyarrow_string_dtype + +from pandas import ( + Categorical, + CategoricalDtype, + CategoricalIndex, + Index, + Series, + date_range, + option_context, + period_range, + timedelta_range, +) + + +class TestCategoricalReprWithFactor: + def test_print(self, using_infer_string): + factor = Categorical(["a", 
"b", "b", "a", "a", "c", "c", "c"], ordered=True) + if using_infer_string: + expected = [ + "['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']", + "Categories (3, string): [a < b < c]", + ] + else: + expected = [ + "['a', 'b', 'b', 'a', 'a', 'c', 'c', 'c']", + "Categories (3, object): ['a' < 'b' < 'c']", + ] + expected = "\n".join(expected) + actual = repr(factor) + assert actual == expected + + +class TestCategoricalRepr: + def test_big_print(self): + codes = np.array([0, 1, 2, 0, 1, 2] * 100) + dtype = CategoricalDtype(categories=Index(["a", "b", "c"], dtype=object)) + factor = Categorical.from_codes(codes, dtype=dtype) + expected = [ + "['a', 'b', 'c', 'a', 'b', ..., 'b', 'c', 'a', 'b', 'c']", + "Length: 600", + "Categories (3, object): ['a', 'b', 'c']", + ] + expected = "\n".join(expected) + + actual = repr(factor) + + assert actual == expected + + def test_empty_print(self): + factor = Categorical([], Index(["a", "b", "c"], dtype=object)) + expected = "[], Categories (3, object): ['a', 'b', 'c']" + actual = repr(factor) + assert actual == expected + + assert expected == actual + factor = Categorical([], Index(["a", "b", "c"], dtype=object), ordered=True) + expected = "[], Categories (3, object): ['a' < 'b' < 'c']" + actual = repr(factor) + assert expected == actual + + factor = Categorical([], []) + expected = "[], Categories (0, object): []" + assert expected == repr(factor) + + def test_print_none_width(self): + # GH10087 + a = Series(Categorical([1, 2, 3, 4])) + exp = ( + "0 1\n1 2\n2 3\n3 4\n" + "dtype: category\nCategories (4, int64): [1, 2, 3, 4]" + ) + + with option_context("display.width", None): + assert exp == repr(a) + + @pytest.mark.skipif( + using_pyarrow_string_dtype(), + reason="Change once infer_string is set to True by default", + ) + def test_unicode_print(self): + c = Categorical(["aaaaa", "bb", "cccc"] * 20) + expected = """\ +['aaaaa', 'bb', 'cccc', 'aaaaa', 'bb', ..., 'bb', 'cccc', 'aaaaa', 'bb', 'cccc'] +Length: 60 +Categories (3, object): ['aaaaa', 'bb', 'cccc']""" + + assert repr(c) == expected + + c = Categorical(["ああああ", "いいいいい", "ううううううう"] * 20) + expected = """\ +['ああああ', 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', ..., 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', 'ううううううう'] +Length: 60 +Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa: E501 + + assert repr(c) == expected + + # unicode option should not affect to Categorical, as it doesn't care + # the repr width + with option_context("display.unicode.east_asian_width", True): + c = Categorical(["ああああ", "いいいいい", "ううううううう"] * 20) + expected = """['ああああ', 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', ..., 'いいいいい', 'ううううううう', 'ああああ', 'いいいいい', 'ううううううう'] +Length: 60 +Categories (3, object): ['ああああ', 'いいいいい', 'ううううううう']""" # noqa: E501 + + assert repr(c) == expected + + def test_categorical_repr(self): + c = Categorical([1, 2, 3]) + exp = """[1, 2, 3] +Categories (3, int64): [1, 2, 3]""" + + assert repr(c) == exp + + c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3]) + exp = """[1, 2, 3, 1, 2, 3] +Categories (3, int64): [1, 2, 3]""" + + assert repr(c) == exp + + c = Categorical([1, 2, 3, 4, 5] * 10) + exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5] +Length: 50 +Categories (5, int64): [1, 2, 3, 4, 5]""" + + assert repr(c) == exp + + c = Categorical(np.arange(20, dtype=np.int64)) + exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19] +Length: 20 +Categories (20, int64): [0, 1, 2, 3, ..., 16, 17, 18, 19]""" + + assert repr(c) == exp + + def test_categorical_repr_ordered(self): + c = Categorical([1, 2, 3], ordered=True) + exp = 
"""[1, 2, 3] +Categories (3, int64): [1 < 2 < 3]""" + + assert repr(c) == exp + + c = Categorical([1, 2, 3, 1, 2, 3], categories=[1, 2, 3], ordered=True) + exp = """[1, 2, 3, 1, 2, 3] +Categories (3, int64): [1 < 2 < 3]""" + + assert repr(c) == exp + + c = Categorical([1, 2, 3, 4, 5] * 10, ordered=True) + exp = """[1, 2, 3, 4, 5, ..., 1, 2, 3, 4, 5] +Length: 50 +Categories (5, int64): [1 < 2 < 3 < 4 < 5]""" + + assert repr(c) == exp + + c = Categorical(np.arange(20, dtype=np.int64), ordered=True) + exp = """[0, 1, 2, 3, 4, ..., 15, 16, 17, 18, 19] +Length: 20 +Categories (20, int64): [0 < 1 < 2 < 3 ... 16 < 17 < 18 < 19]""" + + assert repr(c) == exp + + def test_categorical_repr_datetime(self): + idx = date_range("2011-01-01 09:00", freq="h", periods=5) + c = Categorical(idx) + + exp = ( + "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, " + "2011-01-01 12:00:00, 2011-01-01 13:00:00]\n" + "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, " + "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n" + " 2011-01-01 12:00:00, " + "2011-01-01 13:00:00]" + "" + ) + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = ( + "[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, " + "2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, " + "2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, " + "2011-01-01 13:00:00]\n" + "Categories (5, datetime64[ns]): [2011-01-01 09:00:00, " + "2011-01-01 10:00:00, 2011-01-01 11:00:00,\n" + " 2011-01-01 12:00:00, " + "2011-01-01 13:00:00]" + ) + + assert repr(c) == exp + + idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern") + c = Categorical(idx) + exp = ( + "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, " + "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, " + "2011-01-01 13:00:00-05:00]\n" + "Categories (5, datetime64[ns, US/Eastern]): " + "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n" + " " + "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n" + " " + "2011-01-01 13:00:00-05:00]" + ) + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = ( + "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, " + "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, " + "2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, " + "2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, " + "2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00]\n" + "Categories (5, datetime64[ns, US/Eastern]): " + "[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00,\n" + " " + "2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00,\n" + " " + "2011-01-01 13:00:00-05:00]" + ) + + assert repr(c) == exp + + def test_categorical_repr_datetime_ordered(self): + idx = date_range("2011-01-01 09:00", freq="h", periods=5) + c = Categorical(idx, ordered=True) + exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00] +Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < + 2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00, 2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00] +Categories (5, datetime64[ns]): [2011-01-01 09:00:00 < 2011-01-01 10:00:00 < 2011-01-01 11:00:00 < + 
2011-01-01 12:00:00 < 2011-01-01 13:00:00]""" # noqa: E501 + + assert repr(c) == exp + + idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern") + c = Categorical(idx, ordered=True) + exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] +Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < + 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < + 2011-01-01 13:00:00-05:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00, 2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00] +Categories (5, datetime64[ns, US/Eastern]): [2011-01-01 09:00:00-05:00 < 2011-01-01 10:00:00-05:00 < + 2011-01-01 11:00:00-05:00 < 2011-01-01 12:00:00-05:00 < + 2011-01-01 13:00:00-05:00]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_repr_int_with_nan(self): + c = Categorical([1, 2, np.nan]) + c_exp = """[1, 2, NaN]\nCategories (2, int64): [1, 2]""" + assert repr(c) == c_exp + + s = Series([1, 2, np.nan], dtype="object").astype("category") + s_exp = """0 1\n1 2\n2 NaN +dtype: category +Categories (2, int64): [1, 2]""" + assert repr(s) == s_exp + + def test_categorical_repr_period(self): + idx = period_range("2011-01-01 09:00", freq="h", periods=5) + c = Categorical(idx) + exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] +Categories (5, period[h]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] +Categories (5, period[h]): [2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(c) == exp + + idx = period_range("2011-01", freq="M", periods=5) + c = Categorical(idx) + exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05] +Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05] +Categories (5, period[M]): [2011-01, 2011-02, 2011-03, 2011-04, 2011-05]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_repr_period_ordered(self): + idx = period_range("2011-01-01 09:00", freq="h", periods=5) + c = Categorical(idx, ordered=True) + exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] +Categories (5, period[h]): [2011-01-01 09:00 < 2011-01-01 10:00 < 2011-01-01 11:00 < 2011-01-01 12:00 < + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00, 2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00] +Categories (5, period[h]): [2011-01-01 09:00 < 2011-01-01 10:00 
< 2011-01-01 11:00 < 2011-01-01 12:00 < + 2011-01-01 13:00]""" # noqa: E501 + + assert repr(c) == exp + + idx = period_range("2011-01", freq="M", periods=5) + c = Categorical(idx, ordered=True) + exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05] +Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[2011-01, 2011-02, 2011-03, 2011-04, 2011-05, 2011-01, 2011-02, 2011-03, 2011-04, 2011-05] +Categories (5, period[M]): [2011-01 < 2011-02 < 2011-03 < 2011-04 < 2011-05]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_repr_timedelta(self): + idx = timedelta_range("1 days", periods=5) + c = Categorical(idx) + exp = """[1 days, 2 days, 3 days, 4 days, 5 days] +Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days] +Categories (5, timedelta64[ns]): [1 days, 2 days, 3 days, 4 days, 5 days]""" # noqa: E501 + + assert repr(c) == exp + + idx = timedelta_range("1 hours", periods=20) + c = Categorical(idx) + exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] +Length: 20 +Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, + 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00, + 18 days 01:00:00, 19 days 01:00:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx) + exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] +Length: 40 +Categories (20, timedelta64[ns]): [0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, + 3 days 01:00:00, ..., 16 days 01:00:00, 17 days 01:00:00, + 18 days 01:00:00, 19 days 01:00:00]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_repr_timedelta_ordered(self): + idx = timedelta_range("1 days", periods=5) + c = Categorical(idx, ordered=True) + exp = """[1 days, 2 days, 3 days, 4 days, 5 days] +Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[1 days, 2 days, 3 days, 4 days, 5 days, 1 days, 2 days, 3 days, 4 days, 5 days] +Categories (5, timedelta64[ns]): [1 days < 2 days < 3 days < 4 days < 5 days]""" # noqa: E501 + + assert repr(c) == exp + + idx = timedelta_range("1 hours", periods=20) + c = Categorical(idx, ordered=True) + exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] +Length: 20 +Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < + 3 days 01:00:00 ... 
16 days 01:00:00 < 17 days 01:00:00 < + 18 days 01:00:00 < 19 days 01:00:00]""" # noqa: E501 + + assert repr(c) == exp + + c = Categorical(idx.append(idx), categories=idx, ordered=True) + exp = """[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, 4 days 01:00:00, ..., 15 days 01:00:00, 16 days 01:00:00, 17 days 01:00:00, 18 days 01:00:00, 19 days 01:00:00] +Length: 40 +Categories (20, timedelta64[ns]): [0 days 01:00:00 < 1 days 01:00:00 < 2 days 01:00:00 < + 3 days 01:00:00 ... 16 days 01:00:00 < 17 days 01:00:00 < + 18 days 01:00:00 < 19 days 01:00:00]""" # noqa: E501 + + assert repr(c) == exp + + def test_categorical_index_repr(self): + idx = CategoricalIndex(Categorical([1, 2, 3])) + exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False, dtype='category')""" # noqa: E501 + assert repr(idx) == exp + + i = CategoricalIndex(Categorical(np.arange(10, dtype=np.int64))) + exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, ..., 6, 7, 8, 9], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + def test_categorical_index_repr_ordered(self): + i = CategoricalIndex(Categorical([1, 2, 3], ordered=True)) + exp = """CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=True, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + i = CategoricalIndex(Categorical(np.arange(10, dtype=np.int64), ordered=True)) + exp = """CategoricalIndex([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], categories=[0, 1, 2, 3, ..., 6, 7, 8, 9], ordered=True, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + def test_categorical_index_repr_datetime(self): + idx = date_range("2011-01-01 09:00", freq="h", periods=5) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', + '2011-01-01 11:00:00', '2011-01-01 12:00:00', + '2011-01-01 13:00:00'], + categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern") + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', + '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', + '2011-01-01 13:00:00-05:00'], + categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + def test_categorical_index_repr_datetime_ordered(self): + idx = date_range("2011-01-01 09:00", freq="h", periods=5) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['2011-01-01 09:00:00', '2011-01-01 10:00:00', + '2011-01-01 11:00:00', '2011-01-01 12:00:00', + '2011-01-01 13:00:00'], + categories=[2011-01-01 09:00:00, 2011-01-01 10:00:00, 2011-01-01 11:00:00, 2011-01-01 12:00:00, 2011-01-01 13:00:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + idx = date_range("2011-01-01 09:00", freq="h", periods=5, tz="US/Eastern") + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', + '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', + '2011-01-01 13:00:00-05:00'], + categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 
12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + i = CategoricalIndex(Categorical(idx.append(idx), ordered=True)) + exp = """CategoricalIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', + '2011-01-01 11:00:00-05:00', '2011-01-01 12:00:00-05:00', + '2011-01-01 13:00:00-05:00', '2011-01-01 09:00:00-05:00', + '2011-01-01 10:00:00-05:00', '2011-01-01 11:00:00-05:00', + '2011-01-01 12:00:00-05:00', '2011-01-01 13:00:00-05:00'], + categories=[2011-01-01 09:00:00-05:00, 2011-01-01 10:00:00-05:00, 2011-01-01 11:00:00-05:00, 2011-01-01 12:00:00-05:00, 2011-01-01 13:00:00-05:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + def test_categorical_index_repr_period(self): + # test all length + idx = period_range("2011-01-01 09:00", freq="h", periods=1) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00'], categories=[2011-01-01 09:00], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = period_range("2011-01-01 09:00", freq="h", periods=2) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = period_range("2011-01-01 09:00", freq="h", periods=3) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = period_range("2011-01-01 09:00", freq="h", periods=5) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', + '2011-01-01 12:00', '2011-01-01 13:00'], + categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + i = CategoricalIndex(Categorical(idx.append(idx))) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', + '2011-01-01 12:00', '2011-01-01 13:00', '2011-01-01 09:00', + '2011-01-01 10:00', '2011-01-01 11:00', '2011-01-01 12:00', + '2011-01-01 13:00'], + categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + idx = period_range("2011-01", freq="M", periods=5) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['2011-01', '2011-02', '2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + def test_categorical_index_repr_period_ordered(self): + idx = period_range("2011-01-01 09:00", freq="h", periods=5) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00', + '2011-01-01 12:00', '2011-01-01 13:00'], + categories=[2011-01-01 09:00, 2011-01-01 10:00, 2011-01-01 11:00, 2011-01-01 12:00, 2011-01-01 13:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + idx = period_range("2011-01", freq="M", periods=5) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['2011-01', '2011-02', 
'2011-03', '2011-04', '2011-05'], categories=[2011-01, 2011-02, 2011-03, 2011-04, 2011-05], ordered=True, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + def test_categorical_index_repr_timedelta(self): + idx = timedelta_range("1 days", periods=5) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days, 2 days, 3 days, 4 days, 5 days], ordered=False, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = timedelta_range("1 hours", periods=10) + i = CategoricalIndex(Categorical(idx)) + exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00', + '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00', + '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00', + '9 days 01:00:00'], + categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, 8 days 01:00:00, 9 days 01:00:00], ordered=False, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + def test_categorical_index_repr_timedelta_ordered(self): + idx = timedelta_range("1 days", periods=5) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['1 days', '2 days', '3 days', '4 days', '5 days'], categories=[1 days, 2 days, 3 days, 4 days, 5 days], ordered=True, dtype='category')""" # noqa: E501 + assert repr(i) == exp + + idx = timedelta_range("1 hours", periods=10) + i = CategoricalIndex(Categorical(idx, ordered=True)) + exp = """CategoricalIndex(['0 days 01:00:00', '1 days 01:00:00', '2 days 01:00:00', + '3 days 01:00:00', '4 days 01:00:00', '5 days 01:00:00', + '6 days 01:00:00', '7 days 01:00:00', '8 days 01:00:00', + '9 days 01:00:00'], + categories=[0 days 01:00:00, 1 days 01:00:00, 2 days 01:00:00, 3 days 01:00:00, ..., 6 days 01:00:00, 7 days 01:00:00, 8 days 01:00:00, 9 days 01:00:00], ordered=True, dtype='category')""" # noqa: E501 + + assert repr(i) == exp + + def test_categorical_str_repr(self): + # GH 33676 + result = repr(Categorical([1, "2", 3, 4])) + expected = "[1, '2', 3, 4]\nCategories (4, object): [1, 3, 4, '2']" + assert result == expected diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_sorting.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_sorting.py new file mode 100644 index 0000000000000000000000000000000000000000..ae527065b3fb970263609881d217f5c6d2761231 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_sorting.py @@ -0,0 +1,128 @@ +import numpy as np +import pytest + +from pandas import ( + Categorical, + Index, +) +import pandas._testing as tm + + +class TestCategoricalSort: + def test_argsort(self): + c = Categorical([5, 3, 1, 4, 2], ordered=True) + + expected = np.array([2, 4, 1, 3, 0]) + tm.assert_numpy_array_equal( + c.argsort(ascending=True), expected, check_dtype=False + ) + + expected = expected[::-1] + tm.assert_numpy_array_equal( + c.argsort(ascending=False), expected, check_dtype=False + ) + + def test_numpy_argsort(self): + c = Categorical([5, 3, 1, 4, 2], ordered=True) + + expected = np.array([2, 4, 1, 3, 0]) + tm.assert_numpy_array_equal(np.argsort(c), expected, check_dtype=False) + + tm.assert_numpy_array_equal( + np.argsort(c, kind="mergesort"), expected, check_dtype=False + ) + + msg = "the 'axis' parameter is not supported" + with pytest.raises(ValueError, match=msg): + np.argsort(c, axis=0) + + msg = "the 'order' parameter is not supported" + with 
pytest.raises(ValueError, match=msg): + np.argsort(c, order="C") + + def test_sort_values(self): + # unordered cats are sortable + cat = Categorical(["a", "b", "b", "a"], ordered=False) + cat.sort_values() + + cat = Categorical(["a", "c", "b", "d"], ordered=True) + + # sort_values + res = cat.sort_values() + exp = np.array(["a", "b", "c", "d"], dtype=object) + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, cat.categories) + + cat = Categorical( + ["a", "c", "b", "d"], categories=["a", "b", "c", "d"], ordered=True + ) + res = cat.sort_values() + exp = np.array(["a", "b", "c", "d"], dtype=object) + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, cat.categories) + + res = cat.sort_values(ascending=False) + exp = np.array(["d", "c", "b", "a"], dtype=object) + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, cat.categories) + + # sort (inplace order) + cat1 = cat.copy() + orig_codes = cat1._codes + cat1.sort_values(inplace=True) + assert cat1._codes is orig_codes + exp = np.array(["a", "b", "c", "d"], dtype=object) + tm.assert_numpy_array_equal(cat1.__array__(), exp) + tm.assert_index_equal(res.categories, cat.categories) + + # reverse + cat = Categorical(["a", "c", "c", "b", "d"], ordered=True) + res = cat.sort_values(ascending=False) + exp_val = np.array(["d", "c", "c", "b", "a"], dtype=object) + exp_categories = Index(["a", "b", "c", "d"]) + tm.assert_numpy_array_equal(res.__array__(), exp_val) + tm.assert_index_equal(res.categories, exp_categories) + + def test_sort_values_na_position(self): + # see gh-12882 + cat = Categorical([5, 2, np.nan, 2, np.nan], ordered=True) + exp_categories = Index([2, 5]) + + exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) + res = cat.sort_values() # default arguments + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, exp_categories) + + exp = np.array([np.nan, np.nan, 2.0, 2.0, 5.0]) + res = cat.sort_values(ascending=True, na_position="first") + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, exp_categories) + + exp = np.array([np.nan, np.nan, 5.0, 2.0, 2.0]) + res = cat.sort_values(ascending=False, na_position="first") + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, exp_categories) + + exp = np.array([2.0, 2.0, 5.0, np.nan, np.nan]) + res = cat.sort_values(ascending=True, na_position="last") + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, exp_categories) + + exp = np.array([5.0, 2.0, 2.0, np.nan, np.nan]) + res = cat.sort_values(ascending=False, na_position="last") + tm.assert_numpy_array_equal(res.__array__(), exp) + tm.assert_index_equal(res.categories, exp_categories) + + cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True) + res = cat.sort_values(ascending=False, na_position="last") + exp_val = np.array(["d", "c", "b", "a", np.nan], dtype=object) + exp_categories = Index(["a", "b", "c", "d"]) + tm.assert_numpy_array_equal(res.__array__(), exp_val) + tm.assert_index_equal(res.categories, exp_categories) + + cat = Categorical(["a", "c", "b", "d", np.nan], ordered=True) + res = cat.sort_values(ascending=False, na_position="first") + exp_val = np.array([np.nan, "d", "c", "b", "a"], dtype=object) + exp_categories = Index(["a", "b", "c", "d"]) + tm.assert_numpy_array_equal(res.__array__(), exp_val) + tm.assert_index_equal(res.categories, exp_categories) diff 
--git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_subclass.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_subclass.py new file mode 100644 index 0000000000000000000000000000000000000000..5b0c0a44e655d5dd943f95415336204aa12f0b67 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_subclass.py @@ -0,0 +1,26 @@ +from pandas import Categorical +import pandas._testing as tm + + +class SubclassedCategorical(Categorical): + pass + + +class TestCategoricalSubclassing: + def test_constructor(self): + sc = SubclassedCategorical(["a", "b", "c"]) + assert isinstance(sc, SubclassedCategorical) + tm.assert_categorical_equal(sc, Categorical(["a", "b", "c"])) + + def test_from_codes(self): + sc = SubclassedCategorical.from_codes([1, 0, 2], ["a", "b", "c"]) + assert isinstance(sc, SubclassedCategorical) + exp = Categorical.from_codes([1, 0, 2], ["a", "b", "c"]) + tm.assert_categorical_equal(sc, exp) + + def test_map(self): + sc = SubclassedCategorical(["a", "b", "c"]) + res = sc.map(lambda x: x.upper(), na_action=None) + assert isinstance(res, SubclassedCategorical) + exp = Categorical(["A", "B", "C"]) + tm.assert_categorical_equal(res, exp) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_take.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_take.py new file mode 100644 index 0000000000000000000000000000000000000000..373f1b30a13c2daff23e14a3e0640e7a716cceb3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/categorical/test_take.py @@ -0,0 +1,89 @@ +import numpy as np +import pytest + +from pandas import Categorical +import pandas._testing as tm + + +@pytest.fixture(params=[True, False]) +def allow_fill(request): + """Boolean 'allow_fill' parameter for Categorical.take""" + return request.param + + +class TestTake: + # https://github.com/pandas-dev/pandas/issues/20664 + + def test_take_default_allow_fill(self): + cat = Categorical(["a", "b"]) + with tm.assert_produces_warning(None): + result = cat.take([0, -1]) + + assert result.equals(cat) + + def test_take_positive_no_warning(self): + cat = Categorical(["a", "b"]) + with tm.assert_produces_warning(None): + cat.take([0, 0]) + + def test_take_bounds(self, allow_fill): + # https://github.com/pandas-dev/pandas/issues/20664 + cat = Categorical(["a", "b", "a"]) + if allow_fill: + msg = "indices are out-of-bounds" + else: + msg = "index 4 is out of bounds for( axis 0 with)? 
size 3" + with pytest.raises(IndexError, match=msg): + cat.take([4, 5], allow_fill=allow_fill) + + def test_take_empty(self, allow_fill): + # https://github.com/pandas-dev/pandas/issues/20664 + cat = Categorical([], categories=["a", "b"]) + if allow_fill: + msg = "indices are out-of-bounds" + else: + msg = "cannot do a non-empty take from an empty axes" + with pytest.raises(IndexError, match=msg): + cat.take([0], allow_fill=allow_fill) + + def test_positional_take(self, ordered): + cat = Categorical(["a", "a", "b", "b"], categories=["b", "a"], ordered=ordered) + result = cat.take([0, 1, 2], allow_fill=False) + expected = Categorical( + ["a", "a", "b"], categories=cat.categories, ordered=ordered + ) + tm.assert_categorical_equal(result, expected) + + def test_positional_take_unobserved(self, ordered): + cat = Categorical(["a", "b"], categories=["a", "b", "c"], ordered=ordered) + result = cat.take([1, 0], allow_fill=False) + expected = Categorical(["b", "a"], categories=cat.categories, ordered=ordered) + tm.assert_categorical_equal(result, expected) + + def test_take_allow_fill(self): + # https://github.com/pandas-dev/pandas/issues/23296 + cat = Categorical(["a", "a", "b"]) + result = cat.take([0, -1, -1], allow_fill=True) + expected = Categorical(["a", np.nan, np.nan], categories=["a", "b"]) + tm.assert_categorical_equal(result, expected) + + def test_take_fill_with_negative_one(self): + # -1 was a category + cat = Categorical([-1, 0, 1]) + result = cat.take([0, -1, 1], allow_fill=True, fill_value=-1) + expected = Categorical([-1, -1, 0], categories=[-1, 0, 1]) + tm.assert_categorical_equal(result, expected) + + def test_take_fill_value(self): + # https://github.com/pandas-dev/pandas/issues/23296 + cat = Categorical(["a", "b", "c"]) + result = cat.take([0, 1, -1], fill_value="a", allow_fill=True) + expected = Categorical(["a", "b", "a"], categories=["a", "b", "c"]) + tm.assert_categorical_equal(result, expected) + + def test_take_fill_value_new_raises(self): + # https://github.com/pandas-dev/pandas/issues/23296 + cat = Categorical(["a", "b", "c"]) + xpr = r"Cannot setitem on a Categorical with a new category \(d\)" + with pytest.raises(TypeError, match=xpr): + cat.take([0, 1, -1], fill_value="d", allow_fill=True) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16a4e814582f909119cf68f718d49b902c9362ac Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_constructors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_constructors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..74fa5d92f485bbf6cfacf513e60a95172377d3c4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_constructors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_cumulative.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_cumulative.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e9e9bc3a1ec498c599a93d55c3f59b3bb19dc9f Binary 
files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_cumulative.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_reductions.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_reductions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f404a025b5b6d67e64ae0bc6a85541977c12a3f4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/datetimes/__pycache__/test_reductions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_construction.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_construction.py new file mode 100644 index 0000000000000000000000000000000000000000..4007ee6b415c9b0f21f580f6240ed85ba1889781 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/floating/test_construction.py @@ -0,0 +1,204 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import FloatingArray +from pandas.core.arrays.floating import ( + Float32Dtype, + Float64Dtype, +) + + +def test_uses_pandas_na(): + a = pd.array([1, None], dtype=Float64Dtype()) + assert a[1] is pd.NA + + +def test_floating_array_constructor(): + values = np.array([1, 2, 3, 4], dtype="float64") + mask = np.array([False, False, False, True], dtype="bool") + + result = FloatingArray(values, mask) + expected = pd.array([1, 2, 3, np.nan], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + tm.assert_numpy_array_equal(result._data, values) + tm.assert_numpy_array_equal(result._mask, mask) + + msg = r".* should be .* numpy array. 
Use the 'pd.array' function instead" + with pytest.raises(TypeError, match=msg): + FloatingArray(values.tolist(), mask) + + with pytest.raises(TypeError, match=msg): + FloatingArray(values, mask.tolist()) + + with pytest.raises(TypeError, match=msg): + FloatingArray(values.astype(int), mask) + + msg = r"__init__\(\) missing 1 required positional argument: 'mask'" + with pytest.raises(TypeError, match=msg): + FloatingArray(values) + + +def test_floating_array_disallows_float16(): + # GH#44715 + arr = np.array([1, 2], dtype=np.float16) + mask = np.array([False, False]) + + msg = "FloatingArray does not support np.float16 dtype" + with pytest.raises(TypeError, match=msg): + FloatingArray(arr, mask) + + +def test_floating_array_disallows_Float16_dtype(request): + # GH#44715 + with pytest.raises(TypeError, match="data type 'Float16' not understood"): + pd.array([1.0, 2.0], dtype="Float16") + + +def test_floating_array_constructor_copy(): + values = np.array([1, 2, 3, 4], dtype="float64") + mask = np.array([False, False, False, True], dtype="bool") + + result = FloatingArray(values, mask) + assert result._data is values + assert result._mask is mask + + result = FloatingArray(values, mask, copy=True) + assert result._data is not values + assert result._mask is not mask + + +def test_to_array(): + result = pd.array([0.1, 0.2, 0.3, 0.4]) + expected = pd.array([0.1, 0.2, 0.3, 0.4], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "a, b", + [ + ([1, None], [1, pd.NA]), + ([None], [pd.NA]), + ([None, np.nan], [pd.NA, pd.NA]), + ([1, np.nan], [1, pd.NA]), + ([np.nan], [pd.NA]), + ], +) +def test_to_array_none_is_nan(a, b): + result = pd.array(a, dtype="Float64") + expected = pd.array(b, dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +def test_to_array_mixed_integer_float(): + result = pd.array([1, 2.0]) + expected = pd.array([1.0, 2.0], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + result = pd.array([1, None, 2.0]) + expected = pd.array([1.0, None, 2.0], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize( + "values", + [ + ["foo", "bar"], + "foo", + 1, + 1.0, + pd.date_range("20130101", periods=2), + np.array(["foo"]), + [[1, 2], [3, 4]], + [np.nan, {"a": 1}], + # GH#44514 all-NA case used to get quietly swapped out before checking ndim + np.array([pd.NA] * 6, dtype=object).reshape(3, 2), + ], +) +def test_to_array_error(values): + # error in converting existing arrays to FloatingArray + msg = "|".join( + [ + "cannot be converted to FloatingDtype", + "values must be a 1D list-like", + "Cannot pass scalar", + r"float\(\) argument must be a string or a (real )?number, not 'dict'", + "could not convert string to float: 'foo'", + r"could not convert string to float: np\.str_\('foo'\)", + ] + ) + with pytest.raises((TypeError, ValueError), match=msg): + pd.array(values, dtype="Float64") + + +@pytest.mark.parametrize("values", [["1", "2", None], ["1.5", "2", None]]) +def test_construct_from_float_strings(values): + # see also test_to_integer_array_str + expected = pd.array([float(values[0]), 2, None], dtype="Float64") + + res = pd.array(values, dtype="Float64") + tm.assert_extension_array_equal(res, expected) + + res = FloatingArray._from_sequence(values) + tm.assert_extension_array_equal(res, expected) + + +def test_to_array_inferred_dtype(): + # if values has dtype -> respect it + result = pd.array(np.array([1, 2], dtype="float32")) + assert result.dtype 
== Float32Dtype() + + # if values have no dtype -> always float64 + result = pd.array([1.0, 2.0]) + assert result.dtype == Float64Dtype() + + +def test_to_array_dtype_keyword(): + result = pd.array([1, 2], dtype="Float32") + assert result.dtype == Float32Dtype() + + # if values has dtype -> override it + result = pd.array(np.array([1, 2], dtype="float32"), dtype="Float64") + assert result.dtype == Float64Dtype() + + +def test_to_array_integer(): + result = pd.array([1, 2], dtype="Float64") + expected = pd.array([1.0, 2.0], dtype="Float64") + tm.assert_extension_array_equal(result, expected) + + # for integer dtypes, the itemsize is not preserved + # TODO can we specify "floating" in general? + result = pd.array(np.array([1, 2], dtype="int32"), dtype="Float64") + assert result.dtype == Float64Dtype() + + +@pytest.mark.parametrize( + "bool_values, values, target_dtype, expected_dtype", + [ + ([False, True], [0, 1], Float64Dtype(), Float64Dtype()), + ([False, True], [0, 1], "Float64", Float64Dtype()), + ([False, True, np.nan], [0, 1, np.nan], Float64Dtype(), Float64Dtype()), + ], +) +def test_to_array_bool(bool_values, values, target_dtype, expected_dtype): + result = pd.array(bool_values, dtype=target_dtype) + assert result.dtype == expected_dtype + expected = pd.array(values, dtype=target_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_series_from_float(data): + # construct from our dtype & string dtype + dtype = data.dtype + + # from float + expected = pd.Series(data) + result = pd.Series(data.to_numpy(na_value=np.nan, dtype="float"), dtype=str(dtype)) + tm.assert_series_equal(result, expected) + + # from list + expected = pd.Series(data) + result = pd.Series(np.array(data).tolist(), dtype=str(dtype)) + tm.assert_series_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..97510ba1204187106cd2f6992d3388f1ae953735 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/conftest.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/conftest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdf2030124acf123a76865af37ba16ee28978288 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/conftest.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_arithmetic.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_arithmetic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68d9cd9c5ffe1421e50bd4bb4e2d816c984202f5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_arithmetic.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_comparison.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_comparison.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a68af5063492cde2508c55ddff80a84407e96b85 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_comparison.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_concat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_concat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1620605538523d770cf8b1614b15dcd89c8b1038 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_concat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_construction.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_construction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46b2c53dfebc63159eb25cdaeae5c6a4dc081767 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_construction.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_dtypes.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_dtypes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c61f541447c09b488d07a837e28708ddb937c043 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_dtypes.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_function.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_function.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..904d007c86a26e0d0739451e51087b581a5d0d87 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_function.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_reduction.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..006aa51f8db4a472babaed8f8b32461f8a45c617 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_reduction.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_repr.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_repr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eee3cfc8aa737958da6eacb560ff0b05ec7c9947 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/__pycache__/test_repr.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_indexing.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_indexing.py new file mode 100644 index 
0000000000000000000000000000000000000000..4b953d699108b2aed1c992cf3a33f3013b298254 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/integer/test_indexing.py @@ -0,0 +1,19 @@ +import pandas as pd +import pandas._testing as tm + + +def test_array_setitem_nullable_boolean_mask(): + # GH 31446 + ser = pd.Series([1, 2], dtype="Int64") + result = ser.where(ser > 1) + expected = pd.Series([pd.NA, 2], dtype="Int64") + tm.assert_series_equal(result, expected) + + +def test_array_setitem(): + # GH 31446 + arr = pd.Series([1, 2], dtype="Int64").array + arr[arr > 1] = 1 + + expected = pd.array([1, 1], dtype="Int64") + tm.assert_extension_array_equal(arr, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..590c689ae09a7d7024b47605572299ba87b97001 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_astype.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_astype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0a69a7f50b6cabe71fda0c9fdf3fe6acb68e043 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_astype.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_formats.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_formats.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83bc6f0b671fdf74b9aa90fc4593b2749aad8f0c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_formats.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_interval.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_interval.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e6b7da04916d6ba3e07c0f8b4877cc284b1cc27 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_interval.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_interval_pyarrow.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_interval_pyarrow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..025daca6b35742b3de80db8d1a111058492b1af7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_interval_pyarrow.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_overlaps.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_overlaps.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4394785014fb0799e99a1fbcd3baa3e5aecb5289 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/__pycache__/test_overlaps.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_astype.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..d7a2140f817f3a8e5689d001768cf5642118b105 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_astype.py @@ -0,0 +1,28 @@ +import pytest + +from pandas import ( + Categorical, + CategoricalDtype, + Index, + IntervalIndex, +) +import pandas._testing as tm + + +class TestAstype: + @pytest.mark.parametrize("ordered", [True, False]) + def test_astype_categorical_retains_ordered(self, ordered): + index = IntervalIndex.from_breaks(range(5)) + arr = index._data + + dtype = CategoricalDtype(None, ordered=ordered) + + expected = Categorical(list(arr), ordered=ordered) + result = arr.astype(dtype) + assert result.ordered is ordered + tm.assert_categorical_equal(result, expected) + + # test IntervalIndex.astype while we're at it. + result = index.astype(dtype) + expected = Index(expected) + tm.assert_index_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_formats.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_formats.py new file mode 100644 index 0000000000000000000000000000000000000000..535efee51937473071c9490330eb68364769d5aa --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_formats.py @@ -0,0 +1,13 @@ +from pandas.core.arrays import IntervalArray + + +def test_repr(): + # GH#25022 + arr = IntervalArray.from_tuples([(0, 1), (1, 2)]) + result = repr(arr) + expected = ( + "<IntervalArray>\n" + "[(0, 1], (1, 2]]\n" + "Length: 2, dtype: interval[int64, right]" + ) + assert result == expected diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_interval.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_interval.py new file mode 100644 index 0000000000000000000000000000000000000000..be4b2c3e7e74cddfaf8b2efa08879d3a6d8f1757 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_interval.py @@ -0,0 +1,231 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + Index, + Interval, + IntervalIndex, + Timedelta, + Timestamp, + date_range, + timedelta_range, +) +import pandas._testing as tm +from pandas.core.arrays import IntervalArray + + +@pytest.fixture( + params=[ + (Index([0, 2, 4]), Index([1, 3, 5])), + (Index([0.0, 1.0, 2.0]), Index([1.0, 2.0, 3.0])), + (timedelta_range("0 days", periods=3), timedelta_range("1 day", periods=3)), + (date_range("20170101", periods=3), date_range("20170102", periods=3)), + ( + date_range("20170101", periods=3, tz="US/Eastern"), + date_range("20170102", periods=3, tz="US/Eastern"), + ), + ], + ids=lambda x: str(x[0].dtype), +) +def left_right_dtypes(request): + """ + Fixture for building an IntervalArray from various dtypes + """ + return request.param + + +class TestAttributes: + @pytest.mark.parametrize( + "left, right", + [ + (0, 1), + (Timedelta("0 days"), Timedelta("1 day")), + (Timestamp("2018-01-01"), Timestamp("2018-01-02")), + ( +
Timestamp("2018-01-01", tz="US/Eastern"), + Timestamp("2018-01-02", tz="US/Eastern"), + ), + ], + ) + @pytest.mark.parametrize("constructor", [IntervalArray, IntervalIndex]) + def test_is_empty(self, constructor, left, right, closed): + # GH27219 + tuples = [(left, left), (left, right), np.nan] + expected = np.array([closed != "both", False, False]) + result = constructor.from_tuples(tuples, closed=closed).is_empty + tm.assert_numpy_array_equal(result, expected) + + +class TestMethods: + @pytest.mark.parametrize("new_closed", ["left", "right", "both", "neither"]) + def test_set_closed(self, closed, new_closed): + # GH 21670 + array = IntervalArray.from_breaks(range(10), closed=closed) + result = array.set_closed(new_closed) + expected = IntervalArray.from_breaks(range(10), closed=new_closed) + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize( + "other", + [ + Interval(0, 1, closed="right"), + IntervalArray.from_breaks([1, 2, 3, 4], closed="right"), + ], + ) + def test_where_raises(self, other): + # GH#45768 The IntervalArray methods raises; the Series method coerces + ser = pd.Series(IntervalArray.from_breaks([1, 2, 3, 4], closed="left")) + mask = np.array([True, False, True]) + match = "'value.closed' is 'right', expected 'left'." + with pytest.raises(ValueError, match=match): + ser.array._where(mask, other) + + res = ser.where(mask, other=other) + expected = ser.astype(object).where(mask, other) + tm.assert_series_equal(res, expected) + + def test_shift(self): + # https://github.com/pandas-dev/pandas/issues/31495, GH#22428, GH#31502 + a = IntervalArray.from_breaks([1, 2, 3]) + result = a.shift() + # int -> float + expected = IntervalArray.from_tuples([(np.nan, np.nan), (1.0, 2.0)]) + tm.assert_interval_array_equal(result, expected) + + msg = "can only insert Interval objects and NA into an IntervalArray" + with pytest.raises(TypeError, match=msg): + a.shift(1, fill_value=pd.NaT) + + def test_shift_datetime(self): + # GH#31502, GH#31504 + a = IntervalArray.from_breaks(date_range("2000", periods=4)) + result = a.shift(2) + expected = a.take([-1, -1, 0], allow_fill=True) + tm.assert_interval_array_equal(result, expected) + + result = a.shift(-1) + expected = a.take([1, 2, -1], allow_fill=True) + tm.assert_interval_array_equal(result, expected) + + msg = "can only insert Interval objects and NA into an IntervalArray" + with pytest.raises(TypeError, match=msg): + a.shift(1, fill_value=np.timedelta64("NaT", "ns")) + + +class TestSetitem: + def test_set_na(self, left_right_dtypes): + left, right = left_right_dtypes + left = left.copy(deep=True) + right = right.copy(deep=True) + result = IntervalArray.from_arrays(left, right) + + if result.dtype.subtype.kind not in ["m", "M"]: + msg = "'value' should be an interval type, got <.*NaTType'> instead." + with pytest.raises(TypeError, match=msg): + result[0] = pd.NaT + if result.dtype.subtype.kind in ["i", "u"]: + msg = "Cannot set float NaN to integer-backed IntervalArray" + # GH#45484 TypeError, not ValueError, matches what we get with + # non-NA un-holdable value. 
+ with pytest.raises(TypeError, match=msg): + result[0] = np.nan + return + + result[0] = np.nan + + expected_left = Index([left._na_value] + list(left[1:])) + expected_right = Index([right._na_value] + list(right[1:])) + expected = IntervalArray.from_arrays(expected_left, expected_right) + + tm.assert_extension_array_equal(result, expected) + + def test_setitem_mismatched_closed(self): + arr = IntervalArray.from_breaks(range(4)) + orig = arr.copy() + other = arr.set_closed("both") + + msg = "'value.closed' is 'both', expected 'right'" + with pytest.raises(ValueError, match=msg): + arr[0] = other[0] + with pytest.raises(ValueError, match=msg): + arr[:1] = other[:1] + with pytest.raises(ValueError, match=msg): + arr[:0] = other[:0] + with pytest.raises(ValueError, match=msg): + arr[:] = other[::-1] + with pytest.raises(ValueError, match=msg): + arr[:] = list(other[::-1]) + with pytest.raises(ValueError, match=msg): + arr[:] = other[::-1].astype(object) + with pytest.raises(ValueError, match=msg): + arr[:] = other[::-1].astype("category") + + # empty list should be no-op + arr[:0] = [] + tm.assert_interval_array_equal(arr, orig) + + +class TestReductions: + def test_min_max_invalid_axis(self, left_right_dtypes): + left, right = left_right_dtypes + left = left.copy(deep=True) + right = right.copy(deep=True) + arr = IntervalArray.from_arrays(left, right) + + msg = "`axis` must be fewer than the number of dimensions" + for axis in [-2, 1]: + with pytest.raises(ValueError, match=msg): + arr.min(axis=axis) + with pytest.raises(ValueError, match=msg): + arr.max(axis=axis) + + msg = "'>=' not supported between" + with pytest.raises(TypeError, match=msg): + arr.min(axis="foo") + with pytest.raises(TypeError, match=msg): + arr.max(axis="foo") + + def test_min_max(self, left_right_dtypes, index_or_series_or_array): + # GH#44746 + left, right = left_right_dtypes + left = left.copy(deep=True) + right = right.copy(deep=True) + arr = IntervalArray.from_arrays(left, right) + + # The expected results below are only valid if monotonic + assert left.is_monotonic_increasing + assert Index(arr).is_monotonic_increasing + + MIN = arr[0] + MAX = arr[-1] + + indexer = np.arange(len(arr)) + np.random.default_rng(2).shuffle(indexer) + arr = arr.take(indexer) + + arr_na = arr.insert(2, np.nan) + + arr = index_or_series_or_array(arr) + arr_na = index_or_series_or_array(arr_na) + + for skipna in [True, False]: + res = arr.min(skipna=skipna) + assert res == MIN + assert type(res) == type(MIN) + + res = arr.max(skipna=skipna) + assert res == MAX + assert type(res) == type(MAX) + + res = arr_na.min(skipna=False) + assert np.isnan(res) + res = arr_na.max(skipna=False) + assert np.isnan(res) + + res = arr_na.min(skipna=True) + assert res == MIN + assert type(res) == type(MIN) + res = arr_na.max(skipna=True) + assert res == MAX + assert type(res) == type(MAX) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_interval_pyarrow.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_interval_pyarrow.py new file mode 100644 index 0000000000000000000000000000000000000000..ef8701be81e2b9248c29fc4e901161fd18d72bbe --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_interval_pyarrow.py @@ -0,0 +1,160 @@ +import numpy as np +import pytest + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import IntervalArray + + +def test_arrow_extension_type(): + pa = pytest.importorskip("pyarrow") + + from 
pandas.core.arrays.arrow.extension_types import ArrowIntervalType + + p1 = ArrowIntervalType(pa.int64(), "left") + p2 = ArrowIntervalType(pa.int64(), "left") + p3 = ArrowIntervalType(pa.int64(), "right") + + assert p1.closed == "left" + assert p1 == p2 + assert p1 != p3 + assert hash(p1) == hash(p2) + assert hash(p1) != hash(p3) + + +def test_arrow_array(): + pa = pytest.importorskip("pyarrow") + + from pandas.core.arrays.arrow.extension_types import ArrowIntervalType + + intervals = pd.interval_range(1, 5, freq=1).array + + result = pa.array(intervals) + assert isinstance(result.type, ArrowIntervalType) + assert result.type.closed == intervals.closed + assert result.type.subtype == pa.int64() + assert result.storage.field("left").equals(pa.array([1, 2, 3, 4], type="int64")) + assert result.storage.field("right").equals(pa.array([2, 3, 4, 5], type="int64")) + + expected = pa.array([{"left": i, "right": i + 1} for i in range(1, 5)]) + assert result.storage.equals(expected) + + # convert to its storage type + result = pa.array(intervals, type=expected.type) + assert result.equals(expected) + + # unsupported conversions + with pytest.raises(TypeError, match="Not supported to convert IntervalArray"): + pa.array(intervals, type="float64") + + with pytest.raises(TypeError, match="Not supported to convert IntervalArray"): + pa.array(intervals, type=ArrowIntervalType(pa.float64(), "left")) + + +def test_arrow_array_missing(): + pa = pytest.importorskip("pyarrow") + + from pandas.core.arrays.arrow.extension_types import ArrowIntervalType + + arr = IntervalArray.from_breaks([0.0, 1.0, 2.0, 3.0]) + arr[1] = None + + result = pa.array(arr) + assert isinstance(result.type, ArrowIntervalType) + assert result.type.closed == arr.closed + assert result.type.subtype == pa.float64() + + # fields have missing values (not NaN) + left = pa.array([0.0, None, 2.0], type="float64") + right = pa.array([1.0, None, 3.0], type="float64") + assert result.storage.field("left").equals(left) + assert result.storage.field("right").equals(right) + + # structarray itself also has missing values on the array level + vals = [ + {"left": 0.0, "right": 1.0}, + {"left": None, "right": None}, + {"left": 2.0, "right": 3.0}, + ] + expected = pa.StructArray.from_pandas(vals, mask=np.array([False, True, False])) + assert result.storage.equals(expected) + + +@pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) +@pytest.mark.parametrize( + "breaks", + [[0.0, 1.0, 2.0, 3.0], pd.date_range("2017", periods=4, freq="D")], + ids=["float", "datetime64[ns]"], +) +def test_arrow_table_roundtrip(breaks): + pa = pytest.importorskip("pyarrow") + + from pandas.core.arrays.arrow.extension_types import ArrowIntervalType + + arr = IntervalArray.from_breaks(breaks) + arr[1] = None + df = pd.DataFrame({"a": arr}) + + table = pa.table(df) + assert isinstance(table.field("a").type, ArrowIntervalType) + result = table.to_pandas() + assert isinstance(result["a"].dtype, pd.IntervalDtype) + tm.assert_frame_equal(result, df) + + table2 = pa.concat_tables([table, table]) + result = table2.to_pandas() + expected = pd.concat([df, df], ignore_index=True) + tm.assert_frame_equal(result, expected) + + # GH#41040 + table = pa.table( + [pa.chunked_array([], type=table.column(0).type)], schema=table.schema + ) + result = table.to_pandas() + tm.assert_frame_equal(result, expected[0:0]) + + +@pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) +@pytest.mark.parametrize( + "breaks", 
+ [[0.0, 1.0, 2.0, 3.0], pd.date_range("2017", periods=4, freq="D")], + ids=["float", "datetime64[ns]"], +) +def test_arrow_table_roundtrip_without_metadata(breaks): + pa = pytest.importorskip("pyarrow") + + arr = IntervalArray.from_breaks(breaks) + arr[1] = None + df = pd.DataFrame({"a": arr}) + + table = pa.table(df) + # remove the metadata + table = table.replace_schema_metadata() + assert table.schema.metadata is None + + result = table.to_pandas() + assert isinstance(result["a"].dtype, pd.IntervalDtype) + tm.assert_frame_equal(result, df) + + +def test_from_arrow_from_raw_struct_array(): + # in case pyarrow lost the Interval extension type (eg on parquet roundtrip + # with datetime64[ns] subtype, see GH-45881), still allow conversion + # from arrow to IntervalArray + pa = pytest.importorskip("pyarrow") + + arr = pa.array([{"left": 0, "right": 1}, {"left": 1, "right": 2}]) + dtype = pd.IntervalDtype(np.dtype("int64"), closed="neither") + + result = dtype.__from_arrow__(arr) + expected = IntervalArray.from_breaks( + np.array([0, 1, 2], dtype="int64"), closed="neither" + ) + tm.assert_extension_array_equal(result, expected) + + result = dtype.__from_arrow__(pa.chunked_array([arr])) + tm.assert_extension_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_overlaps.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_overlaps.py new file mode 100644 index 0000000000000000000000000000000000000000..4853bec51106c05781a4c11921f0e082934147ec --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/interval/test_overlaps.py @@ -0,0 +1,93 @@ +"""Tests for Interval-Interval operations, such as overlaps, contains, etc.""" +import numpy as np +import pytest + +from pandas import ( + Interval, + IntervalIndex, + Timedelta, + Timestamp, +) +import pandas._testing as tm +from pandas.core.arrays import IntervalArray + + +@pytest.fixture(params=[IntervalArray, IntervalIndex]) +def constructor(request): + """ + Fixture for testing both interval container classes. + """ + return request.param + + +@pytest.fixture( + params=[ + (Timedelta("0 days"), Timedelta("1 day")), + (Timestamp("2018-01-01"), Timedelta("1 day")), + (0, 1), + ], + ids=lambda x: type(x[0]).__name__, +) +def start_shift(request): + """ + Fixture for generating intervals of different types from a start value + and a shift value that can be added to start to generate an endpoint. 
+ """ + return request.param + + +class TestOverlaps: + def test_overlaps_interval(self, constructor, start_shift, closed, other_closed): + start, shift = start_shift + interval = Interval(start, start + 3 * shift, other_closed) + + # intervals: identical, nested, spanning, partial, adjacent, disjoint + tuples = [ + (start, start + 3 * shift), + (start + shift, start + 2 * shift), + (start - shift, start + 4 * shift), + (start + 2 * shift, start + 4 * shift), + (start + 3 * shift, start + 4 * shift), + (start + 4 * shift, start + 5 * shift), + ] + interval_container = constructor.from_tuples(tuples, closed) + + adjacent = interval.closed_right and interval_container.closed_left + expected = np.array([True, True, True, True, adjacent, False]) + result = interval_container.overlaps(interval) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("other_constructor", [IntervalArray, IntervalIndex]) + def test_overlaps_interval_container(self, constructor, other_constructor): + # TODO: modify this test when implemented + interval_container = constructor.from_breaks(range(5)) + other_container = other_constructor.from_breaks(range(5)) + with pytest.raises(NotImplementedError, match="^$"): + interval_container.overlaps(other_container) + + def test_overlaps_na(self, constructor, start_shift): + """NA values are marked as False""" + start, shift = start_shift + interval = Interval(start, start + shift) + + tuples = [ + (start, start + shift), + np.nan, + (start + 2 * shift, start + 3 * shift), + ] + interval_container = constructor.from_tuples(tuples) + + expected = np.array([True, False, False]) + result = interval_container.overlaps(interval) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize( + "other", + [10, True, "foo", Timedelta("1 day"), Timestamp("2018-01-01")], + ids=lambda x: type(x).__name__, + ) + def test_overlaps_invalid_type(self, constructor, other): + interval_container = constructor.from_breaks(range(5)) + msg = f"`other` must be Interval-like, got {type(other).__name__}" + with pytest.raises(TypeError, match=msg): + interval_container.overlaps(other) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a037a7e78969747e686eeae283c2a1df31a4686c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/test_indexing.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/test_indexing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d2ca5a64c9117d46fd0547892c5ebe192aca7512 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/test_indexing.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/test_numpy.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/test_numpy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3435563234da7a0b8ed938ea77618ab8b6c2a7a5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/__pycache__/test_numpy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/test_indexing.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/test_indexing.py new file mode 100644 index 0000000000000000000000000000000000000000..225d64ad7d2580f877505f0ac3a459e2ea4f0f53 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/test_indexing.py @@ -0,0 +1,41 @@ +import numpy as np + +from pandas.core.dtypes.common import is_scalar + +import pandas as pd +import pandas._testing as tm + + +class TestSearchsorted: + def test_searchsorted_string(self, string_dtype): + arr = pd.array(["a", "b", "c"], dtype=string_dtype) + + result = arr.searchsorted("a", side="left") + assert is_scalar(result) + assert result == 0 + + result = arr.searchsorted("a", side="right") + assert is_scalar(result) + assert result == 1 + + def test_searchsorted_numeric_dtypes_scalar(self, any_real_numpy_dtype): + arr = pd.array([1, 3, 90], dtype=any_real_numpy_dtype) + result = arr.searchsorted(30) + assert is_scalar(result) + assert result == 2 + + result = arr.searchsorted([30]) + expected = np.array([2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + def test_searchsorted_numeric_dtypes_vector(self, any_real_numpy_dtype): + arr = pd.array([1, 3, 90], dtype=any_real_numpy_dtype) + result = arr.searchsorted([2, 30]) + expected = np.array([1, 2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + + def test_searchsorted_sorter(self, any_real_numpy_dtype): + arr = pd.array([3, 1, 2], dtype=any_real_numpy_dtype) + result = arr.searchsorted([0, 3], sorter=np.argsort(arr)) + expected = np.array([0, 2], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/test_numpy.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/test_numpy.py new file mode 100644 index 0000000000000000000000000000000000000000..5112ce262f7711d09e59870c399a588cb2a7fd91 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/numpy_/test_numpy.py @@ -0,0 +1,324 @@ +""" +Additional tests for NumpyExtensionArray that aren't covered by +the interface tests. +""" +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import NumpyEADtype + +import pandas as pd +import pandas._testing as tm +from pandas.arrays import NumpyExtensionArray + + +@pytest.fixture( + params=[ + np.array(["a", "b"], dtype=object), + np.array([0, 1], dtype=float), + np.array([0, 1], dtype=int), + np.array([0, 1 + 2j], dtype=complex), + np.array([True, False], dtype=bool), + np.array([0, 1], dtype="datetime64[ns]"), + np.array([0, 1], dtype="timedelta64[ns]"), + ] +) +def any_numpy_array(request): + """ + Parametrized fixture for NumPy arrays with different dtypes. + + This excludes string and bytes. 
+ """ + return request.param + + +# ---------------------------------------------------------------------------- +# NumpyEADtype + + +@pytest.mark.parametrize( + "dtype, expected", + [ + ("bool", True), + ("int", True), + ("uint", True), + ("float", True), + ("complex", True), + ("str", False), + ("bytes", False), + ("datetime64[ns]", False), + ("object", False), + ("void", False), + ], +) +def test_is_numeric(dtype, expected): + dtype = NumpyEADtype(dtype) + assert dtype._is_numeric is expected + + +@pytest.mark.parametrize( + "dtype, expected", + [ + ("bool", True), + ("int", False), + ("uint", False), + ("float", False), + ("complex", False), + ("str", False), + ("bytes", False), + ("datetime64[ns]", False), + ("object", False), + ("void", False), + ], +) +def test_is_boolean(dtype, expected): + dtype = NumpyEADtype(dtype) + assert dtype._is_boolean is expected + + +def test_repr(): + dtype = NumpyEADtype(np.dtype("int64")) + assert repr(dtype) == "NumpyEADtype('int64')" + + +def test_constructor_from_string(): + result = NumpyEADtype.construct_from_string("int64") + expected = NumpyEADtype(np.dtype("int64")) + assert result == expected + + +def test_dtype_idempotent(any_numpy_dtype): + dtype = NumpyEADtype(any_numpy_dtype) + + result = NumpyEADtype(dtype) + assert result == dtype + + +# ---------------------------------------------------------------------------- +# Construction + + +def test_constructor_no_coercion(): + with pytest.raises(ValueError, match="NumPy array"): + NumpyExtensionArray([1, 2, 3]) + + +def test_series_constructor_with_copy(): + ndarray = np.array([1, 2, 3]) + ser = pd.Series(NumpyExtensionArray(ndarray), copy=True) + + assert ser.values is not ndarray + + +def test_series_constructor_with_astype(): + ndarray = np.array([1, 2, 3]) + result = pd.Series(NumpyExtensionArray(ndarray), dtype="float64") + expected = pd.Series([1.0, 2.0, 3.0], dtype="float64") + tm.assert_series_equal(result, expected) + + +def test_from_sequence_dtype(): + arr = np.array([1, 2, 3], dtype="int64") + result = NumpyExtensionArray._from_sequence(arr, dtype="uint64") + expected = NumpyExtensionArray(np.array([1, 2, 3], dtype="uint64")) + tm.assert_extension_array_equal(result, expected) + + +def test_constructor_copy(): + arr = np.array([0, 1]) + result = NumpyExtensionArray(arr, copy=True) + + assert not tm.shares_memory(result, arr) + + +def test_constructor_with_data(any_numpy_array): + nparr = any_numpy_array + arr = NumpyExtensionArray(nparr) + assert arr.dtype.numpy_dtype == nparr.dtype + + +# ---------------------------------------------------------------------------- +# Conversion + + +def test_to_numpy(): + arr = NumpyExtensionArray(np.array([1, 2, 3])) + result = arr.to_numpy() + assert result is arr._ndarray + + result = arr.to_numpy(copy=True) + assert result is not arr._ndarray + + result = arr.to_numpy(dtype="f8") + expected = np.array([1, 2, 3], dtype="f8") + tm.assert_numpy_array_equal(result, expected) + + +# ---------------------------------------------------------------------------- +# Setitem + + +def test_setitem_series(): + ser = pd.Series([1, 2, 3]) + ser.array[0] = 10 + expected = pd.Series([10, 2, 3]) + tm.assert_series_equal(ser, expected) + + +def test_setitem(any_numpy_array): + nparr = any_numpy_array + arr = NumpyExtensionArray(nparr, copy=True) + + arr[0] = arr[1] + nparr[0] = nparr[1] + + tm.assert_numpy_array_equal(arr.to_numpy(), nparr) + + +# ---------------------------------------------------------------------------- +# Reductions + + +def 
test_bad_reduce_raises(): + arr = np.array([1, 2, 3], dtype="int64") + arr = NumpyExtensionArray(arr) + msg = "cannot perform not_a_method with type int" + with pytest.raises(TypeError, match=msg): + arr._reduce(msg) + + +def test_validate_reduction_keyword_args(): + arr = NumpyExtensionArray(np.array([1, 2, 3])) + msg = "the 'keepdims' parameter is not supported .*all" + with pytest.raises(ValueError, match=msg): + arr.all(keepdims=True) + + +def test_np_max_nested_tuples(): + # case where checking in ufunc.nout works while checking for tuples + # does not + vals = [ + (("j", "k"), ("l", "m")), + (("l", "m"), ("o", "p")), + (("o", "p"), ("j", "k")), + ] + ser = pd.Series(vals) + arr = ser.array + + assert arr.max() is arr[2] + assert ser.max() is arr[2] + + result = np.maximum.reduce(arr) + assert result == arr[2] + + result = np.maximum.reduce(ser) + assert result == arr[2] + + +def test_np_reduce_2d(): + raw = np.arange(12).reshape(4, 3) + arr = NumpyExtensionArray(raw) + + res = np.maximum.reduce(arr, axis=0) + tm.assert_extension_array_equal(res, arr[-1]) + + alt = arr.max(axis=0) + tm.assert_extension_array_equal(alt, arr[-1]) + + +# ---------------------------------------------------------------------------- +# Ops + + +@pytest.mark.parametrize("ufunc", [np.abs, np.negative, np.positive]) +def test_ufunc_unary(ufunc): + arr = NumpyExtensionArray(np.array([-1.0, 0.0, 1.0])) + result = ufunc(arr) + expected = NumpyExtensionArray(ufunc(arr._ndarray)) + tm.assert_extension_array_equal(result, expected) + + # same thing but with the 'out' keyword + out = NumpyExtensionArray(np.array([-9.0, -9.0, -9.0])) + ufunc(arr, out=out) + tm.assert_extension_array_equal(out, expected) + + +def test_ufunc(): + arr = NumpyExtensionArray(np.array([-1.0, 0.0, 1.0])) + + r1, r2 = np.divmod(arr, np.add(arr, 2)) + e1, e2 = np.divmod(arr._ndarray, np.add(arr._ndarray, 2)) + e1 = NumpyExtensionArray(e1) + e2 = NumpyExtensionArray(e2) + tm.assert_extension_array_equal(r1, e1) + tm.assert_extension_array_equal(r2, e2) + + +def test_basic_binop(): + # Just a basic smoke test. The EA interface tests exercise this + # more thoroughly. + x = NumpyExtensionArray(np.array([1, 2, 3])) + result = x + x + expected = NumpyExtensionArray(np.array([2, 4, 6])) + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.parametrize("dtype", [None, object]) +def test_setitem_object_typecode(dtype): + arr = NumpyExtensionArray(np.array(["a", "b", "c"], dtype=dtype)) + arr[0] = "t" + expected = NumpyExtensionArray(np.array(["t", "b", "c"], dtype=dtype)) + tm.assert_extension_array_equal(arr, expected) + + +def test_setitem_no_coercion(): + # https://github.com/pandas-dev/pandas/issues/28150 + arr = NumpyExtensionArray(np.array([1, 2, 3])) + with pytest.raises(ValueError, match="int"): + arr[0] = "a" + + # With a value that we do coerce, check that we coerce the value + # and not the underlying array. 
+ arr[0] = 2.5 + assert isinstance(arr[0], (int, np.integer)), type(arr[0]) + + +def test_setitem_preserves_views(): + # GH#28150, see also extension test of the same name + arr = NumpyExtensionArray(np.array([1, 2, 3])) + view1 = arr.view() + view2 = arr[:] + view3 = np.asarray(arr) + + arr[0] = 9 + assert view1[0] == 9 + assert view2[0] == 9 + assert view3[0] == 9 + + arr[-1] = 2.5 + view1[-1] = 5 + assert arr[-1] == 5 + + +@pytest.mark.parametrize("dtype", [np.int64, np.uint64]) +def test_quantile_empty(dtype): + # we should get back np.nans, not -1s + arr = NumpyExtensionArray(np.array([], dtype=dtype)) + idx = pd.Index([0.0, 0.5]) + + result = arr._quantile(idx, interpolation="linear") + expected = NumpyExtensionArray(np.array([np.nan, np.nan])) + tm.assert_extension_array_equal(result, expected) + + +def test_factorize_unsigned(): + # don't raise when calling factorize on unsigned int NumpyExtensionArray + arr = np.array([1, 2, 3], dtype=np.uint64) + obj = NumpyExtensionArray(arr) + + res_codes, res_unique = obj.factorize() + exp_codes, exp_unique = pd.factorize(arr) + + tm.assert_numpy_array_equal(res_codes, exp_codes) + + tm.assert_extension_array_equal(res_unique, NumpyExtensionArray(exp_unique)) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..544425fb0bd644f49a7c3b8a9ae7ca976c3dad5a Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_arrow_compat.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_arrow_compat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59e38bcac0db2b95c1cf9e60f3359936f0901a9c Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_arrow_compat.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_astype.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_astype.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eac284f363572ff56ce91e021d0026ded8e15732 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_astype.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_constructors.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_constructors.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6734d69acc210fc3240a568f5f9e09b965d689b4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_constructors.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_reductions.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_reductions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90fa3d168b33c23d1d2988ff6a7a2bb6f0a5f7a5 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/__pycache__/test_reductions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/test_arrow_compat.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/test_arrow_compat.py new file mode 100644 index 0000000000000000000000000000000000000000..431309aca0df21dbe885ae015b10c3c21f0134a2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/test_arrow_compat.py @@ -0,0 +1,130 @@ +import pytest + +from pandas.compat.pyarrow import pa_version_under10p1 + +from pandas.core.dtypes.dtypes import PeriodDtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import ( + PeriodArray, + period_array, +) + +pytestmark = pytest.mark.filterwarnings( + "ignore:Passing a BlockManager to DataFrame:DeprecationWarning" +) + + +pa = pytest.importorskip("pyarrow") + + +def test_arrow_extension_type(): + from pandas.core.arrays.arrow.extension_types import ArrowPeriodType + + p1 = ArrowPeriodType("D") + p2 = ArrowPeriodType("D") + p3 = ArrowPeriodType("M") + + assert p1.freq == "D" + assert p1 == p2 + assert p1 != p3 + assert hash(p1) == hash(p2) + assert hash(p1) != hash(p3) + + +@pytest.mark.xfail(not pa_version_under10p1, reason="Wrong behavior with pyarrow 10") +@pytest.mark.parametrize( + "data, freq", + [ + (pd.date_range("2017", periods=3), "D"), + (pd.date_range("2017", periods=3, freq="YE"), "Y-DEC"), + ], +) +def test_arrow_array(data, freq): + from pandas.core.arrays.arrow.extension_types import ArrowPeriodType + + periods = period_array(data, freq=freq) + result = pa.array(periods) + assert isinstance(result.type, ArrowPeriodType) + assert result.type.freq == freq + expected = pa.array(periods.asi8, type="int64") + assert result.storage.equals(expected) + + # convert to its storage type + result = pa.array(periods, type=pa.int64()) + assert result.equals(expected) + + # unsupported conversions + msg = "Not supported to convert PeriodArray to 'double' type" + with pytest.raises(TypeError, match=msg): + pa.array(periods, type="float64") + + with pytest.raises(TypeError, match="different 'freq'"): + pa.array(periods, type=ArrowPeriodType("T")) + + +def test_arrow_array_missing(): + from pandas.core.arrays.arrow.extension_types import ArrowPeriodType + + arr = PeriodArray([1, 2, 3], dtype="period[D]") + arr[1] = pd.NaT + + result = pa.array(arr) + assert isinstance(result.type, ArrowPeriodType) + assert result.type.freq == "D" + expected = pa.array([1, None, 3], type="int64") + assert result.storage.equals(expected) + + +def test_arrow_table_roundtrip(): + from pandas.core.arrays.arrow.extension_types import ArrowPeriodType + + arr = PeriodArray([1, 2, 3], dtype="period[D]") + arr[1] = pd.NaT + df = pd.DataFrame({"a": arr}) + + table = pa.table(df) + assert isinstance(table.field("a").type, ArrowPeriodType) + result = table.to_pandas() + assert isinstance(result["a"].dtype, PeriodDtype) + tm.assert_frame_equal(result, df) + + table2 = pa.concat_tables([table, table]) + result = table2.to_pandas() + expected = pd.concat([df, df], ignore_index=True) + tm.assert_frame_equal(result, expected) + + +def test_arrow_load_from_zero_chunks(): + # GH-41040 + + from pandas.core.arrays.arrow.extension_types 
import ArrowPeriodType + + arr = PeriodArray([], dtype="period[D]") + df = pd.DataFrame({"a": arr}) + + table = pa.table(df) + assert isinstance(table.field("a").type, ArrowPeriodType) + table = pa.table( + [pa.chunked_array([], type=table.column(0).type)], schema=table.schema + ) + + result = table.to_pandas() + assert isinstance(result["a"].dtype, PeriodDtype) + tm.assert_frame_equal(result, df) + + +def test_arrow_table_roundtrip_without_metadata(): + arr = PeriodArray([1, 2, 3], dtype="period[h]") + arr[1] = pd.NaT + df = pd.DataFrame({"a": arr}) + + table = pa.table(df) + # remove the metadata + table = table.replace_schema_metadata() + assert table.schema.metadata is None + + result = table.to_pandas() + assert isinstance(result["a"].dtype, PeriodDtype) + tm.assert_frame_equal(result, df) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/test_astype.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/test_astype.py new file mode 100644 index 0000000000000000000000000000000000000000..9976c3a32580da0b5b237eaa2b839b2337363f51 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/test_astype.py @@ -0,0 +1,67 @@ +import numpy as np +import pytest + +from pandas.core.dtypes.dtypes import PeriodDtype + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import period_array + + +@pytest.mark.parametrize("dtype", [int, np.int32, np.int64, "uint32", "uint64"]) +def test_astype_int(dtype): + # We choose to ignore the sign and size of integers for + # Period/Datetime/Timedelta astype + arr = period_array(["2000", "2001", None], freq="D") + + if np.dtype(dtype) != np.int64: + with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"): + arr.astype(dtype) + return + + result = arr.astype(dtype) + expected = arr._ndarray.view("i8") + tm.assert_numpy_array_equal(result, expected) + + +def test_astype_copies(): + arr = period_array(["2000", "2001", None], freq="D") + result = arr.astype(np.int64, copy=False) + + # Add the `.base`, since we now use `.asi8` which returns a view. + # We could maybe override it in PeriodArray to return ._ndarray directly. + assert result.base is arr._ndarray + + result = arr.astype(np.int64, copy=True) + assert result is not arr._ndarray + tm.assert_numpy_array_equal(result, arr._ndarray.view("i8")) + + +def test_astype_categorical(): + arr = period_array(["2000", "2001", "2001", None], freq="D") + result = arr.astype("category") + categories = pd.PeriodIndex(["2000", "2001"], freq="D") + expected = pd.Categorical.from_codes([0, 1, 1, -1], categories=categories) + tm.assert_categorical_equal(result, expected) + + +def test_astype_period(): + arr = period_array(["2000", "2001", None], freq="D") + result = arr.astype(PeriodDtype("M")) + expected = period_array(["2000", "2001", None], freq="M") + tm.assert_period_array_equal(result, expected) + + +@pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"]) +def test_astype_datetime(dtype): + arr = period_array(["2000", "2001", None], freq="D") + # slice off the [ns] so that the regex matches. 
+ if dtype == "timedelta64[ns]": + with pytest.raises(TypeError, match=dtype[:-4]): + arr.astype(dtype) + + else: + # GH#45038 allow period->dt64 because we allow dt64->period + result = arr.astype(dtype) + expected = pd.DatetimeIndex(["2000", "2001", pd.NaT], dtype=dtype)._data + tm.assert_datetime_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/test_constructors.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/test_constructors.py new file mode 100644 index 0000000000000000000000000000000000000000..d034162f1b46e11bd06204de7707c7343fd9b1b2 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/test_constructors.py @@ -0,0 +1,156 @@ +import numpy as np +import pytest + +from pandas._libs.tslibs import iNaT +from pandas._libs.tslibs.offsets import MonthEnd +from pandas._libs.tslibs.period import IncompatibleFrequency + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays import ( + PeriodArray, + period_array, +) + + +@pytest.mark.parametrize( + "data, freq, expected", + [ + ([pd.Period("2017", "D")], None, [17167]), + ([pd.Period("2017", "D")], "D", [17167]), + ([2017], "D", [17167]), + (["2017"], "D", [17167]), + ([pd.Period("2017", "D")], pd.tseries.offsets.Day(), [17167]), + ([pd.Period("2017", "D"), None], None, [17167, iNaT]), + (pd.Series(pd.date_range("2017", periods=3)), None, [17167, 17168, 17169]), + (pd.date_range("2017", periods=3), None, [17167, 17168, 17169]), + (pd.period_range("2017", periods=4, freq="Q"), None, [188, 189, 190, 191]), + ], +) +def test_period_array_ok(data, freq, expected): + result = period_array(data, freq=freq).asi8 + expected = np.asarray(expected, dtype=np.int64) + tm.assert_numpy_array_equal(result, expected) + + +def test_period_array_readonly_object(): + # https://github.com/pandas-dev/pandas/issues/25403 + pa = period_array([pd.Period("2019-01-01")]) + arr = np.asarray(pa, dtype="object") + arr.setflags(write=False) + + result = period_array(arr) + tm.assert_period_array_equal(result, pa) + + result = pd.Series(arr) + tm.assert_series_equal(result, pd.Series(pa)) + + result = pd.DataFrame({"A": arr}) + tm.assert_frame_equal(result, pd.DataFrame({"A": pa})) + + +def test_from_datetime64_freq_changes(): + # https://github.com/pandas-dev/pandas/issues/23438 + arr = pd.date_range("2017", periods=3, freq="D") + result = PeriodArray._from_datetime64(arr, freq="M") + expected = period_array(["2017-01-01", "2017-01-01", "2017-01-01"], freq="M") + tm.assert_period_array_equal(result, expected) + + +@pytest.mark.parametrize("freq", ["2M", MonthEnd(2)]) +def test_from_datetime64_freq_2M(freq): + arr = np.array( + ["2020-01-01T00:00:00", "2020-01-02T00:00:00"], dtype="datetime64[ns]" + ) + result = PeriodArray._from_datetime64(arr, freq) + expected = period_array(["2020-01", "2020-01"], freq=freq) + tm.assert_period_array_equal(result, expected) + + +@pytest.mark.parametrize( + "data, freq, msg", + [ + ( + [pd.Period("2017", "D"), pd.Period("2017", "Y")], + None, + "Input has different freq", + ), + ([pd.Period("2017", "D")], "Y", "Input has different freq"), + ], +) +def test_period_array_raises(data, freq, msg): + with pytest.raises(IncompatibleFrequency, match=msg): + period_array(data, freq) + + +def test_period_array_non_period_series_raies(): + ser = pd.Series([1, 2, 3]) + with pytest.raises(TypeError, match="dtype"): + PeriodArray(ser, dtype="period[D]") + + +def test_period_array_freq_mismatch(): + arr = period_array(["2000", 
"2001"], freq="D") + with pytest.raises(IncompatibleFrequency, match="freq"): + PeriodArray(arr, dtype="period[M]") + + dtype = pd.PeriodDtype(pd.tseries.offsets.MonthEnd()) + with pytest.raises(IncompatibleFrequency, match="freq"): + PeriodArray(arr, dtype=dtype) + + +def test_from_sequence_disallows_i8(): + arr = period_array(["2000", "2001"], freq="D") + + msg = str(arr[0].ordinal) + with pytest.raises(TypeError, match=msg): + PeriodArray._from_sequence(arr.asi8, dtype=arr.dtype) + + with pytest.raises(TypeError, match=msg): + PeriodArray._from_sequence(list(arr.asi8), dtype=arr.dtype) + + +def test_from_td64nat_sequence_raises(): + # GH#44507 + td = pd.NaT.to_numpy("m8[ns]") + + dtype = pd.period_range("2005-01-01", periods=3, freq="D").dtype + + arr = np.array([None], dtype=object) + arr[0] = td + + msg = "Value must be Period, string, integer, or datetime" + with pytest.raises(ValueError, match=msg): + PeriodArray._from_sequence(arr, dtype=dtype) + + with pytest.raises(ValueError, match=msg): + pd.PeriodIndex(arr, dtype=dtype) + with pytest.raises(ValueError, match=msg): + pd.Index(arr, dtype=dtype) + with pytest.raises(ValueError, match=msg): + pd.array(arr, dtype=dtype) + with pytest.raises(ValueError, match=msg): + pd.Series(arr, dtype=dtype) + with pytest.raises(ValueError, match=msg): + pd.DataFrame(arr, dtype=dtype) + + +def test_freq_deprecated(): + # GH#52462 + data = np.arange(5).astype(np.int64) + msg = "The 'freq' keyword in the PeriodArray constructor is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + res = PeriodArray(data, freq="M") + + expected = PeriodArray(data, dtype="period[M]") + tm.assert_equal(res, expected) + + +def test_period_array_from_datetime64(): + arr = np.array( + ["2020-01-01T00:00:00", "2020-02-02T00:00:00"], dtype="datetime64[ns]" + ) + result = PeriodArray._from_datetime64(arr, freq=MonthEnd(2)) + + expected = period_array(["2020-01-01", "2020-02-01"], freq=MonthEnd(2)) + tm.assert_period_array_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/test_reductions.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/test_reductions.py new file mode 100644 index 0000000000000000000000000000000000000000..2889cc786dd71583ca345ad206553907af3a13fa --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/period/test_reductions.py @@ -0,0 +1,42 @@ +import pytest + +import pandas as pd +from pandas.core.arrays import period_array + + +class TestReductions: + def test_min_max(self): + arr = period_array( + [ + "2000-01-03", + "2000-01-03", + "NaT", + "2000-01-02", + "2000-01-05", + "2000-01-04", + ], + freq="D", + ) + + result = arr.min() + expected = pd.Period("2000-01-02", freq="D") + assert result == expected + + result = arr.max() + expected = pd.Period("2000-01-05", freq="D") + assert result == expected + + result = arr.min(skipna=False) + assert result is pd.NaT + + result = arr.max(skipna=False) + assert result is pd.NaT + + @pytest.mark.parametrize("skipna", [True, False]) + def test_min_max_empty(self, skipna): + arr = period_array([], freq="D") + result = arr.min(skipna=skipna) + assert result is pd.NaT + + result = arr.max(skipna=skipna) + assert result is pd.NaT diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/__init__.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de78cf7f8d4db0c643fbdc70c1f41d2f29b06b94 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/__pycache__/test_string.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/__pycache__/test_string.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f5e35ea972bb59986a55ea9a8aee0ebd00ddd4f Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/__pycache__/test_string.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/__pycache__/test_string_arrow.cpython-310.pyc b/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/__pycache__/test_string_arrow.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57dec327a6d5e8c11fc7a0471ba672d0980040d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/__pycache__/test_string_arrow.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/test_string.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/test_string.py new file mode 100644 index 0000000000000000000000000000000000000000..320bdca60a9327bee90ffde5483e735829aaa2c8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/test_string.py @@ -0,0 +1,703 @@ +""" +This module tests the functionality of StringArray and ArrowStringArray. 
+Tests for the str accessors are in pandas/tests/strings/test_string_array.py +""" +import operator + +import numpy as np +import pytest + +from pandas.compat.pyarrow import pa_version_under12p0 + +from pandas.core.dtypes.common import is_dtype_equal + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays.string_arrow import ( + ArrowStringArray, + ArrowStringArrayNumpySemantics, +) + + +def na_val(dtype): + if dtype.storage == "pyarrow_numpy": + return np.nan + else: + return pd.NA + + +@pytest.fixture +def dtype(string_storage): + """Fixture giving StringDtype from parametrized 'string_storage'""" + return pd.StringDtype(storage=string_storage) + + +@pytest.fixture +def cls(dtype): + """Fixture giving array type from parametrized 'dtype'""" + return dtype.construct_array_type() + + +def test_repr(dtype): + df = pd.DataFrame({"A": pd.array(["a", pd.NA, "b"], dtype=dtype)}) + if dtype.storage == "pyarrow_numpy": + expected = " A\n0 a\n1 NaN\n2 b" + else: + expected = " A\n0 a\n1 <NA>\n2 b" + assert repr(df) == expected + + if dtype.storage == "pyarrow_numpy": + expected = "0 a\n1 NaN\n2 b\nName: A, dtype: string" + else: + expected = "0 a\n1 <NA>\n2 b\nName: A, dtype: string" + assert repr(df.A) == expected + + if dtype.storage == "pyarrow": + arr_name = "ArrowStringArray" + expected = f"<{arr_name}>\n['a', <NA>, 'b']\nLength: 3, dtype: string" + elif dtype.storage == "pyarrow_numpy": + arr_name = "ArrowStringArrayNumpySemantics" + expected = f"<{arr_name}>\n['a', nan, 'b']\nLength: 3, dtype: string" + else: + arr_name = "StringArray" + expected = f"<{arr_name}>\n['a', <NA>, 'b']\nLength: 3, dtype: string" + assert repr(df.A.array) == expected + + +def test_none_to_nan(cls, dtype): + a = cls._from_sequence(["a", None, "b"], dtype=dtype) + assert a[1] is not None + assert a[1] is na_val(a.dtype) + + +def test_setitem_validates(cls, dtype): + arr = cls._from_sequence(["a", "b"], dtype=dtype) + + if cls is pd.arrays.StringArray: + msg = "Cannot set non-string value '10' into a StringArray." + else: + msg = "Scalar must be NA or str" + with pytest.raises(TypeError, match=msg): + arr[0] = 10 + + if cls is pd.arrays.StringArray: + msg = "Must provide strings." + else: + msg = "Scalar must be NA or str" + with pytest.raises(TypeError, match=msg): + arr[:] = np.array([1, 2]) + + +def test_setitem_with_scalar_string(dtype): + # is_float_dtype considers some strings, like 'd', to be floats + # which can cause issues. 
+ arr = pd.array(["a", "c"], dtype=dtype) + arr[0] = "d" + expected = pd.array(["d", "c"], dtype=dtype) + tm.assert_extension_array_equal(arr, expected) + + +def test_setitem_with_array_with_missing(dtype): + # ensure that when setting with an array of values, we don't mutate the + # array `value` in __setitem__(self, key, value) + arr = pd.array(["a", "b", "c"], dtype=dtype) + value = np.array(["A", None]) + value_orig = value.copy() + arr[[0, 1]] = value + + expected = pd.array(["A", pd.NA, "c"], dtype=dtype) + tm.assert_extension_array_equal(arr, expected) + tm.assert_numpy_array_equal(value, value_orig) + + +def test_astype_roundtrip(dtype): + ser = pd.Series(pd.date_range("2000", periods=12)) + ser[0] = None + + casted = ser.astype(dtype) + assert is_dtype_equal(casted.dtype, dtype) + + result = casted.astype("datetime64[ns]") + tm.assert_series_equal(result, ser) + + # GH#38509 same thing for timedelta64 + ser2 = ser - ser.iloc[-1] + casted2 = ser2.astype(dtype) + assert is_dtype_equal(casted2.dtype, dtype) + + result2 = casted2.astype(ser2.dtype) + tm.assert_series_equal(result2, ser2) + + +def test_add(dtype): + a = pd.Series(["a", "b", "c", None, None], dtype=dtype) + b = pd.Series(["x", "y", None, "z", None], dtype=dtype) + + result = a + b + expected = pd.Series(["ax", "by", None, None, None], dtype=dtype) + tm.assert_series_equal(result, expected) + + result = a.add(b) + tm.assert_series_equal(result, expected) + + result = a.radd(b) + expected = pd.Series(["xa", "yb", None, None, None], dtype=dtype) + tm.assert_series_equal(result, expected) + + result = a.add(b, fill_value="-") + expected = pd.Series(["ax", "by", "c-", "-z", None], dtype=dtype) + tm.assert_series_equal(result, expected) + + +def test_add_2d(dtype, request, arrow_string_storage): + if dtype.storage in arrow_string_storage: + reason = "Failed: DID NOT RAISE <class 'ValueError'>" + mark = pytest.mark.xfail(raises=None, reason=reason) + request.applymarker(mark) + + a = pd.array(["a", "b", "c"], dtype=dtype) + b = np.array([["a", "b", "c"]], dtype=object) + with pytest.raises(ValueError, match="3 != 1"): + a + b + + s = pd.Series(a) + with pytest.raises(ValueError, match="3 != 1"): + s + b + + +def test_add_sequence(dtype): + a = pd.array(["a", "b", None, None], dtype=dtype) + other = ["x", None, "y", None] + + result = a + other + expected = pd.array(["ax", None, None, None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = other + a + expected = pd.array(["xa", None, None, None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_mul(dtype): + a = pd.array(["a", "b", None], dtype=dtype) + result = a * 2 + expected = pd.array(["aa", "bb", None], dtype=dtype) + tm.assert_extension_array_equal(result, expected) + + result = 2 * a + tm.assert_extension_array_equal(result, expected) + + +@pytest.mark.xfail(reason="GH-28527") +def test_add_strings(dtype): + arr = pd.array(["a", "b", "c", "d"], dtype=dtype) + df = pd.DataFrame([["t", "y", "v", "w"]], dtype=object) + assert arr.__add__(df) is NotImplemented + + result = arr + df + expected = pd.DataFrame([["at", "by", "cv", "dw"]]).astype(dtype) + tm.assert_frame_equal(result, expected) + + result = df + arr + expected = pd.DataFrame([["ta", "yb", "vc", "wd"]]).astype(dtype) + tm.assert_frame_equal(result, expected) + + +@pytest.mark.xfail(reason="GH-28527") +def test_add_frame(dtype): + arr = pd.array(["a", "b", np.nan, np.nan], dtype=dtype) + df = pd.DataFrame([["x", np.nan, "y", np.nan]]) + + assert arr.__add__(df) is 
NotImplemented + + result = arr + df + expected = pd.DataFrame([["ax", np.nan, np.nan, np.nan]]).astype(dtype) + tm.assert_frame_equal(result, expected) + + result = df + arr + expected = pd.DataFrame([["xa", np.nan, np.nan, np.nan]]).astype(dtype) + tm.assert_frame_equal(result, expected) + + +def test_comparison_methods_scalar(comparison_op, dtype): + op_name = f"__{comparison_op.__name__}__" + a = pd.array(["a", None, "c"], dtype=dtype) + other = "a" + result = getattr(a, op_name)(other) + if dtype.storage == "pyarrow_numpy": + expected = np.array([getattr(item, op_name)(other) for item in a]) + if comparison_op == operator.ne: + expected[1] = True + else: + expected[1] = False + tm.assert_numpy_array_equal(result, expected.astype(np.bool_)) + else: + expected_dtype = "boolean[pyarrow]" if dtype.storage == "pyarrow" else "boolean" + expected = np.array([getattr(item, op_name)(other) for item in a], dtype=object) + expected = pd.array(expected, dtype=expected_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_comparison_methods_scalar_pd_na(comparison_op, dtype): + op_name = f"__{comparison_op.__name__}__" + a = pd.array(["a", None, "c"], dtype=dtype) + result = getattr(a, op_name)(pd.NA) + + if dtype.storage == "pyarrow_numpy": + if operator.ne == comparison_op: + expected = np.array([True, True, True]) + else: + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(result, expected) + else: + expected_dtype = "boolean[pyarrow]" if dtype.storage == "pyarrow" else "boolean" + expected = pd.array([None, None, None], dtype=expected_dtype) + tm.assert_extension_array_equal(result, expected) + tm.assert_extension_array_equal(result, expected) + + +def test_comparison_methods_scalar_not_string(comparison_op, dtype): + op_name = f"__{comparison_op.__name__}__" + + a = pd.array(["a", None, "c"], dtype=dtype) + other = 42 + + if op_name not in ["__eq__", "__ne__"]: + with pytest.raises(TypeError, match="Invalid comparison|not supported between"): + getattr(a, op_name)(other) + + return + + result = getattr(a, op_name)(other) + + if dtype.storage == "pyarrow_numpy": + expected_data = { + "__eq__": [False, False, False], + "__ne__": [True, True, True], + }[op_name] + expected = np.array(expected_data) + tm.assert_numpy_array_equal(result, expected) + else: + expected_data = {"__eq__": [False, None, False], "__ne__": [True, None, True]}[ + op_name + ] + expected_dtype = "boolean[pyarrow]" if dtype.storage == "pyarrow" else "boolean" + expected = pd.array(expected_data, dtype=expected_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_comparison_methods_array(comparison_op, dtype): + op_name = f"__{comparison_op.__name__}__" + + a = pd.array(["a", None, "c"], dtype=dtype) + other = [None, None, "c"] + result = getattr(a, op_name)(other) + if dtype.storage == "pyarrow_numpy": + if operator.ne == comparison_op: + expected = np.array([True, True, False]) + else: + expected = np.array([False, False, False]) + expected[-1] = getattr(other[-1], op_name)(a[-1]) + tm.assert_numpy_array_equal(result, expected) + + result = getattr(a, op_name)(pd.NA) + if operator.ne == comparison_op: + expected = np.array([True, True, True]) + else: + expected = np.array([False, False, False]) + tm.assert_numpy_array_equal(result, expected) + + else: + expected_dtype = "boolean[pyarrow]" if dtype.storage == "pyarrow" else "boolean" + expected = np.full(len(a), fill_value=None, dtype="object") + expected[-1] = getattr(other[-1], op_name)(a[-1]) + expected = 
pd.array(expected, dtype=expected_dtype) + tm.assert_extension_array_equal(result, expected) + + result = getattr(a, op_name)(pd.NA) + expected = pd.array([None, None, None], dtype=expected_dtype) + tm.assert_extension_array_equal(result, expected) + + +def test_constructor_raises(cls): + if cls is pd.arrays.StringArray: + msg = "StringArray requires a sequence of strings or pandas.NA" + else: + msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowExtensionArray" + + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", "b"], dtype="S1")) + + with pytest.raises(ValueError, match=msg): + cls(np.array([])) + + if cls is pd.arrays.StringArray: + # GH#45057 np.nan and None do NOT raise, as they are considered valid NAs + # for string dtype + cls(np.array(["a", np.nan], dtype=object)) + cls(np.array(["a", None], dtype=object)) + else: + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", np.nan], dtype=object)) + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", None], dtype=object)) + + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", pd.NaT], dtype=object)) + + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", np.datetime64("NaT", "ns")], dtype=object)) + + with pytest.raises(ValueError, match=msg): + cls(np.array(["a", np.timedelta64("NaT", "ns")], dtype=object)) + + +@pytest.mark.parametrize("na", [np.nan, np.float64("nan"), float("nan"), None, pd.NA]) +def test_constructor_nan_like(na): + expected = pd.arrays.StringArray(np.array(["a", pd.NA])) + tm.assert_extension_array_equal( + pd.arrays.StringArray(np.array(["a", na], dtype="object")), expected + ) + + +@pytest.mark.parametrize("copy", [True, False]) +def test_from_sequence_no_mutate(copy, cls, dtype): + nan_arr = np.array(["a", np.nan], dtype=object) + expected_input = nan_arr.copy() + na_arr = np.array(["a", pd.NA], dtype=object) + + result = cls._from_sequence(nan_arr, dtype=dtype, copy=copy) + + if cls in (ArrowStringArray, ArrowStringArrayNumpySemantics): + import pyarrow as pa + + expected = cls(pa.array(na_arr, type=pa.string(), from_pandas=True)) + else: + expected = cls(na_arr) + + tm.assert_extension_array_equal(result, expected) + tm.assert_numpy_array_equal(nan_arr, expected_input) + + +def test_astype_int(dtype): + arr = pd.array(["1", "2", "3"], dtype=dtype) + result = arr.astype("int64") + expected = np.array([1, 2, 3], dtype="int64") + tm.assert_numpy_array_equal(result, expected) + + arr = pd.array(["1", pd.NA, "3"], dtype=dtype) + if dtype.storage == "pyarrow_numpy": + err = ValueError + msg = "cannot convert float NaN to integer" + else: + err = TypeError + msg = ( + r"int\(\) argument must be a string, a bytes-like " + r"object or a( real)? 
number" + ) + with pytest.raises(err, match=msg): + arr.astype("int64") + + +def test_astype_nullable_int(dtype): + arr = pd.array(["1", pd.NA, "3"], dtype=dtype) + + result = arr.astype("Int64") + expected = pd.array([1, pd.NA, 3], dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + +def test_astype_float(dtype, any_float_dtype): + # Don't compare arrays (37974) + ser = pd.Series(["1.1", pd.NA, "3.3"], dtype=dtype) + result = ser.astype(any_float_dtype) + expected = pd.Series([1.1, np.nan, 3.3], dtype=any_float_dtype) + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.xfail(reason="Not implemented StringArray.sum") +def test_reduce(skipna, dtype): + arr = pd.Series(["a", "b", "c"], dtype=dtype) + result = arr.sum(skipna=skipna) + assert result == "abc" + + +@pytest.mark.parametrize("skipna", [True, False]) +@pytest.mark.xfail(reason="Not implemented StringArray.sum") +def test_reduce_missing(skipna, dtype): + arr = pd.Series([None, "a", None, "b", "c", None], dtype=dtype) + result = arr.sum(skipna=skipna) + if skipna: + assert result == "abc" + else: + assert pd.isna(result) + + +@pytest.mark.parametrize("method", ["min", "max"]) +@pytest.mark.parametrize("skipna", [True, False]) +def test_min_max(method, skipna, dtype): + arr = pd.Series(["a", "b", "c", None], dtype=dtype) + result = getattr(arr, method)(skipna=skipna) + if skipna: + expected = "a" if method == "min" else "c" + assert result == expected + else: + assert result is na_val(arr.dtype) + + +@pytest.mark.parametrize("method", ["min", "max"]) +@pytest.mark.parametrize("box", [pd.Series, pd.array]) +def test_min_max_numpy(method, box, dtype, request, arrow_string_storage): + if dtype.storage in arrow_string_storage and box is pd.array: + if box is pd.array: + reason = "'<=' not supported between instances of 'str' and 'NoneType'" + else: + reason = "'ArrowStringArray' object has no attribute 'max'" + mark = pytest.mark.xfail(raises=TypeError, reason=reason) + request.applymarker(mark) + + arr = box(["a", "b", "c", None], dtype=dtype) + result = getattr(np, method)(arr) + expected = "a" if method == "min" else "c" + assert result == expected + + +def test_fillna_args(dtype, arrow_string_storage): + # GH 37987 + + arr = pd.array(["a", pd.NA], dtype=dtype) + + res = arr.fillna(value="b") + expected = pd.array(["a", "b"], dtype=dtype) + tm.assert_extension_array_equal(res, expected) + + res = arr.fillna(value=np.str_("b")) + expected = pd.array(["a", "b"], dtype=dtype) + tm.assert_extension_array_equal(res, expected) + + if dtype.storage in arrow_string_storage: + msg = "Invalid value '1' for dtype string" + else: + msg = "Cannot set non-string value '1' into a StringArray." 
+ with pytest.raises(TypeError, match=msg): + arr.fillna(value=1) + + +def test_arrow_array(dtype): + # protocol added in 0.15.0 + pa = pytest.importorskip("pyarrow") + import pyarrow.compute as pc + + data = pd.array(["a", "b", "c"], dtype=dtype) + arr = pa.array(data) + expected = pa.array(list(data), type=pa.large_string(), from_pandas=True) + if dtype.storage in ("pyarrow", "pyarrow_numpy") and pa_version_under12p0: + expected = pa.chunked_array(expected) + if dtype.storage == "python": + expected = pc.cast(expected, pa.string()) + assert arr.equals(expected) + + +@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning") +def test_arrow_roundtrip(dtype, string_storage2, request, using_infer_string): + # roundtrip possible from arrow 1.0.0 + pa = pytest.importorskip("pyarrow") + + if using_infer_string and string_storage2 != "pyarrow_numpy": + request.applymarker( + pytest.mark.xfail( + reason="infer_string takes precedence over string storage" + ) + ) + + data = pd.array(["a", "b", None], dtype=dtype) + df = pd.DataFrame({"a": data}) + table = pa.table(df) + if dtype.storage == "python": + assert table.field("a").type == "string" + else: + assert table.field("a").type == "large_string" + with pd.option_context("string_storage", string_storage2): + result = table.to_pandas() + assert isinstance(result["a"].dtype, pd.StringDtype) + expected = df.astype(f"string[{string_storage2}]") + tm.assert_frame_equal(result, expected) + # ensure the missing value is represented by NA and not np.nan or None + assert result.loc[2, "a"] is na_val(result["a"].dtype) + + +@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning") +def test_arrow_load_from_zero_chunks( + dtype, string_storage2, request, using_infer_string +): + # GH-41040 + pa = pytest.importorskip("pyarrow") + + if using_infer_string and string_storage2 != "pyarrow_numpy": + request.applymarker( + pytest.mark.xfail( + reason="infer_string takes precedence over string storage" + ) + ) + + data = pd.array([], dtype=dtype) + df = pd.DataFrame({"a": data}) + table = pa.table(df) + if dtype.storage == "python": + assert table.field("a").type == "string" + else: + assert table.field("a").type == "large_string" + # Instantiate the same table with no chunks at all + table = pa.table([pa.chunked_array([], type=pa.string())], schema=table.schema) + with pd.option_context("string_storage", string_storage2): + result = table.to_pandas() + assert isinstance(result["a"].dtype, pd.StringDtype) + expected = df.astype(f"string[{string_storage2}]") + tm.assert_frame_equal(result, expected) + + +def test_value_counts_na(dtype): + if getattr(dtype, "storage", "") == "pyarrow": + exp_dtype = "int64[pyarrow]" + elif getattr(dtype, "storage", "") == "pyarrow_numpy": + exp_dtype = "int64" + else: + exp_dtype = "Int64" + arr = pd.array(["a", "b", "a", pd.NA], dtype=dtype) + result = arr.value_counts(dropna=False) + expected = pd.Series([2, 1, 1], index=arr[[0, 1, 3]], dtype=exp_dtype, name="count") + tm.assert_series_equal(result, expected) + + result = arr.value_counts(dropna=True) + expected = pd.Series([2, 1], index=arr[:2], dtype=exp_dtype, name="count") + tm.assert_series_equal(result, expected) + + +def test_value_counts_with_normalize(dtype): + if getattr(dtype, "storage", "") == "pyarrow": + exp_dtype = "double[pyarrow]" + elif getattr(dtype, "storage", "") == "pyarrow_numpy": + exp_dtype = np.float64 + else: + exp_dtype = "Float64" + ser = pd.Series(["a", "b", "a", pd.NA], dtype=dtype) + result = 
ser.value_counts(normalize=True) + expected = pd.Series([2, 1], index=ser[:2], dtype=exp_dtype, name="proportion") / 3 + tm.assert_series_equal(result, expected) + + +@pytest.mark.parametrize( + "values, expected", + [ + (["a", "b", "c"], np.array([False, False, False])), + (["a", "b", None], np.array([False, False, True])), + ], +) +def test_use_inf_as_na(values, expected, dtype): + # https://github.com/pandas-dev/pandas/issues/33655 + values = pd.array(values, dtype=dtype) + msg = "use_inf_as_na option is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + with pd.option_context("mode.use_inf_as_na", True): + result = values.isna() + tm.assert_numpy_array_equal(result, expected) + + result = pd.Series(values).isna() + expected = pd.Series(expected) + tm.assert_series_equal(result, expected) + + result = pd.DataFrame(values).isna() + expected = pd.DataFrame(expected) + tm.assert_frame_equal(result, expected) + + +def test_memory_usage(dtype, arrow_string_storage): + # GH 33963 + + if dtype.storage in arrow_string_storage: + pytest.skip(f"not applicable for {dtype.storage}") + + series = pd.Series(["a", "b", "c"], dtype=dtype) + + assert 0 < series.nbytes <= series.memory_usage() < series.memory_usage(deep=True) + + +@pytest.mark.parametrize("float_dtype", [np.float16, np.float32, np.float64]) +def test_astype_from_float_dtype(float_dtype, dtype): + # https://github.com/pandas-dev/pandas/issues/36451 + ser = pd.Series([0.1], dtype=float_dtype) + result = ser.astype(dtype) + expected = pd.Series(["0.1"], dtype=dtype) + tm.assert_series_equal(result, expected) + + +def test_to_numpy_returns_pdna_default(dtype): + arr = pd.array(["a", pd.NA, "b"], dtype=dtype) + result = np.array(arr) + expected = np.array(["a", na_val(dtype), "b"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +def test_to_numpy_na_value(dtype, nulls_fixture): + na_value = nulls_fixture + arr = pd.array(["a", pd.NA, "b"], dtype=dtype) + result = arr.to_numpy(na_value=na_value) + expected = np.array(["a", na_value, "b"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +def test_isin(dtype, fixed_now_ts): + s = pd.Series(["a", "b", None], dtype=dtype) + + result = s.isin(["a", "c"]) + expected = pd.Series([True, False, False]) + tm.assert_series_equal(result, expected) + + result = s.isin(["a", pd.NA]) + expected = pd.Series([True, False, True]) + tm.assert_series_equal(result, expected) + + result = s.isin([]) + expected = pd.Series([False, False, False]) + tm.assert_series_equal(result, expected) + + result = s.isin(["a", fixed_now_ts]) + expected = pd.Series([True, False, False]) + tm.assert_series_equal(result, expected) + + +def test_setitem_scalar_with_mask_validation(dtype): + # https://github.com/pandas-dev/pandas/issues/47628 + # setting None with a boolean mask (through _putmask) should still result + # in pd.NA values in the underlying array + ser = pd.Series(["a", "b", "c"], dtype=dtype) + mask = np.array([False, True, False]) + + ser[mask] = None + assert ser.array[1] is na_val(ser.dtype) + + # for other non-string we should also raise an error + ser = pd.Series(["a", "b", "c"], dtype=dtype) + if type(ser.array) is pd.arrays.StringArray: + msg = "Cannot set non-string value" + else: + msg = "Scalar must be NA or str" + with pytest.raises(TypeError, match=msg): + ser[mask] = 1 + + +def test_from_numpy_str(dtype): + vals = ["a", "b", "c"] + arr = np.array(vals, dtype=np.str_) + result = pd.array(arr, dtype=dtype) + expected = pd.array(vals, dtype=dtype) + 
tm.assert_extension_array_equal(result, expected) + + +def test_tolist(dtype): + vals = ["a", "b", "c"] + arr = pd.array(vals, dtype=dtype) + result = arr.tolist() + expected = vals + tm.assert_equal(result, expected) diff --git a/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/test_string_arrow.py b/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/test_string_arrow.py new file mode 100644 index 0000000000000000000000000000000000000000..d7811b6fed8838cb792cc6faf2354d2b4237d4de --- /dev/null +++ b/venv/lib/python3.10/site-packages/pandas/tests/arrays/string_/test_string_arrow.py @@ -0,0 +1,265 @@ +import pickle +import re + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +import pandas as pd +import pandas._testing as tm +from pandas.core.arrays.string_ import ( + StringArray, + StringDtype, +) +from pandas.core.arrays.string_arrow import ( + ArrowStringArray, + ArrowStringArrayNumpySemantics, +) + + +def test_eq_all_na(): + pytest.importorskip("pyarrow") + a = pd.array([pd.NA, pd.NA], dtype=StringDtype("pyarrow")) + result = a == a + expected = pd.array([pd.NA, pd.NA], dtype="boolean[pyarrow]") + tm.assert_extension_array_equal(result, expected) + + +def test_config(string_storage, request, using_infer_string): + if using_infer_string and string_storage != "pyarrow_numpy": + request.applymarker(pytest.mark.xfail(reason="infer string takes precedence")) + with pd.option_context("string_storage", string_storage): + assert StringDtype().storage == string_storage + result = pd.array(["a", "b"]) + assert result.dtype.storage == string_storage + + dtype = StringDtype(string_storage) + expected = dtype.construct_array_type()._from_sequence(["a", "b"], dtype=dtype) + tm.assert_equal(result, expected) + + +def test_config_bad_storage_raises(): + msg = re.escape("Value must be one of python|pyarrow") + with pytest.raises(ValueError, match=msg): + pd.options.mode.string_storage = "foo" + + +@pytest.mark.parametrize("chunked", [True, False]) +@pytest.mark.parametrize("array", ["numpy", "pyarrow"]) +def test_constructor_not_string_type_raises(array, chunked, arrow_string_storage): + pa = pytest.importorskip("pyarrow") + + array = pa if array in arrow_string_storage else np + + arr = array.array([1, 2, 3]) + if chunked: + if array is np: + pytest.skip("chunked not applicable to numpy array") + arr = pa.chunked_array(arr) + if array is np: + msg = "Unsupported type '<class 'numpy.ndarray'>' for ArrowExtensionArray" + else: + msg = re.escape( + "ArrowStringArray requires a PyArrow (chunked) array of large_string type" + ) + with pytest.raises(ValueError, match=msg): + ArrowStringArray(arr) + + +@pytest.mark.parametrize("chunked", [True, False]) +def test_constructor_not_string_type_value_dictionary_raises(chunked): + pa = pytest.importorskip("pyarrow") + + arr = pa.array([1, 2, 3], pa.dictionary(pa.int32(), pa.int32())) + if chunked: + arr = pa.chunked_array(arr) + + msg = re.escape( + "ArrowStringArray requires a PyArrow (chunked) array of large_string type" + ) + with pytest.raises(ValueError, match=msg): + ArrowStringArray(arr) + + +@pytest.mark.xfail( + reason="dict conversion does not seem to be implemented for large string in arrow" +) +@pytest.mark.parametrize("chunked", [True, False]) +def test_constructor_valid_string_type_value_dictionary(chunked): + pa = pytest.importorskip("pyarrow") + + arr = pa.array(["1", "2", "3"], pa.large_string()).dictionary_encode() + if chunked: + arr = pa.chunked_array(arr) + + arr = ArrowStringArray(arr) + assert 
pa.types.is_string(arr._pa_array.type.value_type) + + +def test_constructor_from_list(): + # GH#27673 + pytest.importorskip("pyarrow") + result = pd.Series(["E"], dtype=StringDtype(storage="pyarrow")) + assert isinstance(result.dtype, StringDtype) + assert result.dtype.storage == "pyarrow" + + +def test_from_sequence_wrong_dtype_raises(using_infer_string): + pytest.importorskip("pyarrow") + with pd.option_context("string_storage", "python"): + ArrowStringArray._from_sequence(["a", None, "c"], dtype="string") + + with pd.option_context("string_storage", "pyarrow"): + ArrowStringArray._from_sequence(["a", None, "c"], dtype="string") + + with pytest.raises(AssertionError, match=None): + ArrowStringArray._from_sequence(["a", None, "c"], dtype="string[python]") + + ArrowStringArray._from_sequence(["a", None, "c"], dtype="string[pyarrow]") + + if not using_infer_string: + with pytest.raises(AssertionError, match=None): + with pd.option_context("string_storage", "python"): + ArrowStringArray._from_sequence(["a", None, "c"], dtype=StringDtype()) + + with pd.option_context("string_storage", "pyarrow"): + ArrowStringArray._from_sequence(["a", None, "c"], dtype=StringDtype()) + + if not using_infer_string: + with pytest.raises(AssertionError, match=None): + ArrowStringArray._from_sequence( + ["a", None, "c"], dtype=StringDtype("python") + ) + + ArrowStringArray._from_sequence(["a", None, "c"], dtype=StringDtype("pyarrow")) + + with pd.option_context("string_storage", "python"): + StringArray._from_sequence(["a", None, "c"], dtype="string") + + with pd.option_context("string_storage", "pyarrow"): + StringArray._from_sequence(["a", None, "c"], dtype="string") + + StringArray._from_sequence(["a", None, "c"], dtype="string[python]") + + with pytest.raises(AssertionError, match=None): + StringArray._from_sequence(["a", None, "c"], dtype="string[pyarrow]") + + if not using_infer_string: + with pd.option_context("string_storage", "python"): + StringArray._from_sequence(["a", None, "c"], dtype=StringDtype()) + + if not using_infer_string: + with pytest.raises(AssertionError, match=None): + with pd.option_context("string_storage", "pyarrow"): + StringArray._from_sequence(["a", None, "c"], dtype=StringDtype()) + + StringArray._from_sequence(["a", None, "c"], dtype=StringDtype("python")) + + with pytest.raises(AssertionError, match=None): + StringArray._from_sequence(["a", None, "c"], dtype=StringDtype("pyarrow")) + + +@td.skip_if_installed("pyarrow") +def test_pyarrow_not_installed_raises(): + msg = re.escape("pyarrow>=10.0.1 is required for PyArrow backed") + + with pytest.raises(ImportError, match=msg): + StringDtype(storage="pyarrow") + + with pytest.raises(ImportError, match=msg): + ArrowStringArray([]) + + with pytest.raises(ImportError, match=msg): + ArrowStringArrayNumpySemantics([]) + + with pytest.raises(ImportError, match=msg): + ArrowStringArray._from_sequence(["a", None, "b"]) + + +@pytest.mark.parametrize("multiple_chunks", [False, True]) +@pytest.mark.parametrize( + "key, value, expected", + [ + (-1, "XX", ["a", "b", "c", "d", "XX"]), + (1, "XX", ["a", "XX", "c", "d", "e"]), + (1, None, ["a", None, "c", "d", "e"]), + (1, pd.NA, ["a", None, "c", "d", "e"]), + ([1, 3], "XX", ["a", "XX", "c", "XX", "e"]), + ([1, 3], ["XX", "YY"], ["a", "XX", "c", "YY", "e"]), + ([1, 3], ["XX", None], ["a", "XX", "c", None, "e"]), + ([1, 3], ["XX", pd.NA], ["a", "XX", "c", None, "e"]), + ([0, -1], ["XX", "YY"], ["XX", "b", "c", "d", "YY"]), + ([-1, 0], ["XX", "YY"], ["YY", "b", "c", "d", "XX"]), + (slice(3, None), 
"XX", ["a", "b", "c", "XX", "XX"]), + (slice(2, 4), ["XX", "YY"], ["a", "b", "XX", "YY", "e"]), + (slice(3, 1, -1), ["XX", "YY"], ["a", "b", "YY", "XX", "e"]), + (slice(None), "XX", ["XX", "XX", "XX", "XX", "XX"]), + ([False, True, False, True, False], ["XX", "YY"], ["a", "XX", "c", "YY", "e"]), + ], +) +def test_setitem(multiple_chunks, key, value, expected): + pa = pytest.importorskip("pyarrow") + + result = pa.array(list("abcde")) + expected = pa.array(expected) + + if multiple_chunks: + result = pa.chunked_array([result[:3], result[3:]]) + expected = pa.chunked_array([expected[:3], expected[3:]]) + + result = ArrowStringArray(result) + expected = ArrowStringArray(expected) + + result[key] = value + tm.assert_equal(result, expected) + + +def test_setitem_invalid_indexer_raises(): + pa = pytest.importorskip("pyarrow") + + arr = ArrowStringArray(pa.array(list("abcde"))) + + with pytest.raises(IndexError, match=None): + arr[5] = "foo" + + with pytest.raises(IndexError, match=None): + arr[-6] = "foo" + + with pytest.raises(IndexError, match=None): + arr[[0, 5]] = "foo" + + with pytest.raises(IndexError, match=None): + arr[[0, -6]] = "foo" + + with pytest.raises(IndexError, match=None): + arr[[True, True, False]] = "foo" + + with pytest.raises(ValueError, match=None): + arr[[0, 1]] = ["foo", "bar", "baz"] + + +@pytest.mark.parametrize("dtype", ["string[pyarrow]", "string[pyarrow_numpy]"]) +def test_pickle_roundtrip(dtype): + # GH 42600 + pytest.importorskip("pyarrow") + expected = pd.Series(range(10), dtype=dtype) + expected_sliced = expected.head(2) + full_pickled = pickle.dumps(expected) + sliced_pickled = pickle.dumps(expected_sliced) + + assert len(full_pickled) > len(sliced_pickled) + + result = pickle.loads(full_pickled) + tm.assert_series_equal(result, expected) + + result_sliced = pickle.loads(sliced_pickled) + tm.assert_series_equal(result_sliced, expected_sliced) + + +def test_string_dtype_error_message(): + # GH#55051 + pytest.importorskip("pyarrow") + msg = "Storage must be 'python', 'pyarrow' or 'pyarrow_numpy'." + with pytest.raises(ValueError, match=msg): + StringDtype("bla")