Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_aggregation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_algos.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_common.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_downstream.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_errors.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_expressions.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_flags.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_multilevel.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_nanops.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_register_accessor.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_sorting.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_take.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dim2.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/index.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/interface.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/setitem.py +451 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/conftest.py +230 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/__init__.py +6 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/array.py +188 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__init__.py +8 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/array.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/test_decimal.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/array.py +311 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/test_decimal.py +567 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__init__.py +7 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/array.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/test_list.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/array.py +137 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/test_list.py +33 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_arrow.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_categorical.py +200 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_common.py +105 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_datetime.py +144 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_extension.py +26 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_interval.py +98 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_masked.py +417 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_numpy.py +426 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_period.py +119 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_sparse.py +498 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_string.py +242 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_impl.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_spec_conformance.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (177 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_aggregation.cpython-310.pyc
ADDED
Binary file (3.08 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_algos.cpython-310.pyc
ADDED
Binary file (60.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_common.cpython-310.pyc
ADDED
Binary file (7.96 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_downstream.cpython-310.pyc
ADDED
Binary file (9.45 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_errors.cpython-310.pyc
ADDED
Binary file (3.02 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_expressions.cpython-310.pyc
ADDED
Binary file (11.8 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_flags.cpython-310.pyc
ADDED
Binary file (1.89 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_multilevel.cpython-310.pyc
ADDED
Binary file (10.6 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_nanops.cpython-310.pyc
ADDED
Binary file (32.8 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_optional_dependency.cpython-310.pyc
ADDED
Binary file (2.71 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_register_accessor.cpython-310.pyc
ADDED
Binary file (4.23 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_sorting.cpython-310.pyc
ADDED
Binary file (14.2 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/__pycache__/test_take.cpython-310.pyc
ADDED
Binary file (9.61 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/__init__.py
ADDED
File without changes
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dim2.cpython-310.pyc
ADDED
Binary file (9.38 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc
ADDED
Binary file (5.34 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/index.cpython-310.pyc
ADDED
Binary file (991 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/interface.cpython-310.pyc
ADDED
Binary file (4.35 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/base/setitem.py
ADDED
@@ -0,0 +1,451 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
import pandas as pd
|
5 |
+
import pandas._testing as tm
|
6 |
+
|
7 |
+
|
8 |
+
class BaseSetitemTests:
|
9 |
+
@pytest.fixture(
|
10 |
+
params=[
|
11 |
+
lambda x: x.index,
|
12 |
+
lambda x: list(x.index),
|
13 |
+
lambda x: slice(None),
|
14 |
+
lambda x: slice(0, len(x)),
|
15 |
+
lambda x: range(len(x)),
|
16 |
+
lambda x: list(range(len(x))),
|
17 |
+
lambda x: np.ones(len(x), dtype=bool),
|
18 |
+
],
|
19 |
+
ids=[
|
20 |
+
"index",
|
21 |
+
"list[index]",
|
22 |
+
"null_slice",
|
23 |
+
"full_slice",
|
24 |
+
"range",
|
25 |
+
"list(range)",
|
26 |
+
"mask",
|
27 |
+
],
|
28 |
+
)
|
29 |
+
def full_indexer(self, request):
|
30 |
+
"""
|
31 |
+
Fixture for an indexer to pass to obj.loc to get/set the full length of the
|
32 |
+
object.
|
33 |
+
|
34 |
+
In some cases, assumes that obj.index is the default RangeIndex.
|
35 |
+
"""
|
36 |
+
return request.param
|
37 |
+
|
38 |
+
@pytest.fixture(autouse=True)
|
39 |
+
def skip_if_immutable(self, dtype, request):
|
40 |
+
if dtype._is_immutable:
|
41 |
+
node = request.node
|
42 |
+
if node.name.split("[")[0] == "test_is_immutable":
|
43 |
+
# This fixture is auto-used, but we want to not-skip
|
44 |
+
# test_is_immutable.
|
45 |
+
return
|
46 |
+
|
47 |
+
# When BaseSetitemTests is mixed into ExtensionTests, we only
|
48 |
+
# want this fixture to operate on the tests defined in this
|
49 |
+
# class/file.
|
50 |
+
defined_in = node.function.__qualname__.split(".")[0]
|
51 |
+
if defined_in == "BaseSetitemTests":
|
52 |
+
pytest.skip("__setitem__ test not applicable with immutable dtype")
|
53 |
+
|
54 |
+
def test_is_immutable(self, data):
|
55 |
+
if data.dtype._is_immutable:
|
56 |
+
with pytest.raises(TypeError):
|
57 |
+
data[0] = data[0]
|
58 |
+
else:
|
59 |
+
data[0] = data[1]
|
60 |
+
assert data[0] == data[1]
|
61 |
+
|
62 |
+
def test_setitem_scalar_series(self, data, box_in_series):
|
63 |
+
if box_in_series:
|
64 |
+
data = pd.Series(data)
|
65 |
+
data[0] = data[1]
|
66 |
+
assert data[0] == data[1]
|
67 |
+
|
68 |
+
def test_setitem_sequence(self, data, box_in_series):
|
69 |
+
if box_in_series:
|
70 |
+
data = pd.Series(data)
|
71 |
+
original = data.copy()
|
72 |
+
|
73 |
+
data[[0, 1]] = [data[1], data[0]]
|
74 |
+
assert data[0] == original[1]
|
75 |
+
assert data[1] == original[0]
|
76 |
+
|
77 |
+
def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
|
78 |
+
ser = pd.Series(data)
|
79 |
+
original = ser.copy()
|
80 |
+
value = [data[0]]
|
81 |
+
if as_array:
|
82 |
+
value = data._from_sequence(value, dtype=data.dtype)
|
83 |
+
|
84 |
+
xpr = "cannot set using a {} indexer with a different length"
|
85 |
+
with pytest.raises(ValueError, match=xpr.format("list-like")):
|
86 |
+
ser[[0, 1]] = value
|
87 |
+
# Ensure no modifications made before the exception
|
88 |
+
tm.assert_series_equal(ser, original)
|
89 |
+
|
90 |
+
with pytest.raises(ValueError, match=xpr.format("slice")):
|
91 |
+
ser[slice(3)] = value
|
92 |
+
tm.assert_series_equal(ser, original)
|
93 |
+
|
94 |
+
def test_setitem_empty_indexer(self, data, box_in_series):
|
95 |
+
if box_in_series:
|
96 |
+
data = pd.Series(data)
|
97 |
+
original = data.copy()
|
98 |
+
data[np.array([], dtype=int)] = []
|
99 |
+
tm.assert_equal(data, original)
|
100 |
+
|
101 |
+
def test_setitem_sequence_broadcasts(self, data, box_in_series):
|
102 |
+
if box_in_series:
|
103 |
+
data = pd.Series(data)
|
104 |
+
data[[0, 1]] = data[2]
|
105 |
+
assert data[0] == data[2]
|
106 |
+
assert data[1] == data[2]
|
107 |
+
|
108 |
+
@pytest.mark.parametrize("setter", ["loc", "iloc"])
|
109 |
+
def test_setitem_scalar(self, data, setter):
|
110 |
+
arr = pd.Series(data)
|
111 |
+
setter = getattr(arr, setter)
|
112 |
+
setter[0] = data[1]
|
113 |
+
assert arr[0] == data[1]
|
114 |
+
|
115 |
+
def test_setitem_loc_scalar_mixed(self, data):
|
116 |
+
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
|
117 |
+
df.loc[0, "B"] = data[1]
|
118 |
+
assert df.loc[0, "B"] == data[1]
|
119 |
+
|
120 |
+
def test_setitem_loc_scalar_single(self, data):
|
121 |
+
df = pd.DataFrame({"B": data})
|
122 |
+
df.loc[10, "B"] = data[1]
|
123 |
+
assert df.loc[10, "B"] == data[1]
|
124 |
+
|
125 |
+
def test_setitem_loc_scalar_multiple_homogoneous(self, data):
|
126 |
+
df = pd.DataFrame({"A": data, "B": data})
|
127 |
+
df.loc[10, "B"] = data[1]
|
128 |
+
assert df.loc[10, "B"] == data[1]
|
129 |
+
|
130 |
+
def test_setitem_iloc_scalar_mixed(self, data):
|
131 |
+
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
|
132 |
+
df.iloc[0, 1] = data[1]
|
133 |
+
assert df.loc[0, "B"] == data[1]
|
134 |
+
|
135 |
+
def test_setitem_iloc_scalar_single(self, data):
|
136 |
+
df = pd.DataFrame({"B": data})
|
137 |
+
df.iloc[10, 0] = data[1]
|
138 |
+
assert df.loc[10, "B"] == data[1]
|
139 |
+
|
140 |
+
def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
|
141 |
+
df = pd.DataFrame({"A": data, "B": data})
|
142 |
+
df.iloc[10, 1] = data[1]
|
143 |
+
assert df.loc[10, "B"] == data[1]
|
144 |
+
|
145 |
+
@pytest.mark.parametrize(
|
146 |
+
"mask",
|
147 |
+
[
|
148 |
+
np.array([True, True, True, False, False]),
|
149 |
+
pd.array([True, True, True, False, False], dtype="boolean"),
|
150 |
+
pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),
|
151 |
+
],
|
152 |
+
ids=["numpy-array", "boolean-array", "boolean-array-na"],
|
153 |
+
)
|
154 |
+
def test_setitem_mask(self, data, mask, box_in_series):
|
155 |
+
arr = data[:5].copy()
|
156 |
+
expected = arr.take([0, 0, 0, 3, 4])
|
157 |
+
if box_in_series:
|
158 |
+
arr = pd.Series(arr)
|
159 |
+
expected = pd.Series(expected)
|
160 |
+
arr[mask] = data[0]
|
161 |
+
tm.assert_equal(expected, arr)
|
162 |
+
|
163 |
+
def test_setitem_mask_raises(self, data, box_in_series):
|
164 |
+
# wrong length
|
165 |
+
mask = np.array([True, False])
|
166 |
+
|
167 |
+
if box_in_series:
|
168 |
+
data = pd.Series(data)
|
169 |
+
|
170 |
+
with pytest.raises(IndexError, match="wrong length"):
|
171 |
+
data[mask] = data[0]
|
172 |
+
|
173 |
+
mask = pd.array(mask, dtype="boolean")
|
174 |
+
with pytest.raises(IndexError, match="wrong length"):
|
175 |
+
data[mask] = data[0]
|
176 |
+
|
177 |
+
def test_setitem_mask_boolean_array_with_na(self, data, box_in_series):
|
178 |
+
mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
|
179 |
+
mask[:3] = True
|
180 |
+
mask[3:5] = pd.NA
|
181 |
+
|
182 |
+
if box_in_series:
|
183 |
+
data = pd.Series(data)
|
184 |
+
|
185 |
+
data[mask] = data[0]
|
186 |
+
|
187 |
+
assert (data[:3] == data[0]).all()
|
188 |
+
|
189 |
+
@pytest.mark.parametrize(
|
190 |
+
"idx",
|
191 |
+
[[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
|
192 |
+
ids=["list", "integer-array", "numpy-array"],
|
193 |
+
)
|
194 |
+
def test_setitem_integer_array(self, data, idx, box_in_series):
|
195 |
+
arr = data[:5].copy()
|
196 |
+
expected = data.take([0, 0, 0, 3, 4])
|
197 |
+
|
198 |
+
if box_in_series:
|
199 |
+
arr = pd.Series(arr)
|
200 |
+
expected = pd.Series(expected)
|
201 |
+
|
202 |
+
arr[idx] = arr[0]
|
203 |
+
tm.assert_equal(arr, expected)
|
204 |
+
|
205 |
+
@pytest.mark.parametrize(
|
206 |
+
"idx, box_in_series",
|
207 |
+
[
|
208 |
+
([0, 1, 2, pd.NA], False),
|
209 |
+
pytest.param(
|
210 |
+
[0, 1, 2, pd.NA], True, marks=pytest.mark.xfail(reason="GH-31948")
|
211 |
+
),
|
212 |
+
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
|
213 |
+
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
|
214 |
+
],
|
215 |
+
ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],
|
216 |
+
)
|
217 |
+
def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):
|
218 |
+
arr = data.copy()
|
219 |
+
|
220 |
+
# TODO(xfail) this raises KeyError about labels not found (it tries label-based)
|
221 |
+
# for list of labels with Series
|
222 |
+
if box_in_series:
|
223 |
+
arr = pd.Series(data, index=[chr(100 + i) for i in range(len(data))])
|
224 |
+
|
225 |
+
msg = "Cannot index with an integer indexer containing NA values"
|
226 |
+
with pytest.raises(ValueError, match=msg):
|
227 |
+
arr[idx] = arr[0]
|
228 |
+
|
229 |
+
@pytest.mark.parametrize("as_callable", [True, False])
|
230 |
+
@pytest.mark.parametrize("setter", ["loc", None])
|
231 |
+
def test_setitem_mask_aligned(self, data, as_callable, setter):
|
232 |
+
ser = pd.Series(data)
|
233 |
+
mask = np.zeros(len(data), dtype=bool)
|
234 |
+
mask[:2] = True
|
235 |
+
|
236 |
+
if as_callable:
|
237 |
+
mask2 = lambda x: mask
|
238 |
+
else:
|
239 |
+
mask2 = mask
|
240 |
+
|
241 |
+
if setter:
|
242 |
+
# loc
|
243 |
+
target = getattr(ser, setter)
|
244 |
+
else:
|
245 |
+
# Series.__setitem__
|
246 |
+
target = ser
|
247 |
+
|
248 |
+
target[mask2] = data[5:7]
|
249 |
+
|
250 |
+
ser[mask2] = data[5:7]
|
251 |
+
assert ser[0] == data[5]
|
252 |
+
assert ser[1] == data[6]
|
253 |
+
|
254 |
+
@pytest.mark.parametrize("setter", ["loc", None])
|
255 |
+
def test_setitem_mask_broadcast(self, data, setter):
|
256 |
+
ser = pd.Series(data)
|
257 |
+
mask = np.zeros(len(data), dtype=bool)
|
258 |
+
mask[:2] = True
|
259 |
+
|
260 |
+
if setter: # loc
|
261 |
+
target = getattr(ser, setter)
|
262 |
+
else: # __setitem__
|
263 |
+
target = ser
|
264 |
+
|
265 |
+
target[mask] = data[10]
|
266 |
+
assert ser[0] == data[10]
|
267 |
+
assert ser[1] == data[10]
|
268 |
+
|
269 |
+
def test_setitem_expand_columns(self, data):
|
270 |
+
df = pd.DataFrame({"A": data})
|
271 |
+
result = df.copy()
|
272 |
+
result["B"] = 1
|
273 |
+
expected = pd.DataFrame({"A": data, "B": [1] * len(data)})
|
274 |
+
tm.assert_frame_equal(result, expected)
|
275 |
+
|
276 |
+
result = df.copy()
|
277 |
+
result.loc[:, "B"] = 1
|
278 |
+
tm.assert_frame_equal(result, expected)
|
279 |
+
|
280 |
+
# overwrite with new type
|
281 |
+
result["B"] = data
|
282 |
+
expected = pd.DataFrame({"A": data, "B": data})
|
283 |
+
tm.assert_frame_equal(result, expected)
|
284 |
+
|
285 |
+
def test_setitem_expand_with_extension(self, data):
|
286 |
+
df = pd.DataFrame({"A": [1] * len(data)})
|
287 |
+
result = df.copy()
|
288 |
+
result["B"] = data
|
289 |
+
expected = pd.DataFrame({"A": [1] * len(data), "B": data})
|
290 |
+
tm.assert_frame_equal(result, expected)
|
291 |
+
|
292 |
+
result = df.copy()
|
293 |
+
result.loc[:, "B"] = data
|
294 |
+
tm.assert_frame_equal(result, expected)
|
295 |
+
|
296 |
+
def test_setitem_frame_invalid_length(self, data):
|
297 |
+
df = pd.DataFrame({"A": [1] * len(data)})
|
298 |
+
xpr = (
|
299 |
+
rf"Length of values \({len(data[:5])}\) "
|
300 |
+
rf"does not match length of index \({len(df)}\)"
|
301 |
+
)
|
302 |
+
with pytest.raises(ValueError, match=xpr):
|
303 |
+
df["B"] = data[:5]
|
304 |
+
|
305 |
+
def test_setitem_tuple_index(self, data):
|
306 |
+
ser = pd.Series(data[:2], index=[(0, 0), (0, 1)])
|
307 |
+
expected = pd.Series(data.take([1, 1]), index=ser.index)
|
308 |
+
ser[(0, 0)] = data[1]
|
309 |
+
tm.assert_series_equal(ser, expected)
|
310 |
+
|
311 |
+
def test_setitem_slice(self, data, box_in_series):
|
312 |
+
arr = data[:5].copy()
|
313 |
+
expected = data.take([0, 0, 0, 3, 4])
|
314 |
+
if box_in_series:
|
315 |
+
arr = pd.Series(arr)
|
316 |
+
expected = pd.Series(expected)
|
317 |
+
|
318 |
+
arr[:3] = data[0]
|
319 |
+
tm.assert_equal(arr, expected)
|
320 |
+
|
321 |
+
def test_setitem_loc_iloc_slice(self, data):
|
322 |
+
arr = data[:5].copy()
|
323 |
+
s = pd.Series(arr, index=["a", "b", "c", "d", "e"])
|
324 |
+
expected = pd.Series(data.take([0, 0, 0, 3, 4]), index=s.index)
|
325 |
+
|
326 |
+
result = s.copy()
|
327 |
+
result.iloc[:3] = data[0]
|
328 |
+
tm.assert_equal(result, expected)
|
329 |
+
|
330 |
+
result = s.copy()
|
331 |
+
result.loc[:"c"] = data[0]
|
332 |
+
tm.assert_equal(result, expected)
|
333 |
+
|
334 |
+
def test_setitem_slice_mismatch_length_raises(self, data):
|
335 |
+
arr = data[:5]
|
336 |
+
with pytest.raises(ValueError):
|
337 |
+
arr[:1] = arr[:2]
|
338 |
+
|
339 |
+
def test_setitem_slice_array(self, data):
|
340 |
+
arr = data[:5].copy()
|
341 |
+
arr[:5] = data[-5:]
|
342 |
+
tm.assert_extension_array_equal(arr, data[-5:])
|
343 |
+
|
344 |
+
def test_setitem_scalar_key_sequence_raise(self, data):
|
345 |
+
arr = data[:5].copy()
|
346 |
+
with pytest.raises(ValueError):
|
347 |
+
arr[0] = arr[[0, 1]]
|
348 |
+
|
349 |
+
def test_setitem_preserves_views(self, data):
|
350 |
+
# GH#28150 setitem shouldn't swap the underlying data
|
351 |
+
view1 = data.view()
|
352 |
+
view2 = data[:]
|
353 |
+
|
354 |
+
data[0] = data[1]
|
355 |
+
assert view1[0] == data[1]
|
356 |
+
assert view2[0] == data[1]
|
357 |
+
|
358 |
+
def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):
|
359 |
+
# https://github.com/pandas-dev/pandas/issues/32395
|
360 |
+
df = expected = pd.DataFrame({0: pd.Series(data)})
|
361 |
+
result = pd.DataFrame(index=df.index)
|
362 |
+
|
363 |
+
key = full_indexer(df)
|
364 |
+
result.loc[key, 0] = df[0]
|
365 |
+
|
366 |
+
tm.assert_frame_equal(result, expected)
|
367 |
+
|
368 |
+
def test_setitem_with_expansion_row(self, data, na_value):
|
369 |
+
df = pd.DataFrame({"data": data[:1]})
|
370 |
+
|
371 |
+
df.loc[1, "data"] = data[1]
|
372 |
+
expected = pd.DataFrame({"data": data[:2]})
|
373 |
+
tm.assert_frame_equal(df, expected)
|
374 |
+
|
375 |
+
# https://github.com/pandas-dev/pandas/issues/47284
|
376 |
+
df.loc[2, "data"] = na_value
|
377 |
+
expected = pd.DataFrame(
|
378 |
+
{"data": pd.Series([data[0], data[1], na_value], dtype=data.dtype)}
|
379 |
+
)
|
380 |
+
tm.assert_frame_equal(df, expected)
|
381 |
+
|
382 |
+
def test_setitem_series(self, data, full_indexer):
|
383 |
+
# https://github.com/pandas-dev/pandas/issues/32395
|
384 |
+
ser = pd.Series(data, name="data")
|
385 |
+
result = pd.Series(index=ser.index, dtype=object, name="data")
|
386 |
+
|
387 |
+
# because result has object dtype, the attempt to do setting inplace
|
388 |
+
# is successful, and object dtype is retained
|
389 |
+
key = full_indexer(ser)
|
390 |
+
result.loc[key] = ser
|
391 |
+
|
392 |
+
expected = pd.Series(
|
393 |
+
data.astype(object), index=ser.index, name="data", dtype=object
|
394 |
+
)
|
395 |
+
tm.assert_series_equal(result, expected)
|
396 |
+
|
397 |
+
def test_setitem_frame_2d_values(self, data):
|
398 |
+
# GH#44514
|
399 |
+
df = pd.DataFrame({"A": data})
|
400 |
+
|
401 |
+
# Avoiding using_array_manager fixture
|
402 |
+
# https://github.com/pandas-dev/pandas/pull/44514#discussion_r754002410
|
403 |
+
using_array_manager = isinstance(df._mgr, pd.core.internals.ArrayManager)
|
404 |
+
using_copy_on_write = pd.options.mode.copy_on_write
|
405 |
+
|
406 |
+
blk_data = df._mgr.arrays[0]
|
407 |
+
|
408 |
+
orig = df.copy()
|
409 |
+
|
410 |
+
df.iloc[:] = df.copy()
|
411 |
+
tm.assert_frame_equal(df, orig)
|
412 |
+
|
413 |
+
df.iloc[:-1] = df.iloc[:-1].copy()
|
414 |
+
tm.assert_frame_equal(df, orig)
|
415 |
+
|
416 |
+
df.iloc[:] = df.values
|
417 |
+
tm.assert_frame_equal(df, orig)
|
418 |
+
if not using_array_manager and not using_copy_on_write:
|
419 |
+
# GH#33457 Check that this setting occurred in-place
|
420 |
+
# FIXME(ArrayManager): this should work there too
|
421 |
+
assert df._mgr.arrays[0] is blk_data
|
422 |
+
|
423 |
+
df.iloc[:-1] = df.values[:-1]
|
424 |
+
tm.assert_frame_equal(df, orig)
|
425 |
+
|
426 |
+
def test_delitem_series(self, data):
|
427 |
+
# GH#40763
|
428 |
+
ser = pd.Series(data, name="data")
|
429 |
+
|
430 |
+
taker = np.arange(len(ser))
|
431 |
+
taker = np.delete(taker, 1)
|
432 |
+
|
433 |
+
expected = ser[taker]
|
434 |
+
del ser[1]
|
435 |
+
tm.assert_series_equal(ser, expected)
|
436 |
+
|
437 |
+
def test_setitem_invalid(self, data, invalid_scalar):
|
438 |
+
msg = "" # messages vary by subclass, so we do not test it
|
439 |
+
with pytest.raises((ValueError, TypeError), match=msg):
|
440 |
+
data[0] = invalid_scalar
|
441 |
+
|
442 |
+
with pytest.raises((ValueError, TypeError), match=msg):
|
443 |
+
data[:] = invalid_scalar
|
444 |
+
|
445 |
+
def test_setitem_2d_values(self, data):
|
446 |
+
# GH50085
|
447 |
+
original = data.copy()
|
448 |
+
df = pd.DataFrame({"a": data, "b": data})
|
449 |
+
df.loc[[0, 1], :] = df.loc[[1, 0], :].values
|
450 |
+
assert (df.loc[0, :] == original[1]).all()
|
451 |
+
assert (df.loc[1, :] == original[0]).all()
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/conftest.py
ADDED
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import operator
|
2 |
+
|
3 |
+
import pytest
|
4 |
+
|
5 |
+
from pandas._config.config import _get_option
|
6 |
+
|
7 |
+
from pandas import (
|
8 |
+
Series,
|
9 |
+
options,
|
10 |
+
)
|
11 |
+
|
12 |
+
|
13 |
+
@pytest.fixture
|
14 |
+
def dtype():
|
15 |
+
"""A fixture providing the ExtensionDtype to validate."""
|
16 |
+
raise NotImplementedError
|
17 |
+
|
18 |
+
|
19 |
+
@pytest.fixture
|
20 |
+
def data():
|
21 |
+
"""
|
22 |
+
Length-100 array for this type.
|
23 |
+
|
24 |
+
* data[0] and data[1] should both be non missing
|
25 |
+
* data[0] and data[1] should not be equal
|
26 |
+
"""
|
27 |
+
raise NotImplementedError
|
28 |
+
|
29 |
+
|
30 |
+
@pytest.fixture
|
31 |
+
def data_for_twos(dtype):
|
32 |
+
"""
|
33 |
+
Length-100 array in which all the elements are two.
|
34 |
+
|
35 |
+
Call pytest.skip in your fixture if the dtype does not support divmod.
|
36 |
+
"""
|
37 |
+
if not (dtype._is_numeric or dtype.kind == "m"):
|
38 |
+
# Object-dtypes may want to allow this, but for the most part
|
39 |
+
# only numeric and timedelta-like dtypes will need to implement this.
|
40 |
+
pytest.skip(f"{dtype} is not a numeric dtype")
|
41 |
+
|
42 |
+
raise NotImplementedError
|
43 |
+
|
44 |
+
|
45 |
+
@pytest.fixture
|
46 |
+
def data_missing():
|
47 |
+
"""Length-2 array with [NA, Valid]"""
|
48 |
+
raise NotImplementedError
|
49 |
+
|
50 |
+
|
51 |
+
@pytest.fixture(params=["data", "data_missing"])
|
52 |
+
def all_data(request, data, data_missing):
|
53 |
+
"""Parametrized fixture giving 'data' and 'data_missing'"""
|
54 |
+
if request.param == "data":
|
55 |
+
return data
|
56 |
+
elif request.param == "data_missing":
|
57 |
+
return data_missing
|
58 |
+
|
59 |
+
|
60 |
+
@pytest.fixture
|
61 |
+
def data_repeated(data):
|
62 |
+
"""
|
63 |
+
Generate many datasets.
|
64 |
+
|
65 |
+
Parameters
|
66 |
+
----------
|
67 |
+
data : fixture implementing `data`
|
68 |
+
|
69 |
+
Returns
|
70 |
+
-------
|
71 |
+
Callable[[int], Generator]:
|
72 |
+
A callable that takes a `count` argument and
|
73 |
+
returns a generator yielding `count` datasets.
|
74 |
+
"""
|
75 |
+
|
76 |
+
def gen(count):
|
77 |
+
for _ in range(count):
|
78 |
+
yield data
|
79 |
+
|
80 |
+
return gen
|
81 |
+
|
82 |
+
|
83 |
+
@pytest.fixture
|
84 |
+
def data_for_sorting():
|
85 |
+
"""
|
86 |
+
Length-3 array with a known sort order.
|
87 |
+
|
88 |
+
This should be three items [B, C, A] with
|
89 |
+
A < B < C
|
90 |
+
|
91 |
+
For boolean dtypes (for which there are only 2 values available),
|
92 |
+
set B=C=True
|
93 |
+
"""
|
94 |
+
raise NotImplementedError
|
95 |
+
|
96 |
+
|
97 |
+
@pytest.fixture
|
98 |
+
def data_missing_for_sorting():
|
99 |
+
"""
|
100 |
+
Length-3 array with a known sort order.
|
101 |
+
|
102 |
+
This should be three items [B, NA, A] with
|
103 |
+
A < B and NA missing.
|
104 |
+
"""
|
105 |
+
raise NotImplementedError
|
106 |
+
|
107 |
+
|
108 |
+
@pytest.fixture
|
109 |
+
def na_cmp():
|
110 |
+
"""
|
111 |
+
Binary operator for comparing NA values.
|
112 |
+
|
113 |
+
Should return a function of two arguments that returns
|
114 |
+
True if both arguments are (scalar) NA for your type.
|
115 |
+
|
116 |
+
By default, uses ``operator.is_``
|
117 |
+
"""
|
118 |
+
return operator.is_
|
119 |
+
|
120 |
+
|
121 |
+
@pytest.fixture
|
122 |
+
def na_value(dtype):
|
123 |
+
"""
|
124 |
+
The scalar missing value for this type. Default dtype.na_value.
|
125 |
+
|
126 |
+
TODO: can be removed in 3.x (see https://github.com/pandas-dev/pandas/pull/54930)
|
127 |
+
"""
|
128 |
+
return dtype.na_value
|
129 |
+
|
130 |
+
|
131 |
+
@pytest.fixture
|
132 |
+
def data_for_grouping():
|
133 |
+
"""
|
134 |
+
Data for factorization, grouping, and unique tests.
|
135 |
+
|
136 |
+
Expected to be like [B, B, NA, NA, A, A, B, C]
|
137 |
+
|
138 |
+
Where A < B < C and NA is missing.
|
139 |
+
|
140 |
+
If a dtype has _is_boolean = True, i.e. only 2 unique non-NA entries,
|
141 |
+
then set C=B.
|
142 |
+
"""
|
143 |
+
raise NotImplementedError
|
144 |
+
|
145 |
+
|
146 |
+
@pytest.fixture(params=[True, False])
|
147 |
+
def box_in_series(request):
|
148 |
+
"""Whether to box the data in a Series"""
|
149 |
+
return request.param
|
150 |
+
|
151 |
+
|
152 |
+
@pytest.fixture(
|
153 |
+
params=[
|
154 |
+
lambda x: 1,
|
155 |
+
lambda x: [1] * len(x),
|
156 |
+
lambda x: Series([1] * len(x)),
|
157 |
+
lambda x: x,
|
158 |
+
],
|
159 |
+
ids=["scalar", "list", "series", "object"],
|
160 |
+
)
|
161 |
+
def groupby_apply_op(request):
|
162 |
+
"""
|
163 |
+
Functions to test groupby.apply().
|
164 |
+
"""
|
165 |
+
return request.param
|
166 |
+
|
167 |
+
|
168 |
+
@pytest.fixture(params=[True, False])
|
169 |
+
def as_frame(request):
|
170 |
+
"""
|
171 |
+
Boolean fixture to support Series and Series.to_frame() comparison testing.
|
172 |
+
"""
|
173 |
+
return request.param
|
174 |
+
|
175 |
+
|
176 |
+
@pytest.fixture(params=[True, False])
|
177 |
+
def as_series(request):
|
178 |
+
"""
|
179 |
+
Boolean fixture to support arr and Series(arr) comparison testing.
|
180 |
+
"""
|
181 |
+
return request.param
|
182 |
+
|
183 |
+
|
184 |
+
@pytest.fixture(params=[True, False])
|
185 |
+
def use_numpy(request):
|
186 |
+
"""
|
187 |
+
Boolean fixture to support comparison testing of ExtensionDtype array
|
188 |
+
and numpy array.
|
189 |
+
"""
|
190 |
+
return request.param
|
191 |
+
|
192 |
+
|
193 |
+
@pytest.fixture(params=["ffill", "bfill"])
|
194 |
+
def fillna_method(request):
|
195 |
+
"""
|
196 |
+
Parametrized fixture giving method parameters 'ffill' and 'bfill' for
|
197 |
+
Series.fillna(method=<method>) testing.
|
198 |
+
"""
|
199 |
+
return request.param
|
200 |
+
|
201 |
+
|
202 |
+
@pytest.fixture(params=[True, False])
|
203 |
+
def as_array(request):
|
204 |
+
"""
|
205 |
+
Boolean fixture to support ExtensionDtype _from_sequence method testing.
|
206 |
+
"""
|
207 |
+
return request.param
|
208 |
+
|
209 |
+
|
210 |
+
@pytest.fixture
|
211 |
+
def invalid_scalar(data):
|
212 |
+
"""
|
213 |
+
A scalar that *cannot* be held by this ExtensionArray.
|
214 |
+
|
215 |
+
The default should work for most subclasses, but is not guaranteed.
|
216 |
+
|
217 |
+
If the array can hold any item (i.e. object dtype), then use pytest.skip.
|
218 |
+
"""
|
219 |
+
return object.__new__(object)
|
220 |
+
|
221 |
+
|
222 |
+
@pytest.fixture
|
223 |
+
def using_copy_on_write() -> bool:
|
224 |
+
"""
|
225 |
+
Fixture to check if Copy-on-Write is enabled.
|
226 |
+
"""
|
227 |
+
return (
|
228 |
+
options.mode.copy_on_write is True
|
229 |
+
and _get_option("mode.data_manager", silent=True) == "block"
|
230 |
+
)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/__init__.py
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pandas.tests.extension.date.array import (
|
2 |
+
DateArray,
|
3 |
+
DateDtype,
|
4 |
+
)
|
5 |
+
|
6 |
+
__all__ = ["DateArray", "DateDtype"]
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/date/array.py
ADDED
@@ -0,0 +1,188 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import datetime as dt
|
4 |
+
from typing import (
|
5 |
+
TYPE_CHECKING,
|
6 |
+
Any,
|
7 |
+
cast,
|
8 |
+
)
|
9 |
+
|
10 |
+
import numpy as np
|
11 |
+
|
12 |
+
from pandas.core.dtypes.dtypes import register_extension_dtype
|
13 |
+
|
14 |
+
from pandas.api.extensions import (
|
15 |
+
ExtensionArray,
|
16 |
+
ExtensionDtype,
|
17 |
+
)
|
18 |
+
from pandas.api.types import pandas_dtype
|
19 |
+
|
20 |
+
if TYPE_CHECKING:
|
21 |
+
from collections.abc import Sequence
|
22 |
+
|
23 |
+
from pandas._typing import (
|
24 |
+
Dtype,
|
25 |
+
PositionalIndexer,
|
26 |
+
)
|
27 |
+
|
28 |
+
|
29 |
+
@register_extension_dtype
|
30 |
+
class DateDtype(ExtensionDtype):
|
31 |
+
@property
|
32 |
+
def type(self):
|
33 |
+
return dt.date
|
34 |
+
|
35 |
+
@property
|
36 |
+
def name(self):
|
37 |
+
return "DateDtype"
|
38 |
+
|
39 |
+
@classmethod
|
40 |
+
def construct_from_string(cls, string: str):
|
41 |
+
if not isinstance(string, str):
|
42 |
+
raise TypeError(
|
43 |
+
f"'construct_from_string' expects a string, got {type(string)}"
|
44 |
+
)
|
45 |
+
|
46 |
+
if string == cls.__name__:
|
47 |
+
return cls()
|
48 |
+
else:
|
49 |
+
raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
|
50 |
+
|
51 |
+
@classmethod
|
52 |
+
def construct_array_type(cls):
|
53 |
+
return DateArray
|
54 |
+
|
55 |
+
@property
|
56 |
+
def na_value(self):
|
57 |
+
return dt.date.min
|
58 |
+
|
59 |
+
def __repr__(self) -> str:
|
60 |
+
return self.name
|
61 |
+
|
62 |
+
|
63 |
+
class DateArray(ExtensionArray):
|
64 |
+
def __init__(
|
65 |
+
self,
|
66 |
+
dates: (
|
67 |
+
dt.date
|
68 |
+
| Sequence[dt.date]
|
69 |
+
| tuple[np.ndarray, np.ndarray, np.ndarray]
|
70 |
+
| np.ndarray
|
71 |
+
),
|
72 |
+
) -> None:
|
73 |
+
if isinstance(dates, dt.date):
|
74 |
+
self._year = np.array([dates.year])
|
75 |
+
self._month = np.array([dates.month])
|
76 |
+
self._day = np.array([dates.year])
|
77 |
+
return
|
78 |
+
|
79 |
+
ldates = len(dates)
|
80 |
+
if isinstance(dates, list):
|
81 |
+
# pre-allocate the arrays since we know the size before hand
|
82 |
+
self._year = np.zeros(ldates, dtype=np.uint16) # 65535 (0, 9999)
|
83 |
+
self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31)
|
84 |
+
self._day = np.zeros(ldates, dtype=np.uint8) # 255 (1, 12)
|
85 |
+
# populate them
|
86 |
+
for i, (y, m, d) in enumerate(
|
87 |
+
(date.year, date.month, date.day) for date in dates
|
88 |
+
):
|
89 |
+
self._year[i] = y
|
90 |
+
self._month[i] = m
|
91 |
+
self._day[i] = d
|
92 |
+
|
93 |
+
elif isinstance(dates, tuple):
|
94 |
+
# only support triples
|
95 |
+
if ldates != 3:
|
96 |
+
raise ValueError("only triples are valid")
|
97 |
+
# check if all elements have the same type
|
98 |
+
if any(not isinstance(x, np.ndarray) for x in dates):
|
99 |
+
raise TypeError("invalid type")
|
100 |
+
ly, lm, ld = (len(cast(np.ndarray, d)) for d in dates)
|
101 |
+
if not ly == lm == ld:
|
102 |
+
raise ValueError(
|
103 |
+
f"tuple members must have the same length: {(ly, lm, ld)}"
|
104 |
+
)
|
105 |
+
self._year = dates[0].astype(np.uint16)
|
106 |
+
self._month = dates[1].astype(np.uint8)
|
107 |
+
self._day = dates[2].astype(np.uint8)
|
108 |
+
|
109 |
+
elif isinstance(dates, np.ndarray) and dates.dtype == "U10":
|
110 |
+
self._year = np.zeros(ldates, dtype=np.uint16) # 65535 (0, 9999)
|
111 |
+
self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31)
|
112 |
+
self._day = np.zeros(ldates, dtype=np.uint8) # 255 (1, 12)
|
113 |
+
|
114 |
+
# error: "object_" object is not iterable
|
115 |
+
obj = np.char.split(dates, sep="-")
|
116 |
+
for (i,), (y, m, d) in np.ndenumerate(obj): # type: ignore[misc]
|
117 |
+
self._year[i] = int(y)
|
118 |
+
self._month[i] = int(m)
|
119 |
+
self._day[i] = int(d)
|
120 |
+
|
121 |
+
else:
|
122 |
+
raise TypeError(f"{type(dates)} is not supported")
|
123 |
+
|
124 |
+
@property
|
125 |
+
def dtype(self) -> ExtensionDtype:
|
126 |
+
return DateDtype()
|
127 |
+
|
128 |
+
def astype(self, dtype, copy=True):
|
129 |
+
dtype = pandas_dtype(dtype)
|
130 |
+
|
131 |
+
if isinstance(dtype, DateDtype):
|
132 |
+
data = self.copy() if copy else self
|
133 |
+
else:
|
134 |
+
data = self.to_numpy(dtype=dtype, copy=copy, na_value=dt.date.min)
|
135 |
+
|
136 |
+
return data
|
137 |
+
|
138 |
+
@property
|
139 |
+
def nbytes(self) -> int:
|
140 |
+
return self._year.nbytes + self._month.nbytes + self._day.nbytes
|
141 |
+
|
142 |
+
def __len__(self) -> int:
|
143 |
+
return len(self._year) # all 3 arrays are enforced to have the same length
|
144 |
+
|
145 |
+
def __getitem__(self, item: PositionalIndexer):
|
146 |
+
if isinstance(item, int):
|
147 |
+
return dt.date(self._year[item], self._month[item], self._day[item])
|
148 |
+
else:
|
149 |
+
raise NotImplementedError("only ints are supported as indexes")
|
150 |
+
|
151 |
+
def __setitem__(self, key: int | slice | np.ndarray, value: Any) -> None:
|
152 |
+
if not isinstance(key, int):
|
153 |
+
raise NotImplementedError("only ints are supported as indexes")
|
154 |
+
|
155 |
+
if not isinstance(value, dt.date):
|
156 |
+
raise TypeError("you can only set datetime.date types")
|
157 |
+
|
158 |
+
self._year[key] = value.year
|
159 |
+
self._month[key] = value.month
|
160 |
+
self._day[key] = value.day
|
161 |
+
|
162 |
+
def __repr__(self) -> str:
|
163 |
+
return f"DateArray{list(zip(self._year, self._month, self._day))}"
|
164 |
+
|
165 |
+
def copy(self) -> DateArray:
|
166 |
+
return DateArray((self._year.copy(), self._month.copy(), self._day.copy()))
|
167 |
+
|
168 |
+
def isna(self) -> np.ndarray:
|
169 |
+
return np.logical_and(
|
170 |
+
np.logical_and(
|
171 |
+
self._year == dt.date.min.year, self._month == dt.date.min.month
|
172 |
+
),
|
173 |
+
self._day == dt.date.min.day,
|
174 |
+
)
|
175 |
+
|
176 |
+
@classmethod
|
177 |
+
def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False):
|
178 |
+
if isinstance(scalars, dt.date):
|
179 |
+
raise TypeError
|
180 |
+
elif isinstance(scalars, DateArray):
|
181 |
+
if dtype is not None:
|
182 |
+
return scalars.astype(dtype, copy=copy)
|
183 |
+
if copy:
|
184 |
+
return scalars.copy()
|
185 |
+
return scalars[:]
|
186 |
+
elif isinstance(scalars, np.ndarray):
|
187 |
+
scalars = scalars.astype("U10") # 10 chars for yyyy-mm-dd
|
188 |
+
return DateArray(scalars)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__init__.py
ADDED
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pandas.tests.extension.decimal.array import (
|
2 |
+
DecimalArray,
|
3 |
+
DecimalDtype,
|
4 |
+
make_data,
|
5 |
+
to_decimal,
|
6 |
+
)
|
7 |
+
|
8 |
+
__all__ = ["DecimalArray", "DecimalDtype", "to_decimal", "make_data"]
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (373 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/array.cpython-310.pyc
ADDED
Binary file (10.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/test_decimal.cpython-310.pyc
ADDED
Binary file (18.7 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/array.py
ADDED
@@ -0,0 +1,311 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import decimal
|
4 |
+
import numbers
|
5 |
+
import sys
|
6 |
+
from typing import TYPE_CHECKING
|
7 |
+
|
8 |
+
import numpy as np
|
9 |
+
|
10 |
+
from pandas.core.dtypes.base import ExtensionDtype
|
11 |
+
from pandas.core.dtypes.common import (
|
12 |
+
is_dtype_equal,
|
13 |
+
is_float,
|
14 |
+
is_integer,
|
15 |
+
pandas_dtype,
|
16 |
+
)
|
17 |
+
|
18 |
+
import pandas as pd
|
19 |
+
from pandas.api.extensions import (
|
20 |
+
no_default,
|
21 |
+
register_extension_dtype,
|
22 |
+
)
|
23 |
+
from pandas.api.types import (
|
24 |
+
is_list_like,
|
25 |
+
is_scalar,
|
26 |
+
)
|
27 |
+
from pandas.core import arraylike
|
28 |
+
from pandas.core.algorithms import value_counts_internal as value_counts
|
29 |
+
from pandas.core.arraylike import OpsMixin
|
30 |
+
from pandas.core.arrays import (
|
31 |
+
ExtensionArray,
|
32 |
+
ExtensionScalarOpsMixin,
|
33 |
+
)
|
34 |
+
from pandas.core.indexers import check_array_indexer
|
35 |
+
|
36 |
+
if TYPE_CHECKING:
|
37 |
+
from pandas._typing import type_t
|
38 |
+
|
39 |
+
|
40 |
+
@register_extension_dtype
|
41 |
+
class DecimalDtype(ExtensionDtype):
|
42 |
+
type = decimal.Decimal
|
43 |
+
name = "decimal"
|
44 |
+
na_value = decimal.Decimal("NaN")
|
45 |
+
_metadata = ("context",)
|
46 |
+
|
47 |
+
def __init__(self, context=None) -> None:
|
48 |
+
self.context = context or decimal.getcontext()
|
49 |
+
|
50 |
+
def __repr__(self) -> str:
|
51 |
+
return f"DecimalDtype(context={self.context})"
|
52 |
+
|
53 |
+
@classmethod
|
54 |
+
def construct_array_type(cls) -> type_t[DecimalArray]:
|
55 |
+
"""
|
56 |
+
Return the array type associated with this dtype.
|
57 |
+
|
58 |
+
Returns
|
59 |
+
-------
|
60 |
+
type
|
61 |
+
"""
|
62 |
+
return DecimalArray
|
63 |
+
|
64 |
+
@property
|
65 |
+
def _is_numeric(self) -> bool:
|
66 |
+
return True
|
67 |
+
|
68 |
+
|
69 |
+
class DecimalArray(OpsMixin, ExtensionScalarOpsMixin, ExtensionArray):
|
70 |
+
__array_priority__ = 1000
|
71 |
+
|
72 |
+
def __init__(self, values, dtype=None, copy=False, context=None) -> None:
|
73 |
+
for i, val in enumerate(values):
|
74 |
+
if is_float(val) or is_integer(val):
|
75 |
+
if np.isnan(val):
|
76 |
+
values[i] = DecimalDtype.na_value
|
77 |
+
else:
|
78 |
+
# error: Argument 1 has incompatible type "float | int |
|
79 |
+
# integer[Any]"; expected "Decimal | float | str | tuple[int,
|
80 |
+
# Sequence[int], int]"
|
81 |
+
values[i] = DecimalDtype.type(val) # type: ignore[arg-type]
|
82 |
+
elif not isinstance(val, decimal.Decimal):
|
83 |
+
raise TypeError("All values must be of type " + str(decimal.Decimal))
|
84 |
+
values = np.asarray(values, dtype=object)
|
85 |
+
|
86 |
+
self._data = values
|
87 |
+
# Some aliases for common attribute names to ensure pandas supports
|
88 |
+
# these
|
89 |
+
self._items = self.data = self._data
|
90 |
+
# those aliases are currently not working due to assumptions
|
91 |
+
# in internal code (GH-20735)
|
92 |
+
# self._values = self.values = self.data
|
93 |
+
self._dtype = DecimalDtype(context)
|
94 |
+
|
95 |
+
@property
|
96 |
+
def dtype(self):
|
97 |
+
return self._dtype
|
98 |
+
|
99 |
+
@classmethod
|
100 |
+
def _from_sequence(cls, scalars, *, dtype=None, copy=False):
|
101 |
+
return cls(scalars)
|
102 |
+
|
103 |
+
@classmethod
|
104 |
+
def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
|
105 |
+
return cls._from_sequence(
|
106 |
+
[decimal.Decimal(x) for x in strings], dtype=dtype, copy=copy
|
107 |
+
)
|
108 |
+
|
109 |
+
@classmethod
|
110 |
+
def _from_factorized(cls, values, original):
|
111 |
+
return cls(values)
|
112 |
+
|
113 |
+
_HANDLED_TYPES = (decimal.Decimal, numbers.Number, np.ndarray)
|
114 |
+
|
115 |
+
def to_numpy(
|
116 |
+
self,
|
117 |
+
dtype=None,
|
118 |
+
copy: bool = False,
|
119 |
+
na_value: object = no_default,
|
120 |
+
decimals=None,
|
121 |
+
) -> np.ndarray:
|
122 |
+
result = np.asarray(self, dtype=dtype)
|
123 |
+
if decimals is not None:
|
124 |
+
result = np.asarray([round(x, decimals) for x in result])
|
125 |
+
return result
|
126 |
+
|
127 |
+
def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
|
128 |
+
#
|
129 |
+
if not all(
|
130 |
+
isinstance(t, self._HANDLED_TYPES + (DecimalArray,)) for t in inputs
|
131 |
+
):
|
132 |
+
return NotImplemented
|
133 |
+
|
134 |
+
result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
|
135 |
+
self, ufunc, method, *inputs, **kwargs
|
136 |
+
)
|
137 |
+
if result is not NotImplemented:
|
138 |
+
# e.g. test_array_ufunc_series_scalar_other
|
139 |
+
return result
|
140 |
+
|
141 |
+
if "out" in kwargs:
|
142 |
+
return arraylike.dispatch_ufunc_with_out(
|
143 |
+
self, ufunc, method, *inputs, **kwargs
|
144 |
+
)
|
145 |
+
|
146 |
+
inputs = tuple(x._data if isinstance(x, DecimalArray) else x for x in inputs)
|
147 |
+
result = getattr(ufunc, method)(*inputs, **kwargs)
|
148 |
+
|
149 |
+
if method == "reduce":
|
150 |
+
result = arraylike.dispatch_reduction_ufunc(
|
151 |
+
self, ufunc, method, *inputs, **kwargs
|
152 |
+
)
|
153 |
+
if result is not NotImplemented:
|
154 |
+
return result
|
155 |
+
|
156 |
+
def reconstruct(x):
|
157 |
+
if isinstance(x, (decimal.Decimal, numbers.Number)):
|
158 |
+
return x
|
159 |
+
else:
|
160 |
+
return type(self)._from_sequence(x, dtype=self.dtype)
|
161 |
+
|
162 |
+
if ufunc.nout > 1:
|
163 |
+
return tuple(reconstruct(x) for x in result)
|
164 |
+
else:
|
165 |
+
return reconstruct(result)
|
166 |
+
|
167 |
+
def __getitem__(self, item):
|
168 |
+
if isinstance(item, numbers.Integral):
|
169 |
+
return self._data[item]
|
170 |
+
else:
|
171 |
+
# array, slice.
|
172 |
+
item = pd.api.indexers.check_array_indexer(self, item)
|
173 |
+
return type(self)(self._data[item])
|
174 |
+
|
175 |
+
def take(self, indexer, allow_fill=False, fill_value=None):
|
176 |
+
from pandas.api.extensions import take
|
177 |
+
|
178 |
+
data = self._data
|
179 |
+
if allow_fill and fill_value is None:
|
180 |
+
fill_value = self.dtype.na_value
|
181 |
+
|
182 |
+
result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill)
|
183 |
+
return self._from_sequence(result, dtype=self.dtype)
|
184 |
+
|
185 |
+
def copy(self):
|
186 |
+
return type(self)(self._data.copy(), dtype=self.dtype)
|
187 |
+
|
188 |
+
def astype(self, dtype, copy=True):
|
189 |
+
if is_dtype_equal(dtype, self._dtype):
|
190 |
+
if not copy:
|
191 |
+
return self
|
192 |
+
dtype = pandas_dtype(dtype)
|
193 |
+
if isinstance(dtype, type(self.dtype)):
|
194 |
+
return type(self)(self._data, copy=copy, context=dtype.context)
|
195 |
+
|
196 |
+
return super().astype(dtype, copy=copy)
|
197 |
+
|
198 |
+
def __setitem__(self, key, value) -> None:
|
199 |
+
if is_list_like(value):
|
200 |
+
if is_scalar(key):
|
201 |
+
raise ValueError("setting an array element with a sequence.")
|
202 |
+
value = [decimal.Decimal(v) for v in value]
|
203 |
+
else:
|
204 |
+
value = decimal.Decimal(value)
|
205 |
+
|
206 |
+
key = check_array_indexer(self, key)
|
207 |
+
self._data[key] = value
|
208 |
+
|
209 |
+
def __len__(self) -> int:
|
210 |
+
return len(self._data)
|
211 |
+
|
212 |
+
def __contains__(self, item) -> bool | np.bool_:
|
213 |
+
if not isinstance(item, decimal.Decimal):
|
214 |
+
return False
|
215 |
+
elif item.is_nan():
|
216 |
+
return self.isna().any()
|
217 |
+
else:
|
218 |
+
return super().__contains__(item)
|
219 |
+
|
220 |
+
@property
|
221 |
+
def nbytes(self) -> int:
|
222 |
+
n = len(self)
|
223 |
+
if n:
|
224 |
+
return n * sys.getsizeof(self[0])
|
225 |
+
return 0
|
226 |
+
|
227 |
+
def isna(self):
|
228 |
+
return np.array([x.is_nan() for x in self._data], dtype=bool)
|
229 |
+
|
230 |
+
@property
|
231 |
+
def _na_value(self):
|
232 |
+
return decimal.Decimal("NaN")
|
233 |
+
|
234 |
+
def _formatter(self, boxed=False):
|
235 |
+
if boxed:
|
236 |
+
return "Decimal: {}".format
|
237 |
+
return repr
|
238 |
+
|
239 |
+
@classmethod
|
240 |
+
def _concat_same_type(cls, to_concat):
|
241 |
+
return cls(np.concatenate([x._data for x in to_concat]))
|
242 |
+
|
243 |
+
def _reduce(
|
244 |
+
self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
|
245 |
+
):
|
246 |
+
if skipna and self.isna().any():
|
247 |
+
# If we don't have any NAs, we can ignore skipna
|
248 |
+
other = self[~self.isna()]
|
249 |
+
result = other._reduce(name, **kwargs)
|
250 |
+
elif name == "sum" and len(self) == 0:
|
251 |
+
# GH#29630 avoid returning int 0 or np.bool_(False) on old numpy
|
252 |
+
result = decimal.Decimal(0)
|
253 |
+
else:
|
254 |
+
try:
|
255 |
+
op = getattr(self.data, name)
|
256 |
+
except AttributeError as err:
|
257 |
+
raise NotImplementedError(
|
258 |
+
f"decimal does not support the {name} operation"
|
259 |
+
) from err
|
260 |
+
result = op(axis=0)
|
261 |
+
|
262 |
+
if keepdims:
|
263 |
+
return type(self)([result])
|
264 |
+
else:
|
265 |
+
return result
|
266 |
+
|
267 |
+
def _cmp_method(self, other, op):
|
268 |
+
# For use with OpsMixin
|
269 |
+
def convert_values(param):
|
270 |
+
if isinstance(param, ExtensionArray) or is_list_like(param):
|
271 |
+
ovalues = param
|
272 |
+
else:
|
273 |
+
# Assume it's an object
|
274 |
+
ovalues = [param] * len(self)
|
275 |
+
return ovalues
|
276 |
+
|
277 |
+
lvalues = self
|
278 |
+
rvalues = convert_values(other)
|
279 |
+
|
280 |
+
# If the operator is not defined for the underlying objects,
|
281 |
+
# a TypeError should be raised
|
282 |
+
res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
|
283 |
+
|
284 |
+
return np.asarray(res, dtype=bool)
|
285 |
+
|
286 |
+
def value_counts(self, dropna: bool = True):
|
287 |
+
return value_counts(self.to_numpy(), dropna=dropna)
|
288 |
+
|
289 |
+
# We override fillna here to simulate a 3rd party EA that has done so. This
|
290 |
+
# lets us test the deprecation telling authors to implement _pad_or_backfill
|
291 |
+
# Simulate a 3rd-party EA that has not yet updated to include a "copy"
|
292 |
+
# keyword in its fillna method.
|
293 |
+
# error: Signature of "fillna" incompatible with supertype "ExtensionArray"
|
294 |
+
def fillna( # type: ignore[override]
|
295 |
+
self,
|
296 |
+
value=None,
|
297 |
+
method=None,
|
298 |
+
limit: int | None = None,
|
299 |
+
):
|
300 |
+
return super().fillna(value=value, method=method, limit=limit, copy=True)
|
301 |
+
|
302 |
+
|
303 |
+
def to_decimal(values, context=None):
|
304 |
+
return DecimalArray([decimal.Decimal(x) for x in values], context=context)
|
305 |
+
|
306 |
+
|
307 |
+
def make_data():
|
308 |
+
return [decimal.Decimal(val) for val in np.random.default_rng(2).random(100)]
|
309 |
+
|
310 |
+
|
311 |
+
DecimalArray._add_arithmetic_ops()
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/decimal/test_decimal.py
ADDED
@@ -0,0 +1,567 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import decimal
|
4 |
+
import operator
|
5 |
+
|
6 |
+
import numpy as np
|
7 |
+
import pytest
|
8 |
+
|
9 |
+
import pandas as pd
|
10 |
+
import pandas._testing as tm
|
11 |
+
from pandas.tests.extension import base
|
12 |
+
from pandas.tests.extension.decimal.array import (
|
13 |
+
DecimalArray,
|
14 |
+
DecimalDtype,
|
15 |
+
make_data,
|
16 |
+
to_decimal,
|
17 |
+
)
|
18 |
+
|
19 |
+
|
20 |
+
@pytest.fixture
|
21 |
+
def dtype():
|
22 |
+
return DecimalDtype()
|
23 |
+
|
24 |
+
|
25 |
+
@pytest.fixture
|
26 |
+
def data():
|
27 |
+
return DecimalArray(make_data())
|
28 |
+
|
29 |
+
|
30 |
+
@pytest.fixture
|
31 |
+
def data_for_twos():
|
32 |
+
return DecimalArray([decimal.Decimal(2) for _ in range(100)])
|
33 |
+
|
34 |
+
|
35 |
+
@pytest.fixture
|
36 |
+
def data_missing():
|
37 |
+
return DecimalArray([decimal.Decimal("NaN"), decimal.Decimal(1)])
|
38 |
+
|
39 |
+
|
40 |
+
@pytest.fixture
|
41 |
+
def data_for_sorting():
|
42 |
+
return DecimalArray(
|
43 |
+
[decimal.Decimal("1"), decimal.Decimal("2"), decimal.Decimal("0")]
|
44 |
+
)
|
45 |
+
|
46 |
+
|
47 |
+
@pytest.fixture
|
48 |
+
def data_missing_for_sorting():
|
49 |
+
return DecimalArray(
|
50 |
+
[decimal.Decimal("1"), decimal.Decimal("NaN"), decimal.Decimal("0")]
|
51 |
+
)
|
52 |
+
|
53 |
+
|
54 |
+
@pytest.fixture
|
55 |
+
def na_cmp():
|
56 |
+
return lambda x, y: x.is_nan() and y.is_nan()
|
57 |
+
|
58 |
+
|
59 |
+
@pytest.fixture
|
60 |
+
def data_for_grouping():
|
61 |
+
b = decimal.Decimal("1.0")
|
62 |
+
a = decimal.Decimal("0.0")
|
63 |
+
c = decimal.Decimal("2.0")
|
64 |
+
na = decimal.Decimal("NaN")
|
65 |
+
return DecimalArray([b, b, na, na, a, a, b, c])
|
66 |
+
|
67 |
+
|
68 |
+
class TestDecimalArray(base.ExtensionTests):
|
69 |
+
def _get_expected_exception(
|
70 |
+
self, op_name: str, obj, other
|
71 |
+
) -> type[Exception] | None:
|
72 |
+
return None
|
73 |
+
|
74 |
+
def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
|
75 |
+
return True
|
76 |
+
|
77 |
+
def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
|
78 |
+
if op_name == "count":
|
79 |
+
return super().check_reduce(ser, op_name, skipna)
|
80 |
+
else:
|
81 |
+
result = getattr(ser, op_name)(skipna=skipna)
|
82 |
+
expected = getattr(np.asarray(ser), op_name)()
|
83 |
+
tm.assert_almost_equal(result, expected)
|
84 |
+
|
85 |
+
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request):
|
86 |
+
if all_numeric_reductions in ["kurt", "skew", "sem", "median"]:
|
87 |
+
mark = pytest.mark.xfail(raises=NotImplementedError)
|
88 |
+
request.applymarker(mark)
|
89 |
+
super().test_reduce_series_numeric(data, all_numeric_reductions, skipna)
|
90 |
+
|
91 |
+
def test_reduce_frame(self, data, all_numeric_reductions, skipna, request):
|
92 |
+
op_name = all_numeric_reductions
|
93 |
+
if op_name in ["skew", "median"]:
|
94 |
+
mark = pytest.mark.xfail(raises=NotImplementedError)
|
95 |
+
request.applymarker(mark)
|
96 |
+
|
97 |
+
return super().test_reduce_frame(data, all_numeric_reductions, skipna)
|
98 |
+
|
99 |
+
def test_compare_scalar(self, data, comparison_op):
|
100 |
+
ser = pd.Series(data)
|
101 |
+
self._compare_other(ser, data, comparison_op, 0.5)
|
102 |
+
|
103 |
+
def test_compare_array(self, data, comparison_op):
|
104 |
+
ser = pd.Series(data)
|
105 |
+
|
106 |
+
alter = np.random.default_rng(2).choice([-1, 0, 1], len(data))
|
107 |
+
# Randomly double, halve or keep same value
|
108 |
+
other = pd.Series(data) * [decimal.Decimal(pow(2.0, i)) for i in alter]
|
109 |
+
self._compare_other(ser, data, comparison_op, other)
|
110 |
+
|
111 |
+
def test_arith_series_with_array(self, data, all_arithmetic_operators):
|
112 |
+
op_name = all_arithmetic_operators
|
113 |
+
ser = pd.Series(data)
|
114 |
+
|
115 |
+
context = decimal.getcontext()
|
116 |
+
divbyzerotrap = context.traps[decimal.DivisionByZero]
|
117 |
+
invalidoptrap = context.traps[decimal.InvalidOperation]
|
118 |
+
context.traps[decimal.DivisionByZero] = 0
|
119 |
+
context.traps[decimal.InvalidOperation] = 0
|
120 |
+
|
121 |
+
# Decimal supports ops with int, but not float
|
122 |
+
other = pd.Series([int(d * 100) for d in data])
|
123 |
+
self.check_opname(ser, op_name, other)
|
124 |
+
|
125 |
+
if "mod" not in op_name:
|
126 |
+
self.check_opname(ser, op_name, ser * 2)
|
127 |
+
|
128 |
+
self.check_opname(ser, op_name, 0)
|
129 |
+
self.check_opname(ser, op_name, 5)
|
130 |
+
context.traps[decimal.DivisionByZero] = divbyzerotrap
|
131 |
+
context.traps[decimal.InvalidOperation] = invalidoptrap
|
132 |
+
|
133 |
+
def test_fillna_frame(self, data_missing):
|
134 |
+
msg = "ExtensionArray.fillna added a 'copy' keyword"
|
135 |
+
with tm.assert_produces_warning(
|
136 |
+
DeprecationWarning, match=msg, check_stacklevel=False
|
137 |
+
):
|
138 |
+
super().test_fillna_frame(data_missing)
|
139 |
+
|
140 |
+
def test_fillna_limit_pad(self, data_missing):
|
141 |
+
msg = "ExtensionArray.fillna 'method' keyword is deprecated"
|
142 |
+
with tm.assert_produces_warning(
|
143 |
+
DeprecationWarning,
|
144 |
+
match=msg,
|
145 |
+
check_stacklevel=False,
|
146 |
+
raise_on_extra_warnings=False,
|
147 |
+
):
|
148 |
+
super().test_fillna_limit_pad(data_missing)
|
149 |
+
|
150 |
+
msg = "The 'method' keyword in DecimalArray.fillna is deprecated"
|
151 |
+
with tm.assert_produces_warning(
|
152 |
+
FutureWarning,
|
153 |
+
match=msg,
|
154 |
+
check_stacklevel=False,
|
155 |
+
raise_on_extra_warnings=False,
|
156 |
+
):
|
157 |
+
super().test_fillna_limit_pad(data_missing)
|
158 |
+
|
159 |
+
@pytest.mark.parametrize(
|
160 |
+
"limit_area, input_ilocs, expected_ilocs",
|
161 |
+
[
|
162 |
+
("outside", [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]),
|
163 |
+
("outside", [1, 0, 1, 0, 1], [1, 0, 1, 0, 1]),
|
164 |
+
("outside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 1]),
|
165 |
+
("outside", [0, 1, 0, 1, 0], [0, 1, 0, 1, 1]),
|
166 |
+
("inside", [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]),
|
167 |
+
("inside", [1, 0, 1, 0, 1], [1, 1, 1, 1, 1]),
|
168 |
+
("inside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 0]),
|
169 |
+
("inside", [0, 1, 0, 1, 0], [0, 1, 1, 1, 0]),
|
170 |
+
],
|
171 |
+
)
|
172 |
+
def test_ffill_limit_area(
|
173 |
+
self, data_missing, limit_area, input_ilocs, expected_ilocs
|
174 |
+
):
|
175 |
+
# GH#56616
|
176 |
+
msg = "ExtensionArray.fillna 'method' keyword is deprecated"
|
177 |
+
with tm.assert_produces_warning(
|
178 |
+
DeprecationWarning,
|
179 |
+
match=msg,
|
180 |
+
check_stacklevel=False,
|
181 |
+
raise_on_extra_warnings=False,
|
182 |
+
):
|
183 |
+
msg = "DecimalArray does not implement limit_area"
|
184 |
+
with pytest.raises(NotImplementedError, match=msg):
|
185 |
+
super().test_ffill_limit_area(
|
186 |
+
data_missing, limit_area, input_ilocs, expected_ilocs
|
187 |
+
)
|
188 |
+
|
189 |
+
def test_fillna_limit_backfill(self, data_missing):
|
190 |
+
msg = "Series.fillna with 'method' is deprecated"
|
191 |
+
with tm.assert_produces_warning(
|
192 |
+
FutureWarning,
|
193 |
+
match=msg,
|
194 |
+
check_stacklevel=False,
|
195 |
+
raise_on_extra_warnings=False,
|
196 |
+
):
|
197 |
+
super().test_fillna_limit_backfill(data_missing)
|
198 |
+
|
199 |
+
msg = "ExtensionArray.fillna 'method' keyword is deprecated"
|
200 |
+
with tm.assert_produces_warning(
|
201 |
+
DeprecationWarning,
|
202 |
+
match=msg,
|
203 |
+
check_stacklevel=False,
|
204 |
+
raise_on_extra_warnings=False,
|
205 |
+
):
|
206 |
+
super().test_fillna_limit_backfill(data_missing)
|
207 |
+
|
208 |
+
msg = "The 'method' keyword in DecimalArray.fillna is deprecated"
|
209 |
+
with tm.assert_produces_warning(
|
210 |
+
FutureWarning,
|
211 |
+
match=msg,
|
212 |
+
check_stacklevel=False,
|
213 |
+
raise_on_extra_warnings=False,
|
214 |
+
):
|
215 |
+
super().test_fillna_limit_backfill(data_missing)
|
216 |
+
|
217 |
+
def test_fillna_no_op_returns_copy(self, data):
|
218 |
+
msg = "|".join(
|
219 |
+
[
|
220 |
+
"ExtensionArray.fillna 'method' keyword is deprecated",
|
221 |
+
"The 'method' keyword in DecimalArray.fillna is deprecated",
|
222 |
+
]
|
223 |
+
)
|
224 |
+
with tm.assert_produces_warning(
|
225 |
+
(FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False
|
226 |
+
):
|
227 |
+
super().test_fillna_no_op_returns_copy(data)
|
228 |
+
|
229 |
+
def test_fillna_series(self, data_missing):
|
230 |
+
msg = "ExtensionArray.fillna added a 'copy' keyword"
|
231 |
+
with tm.assert_produces_warning(
|
232 |
+
DeprecationWarning, match=msg, check_stacklevel=False
|
233 |
+
):
|
234 |
+
super().test_fillna_series(data_missing)
|
235 |
+
|
236 |
+
def test_fillna_series_method(self, data_missing, fillna_method):
|
237 |
+
msg = "|".join(
|
238 |
+
[
|
239 |
+
"ExtensionArray.fillna 'method' keyword is deprecated",
|
240 |
+
"The 'method' keyword in DecimalArray.fillna is deprecated",
|
241 |
+
]
|
242 |
+
)
|
243 |
+
with tm.assert_produces_warning(
|
244 |
+
(FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False
|
245 |
+
):
|
246 |
+
super().test_fillna_series_method(data_missing, fillna_method)
|
247 |
+
|
248 |
+
def test_fillna_copy_frame(self, data_missing, using_copy_on_write):
|
249 |
+
warn = DeprecationWarning if not using_copy_on_write else None
|
250 |
+
msg = "ExtensionArray.fillna added a 'copy' keyword"
|
251 |
+
with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
|
252 |
+
super().test_fillna_copy_frame(data_missing)
|
253 |
+
|
254 |
+
def test_fillna_copy_series(self, data_missing, using_copy_on_write):
|
255 |
+
warn = DeprecationWarning if not using_copy_on_write else None
|
256 |
+
msg = "ExtensionArray.fillna added a 'copy' keyword"
|
257 |
+
with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
|
258 |
+
super().test_fillna_copy_series(data_missing)
|
259 |
+
|
260 |
+
@pytest.mark.parametrize("dropna", [True, False])
|
261 |
+
def test_value_counts(self, all_data, dropna, request):
|
262 |
+
all_data = all_data[:10]
|
263 |
+
if dropna:
|
264 |
+
other = np.array(all_data[~all_data.isna()])
|
265 |
+
else:
|
266 |
+
other = all_data
|
267 |
+
|
268 |
+
vcs = pd.Series(all_data).value_counts(dropna=dropna)
|
269 |
+
vcs_ex = pd.Series(other).value_counts(dropna=dropna)
|
270 |
+
|
271 |
+
with decimal.localcontext() as ctx:
|
272 |
+
# avoid raising when comparing Decimal("NAN") < Decimal(2)
|
273 |
+
ctx.traps[decimal.InvalidOperation] = False
|
274 |
+
|
275 |
+
result = vcs.sort_index()
|
276 |
+
expected = vcs_ex.sort_index()
|
277 |
+
|
278 |
+
tm.assert_series_equal(result, expected)
|
279 |
+
|
280 |
+
def test_series_repr(self, data):
|
281 |
+
# Overriding this base test to explicitly test that
|
282 |
+
# the custom _formatter is used
|
283 |
+
ser = pd.Series(data)
|
284 |
+
assert data.dtype.name in repr(ser)
|
285 |
+
assert "Decimal: " in repr(ser)
|
286 |
+
|
287 |
+
@pytest.mark.xfail(reason="Inconsistent array-vs-scalar behavior")
|
288 |
+
@pytest.mark.parametrize("ufunc", [np.positive, np.negative, np.abs])
|
289 |
+
def test_unary_ufunc_dunder_equivalence(self, data, ufunc):
|
290 |
+
super().test_unary_ufunc_dunder_equivalence(data, ufunc)
|
291 |
+
|
292 |
+
|
293 |
+
def test_take_na_value_other_decimal():
|
294 |
+
arr = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("2.0")])
|
295 |
+
result = arr.take([0, -1], allow_fill=True, fill_value=decimal.Decimal("-1.0"))
|
296 |
+
expected = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("-1.0")])
|
297 |
+
tm.assert_extension_array_equal(result, expected)
|
298 |
+
|
299 |
+
|
300 |
+
def test_series_constructor_coerce_data_to_extension_dtype():
|
301 |
+
dtype = DecimalDtype()
|
302 |
+
ser = pd.Series([0, 1, 2], dtype=dtype)
|
303 |
+
|
304 |
+
arr = DecimalArray(
|
305 |
+
[decimal.Decimal(0), decimal.Decimal(1), decimal.Decimal(2)],
|
306 |
+
dtype=dtype,
|
307 |
+
)
|
308 |
+
exp = pd.Series(arr)
|
309 |
+
tm.assert_series_equal(ser, exp)
|
310 |
+
|
311 |
+
|
312 |
+
def test_series_constructor_with_dtype():
|
313 |
+
arr = DecimalArray([decimal.Decimal("10.0")])
|
314 |
+
result = pd.Series(arr, dtype=DecimalDtype())
|
315 |
+
expected = pd.Series(arr)
|
316 |
+
tm.assert_series_equal(result, expected)
|
317 |
+
|
318 |
+
result = pd.Series(arr, dtype="int64")
|
319 |
+
expected = pd.Series([10])
|
320 |
+
tm.assert_series_equal(result, expected)
|
321 |
+
|
322 |
+
|
323 |
+
def test_dataframe_constructor_with_dtype():
|
324 |
+
arr = DecimalArray([decimal.Decimal("10.0")])
|
325 |
+
|
326 |
+
result = pd.DataFrame({"A": arr}, dtype=DecimalDtype())
|
327 |
+
expected = pd.DataFrame({"A": arr})
|
328 |
+
tm.assert_frame_equal(result, expected)
|
329 |
+
|
330 |
+
arr = DecimalArray([decimal.Decimal("10.0")])
|
331 |
+
result = pd.DataFrame({"A": arr}, dtype="int64")
|
332 |
+
expected = pd.DataFrame({"A": [10]})
|
333 |
+
tm.assert_frame_equal(result, expected)
|
334 |
+
|
335 |
+
|
336 |
+
@pytest.mark.parametrize("frame", [True, False])
|
337 |
+
def test_astype_dispatches(frame):
|
338 |
+
# This is a dtype-specific test that ensures Series[decimal].astype
|
339 |
+
# gets all the way through to ExtensionArray.astype
|
340 |
+
# Designing a reliable smoke test that works for arbitrary data types
|
341 |
+
# is difficult.
|
342 |
+
data = pd.Series(DecimalArray([decimal.Decimal(2)]), name="a")
|
343 |
+
ctx = decimal.Context()
|
344 |
+
ctx.prec = 5
|
345 |
+
|
346 |
+
if frame:
|
347 |
+
data = data.to_frame()
|
348 |
+
|
349 |
+
result = data.astype(DecimalDtype(ctx))
|
350 |
+
|
351 |
+
if frame:
|
352 |
+
result = result["a"]
|
353 |
+
|
354 |
+
assert result.dtype.context.prec == ctx.prec
|
355 |
+
|
356 |
+
|
357 |
+
class DecimalArrayWithoutFromSequence(DecimalArray):
|
358 |
+
"""Helper class for testing error handling in _from_sequence."""
|
359 |
+
|
360 |
+
@classmethod
|
361 |
+
def _from_sequence(cls, scalars, *, dtype=None, copy=False):
|
362 |
+
raise KeyError("For the test")
|
363 |
+
|
364 |
+
|
365 |
+
class DecimalArrayWithoutCoercion(DecimalArrayWithoutFromSequence):
|
366 |
+
@classmethod
|
367 |
+
def _create_arithmetic_method(cls, op):
|
368 |
+
return cls._create_method(op, coerce_to_dtype=False)
|
369 |
+
|
370 |
+
|
371 |
+
DecimalArrayWithoutCoercion._add_arithmetic_ops()
|
372 |
+
|
373 |
+
|
374 |
+
def test_combine_from_sequence_raises(monkeypatch):
|
375 |
+
# https://github.com/pandas-dev/pandas/issues/22850
|
376 |
+
cls = DecimalArrayWithoutFromSequence
|
377 |
+
|
378 |
+
@classmethod
|
379 |
+
def construct_array_type(cls):
|
380 |
+
return DecimalArrayWithoutFromSequence
|
381 |
+
|
382 |
+
monkeypatch.setattr(DecimalDtype, "construct_array_type", construct_array_type)
|
383 |
+
|
384 |
+
arr = cls([decimal.Decimal("1.0"), decimal.Decimal("2.0")])
|
385 |
+
ser = pd.Series(arr)
|
386 |
+
result = ser.combine(ser, operator.add)
|
387 |
+
|
388 |
+
# note: object dtype
|
389 |
+
expected = pd.Series(
|
390 |
+
[decimal.Decimal("2.0"), decimal.Decimal("4.0")], dtype="object"
|
391 |
+
)
|
392 |
+
tm.assert_series_equal(result, expected)
|
393 |
+
|
394 |
+
|
395 |
+
@pytest.mark.parametrize(
|
396 |
+
"class_", [DecimalArrayWithoutFromSequence, DecimalArrayWithoutCoercion]
|
397 |
+
)
|
398 |
+
def test_scalar_ops_from_sequence_raises(class_):
|
399 |
+
# op(EA, EA) should return an EA, or an ndarray if it's not possible
|
400 |
+
# to return an EA with the return values.
|
401 |
+
arr = class_([decimal.Decimal("1.0"), decimal.Decimal("2.0")])
|
402 |
+
result = arr + arr
|
403 |
+
expected = np.array(
|
404 |
+
[decimal.Decimal("2.0"), decimal.Decimal("4.0")], dtype="object"
|
405 |
+
)
|
406 |
+
tm.assert_numpy_array_equal(result, expected)
|
407 |
+
|
408 |
+
|
409 |
+
@pytest.mark.parametrize(
|
410 |
+
"reverse, expected_div, expected_mod",
|
411 |
+
[(False, [0, 1, 1, 2], [1, 0, 1, 0]), (True, [2, 1, 0, 0], [0, 0, 2, 2])],
|
412 |
+
)
|
413 |
+
def test_divmod_array(reverse, expected_div, expected_mod):
|
414 |
+
# https://github.com/pandas-dev/pandas/issues/22930
|
415 |
+
arr = to_decimal([1, 2, 3, 4])
|
416 |
+
if reverse:
|
417 |
+
div, mod = divmod(2, arr)
|
418 |
+
else:
|
419 |
+
div, mod = divmod(arr, 2)
|
420 |
+
expected_div = to_decimal(expected_div)
|
421 |
+
expected_mod = to_decimal(expected_mod)
|
422 |
+
|
423 |
+
tm.assert_extension_array_equal(div, expected_div)
|
424 |
+
tm.assert_extension_array_equal(mod, expected_mod)
|
425 |
+
|
426 |
+
|
427 |
+
def test_ufunc_fallback(data):
|
428 |
+
a = data[:5]
|
429 |
+
s = pd.Series(a, index=range(3, 8))
|
430 |
+
result = np.abs(s)
|
431 |
+
expected = pd.Series(np.abs(a), index=range(3, 8))
|
432 |
+
tm.assert_series_equal(result, expected)
|
433 |
+
|
434 |
+
|
435 |
+
def test_array_ufunc():
|
436 |
+
a = to_decimal([1, 2, 3])
|
437 |
+
result = np.exp(a)
|
438 |
+
expected = to_decimal(np.exp(a._data))
|
439 |
+
tm.assert_extension_array_equal(result, expected)
|
440 |
+
|
441 |
+
|
442 |
+
def test_array_ufunc_series():
|
443 |
+
a = to_decimal([1, 2, 3])
|
444 |
+
s = pd.Series(a)
|
445 |
+
result = np.exp(s)
|
446 |
+
expected = pd.Series(to_decimal(np.exp(a._data)))
|
447 |
+
tm.assert_series_equal(result, expected)
|
448 |
+
|
449 |
+
|
450 |
+
def test_array_ufunc_series_scalar_other():
|
451 |
+
# check _HANDLED_TYPES
|
452 |
+
a = to_decimal([1, 2, 3])
|
453 |
+
s = pd.Series(a)
|
454 |
+
result = np.add(s, decimal.Decimal(1))
|
455 |
+
expected = pd.Series(np.add(a, decimal.Decimal(1)))
|
456 |
+
tm.assert_series_equal(result, expected)
|
457 |
+
|
458 |
+
|
459 |
+
def test_array_ufunc_series_defer():
|
460 |
+
a = to_decimal([1, 2, 3])
|
461 |
+
s = pd.Series(a)
|
462 |
+
|
463 |
+
expected = pd.Series(to_decimal([2, 4, 6]))
|
464 |
+
r1 = np.add(s, a)
|
465 |
+
r2 = np.add(a, s)
|
466 |
+
|
467 |
+
tm.assert_series_equal(r1, expected)
|
468 |
+
tm.assert_series_equal(r2, expected)
|
469 |
+
|
470 |
+
|
471 |
+
def test_groupby_agg():
|
472 |
+
# Ensure that the result of agg is inferred to be decimal dtype
|
473 |
+
# https://github.com/pandas-dev/pandas/issues/29141
|
474 |
+
|
475 |
+
data = make_data()[:5]
|
476 |
+
df = pd.DataFrame(
|
477 |
+
{"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)}
|
478 |
+
)
|
479 |
+
|
480 |
+
# single key, selected column
|
481 |
+
expected = pd.Series(to_decimal([data[0], data[3]]))
|
482 |
+
result = df.groupby("id1")["decimals"].agg(lambda x: x.iloc[0])
|
483 |
+
tm.assert_series_equal(result, expected, check_names=False)
|
484 |
+
result = df["decimals"].groupby(df["id1"]).agg(lambda x: x.iloc[0])
|
485 |
+
tm.assert_series_equal(result, expected, check_names=False)
|
486 |
+
|
487 |
+
# multiple keys, selected column
|
488 |
+
expected = pd.Series(
|
489 |
+
to_decimal([data[0], data[1], data[3]]),
|
490 |
+
index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 1)]),
|
491 |
+
)
|
492 |
+
result = df.groupby(["id1", "id2"])["decimals"].agg(lambda x: x.iloc[0])
|
493 |
+
tm.assert_series_equal(result, expected, check_names=False)
|
494 |
+
result = df["decimals"].groupby([df["id1"], df["id2"]]).agg(lambda x: x.iloc[0])
|
495 |
+
tm.assert_series_equal(result, expected, check_names=False)
|
496 |
+
|
497 |
+
# multiple columns
|
498 |
+
expected = pd.DataFrame({"id2": [0, 1], "decimals": to_decimal([data[0], data[3]])})
|
499 |
+
result = df.groupby("id1").agg(lambda x: x.iloc[0])
|
500 |
+
tm.assert_frame_equal(result, expected, check_names=False)
|
501 |
+
|
502 |
+
|
503 |
+
def test_groupby_agg_ea_method(monkeypatch):
|
504 |
+
# Ensure that the result of agg is inferred to be decimal dtype
|
505 |
+
# https://github.com/pandas-dev/pandas/issues/29141
|
506 |
+
|
507 |
+
def DecimalArray__my_sum(self):
|
508 |
+
return np.sum(np.array(self))
|
509 |
+
|
510 |
+
monkeypatch.setattr(DecimalArray, "my_sum", DecimalArray__my_sum, raising=False)
|
511 |
+
|
512 |
+
data = make_data()[:5]
|
513 |
+
df = pd.DataFrame({"id": [0, 0, 0, 1, 1], "decimals": DecimalArray(data)})
|
514 |
+
expected = pd.Series(to_decimal([data[0] + data[1] + data[2], data[3] + data[4]]))
|
515 |
+
|
516 |
+
result = df.groupby("id")["decimals"].agg(lambda x: x.values.my_sum())
|
517 |
+
tm.assert_series_equal(result, expected, check_names=False)
|
518 |
+
s = pd.Series(DecimalArray(data))
|
519 |
+
grouper = np.array([0, 0, 0, 1, 1], dtype=np.int64)
|
520 |
+
result = s.groupby(grouper).agg(lambda x: x.values.my_sum())
|
521 |
+
tm.assert_series_equal(result, expected, check_names=False)
|
522 |
+
|
523 |
+
|
524 |
+
def test_indexing_no_materialize(monkeypatch):
|
525 |
+
# See https://github.com/pandas-dev/pandas/issues/29708
|
526 |
+
# Ensure that indexing operations do not materialize (convert to a numpy
|
527 |
+
# array) the ExtensionArray unnecessary
|
528 |
+
|
529 |
+
def DecimalArray__array__(self, dtype=None):
|
530 |
+
raise Exception("tried to convert a DecimalArray to a numpy array")
|
531 |
+
|
532 |
+
monkeypatch.setattr(DecimalArray, "__array__", DecimalArray__array__, raising=False)
|
533 |
+
|
534 |
+
data = make_data()
|
535 |
+
s = pd.Series(DecimalArray(data))
|
536 |
+
df = pd.DataFrame({"a": s, "b": range(len(s))})
|
537 |
+
|
538 |
+
# ensure the following operations do not raise an error
|
539 |
+
s[s > 0.5]
|
540 |
+
df[s > 0.5]
|
541 |
+
s.at[0]
|
542 |
+
df.at[0, "a"]
|
543 |
+
|
544 |
+
|
545 |
+
def test_to_numpy_keyword():
|
546 |
+
# test the extra keyword
|
547 |
+
values = [decimal.Decimal("1.1111"), decimal.Decimal("2.2222")]
|
548 |
+
expected = np.array(
|
549 |
+
[decimal.Decimal("1.11"), decimal.Decimal("2.22")], dtype="object"
|
550 |
+
)
|
551 |
+
a = pd.array(values, dtype="decimal")
|
552 |
+
result = a.to_numpy(decimals=2)
|
553 |
+
tm.assert_numpy_array_equal(result, expected)
|
554 |
+
|
555 |
+
result = pd.Series(a).to_numpy(decimals=2)
|
556 |
+
tm.assert_numpy_array_equal(result, expected)
|
557 |
+
|
558 |
+
|
559 |
+
def test_array_copy_on_write(using_copy_on_write):
|
560 |
+
df = pd.DataFrame({"a": [decimal.Decimal(2), decimal.Decimal(3)]}, dtype="object")
|
561 |
+
df2 = df.astype(DecimalDtype())
|
562 |
+
df.iloc[0, 0] = 0
|
563 |
+
if using_copy_on_write:
|
564 |
+
expected = pd.DataFrame(
|
565 |
+
{"a": [decimal.Decimal(2), decimal.Decimal(3)]}, dtype=DecimalDtype()
|
566 |
+
)
|
567 |
+
tm.assert_equal(df2.values, expected.values)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__init__.py
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pandas.tests.extension.list.array import (
|
2 |
+
ListArray,
|
3 |
+
ListDtype,
|
4 |
+
make_data,
|
5 |
+
)
|
6 |
+
|
7 |
+
__all__ = ["ListArray", "ListDtype", "make_data"]
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (318 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/array.cpython-310.pyc
ADDED
Binary file (5.12 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/__pycache__/test_list.cpython-310.pyc
ADDED
Binary file (902 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/array.py
ADDED
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Test extension array for storing nested data in a pandas container.
|
3 |
+
|
4 |
+
The ListArray stores an ndarray of lists.
|
5 |
+
"""
|
6 |
+
from __future__ import annotations
|
7 |
+
|
8 |
+
import numbers
|
9 |
+
import string
|
10 |
+
from typing import TYPE_CHECKING
|
11 |
+
|
12 |
+
import numpy as np
|
13 |
+
|
14 |
+
from pandas.core.dtypes.base import ExtensionDtype
|
15 |
+
|
16 |
+
import pandas as pd
|
17 |
+
from pandas.api.types import (
|
18 |
+
is_object_dtype,
|
19 |
+
is_string_dtype,
|
20 |
+
)
|
21 |
+
from pandas.core.arrays import ExtensionArray
|
22 |
+
|
23 |
+
if TYPE_CHECKING:
|
24 |
+
from pandas._typing import type_t
|
25 |
+
|
26 |
+
|
27 |
+
class ListDtype(ExtensionDtype):
|
28 |
+
type = list
|
29 |
+
name = "list"
|
30 |
+
na_value = np.nan
|
31 |
+
|
32 |
+
@classmethod
|
33 |
+
def construct_array_type(cls) -> type_t[ListArray]:
|
34 |
+
"""
|
35 |
+
Return the array type associated with this dtype.
|
36 |
+
|
37 |
+
Returns
|
38 |
+
-------
|
39 |
+
type
|
40 |
+
"""
|
41 |
+
return ListArray
|
42 |
+
|
43 |
+
|
44 |
+
class ListArray(ExtensionArray):
|
45 |
+
dtype = ListDtype()
|
46 |
+
__array_priority__ = 1000
|
47 |
+
|
48 |
+
def __init__(self, values, dtype=None, copy=False) -> None:
|
49 |
+
if not isinstance(values, np.ndarray):
|
50 |
+
raise TypeError("Need to pass a numpy array as values")
|
51 |
+
for val in values:
|
52 |
+
if not isinstance(val, self.dtype.type) and not pd.isna(val):
|
53 |
+
raise TypeError("All values must be of type " + str(self.dtype.type))
|
54 |
+
self.data = values
|
55 |
+
|
56 |
+
@classmethod
|
57 |
+
def _from_sequence(cls, scalars, *, dtype=None, copy=False):
|
58 |
+
data = np.empty(len(scalars), dtype=object)
|
59 |
+
data[:] = scalars
|
60 |
+
return cls(data)
|
61 |
+
|
62 |
+
def __getitem__(self, item):
|
63 |
+
if isinstance(item, numbers.Integral):
|
64 |
+
return self.data[item]
|
65 |
+
else:
|
66 |
+
# slice, list-like, mask
|
67 |
+
return type(self)(self.data[item])
|
68 |
+
|
69 |
+
def __len__(self) -> int:
|
70 |
+
return len(self.data)
|
71 |
+
|
72 |
+
def isna(self):
|
73 |
+
return np.array(
|
74 |
+
[not isinstance(x, list) and np.isnan(x) for x in self.data], dtype=bool
|
75 |
+
)
|
76 |
+
|
77 |
+
def take(self, indexer, allow_fill=False, fill_value=None):
|
78 |
+
# re-implement here, since NumPy has trouble setting
|
79 |
+
# sized objects like UserDicts into scalar slots of
|
80 |
+
# an ndarary.
|
81 |
+
indexer = np.asarray(indexer)
|
82 |
+
msg = (
|
83 |
+
"Index is out of bounds or cannot do a "
|
84 |
+
"non-empty take from an empty array."
|
85 |
+
)
|
86 |
+
|
87 |
+
if allow_fill:
|
88 |
+
if fill_value is None:
|
89 |
+
fill_value = self.dtype.na_value
|
90 |
+
# bounds check
|
91 |
+
if (indexer < -1).any():
|
92 |
+
raise ValueError
|
93 |
+
try:
|
94 |
+
output = [
|
95 |
+
self.data[loc] if loc != -1 else fill_value for loc in indexer
|
96 |
+
]
|
97 |
+
except IndexError as err:
|
98 |
+
raise IndexError(msg) from err
|
99 |
+
else:
|
100 |
+
try:
|
101 |
+
output = [self.data[loc] for loc in indexer]
|
102 |
+
except IndexError as err:
|
103 |
+
raise IndexError(msg) from err
|
104 |
+
|
105 |
+
return self._from_sequence(output)
|
106 |
+
|
107 |
+
def copy(self):
|
108 |
+
return type(self)(self.data[:])
|
109 |
+
|
110 |
+
def astype(self, dtype, copy=True):
|
111 |
+
if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:
|
112 |
+
if copy:
|
113 |
+
return self.copy()
|
114 |
+
return self
|
115 |
+
elif is_string_dtype(dtype) and not is_object_dtype(dtype):
|
116 |
+
# numpy has problems with astype(str) for nested elements
|
117 |
+
return np.array([str(x) for x in self.data], dtype=dtype)
|
118 |
+
elif not copy:
|
119 |
+
return np.asarray(self.data, dtype=dtype)
|
120 |
+
else:
|
121 |
+
return np.array(self.data, dtype=dtype, copy=copy)
|
122 |
+
|
123 |
+
@classmethod
|
124 |
+
def _concat_same_type(cls, to_concat):
|
125 |
+
data = np.concatenate([x.data for x in to_concat])
|
126 |
+
return cls(data)
|
127 |
+
|
128 |
+
|
129 |
+
def make_data():
|
130 |
+
# TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer
|
131 |
+
rng = np.random.default_rng(2)
|
132 |
+
data = np.empty(100, dtype=object)
|
133 |
+
data[:] = [
|
134 |
+
[rng.choice(list(string.ascii_letters)) for _ in range(rng.integers(0, 10))]
|
135 |
+
for _ in range(100)
|
136 |
+
]
|
137 |
+
return data
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/list/test_list.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
|
3 |
+
import pandas as pd
|
4 |
+
from pandas.tests.extension.list.array import (
|
5 |
+
ListArray,
|
6 |
+
ListDtype,
|
7 |
+
make_data,
|
8 |
+
)
|
9 |
+
|
10 |
+
|
11 |
+
@pytest.fixture
|
12 |
+
def dtype():
|
13 |
+
return ListDtype()
|
14 |
+
|
15 |
+
|
16 |
+
@pytest.fixture
|
17 |
+
def data():
|
18 |
+
"""Length-100 ListArray for semantics test."""
|
19 |
+
data = make_data()
|
20 |
+
|
21 |
+
while len(data[0]) == len(data[1]):
|
22 |
+
data = make_data()
|
23 |
+
|
24 |
+
return ListArray(data)
|
25 |
+
|
26 |
+
|
27 |
+
def test_to_csv(data):
|
28 |
+
# https://github.com/pandas-dev/pandas/issues/28840
|
29 |
+
# array with list-likes fail when doing astype(str) on the numpy array
|
30 |
+
# which was done in get_values_for_csv
|
31 |
+
df = pd.DataFrame({"a": data})
|
32 |
+
res = df.to_csv()
|
33 |
+
assert str(data[0]) in res
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_arrow.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_categorical.py
ADDED
@@ -0,0 +1,200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
import string
|
17 |
+
|
18 |
+
import numpy as np
|
19 |
+
import pytest
|
20 |
+
|
21 |
+
from pandas._config import using_pyarrow_string_dtype
|
22 |
+
|
23 |
+
import pandas as pd
|
24 |
+
from pandas import Categorical
|
25 |
+
import pandas._testing as tm
|
26 |
+
from pandas.api.types import CategoricalDtype
|
27 |
+
from pandas.tests.extension import base
|
28 |
+
|
29 |
+
|
30 |
+
def make_data():
|
31 |
+
while True:
|
32 |
+
values = np.random.default_rng(2).choice(list(string.ascii_letters), size=100)
|
33 |
+
# ensure we meet the requirements
|
34 |
+
# 1. first two not null
|
35 |
+
# 2. first and second are different
|
36 |
+
if values[0] != values[1]:
|
37 |
+
break
|
38 |
+
return values
|
39 |
+
|
40 |
+
|
41 |
+
@pytest.fixture
|
42 |
+
def dtype():
|
43 |
+
return CategoricalDtype()
|
44 |
+
|
45 |
+
|
46 |
+
@pytest.fixture
|
47 |
+
def data():
|
48 |
+
"""Length-100 array for this type.
|
49 |
+
|
50 |
+
* data[0] and data[1] should both be non missing
|
51 |
+
* data[0] and data[1] should not be equal
|
52 |
+
"""
|
53 |
+
return Categorical(make_data())
|
54 |
+
|
55 |
+
|
56 |
+
@pytest.fixture
|
57 |
+
def data_missing():
|
58 |
+
"""Length 2 array with [NA, Valid]"""
|
59 |
+
return Categorical([np.nan, "A"])
|
60 |
+
|
61 |
+
|
62 |
+
@pytest.fixture
|
63 |
+
def data_for_sorting():
|
64 |
+
return Categorical(["A", "B", "C"], categories=["C", "A", "B"], ordered=True)
|
65 |
+
|
66 |
+
|
67 |
+
@pytest.fixture
|
68 |
+
def data_missing_for_sorting():
|
69 |
+
return Categorical(["A", None, "B"], categories=["B", "A"], ordered=True)
|
70 |
+
|
71 |
+
|
72 |
+
@pytest.fixture
|
73 |
+
def data_for_grouping():
|
74 |
+
return Categorical(["a", "a", None, None, "b", "b", "a", "c"])
|
75 |
+
|
76 |
+
|
77 |
+
class TestCategorical(base.ExtensionTests):
|
78 |
+
@pytest.mark.xfail(reason="Memory usage doesn't match")
|
79 |
+
def test_memory_usage(self, data):
|
80 |
+
# TODO: Is this deliberate?
|
81 |
+
super().test_memory_usage(data)
|
82 |
+
|
83 |
+
def test_contains(self, data, data_missing):
|
84 |
+
# GH-37867
|
85 |
+
# na value handling in Categorical.__contains__ is deprecated.
|
86 |
+
# See base.BaseInterFaceTests.test_contains for more details.
|
87 |
+
|
88 |
+
na_value = data.dtype.na_value
|
89 |
+
# ensure data without missing values
|
90 |
+
data = data[~data.isna()]
|
91 |
+
|
92 |
+
# first elements are non-missing
|
93 |
+
assert data[0] in data
|
94 |
+
assert data_missing[0] in data_missing
|
95 |
+
|
96 |
+
# check the presence of na_value
|
97 |
+
assert na_value in data_missing
|
98 |
+
assert na_value not in data
|
99 |
+
|
100 |
+
# Categoricals can contain other nan-likes than na_value
|
101 |
+
for na_value_obj in tm.NULL_OBJECTS:
|
102 |
+
if na_value_obj is na_value:
|
103 |
+
continue
|
104 |
+
assert na_value_obj not in data
|
105 |
+
# this section suffers from super method
|
106 |
+
if not using_pyarrow_string_dtype():
|
107 |
+
assert na_value_obj in data_missing
|
108 |
+
|
109 |
+
def test_empty(self, dtype):
|
110 |
+
cls = dtype.construct_array_type()
|
111 |
+
result = cls._empty((4,), dtype=dtype)
|
112 |
+
|
113 |
+
assert isinstance(result, cls)
|
114 |
+
# the dtype we passed is not initialized, so will not match the
|
115 |
+
# dtype on our result.
|
116 |
+
assert result.dtype == CategoricalDtype([])
|
117 |
+
|
118 |
+
@pytest.mark.skip(reason="Backwards compatibility")
|
119 |
+
def test_getitem_scalar(self, data):
|
120 |
+
# CategoricalDtype.type isn't "correct" since it should
|
121 |
+
# be a parent of the elements (object). But don't want
|
122 |
+
# to break things by changing.
|
123 |
+
super().test_getitem_scalar(data)
|
124 |
+
|
125 |
+
@pytest.mark.xfail(reason="Unobserved categories included")
|
126 |
+
def test_value_counts(self, all_data, dropna):
|
127 |
+
return super().test_value_counts(all_data, dropna)
|
128 |
+
|
129 |
+
def test_combine_add(self, data_repeated):
|
130 |
+
# GH 20825
|
131 |
+
# When adding categoricals in combine, result is a string
|
132 |
+
orig_data1, orig_data2 = data_repeated(2)
|
133 |
+
s1 = pd.Series(orig_data1)
|
134 |
+
s2 = pd.Series(orig_data2)
|
135 |
+
result = s1.combine(s2, lambda x1, x2: x1 + x2)
|
136 |
+
expected = pd.Series(
|
137 |
+
[a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]
|
138 |
+
)
|
139 |
+
tm.assert_series_equal(result, expected)
|
140 |
+
|
141 |
+
val = s1.iloc[0]
|
142 |
+
result = s1.combine(val, lambda x1, x2: x1 + x2)
|
143 |
+
expected = pd.Series([a + val for a in list(orig_data1)])
|
144 |
+
tm.assert_series_equal(result, expected)
|
145 |
+
|
146 |
+
@pytest.mark.parametrize("na_action", [None, "ignore"])
|
147 |
+
def test_map(self, data, na_action):
|
148 |
+
result = data.map(lambda x: x, na_action=na_action)
|
149 |
+
tm.assert_extension_array_equal(result, data)
|
150 |
+
|
151 |
+
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
|
152 |
+
# frame & scalar
|
153 |
+
op_name = all_arithmetic_operators
|
154 |
+
if op_name == "__rmod__":
|
155 |
+
request.applymarker(
|
156 |
+
pytest.mark.xfail(
|
157 |
+
reason="rmod never called when string is first argument"
|
158 |
+
)
|
159 |
+
)
|
160 |
+
super().test_arith_frame_with_scalar(data, op_name)
|
161 |
+
|
162 |
+
def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request):
|
163 |
+
op_name = all_arithmetic_operators
|
164 |
+
if op_name == "__rmod__":
|
165 |
+
request.applymarker(
|
166 |
+
pytest.mark.xfail(
|
167 |
+
reason="rmod never called when string is first argument"
|
168 |
+
)
|
169 |
+
)
|
170 |
+
super().test_arith_series_with_scalar(data, op_name)
|
171 |
+
|
172 |
+
def _compare_other(self, ser: pd.Series, data, op, other):
|
173 |
+
op_name = f"__{op.__name__}__"
|
174 |
+
if op_name not in ["__eq__", "__ne__"]:
|
175 |
+
msg = "Unordered Categoricals can only compare equality or not"
|
176 |
+
with pytest.raises(TypeError, match=msg):
|
177 |
+
op(data, other)
|
178 |
+
else:
|
179 |
+
return super()._compare_other(ser, data, op, other)
|
180 |
+
|
181 |
+
@pytest.mark.xfail(reason="Categorical overrides __repr__")
|
182 |
+
@pytest.mark.parametrize("size", ["big", "small"])
|
183 |
+
def test_array_repr(self, data, size):
|
184 |
+
super().test_array_repr(data, size)
|
185 |
+
|
186 |
+
@pytest.mark.xfail(reason="TBD")
|
187 |
+
@pytest.mark.parametrize("as_index", [True, False])
|
188 |
+
def test_groupby_extension_agg(self, as_index, data_for_grouping):
|
189 |
+
super().test_groupby_extension_agg(as_index, data_for_grouping)
|
190 |
+
|
191 |
+
|
192 |
+
class Test2DCompat(base.NDArrayBacked2DTests):
|
193 |
+
def test_repr_2d(self, data):
|
194 |
+
# Categorical __repr__ doesn't include "Categorical", so we need
|
195 |
+
# to special-case
|
196 |
+
res = repr(data.reshape(1, -1))
|
197 |
+
assert res.count("\nCategories") == 1
|
198 |
+
|
199 |
+
res = repr(data.reshape(-1, 1))
|
200 |
+
assert res.count("\nCategories") == 1
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_common.py
ADDED
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
from pandas.core.dtypes import dtypes
|
5 |
+
from pandas.core.dtypes.common import is_extension_array_dtype
|
6 |
+
|
7 |
+
import pandas as pd
|
8 |
+
import pandas._testing as tm
|
9 |
+
from pandas.core.arrays import ExtensionArray
|
10 |
+
|
11 |
+
|
12 |
+
class DummyDtype(dtypes.ExtensionDtype):
|
13 |
+
pass
|
14 |
+
|
15 |
+
|
16 |
+
class DummyArray(ExtensionArray):
|
17 |
+
def __init__(self, data) -> None:
|
18 |
+
self.data = data
|
19 |
+
|
20 |
+
def __array__(self, dtype=None, copy=None):
|
21 |
+
return self.data
|
22 |
+
|
23 |
+
@property
|
24 |
+
def dtype(self):
|
25 |
+
return DummyDtype()
|
26 |
+
|
27 |
+
def astype(self, dtype, copy=True):
|
28 |
+
# we don't support anything but a single dtype
|
29 |
+
if isinstance(dtype, DummyDtype):
|
30 |
+
if copy:
|
31 |
+
return type(self)(self.data)
|
32 |
+
return self
|
33 |
+
elif not copy:
|
34 |
+
return np.asarray(self, dtype=dtype)
|
35 |
+
else:
|
36 |
+
return np.array(self, dtype=dtype, copy=copy)
|
37 |
+
|
38 |
+
|
39 |
+
class TestExtensionArrayDtype:
|
40 |
+
@pytest.mark.parametrize(
|
41 |
+
"values",
|
42 |
+
[
|
43 |
+
pd.Categorical([]),
|
44 |
+
pd.Categorical([]).dtype,
|
45 |
+
pd.Series(pd.Categorical([])),
|
46 |
+
DummyDtype(),
|
47 |
+
DummyArray(np.array([1, 2])),
|
48 |
+
],
|
49 |
+
)
|
50 |
+
def test_is_extension_array_dtype(self, values):
|
51 |
+
assert is_extension_array_dtype(values)
|
52 |
+
|
53 |
+
@pytest.mark.parametrize("values", [np.array([]), pd.Series(np.array([]))])
|
54 |
+
def test_is_not_extension_array_dtype(self, values):
|
55 |
+
assert not is_extension_array_dtype(values)
|
56 |
+
|
57 |
+
|
58 |
+
def test_astype():
|
59 |
+
arr = DummyArray(np.array([1, 2, 3]))
|
60 |
+
expected = np.array([1, 2, 3], dtype=object)
|
61 |
+
|
62 |
+
result = arr.astype(object)
|
63 |
+
tm.assert_numpy_array_equal(result, expected)
|
64 |
+
|
65 |
+
result = arr.astype("object")
|
66 |
+
tm.assert_numpy_array_equal(result, expected)
|
67 |
+
|
68 |
+
|
69 |
+
def test_astype_no_copy():
|
70 |
+
arr = DummyArray(np.array([1, 2, 3], dtype=np.int64))
|
71 |
+
result = arr.astype(arr.dtype, copy=False)
|
72 |
+
|
73 |
+
assert arr is result
|
74 |
+
|
75 |
+
result = arr.astype(arr.dtype)
|
76 |
+
assert arr is not result
|
77 |
+
|
78 |
+
|
79 |
+
@pytest.mark.parametrize("dtype", [dtypes.CategoricalDtype(), dtypes.IntervalDtype()])
|
80 |
+
def test_is_extension_array_dtype(dtype):
|
81 |
+
assert isinstance(dtype, dtypes.ExtensionDtype)
|
82 |
+
assert is_extension_array_dtype(dtype)
|
83 |
+
|
84 |
+
|
85 |
+
class CapturingStringArray(pd.arrays.StringArray):
|
86 |
+
"""Extend StringArray to capture arguments to __getitem__"""
|
87 |
+
|
88 |
+
def __getitem__(self, item):
|
89 |
+
self.last_item_arg = item
|
90 |
+
return super().__getitem__(item)
|
91 |
+
|
92 |
+
|
93 |
+
def test_ellipsis_index():
|
94 |
+
# GH#42430 1D slices over extension types turn into N-dimensional slices
|
95 |
+
# over ExtensionArrays
|
96 |
+
df = pd.DataFrame(
|
97 |
+
{"col1": CapturingStringArray(np.array(["hello", "world"], dtype=object))}
|
98 |
+
)
|
99 |
+
_ = df.iloc[:1]
|
100 |
+
|
101 |
+
# String comparison because there's no native way to compare slices.
|
102 |
+
# Before the fix for GH#42430, last_item_arg would get set to the 2D slice
|
103 |
+
# (Ellipsis, slice(None, 1, None))
|
104 |
+
out = df["col1"].array.last_item_arg
|
105 |
+
assert str(out) == "slice(None, 1, None)"
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_datetime.py
ADDED
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
import numpy as np
|
17 |
+
import pytest
|
18 |
+
|
19 |
+
from pandas.core.dtypes.dtypes import DatetimeTZDtype
|
20 |
+
|
21 |
+
import pandas as pd
|
22 |
+
import pandas._testing as tm
|
23 |
+
from pandas.core.arrays import DatetimeArray
|
24 |
+
from pandas.tests.extension import base
|
25 |
+
|
26 |
+
|
27 |
+
@pytest.fixture(params=["US/Central"])
|
28 |
+
def dtype(request):
|
29 |
+
return DatetimeTZDtype(unit="ns", tz=request.param)
|
30 |
+
|
31 |
+
|
32 |
+
@pytest.fixture
|
33 |
+
def data(dtype):
|
34 |
+
data = DatetimeArray._from_sequence(
|
35 |
+
pd.date_range("2000", periods=100, tz=dtype.tz), dtype=dtype
|
36 |
+
)
|
37 |
+
return data
|
38 |
+
|
39 |
+
|
40 |
+
@pytest.fixture
|
41 |
+
def data_missing(dtype):
|
42 |
+
return DatetimeArray._from_sequence(
|
43 |
+
np.array(["NaT", "2000-01-01"], dtype="datetime64[ns]"), dtype=dtype
|
44 |
+
)
|
45 |
+
|
46 |
+
|
47 |
+
@pytest.fixture
|
48 |
+
def data_for_sorting(dtype):
|
49 |
+
a = pd.Timestamp("2000-01-01")
|
50 |
+
b = pd.Timestamp("2000-01-02")
|
51 |
+
c = pd.Timestamp("2000-01-03")
|
52 |
+
return DatetimeArray._from_sequence(
|
53 |
+
np.array([b, c, a], dtype="datetime64[ns]"), dtype=dtype
|
54 |
+
)
|
55 |
+
|
56 |
+
|
57 |
+
@pytest.fixture
|
58 |
+
def data_missing_for_sorting(dtype):
|
59 |
+
a = pd.Timestamp("2000-01-01")
|
60 |
+
b = pd.Timestamp("2000-01-02")
|
61 |
+
return DatetimeArray._from_sequence(
|
62 |
+
np.array([b, "NaT", a], dtype="datetime64[ns]"), dtype=dtype
|
63 |
+
)
|
64 |
+
|
65 |
+
|
66 |
+
@pytest.fixture
|
67 |
+
def data_for_grouping(dtype):
|
68 |
+
"""
|
69 |
+
Expected to be like [B, B, NA, NA, A, A, B, C]
|
70 |
+
|
71 |
+
Where A < B < C and NA is missing
|
72 |
+
"""
|
73 |
+
a = pd.Timestamp("2000-01-01")
|
74 |
+
b = pd.Timestamp("2000-01-02")
|
75 |
+
c = pd.Timestamp("2000-01-03")
|
76 |
+
na = "NaT"
|
77 |
+
return DatetimeArray._from_sequence(
|
78 |
+
np.array([b, b, na, na, a, a, b, c], dtype="datetime64[ns]"), dtype=dtype
|
79 |
+
)
|
80 |
+
|
81 |
+
|
82 |
+
@pytest.fixture
|
83 |
+
def na_cmp():
|
84 |
+
def cmp(a, b):
|
85 |
+
return a is pd.NaT and a is b
|
86 |
+
|
87 |
+
return cmp
|
88 |
+
|
89 |
+
|
90 |
+
# ----------------------------------------------------------------------------
|
91 |
+
class TestDatetimeArray(base.ExtensionTests):
|
92 |
+
def _get_expected_exception(self, op_name, obj, other):
|
93 |
+
if op_name in ["__sub__", "__rsub__"]:
|
94 |
+
return None
|
95 |
+
return super()._get_expected_exception(op_name, obj, other)
|
96 |
+
|
97 |
+
def _supports_accumulation(self, ser, op_name: str) -> bool:
|
98 |
+
return op_name in ["cummin", "cummax"]
|
99 |
+
|
100 |
+
def _supports_reduction(self, obj, op_name: str) -> bool:
|
101 |
+
return op_name in ["min", "max", "median", "mean", "std", "any", "all"]
|
102 |
+
|
103 |
+
@pytest.mark.parametrize("skipna", [True, False])
|
104 |
+
def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
|
105 |
+
meth = all_boolean_reductions
|
106 |
+
msg = f"'{meth}' with datetime64 dtypes is deprecated and will raise in"
|
107 |
+
with tm.assert_produces_warning(
|
108 |
+
FutureWarning, match=msg, check_stacklevel=False
|
109 |
+
):
|
110 |
+
super().test_reduce_series_boolean(data, all_boolean_reductions, skipna)
|
111 |
+
|
112 |
+
def test_series_constructor(self, data):
|
113 |
+
# Series construction drops any .freq attr
|
114 |
+
data = data._with_freq(None)
|
115 |
+
super().test_series_constructor(data)
|
116 |
+
|
117 |
+
@pytest.mark.parametrize("na_action", [None, "ignore"])
|
118 |
+
def test_map(self, data, na_action):
|
119 |
+
result = data.map(lambda x: x, na_action=na_action)
|
120 |
+
tm.assert_extension_array_equal(result, data)
|
121 |
+
|
122 |
+
def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
|
123 |
+
if op_name in ["median", "mean", "std"]:
|
124 |
+
alt = ser.astype("int64")
|
125 |
+
|
126 |
+
res_op = getattr(ser, op_name)
|
127 |
+
exp_op = getattr(alt, op_name)
|
128 |
+
result = res_op(skipna=skipna)
|
129 |
+
expected = exp_op(skipna=skipna)
|
130 |
+
if op_name in ["mean", "median"]:
|
131 |
+
# error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype"
|
132 |
+
# has no attribute "tz"
|
133 |
+
tz = ser.dtype.tz # type: ignore[union-attr]
|
134 |
+
expected = pd.Timestamp(expected, tz=tz)
|
135 |
+
else:
|
136 |
+
expected = pd.Timedelta(expected)
|
137 |
+
tm.assert_almost_equal(result, expected)
|
138 |
+
|
139 |
+
else:
|
140 |
+
return super().check_reduce(ser, op_name, skipna)
|
141 |
+
|
142 |
+
|
143 |
+
class Test2DCompat(base.NDArrayBacked2DTests):
|
144 |
+
pass
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_extension.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Tests for behavior if an author does *not* implement EA methods.
|
3 |
+
"""
|
4 |
+
import numpy as np
|
5 |
+
import pytest
|
6 |
+
|
7 |
+
from pandas.core.arrays import ExtensionArray
|
8 |
+
|
9 |
+
|
10 |
+
class MyEA(ExtensionArray):
|
11 |
+
def __init__(self, values) -> None:
|
12 |
+
self._values = values
|
13 |
+
|
14 |
+
|
15 |
+
@pytest.fixture
|
16 |
+
def data():
|
17 |
+
arr = np.arange(10)
|
18 |
+
return MyEA(arr)
|
19 |
+
|
20 |
+
|
21 |
+
class TestExtensionArray:
|
22 |
+
def test_errors(self, data, all_arithmetic_operators):
|
23 |
+
# invalid ops
|
24 |
+
op_name = all_arithmetic_operators
|
25 |
+
with pytest.raises(AttributeError):
|
26 |
+
getattr(data, op_name)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_interval.py
ADDED
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
from __future__ import annotations
|
17 |
+
|
18 |
+
from typing import TYPE_CHECKING
|
19 |
+
|
20 |
+
import numpy as np
|
21 |
+
import pytest
|
22 |
+
|
23 |
+
from pandas.core.dtypes.dtypes import IntervalDtype
|
24 |
+
|
25 |
+
from pandas import Interval
|
26 |
+
from pandas.core.arrays import IntervalArray
|
27 |
+
from pandas.tests.extension import base
|
28 |
+
|
29 |
+
if TYPE_CHECKING:
|
30 |
+
import pandas as pd
|
31 |
+
|
32 |
+
|
33 |
+
def make_data():
|
34 |
+
N = 100
|
35 |
+
left_array = np.random.default_rng(2).uniform(size=N).cumsum()
|
36 |
+
right_array = left_array + np.random.default_rng(2).uniform(size=N)
|
37 |
+
return [Interval(left, right) for left, right in zip(left_array, right_array)]
|
38 |
+
|
39 |
+
|
40 |
+
@pytest.fixture
|
41 |
+
def dtype():
|
42 |
+
return IntervalDtype()
|
43 |
+
|
44 |
+
|
45 |
+
@pytest.fixture
|
46 |
+
def data():
|
47 |
+
"""Length-100 PeriodArray for semantics test."""
|
48 |
+
return IntervalArray(make_data())
|
49 |
+
|
50 |
+
|
51 |
+
@pytest.fixture
|
52 |
+
def data_missing():
|
53 |
+
"""Length 2 array with [NA, Valid]"""
|
54 |
+
return IntervalArray.from_tuples([None, (0, 1)])
|
55 |
+
|
56 |
+
|
57 |
+
@pytest.fixture
|
58 |
+
def data_for_twos():
|
59 |
+
pytest.skip("Interval is not a numeric dtype")
|
60 |
+
|
61 |
+
|
62 |
+
@pytest.fixture
|
63 |
+
def data_for_sorting():
|
64 |
+
return IntervalArray.from_tuples([(1, 2), (2, 3), (0, 1)])
|
65 |
+
|
66 |
+
|
67 |
+
@pytest.fixture
|
68 |
+
def data_missing_for_sorting():
|
69 |
+
return IntervalArray.from_tuples([(1, 2), None, (0, 1)])
|
70 |
+
|
71 |
+
|
72 |
+
@pytest.fixture
|
73 |
+
def data_for_grouping():
|
74 |
+
a = (0, 1)
|
75 |
+
b = (1, 2)
|
76 |
+
c = (2, 3)
|
77 |
+
return IntervalArray.from_tuples([b, b, None, None, a, a, b, c])
|
78 |
+
|
79 |
+
|
80 |
+
class TestIntervalArray(base.ExtensionTests):
|
81 |
+
divmod_exc = TypeError
|
82 |
+
|
83 |
+
def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
|
84 |
+
return op_name in ["min", "max"]
|
85 |
+
|
86 |
+
@pytest.mark.xfail(
|
87 |
+
reason="Raises with incorrect message bc it disallows *all* listlikes "
|
88 |
+
"instead of just wrong-length listlikes"
|
89 |
+
)
|
90 |
+
def test_fillna_length_mismatch(self, data_missing):
|
91 |
+
super().test_fillna_length_mismatch(data_missing)
|
92 |
+
|
93 |
+
|
94 |
+
# TODO: either belongs in tests.arrays.interval or move into base tests.
|
95 |
+
def test_fillna_non_scalar_raises(data_missing):
|
96 |
+
msg = "can only insert Interval objects and NA into an IntervalArray"
|
97 |
+
with pytest.raises(TypeError, match=msg):
|
98 |
+
data_missing.fillna([1, 1])
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_masked.py
ADDED
@@ -0,0 +1,417 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
import warnings
|
17 |
+
|
18 |
+
import numpy as np
|
19 |
+
import pytest
|
20 |
+
|
21 |
+
from pandas.compat import (
|
22 |
+
IS64,
|
23 |
+
is_platform_windows,
|
24 |
+
)
|
25 |
+
from pandas.compat.numpy import np_version_gt2
|
26 |
+
|
27 |
+
from pandas.core.dtypes.common import (
|
28 |
+
is_float_dtype,
|
29 |
+
is_signed_integer_dtype,
|
30 |
+
is_unsigned_integer_dtype,
|
31 |
+
)
|
32 |
+
|
33 |
+
import pandas as pd
|
34 |
+
import pandas._testing as tm
|
35 |
+
from pandas.core.arrays.boolean import BooleanDtype
|
36 |
+
from pandas.core.arrays.floating import (
|
37 |
+
Float32Dtype,
|
38 |
+
Float64Dtype,
|
39 |
+
)
|
40 |
+
from pandas.core.arrays.integer import (
|
41 |
+
Int8Dtype,
|
42 |
+
Int16Dtype,
|
43 |
+
Int32Dtype,
|
44 |
+
Int64Dtype,
|
45 |
+
UInt8Dtype,
|
46 |
+
UInt16Dtype,
|
47 |
+
UInt32Dtype,
|
48 |
+
UInt64Dtype,
|
49 |
+
)
|
50 |
+
from pandas.tests.extension import base
|
51 |
+
|
52 |
+
is_windows_or_32bit = (is_platform_windows() and not np_version_gt2) or not IS64
|
53 |
+
|
54 |
+
pytestmark = [
|
55 |
+
pytest.mark.filterwarnings(
|
56 |
+
"ignore:invalid value encountered in divide:RuntimeWarning"
|
57 |
+
),
|
58 |
+
pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning"),
|
59 |
+
# overflow only relevant for Floating dtype cases cases
|
60 |
+
pytest.mark.filterwarnings("ignore:overflow encountered in reduce:RuntimeWarning"),
|
61 |
+
]
|
62 |
+
|
63 |
+
|
64 |
+
def make_data():
|
65 |
+
return list(range(1, 9)) + [pd.NA] + list(range(10, 98)) + [pd.NA] + [99, 100]
|
66 |
+
|
67 |
+
|
68 |
+
def make_float_data():
|
69 |
+
return (
|
70 |
+
list(np.arange(0.1, 0.9, 0.1))
|
71 |
+
+ [pd.NA]
|
72 |
+
+ list(np.arange(1, 9.8, 0.1))
|
73 |
+
+ [pd.NA]
|
74 |
+
+ [9.9, 10.0]
|
75 |
+
)
|
76 |
+
|
77 |
+
|
78 |
+
def make_bool_data():
|
79 |
+
return [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False]
|
80 |
+
|
81 |
+
|
82 |
+
@pytest.fixture(
|
83 |
+
params=[
|
84 |
+
Int8Dtype,
|
85 |
+
Int16Dtype,
|
86 |
+
Int32Dtype,
|
87 |
+
Int64Dtype,
|
88 |
+
UInt8Dtype,
|
89 |
+
UInt16Dtype,
|
90 |
+
UInt32Dtype,
|
91 |
+
UInt64Dtype,
|
92 |
+
Float32Dtype,
|
93 |
+
Float64Dtype,
|
94 |
+
BooleanDtype,
|
95 |
+
]
|
96 |
+
)
|
97 |
+
def dtype(request):
|
98 |
+
return request.param()
|
99 |
+
|
100 |
+
|
101 |
+
@pytest.fixture
|
102 |
+
def data(dtype):
|
103 |
+
if dtype.kind == "f":
|
104 |
+
data = make_float_data()
|
105 |
+
elif dtype.kind == "b":
|
106 |
+
data = make_bool_data()
|
107 |
+
else:
|
108 |
+
data = make_data()
|
109 |
+
return pd.array(data, dtype=dtype)
|
110 |
+
|
111 |
+
|
112 |
+
@pytest.fixture
|
113 |
+
def data_for_twos(dtype):
|
114 |
+
if dtype.kind == "b":
|
115 |
+
return pd.array(np.ones(100), dtype=dtype)
|
116 |
+
return pd.array(np.ones(100) * 2, dtype=dtype)
|
117 |
+
|
118 |
+
|
119 |
+
@pytest.fixture
|
120 |
+
def data_missing(dtype):
|
121 |
+
if dtype.kind == "f":
|
122 |
+
return pd.array([pd.NA, 0.1], dtype=dtype)
|
123 |
+
elif dtype.kind == "b":
|
124 |
+
return pd.array([np.nan, True], dtype=dtype)
|
125 |
+
return pd.array([pd.NA, 1], dtype=dtype)
|
126 |
+
|
127 |
+
|
128 |
+
@pytest.fixture
|
129 |
+
def data_for_sorting(dtype):
|
130 |
+
if dtype.kind == "f":
|
131 |
+
return pd.array([0.1, 0.2, 0.0], dtype=dtype)
|
132 |
+
elif dtype.kind == "b":
|
133 |
+
return pd.array([True, True, False], dtype=dtype)
|
134 |
+
return pd.array([1, 2, 0], dtype=dtype)
|
135 |
+
|
136 |
+
|
137 |
+
@pytest.fixture
|
138 |
+
def data_missing_for_sorting(dtype):
|
139 |
+
if dtype.kind == "f":
|
140 |
+
return pd.array([0.1, pd.NA, 0.0], dtype=dtype)
|
141 |
+
elif dtype.kind == "b":
|
142 |
+
return pd.array([True, np.nan, False], dtype=dtype)
|
143 |
+
return pd.array([1, pd.NA, 0], dtype=dtype)
|
144 |
+
|
145 |
+
|
146 |
+
@pytest.fixture
|
147 |
+
def na_cmp():
|
148 |
+
# we are pd.NA
|
149 |
+
return lambda x, y: x is pd.NA and y is pd.NA
|
150 |
+
|
151 |
+
|
152 |
+
@pytest.fixture
|
153 |
+
def data_for_grouping(dtype):
|
154 |
+
if dtype.kind == "f":
|
155 |
+
b = 0.1
|
156 |
+
a = 0.0
|
157 |
+
c = 0.2
|
158 |
+
elif dtype.kind == "b":
|
159 |
+
b = True
|
160 |
+
a = False
|
161 |
+
c = b
|
162 |
+
else:
|
163 |
+
b = 1
|
164 |
+
a = 0
|
165 |
+
c = 2
|
166 |
+
|
167 |
+
na = pd.NA
|
168 |
+
return pd.array([b, b, na, na, a, a, b, c], dtype=dtype)
|
169 |
+
|
170 |
+
|
171 |
+
class TestMaskedArrays(base.ExtensionTests):
|
172 |
+
@pytest.mark.parametrize("na_action", [None, "ignore"])
|
173 |
+
def test_map(self, data_missing, na_action):
|
174 |
+
result = data_missing.map(lambda x: x, na_action=na_action)
|
175 |
+
if data_missing.dtype == Float32Dtype():
|
176 |
+
# map roundtrips through objects, which converts to float64
|
177 |
+
expected = data_missing.to_numpy(dtype="float64", na_value=np.nan)
|
178 |
+
else:
|
179 |
+
expected = data_missing.to_numpy()
|
180 |
+
tm.assert_numpy_array_equal(result, expected)
|
181 |
+
|
182 |
+
def test_map_na_action_ignore(self, data_missing_for_sorting):
|
183 |
+
zero = data_missing_for_sorting[2]
|
184 |
+
result = data_missing_for_sorting.map(lambda x: zero, na_action="ignore")
|
185 |
+
if data_missing_for_sorting.dtype.kind == "b":
|
186 |
+
expected = np.array([False, pd.NA, False], dtype=object)
|
187 |
+
else:
|
188 |
+
expected = np.array([zero, np.nan, zero])
|
189 |
+
tm.assert_numpy_array_equal(result, expected)
|
190 |
+
|
191 |
+
def _get_expected_exception(self, op_name, obj, other):
|
192 |
+
try:
|
193 |
+
dtype = tm.get_dtype(obj)
|
194 |
+
except AttributeError:
|
195 |
+
# passed arguments reversed
|
196 |
+
dtype = tm.get_dtype(other)
|
197 |
+
|
198 |
+
if dtype.kind == "b":
|
199 |
+
if op_name.strip("_").lstrip("r") in ["pow", "truediv", "floordiv"]:
|
200 |
+
# match behavior with non-masked bool dtype
|
201 |
+
return NotImplementedError
|
202 |
+
elif op_name in ["__sub__", "__rsub__"]:
|
203 |
+
# exception message would include "numpy boolean subtract""
|
204 |
+
return TypeError
|
205 |
+
return None
|
206 |
+
return None
|
207 |
+
|
208 |
+
def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result):
|
209 |
+
sdtype = tm.get_dtype(obj)
|
210 |
+
expected = pointwise_result
|
211 |
+
|
212 |
+
if op_name in ("eq", "ne", "le", "ge", "lt", "gt"):
|
213 |
+
return expected.astype("boolean")
|
214 |
+
|
215 |
+
if sdtype.kind in "iu":
|
216 |
+
if op_name in ("__rtruediv__", "__truediv__", "__div__"):
|
217 |
+
with warnings.catch_warnings():
|
218 |
+
warnings.filterwarnings(
|
219 |
+
"ignore",
|
220 |
+
"Downcasting object dtype arrays",
|
221 |
+
category=FutureWarning,
|
222 |
+
)
|
223 |
+
filled = expected.fillna(np.nan)
|
224 |
+
expected = filled.astype("Float64")
|
225 |
+
else:
|
226 |
+
# combine method result in 'biggest' (int64) dtype
|
227 |
+
expected = expected.astype(sdtype)
|
228 |
+
elif sdtype.kind == "b":
|
229 |
+
if op_name in (
|
230 |
+
"__floordiv__",
|
231 |
+
"__rfloordiv__",
|
232 |
+
"__pow__",
|
233 |
+
"__rpow__",
|
234 |
+
"__mod__",
|
235 |
+
"__rmod__",
|
236 |
+
):
|
237 |
+
# combine keeps boolean type
|
238 |
+
expected = expected.astype("Int8")
|
239 |
+
|
240 |
+
elif op_name in ("__truediv__", "__rtruediv__"):
|
241 |
+
# combine with bools does not generate the correct result
|
242 |
+
# (numpy behaviour for div is to regard the bools as numeric)
|
243 |
+
op = self.get_op_from_name(op_name)
|
244 |
+
expected = self._combine(obj.astype(float), other, op)
|
245 |
+
expected = expected.astype("Float64")
|
246 |
+
|
247 |
+
if op_name == "__rpow__":
|
248 |
+
# for rpow, combine does not propagate NaN
|
249 |
+
result = getattr(obj, op_name)(other)
|
250 |
+
expected[result.isna()] = np.nan
|
251 |
+
else:
|
252 |
+
# combine method result in 'biggest' (float64) dtype
|
253 |
+
expected = expected.astype(sdtype)
|
254 |
+
return expected
|
255 |
+
|
256 |
+
def test_divmod_series_array(self, data, data_for_twos, request):
|
257 |
+
if data.dtype.kind == "b":
|
258 |
+
mark = pytest.mark.xfail(
|
259 |
+
reason="Inconsistency between floordiv and divmod; we raise for "
|
260 |
+
"floordiv but not for divmod. This matches what we do for "
|
261 |
+
"non-masked bool dtype."
|
262 |
+
)
|
263 |
+
request.applymarker(mark)
|
264 |
+
super().test_divmod_series_array(data, data_for_twos)
|
265 |
+
|
266 |
+
def test_combine_le(self, data_repeated):
|
267 |
+
# TODO: patching self is a bad pattern here
|
268 |
+
orig_data1, orig_data2 = data_repeated(2)
|
269 |
+
if orig_data1.dtype.kind == "b":
|
270 |
+
self._combine_le_expected_dtype = "boolean"
|
271 |
+
else:
|
272 |
+
# TODO: can we make this boolean?
|
273 |
+
self._combine_le_expected_dtype = object
|
274 |
+
super().test_combine_le(data_repeated)
|
275 |
+
|
276 |
+
def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
|
277 |
+
if op_name in ["any", "all"] and ser.dtype.kind != "b":
|
278 |
+
pytest.skip(reason="Tested in tests/reductions/test_reductions.py")
|
279 |
+
return True
|
280 |
+
|
281 |
+
def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
|
282 |
+
# overwrite to ensure pd.NA is tested instead of np.nan
|
283 |
+
# https://github.com/pandas-dev/pandas/issues/30958
|
284 |
+
|
285 |
+
cmp_dtype = "int64"
|
286 |
+
if ser.dtype.kind == "f":
|
287 |
+
# Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has
|
288 |
+
# no attribute "numpy_dtype"
|
289 |
+
cmp_dtype = ser.dtype.numpy_dtype # type: ignore[union-attr]
|
290 |
+
elif ser.dtype.kind == "b":
|
291 |
+
if op_name in ["min", "max"]:
|
292 |
+
cmp_dtype = "bool"
|
293 |
+
|
294 |
+
# TODO: prod with integer dtypes does *not* match the result we would
|
295 |
+
# get if we used object for cmp_dtype. In that cae the object result
|
296 |
+
# is a large integer while the non-object case overflows and returns 0
|
297 |
+
alt = ser.dropna().astype(cmp_dtype)
|
298 |
+
if op_name == "count":
|
299 |
+
result = getattr(ser, op_name)()
|
300 |
+
expected = getattr(alt, op_name)()
|
301 |
+
else:
|
302 |
+
result = getattr(ser, op_name)(skipna=skipna)
|
303 |
+
expected = getattr(alt, op_name)(skipna=skipna)
|
304 |
+
if not skipna and ser.isna().any() and op_name not in ["any", "all"]:
|
305 |
+
expected = pd.NA
|
306 |
+
tm.assert_almost_equal(result, expected)
|
307 |
+
|
308 |
+
def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool):
|
309 |
+
if is_float_dtype(arr.dtype):
|
310 |
+
cmp_dtype = arr.dtype.name
|
311 |
+
elif op_name in ["mean", "median", "var", "std", "skew"]:
|
312 |
+
cmp_dtype = "Float64"
|
313 |
+
elif op_name in ["max", "min"]:
|
314 |
+
cmp_dtype = arr.dtype.name
|
315 |
+
elif arr.dtype in ["Int64", "UInt64"]:
|
316 |
+
cmp_dtype = arr.dtype.name
|
317 |
+
elif is_signed_integer_dtype(arr.dtype):
|
318 |
+
# TODO: Why does Window Numpy 2.0 dtype depend on skipna?
|
319 |
+
cmp_dtype = (
|
320 |
+
"Int32"
|
321 |
+
if (is_platform_windows() and (not np_version_gt2 or not skipna))
|
322 |
+
or not IS64
|
323 |
+
else "Int64"
|
324 |
+
)
|
325 |
+
elif is_unsigned_integer_dtype(arr.dtype):
|
326 |
+
cmp_dtype = (
|
327 |
+
"UInt32"
|
328 |
+
if (is_platform_windows() and (not np_version_gt2 or not skipna))
|
329 |
+
or not IS64
|
330 |
+
else "UInt64"
|
331 |
+
)
|
332 |
+
elif arr.dtype.kind == "b":
|
333 |
+
if op_name in ["mean", "median", "var", "std", "skew"]:
|
334 |
+
cmp_dtype = "Float64"
|
335 |
+
elif op_name in ["min", "max"]:
|
336 |
+
cmp_dtype = "boolean"
|
337 |
+
elif op_name in ["sum", "prod"]:
|
338 |
+
cmp_dtype = (
|
339 |
+
"Int32"
|
340 |
+
if (is_platform_windows() and (not np_version_gt2 or not skipna))
|
341 |
+
or not IS64
|
342 |
+
else "Int64"
|
343 |
+
)
|
344 |
+
else:
|
345 |
+
raise TypeError("not supposed to reach this")
|
346 |
+
else:
|
347 |
+
raise TypeError("not supposed to reach this")
|
348 |
+
return cmp_dtype
|
349 |
+
|
350 |
+
def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool:
|
351 |
+
return True
|
352 |
+
|
353 |
+
def check_accumulate(self, ser: pd.Series, op_name: str, skipna: bool):
|
354 |
+
# overwrite to ensure pd.NA is tested instead of np.nan
|
355 |
+
# https://github.com/pandas-dev/pandas/issues/30958
|
356 |
+
length = 64
|
357 |
+
if is_windows_or_32bit:
|
358 |
+
# Item "ExtensionDtype" of "Union[dtype[Any], ExtensionDtype]" has
|
359 |
+
# no attribute "itemsize"
|
360 |
+
if not ser.dtype.itemsize == 8: # type: ignore[union-attr]
|
361 |
+
length = 32
|
362 |
+
|
363 |
+
if ser.dtype.name.startswith("U"):
|
364 |
+
expected_dtype = f"UInt{length}"
|
365 |
+
elif ser.dtype.name.startswith("I"):
|
366 |
+
expected_dtype = f"Int{length}"
|
367 |
+
elif ser.dtype.name.startswith("F"):
|
368 |
+
# Incompatible types in assignment (expression has type
|
369 |
+
# "Union[dtype[Any], ExtensionDtype]", variable has type "str")
|
370 |
+
expected_dtype = ser.dtype # type: ignore[assignment]
|
371 |
+
elif ser.dtype.kind == "b":
|
372 |
+
if op_name in ("cummin", "cummax"):
|
373 |
+
expected_dtype = "boolean"
|
374 |
+
else:
|
375 |
+
expected_dtype = f"Int{length}"
|
376 |
+
|
377 |
+
if expected_dtype == "Float32" and op_name == "cumprod" and skipna:
|
378 |
+
# TODO: xfail?
|
379 |
+
pytest.skip(
|
380 |
+
f"Float32 precision lead to large differences with op {op_name} "
|
381 |
+
f"and skipna={skipna}"
|
382 |
+
)
|
383 |
+
|
384 |
+
if op_name == "cumsum":
|
385 |
+
result = getattr(ser, op_name)(skipna=skipna)
|
386 |
+
expected = pd.Series(
|
387 |
+
pd.array(
|
388 |
+
getattr(ser.astype("float64"), op_name)(skipna=skipna),
|
389 |
+
dtype=expected_dtype,
|
390 |
+
)
|
391 |
+
)
|
392 |
+
tm.assert_series_equal(result, expected)
|
393 |
+
elif op_name in ["cummax", "cummin"]:
|
394 |
+
result = getattr(ser, op_name)(skipna=skipna)
|
395 |
+
expected = pd.Series(
|
396 |
+
pd.array(
|
397 |
+
getattr(ser.astype("float64"), op_name)(skipna=skipna),
|
398 |
+
dtype=ser.dtype,
|
399 |
+
)
|
400 |
+
)
|
401 |
+
tm.assert_series_equal(result, expected)
|
402 |
+
elif op_name == "cumprod":
|
403 |
+
result = getattr(ser[:12], op_name)(skipna=skipna)
|
404 |
+
expected = pd.Series(
|
405 |
+
pd.array(
|
406 |
+
getattr(ser[:12].astype("float64"), op_name)(skipna=skipna),
|
407 |
+
dtype=expected_dtype,
|
408 |
+
)
|
409 |
+
)
|
410 |
+
tm.assert_series_equal(result, expected)
|
411 |
+
|
412 |
+
else:
|
413 |
+
raise NotImplementedError(f"{op_name} not supported")
|
414 |
+
|
415 |
+
|
416 |
+
class Test2DCompat(base.Dim2CompatTests):
|
417 |
+
pass
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_numpy.py
ADDED
@@ -0,0 +1,426 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
Note: we do not bother with base.BaseIndexTests because NumpyExtensionArray
|
16 |
+
will never be held in an Index.
|
17 |
+
"""
|
18 |
+
import numpy as np
|
19 |
+
import pytest
|
20 |
+
|
21 |
+
from pandas.core.dtypes.dtypes import NumpyEADtype
|
22 |
+
|
23 |
+
import pandas as pd
|
24 |
+
import pandas._testing as tm
|
25 |
+
from pandas.api.types import is_object_dtype
|
26 |
+
from pandas.core.arrays.numpy_ import NumpyExtensionArray
|
27 |
+
from pandas.tests.extension import base
|
28 |
+
|
29 |
+
orig_assert_attr_equal = tm.assert_attr_equal
|
30 |
+
|
31 |
+
|
32 |
+
def _assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
|
33 |
+
"""
|
34 |
+
patch tm.assert_attr_equal so NumpyEADtype("object") is closed enough to
|
35 |
+
np.dtype("object")
|
36 |
+
"""
|
37 |
+
if attr == "dtype":
|
38 |
+
lattr = getattr(left, "dtype", None)
|
39 |
+
rattr = getattr(right, "dtype", None)
|
40 |
+
if isinstance(lattr, NumpyEADtype) and not isinstance(rattr, NumpyEADtype):
|
41 |
+
left = left.astype(lattr.numpy_dtype)
|
42 |
+
elif isinstance(rattr, NumpyEADtype) and not isinstance(lattr, NumpyEADtype):
|
43 |
+
right = right.astype(rattr.numpy_dtype)
|
44 |
+
|
45 |
+
orig_assert_attr_equal(attr, left, right, obj)
|
46 |
+
|
47 |
+
|
48 |
+
@pytest.fixture(params=["float", "object"])
|
49 |
+
def dtype(request):
|
50 |
+
return NumpyEADtype(np.dtype(request.param))
|
51 |
+
|
52 |
+
|
53 |
+
@pytest.fixture
|
54 |
+
def allow_in_pandas(monkeypatch):
|
55 |
+
"""
|
56 |
+
A monkeypatch to tells pandas to let us in.
|
57 |
+
|
58 |
+
By default, passing a NumpyExtensionArray to an index / series / frame
|
59 |
+
constructor will unbox that NumpyExtensionArray to an ndarray, and treat
|
60 |
+
it as a non-EA column. We don't want people using EAs without
|
61 |
+
reason.
|
62 |
+
|
63 |
+
The mechanism for this is a check against ABCNumpyExtensionArray
|
64 |
+
in each constructor.
|
65 |
+
|
66 |
+
But, for testing, we need to allow them in pandas. So we patch
|
67 |
+
the _typ of NumpyExtensionArray, so that we evade the ABCNumpyExtensionArray
|
68 |
+
check.
|
69 |
+
"""
|
70 |
+
with monkeypatch.context() as m:
|
71 |
+
m.setattr(NumpyExtensionArray, "_typ", "extension")
|
72 |
+
m.setattr(tm.asserters, "assert_attr_equal", _assert_attr_equal)
|
73 |
+
yield
|
74 |
+
|
75 |
+
|
76 |
+
@pytest.fixture
|
77 |
+
def data(allow_in_pandas, dtype):
|
78 |
+
if dtype.numpy_dtype == "object":
|
79 |
+
return pd.Series([(i,) for i in range(100)]).array
|
80 |
+
return NumpyExtensionArray(np.arange(1, 101, dtype=dtype._dtype))
|
81 |
+
|
82 |
+
|
83 |
+
@pytest.fixture
|
84 |
+
def data_missing(allow_in_pandas, dtype):
|
85 |
+
if dtype.numpy_dtype == "object":
|
86 |
+
return NumpyExtensionArray(np.array([np.nan, (1,)], dtype=object))
|
87 |
+
return NumpyExtensionArray(np.array([np.nan, 1.0]))
|
88 |
+
|
89 |
+
|
90 |
+
@pytest.fixture
|
91 |
+
def na_cmp():
|
92 |
+
def cmp(a, b):
|
93 |
+
return np.isnan(a) and np.isnan(b)
|
94 |
+
|
95 |
+
return cmp
|
96 |
+
|
97 |
+
|
98 |
+
@pytest.fixture
|
99 |
+
def data_for_sorting(allow_in_pandas, dtype):
|
100 |
+
"""Length-3 array with a known sort order.
|
101 |
+
|
102 |
+
This should be three items [B, C, A] with
|
103 |
+
A < B < C
|
104 |
+
"""
|
105 |
+
if dtype.numpy_dtype == "object":
|
106 |
+
# Use an empty tuple for first element, then remove,
|
107 |
+
# to disable np.array's shape inference.
|
108 |
+
return NumpyExtensionArray(np.array([(), (2,), (3,), (1,)], dtype=object)[1:])
|
109 |
+
return NumpyExtensionArray(np.array([1, 2, 0]))
|
110 |
+
|
111 |
+
|
112 |
+
@pytest.fixture
|
113 |
+
def data_missing_for_sorting(allow_in_pandas, dtype):
|
114 |
+
"""Length-3 array with a known sort order.
|
115 |
+
|
116 |
+
This should be three items [B, NA, A] with
|
117 |
+
A < B and NA missing.
|
118 |
+
"""
|
119 |
+
if dtype.numpy_dtype == "object":
|
120 |
+
return NumpyExtensionArray(np.array([(1,), np.nan, (0,)], dtype=object))
|
121 |
+
return NumpyExtensionArray(np.array([1, np.nan, 0]))
|
122 |
+
|
123 |
+
|
124 |
+
@pytest.fixture
|
125 |
+
def data_for_grouping(allow_in_pandas, dtype):
|
126 |
+
"""Data for factorization, grouping, and unique tests.
|
127 |
+
|
128 |
+
Expected to be like [B, B, NA, NA, A, A, B, C]
|
129 |
+
|
130 |
+
Where A < B < C and NA is missing
|
131 |
+
"""
|
132 |
+
if dtype.numpy_dtype == "object":
|
133 |
+
a, b, c = (1,), (2,), (3,)
|
134 |
+
else:
|
135 |
+
a, b, c = np.arange(3)
|
136 |
+
return NumpyExtensionArray(
|
137 |
+
np.array([b, b, np.nan, np.nan, a, a, b, c], dtype=dtype.numpy_dtype)
|
138 |
+
)
|
139 |
+
|
140 |
+
|
141 |
+
@pytest.fixture
|
142 |
+
def data_for_twos(dtype):
|
143 |
+
if dtype.kind == "O":
|
144 |
+
pytest.skip(f"{dtype} is not a numeric dtype")
|
145 |
+
arr = np.ones(100) * 2
|
146 |
+
return NumpyExtensionArray._from_sequence(arr, dtype=dtype)
|
147 |
+
|
148 |
+
|
149 |
+
@pytest.fixture
|
150 |
+
def skip_numpy_object(dtype, request):
|
151 |
+
"""
|
152 |
+
Tests for NumpyExtensionArray with nested data. Users typically won't create
|
153 |
+
these objects via `pd.array`, but they can show up through `.array`
|
154 |
+
on a Series with nested data. Many of the base tests fail, as they aren't
|
155 |
+
appropriate for nested data.
|
156 |
+
|
157 |
+
This fixture allows these tests to be skipped when used as a usefixtures
|
158 |
+
marker to either an individual test or a test class.
|
159 |
+
"""
|
160 |
+
if dtype == "object":
|
161 |
+
mark = pytest.mark.xfail(reason="Fails for object dtype")
|
162 |
+
request.applymarker(mark)
|
163 |
+
|
164 |
+
|
165 |
+
skip_nested = pytest.mark.usefixtures("skip_numpy_object")
|
166 |
+
|
167 |
+
|
168 |
+
class TestNumpyExtensionArray(base.ExtensionTests):
|
169 |
+
@pytest.mark.skip(reason="We don't register our dtype")
|
170 |
+
# We don't want to register. This test should probably be split in two.
|
171 |
+
def test_from_dtype(self, data):
|
172 |
+
pass
|
173 |
+
|
174 |
+
@skip_nested
|
175 |
+
def test_series_constructor_scalar_with_index(self, data, dtype):
|
176 |
+
# ValueError: Length of passed values is 1, index implies 3.
|
177 |
+
super().test_series_constructor_scalar_with_index(data, dtype)
|
178 |
+
|
179 |
+
def test_check_dtype(self, data, request, using_infer_string):
|
180 |
+
if data.dtype.numpy_dtype == "object":
|
181 |
+
request.applymarker(
|
182 |
+
pytest.mark.xfail(
|
183 |
+
reason=f"NumpyExtensionArray expectedly clashes with a "
|
184 |
+
f"NumPy name: {data.dtype.numpy_dtype}"
|
185 |
+
)
|
186 |
+
)
|
187 |
+
super().test_check_dtype(data)
|
188 |
+
|
189 |
+
def test_is_not_object_type(self, dtype, request):
|
190 |
+
if dtype.numpy_dtype == "object":
|
191 |
+
# Different from BaseDtypeTests.test_is_not_object_type
|
192 |
+
# because NumpyEADtype(object) is an object type
|
193 |
+
assert is_object_dtype(dtype)
|
194 |
+
else:
|
195 |
+
super().test_is_not_object_type(dtype)
|
196 |
+
|
197 |
+
@skip_nested
|
198 |
+
def test_getitem_scalar(self, data):
|
199 |
+
# AssertionError
|
200 |
+
super().test_getitem_scalar(data)
|
201 |
+
|
202 |
+
@skip_nested
|
203 |
+
def test_shift_fill_value(self, data):
|
204 |
+
# np.array shape inference. Shift implementation fails.
|
205 |
+
super().test_shift_fill_value(data)
|
206 |
+
|
207 |
+
@skip_nested
|
208 |
+
def test_fillna_copy_frame(self, data_missing):
|
209 |
+
# The "scalar" for this array isn't a scalar.
|
210 |
+
super().test_fillna_copy_frame(data_missing)
|
211 |
+
|
212 |
+
@skip_nested
|
213 |
+
def test_fillna_copy_series(self, data_missing):
|
214 |
+
# The "scalar" for this array isn't a scalar.
|
215 |
+
super().test_fillna_copy_series(data_missing)
|
216 |
+
|
217 |
+
@skip_nested
|
218 |
+
def test_searchsorted(self, data_for_sorting, as_series):
|
219 |
+
# TODO: NumpyExtensionArray.searchsorted calls ndarray.searchsorted which
|
220 |
+
# isn't quite what we want in nested data cases. Instead we need to
|
221 |
+
# adapt something like libindex._bin_search.
|
222 |
+
super().test_searchsorted(data_for_sorting, as_series)
|
223 |
+
|
224 |
+
@pytest.mark.xfail(reason="NumpyExtensionArray.diff may fail on dtype")
|
225 |
+
def test_diff(self, data, periods):
|
226 |
+
return super().test_diff(data, periods)
|
227 |
+
|
228 |
+
def test_insert(self, data, request):
|
229 |
+
if data.dtype.numpy_dtype == object:
|
230 |
+
mark = pytest.mark.xfail(reason="Dimension mismatch in np.concatenate")
|
231 |
+
request.applymarker(mark)
|
232 |
+
|
233 |
+
super().test_insert(data)
|
234 |
+
|
235 |
+
@skip_nested
|
236 |
+
def test_insert_invalid(self, data, invalid_scalar):
|
237 |
+
# NumpyExtensionArray[object] can hold anything, so skip
|
238 |
+
super().test_insert_invalid(data, invalid_scalar)
|
239 |
+
|
240 |
+
divmod_exc = None
|
241 |
+
series_scalar_exc = None
|
242 |
+
frame_scalar_exc = None
|
243 |
+
series_array_exc = None
|
244 |
+
|
245 |
+
def test_divmod(self, data):
|
246 |
+
divmod_exc = None
|
247 |
+
if data.dtype.kind == "O":
|
248 |
+
divmod_exc = TypeError
|
249 |
+
self.divmod_exc = divmod_exc
|
250 |
+
super().test_divmod(data)
|
251 |
+
|
252 |
+
def test_divmod_series_array(self, data):
|
253 |
+
ser = pd.Series(data)
|
254 |
+
exc = None
|
255 |
+
if data.dtype.kind == "O":
|
256 |
+
exc = TypeError
|
257 |
+
self.divmod_exc = exc
|
258 |
+
self._check_divmod_op(ser, divmod, data)
|
259 |
+
|
260 |
+
def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request):
|
261 |
+
opname = all_arithmetic_operators
|
262 |
+
series_scalar_exc = None
|
263 |
+
if data.dtype.numpy_dtype == object:
|
264 |
+
if opname in ["__mul__", "__rmul__"]:
|
265 |
+
mark = pytest.mark.xfail(
|
266 |
+
reason="the Series.combine step raises but not the Series method."
|
267 |
+
)
|
268 |
+
request.node.add_marker(mark)
|
269 |
+
series_scalar_exc = TypeError
|
270 |
+
self.series_scalar_exc = series_scalar_exc
|
271 |
+
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
|
272 |
+
|
273 |
+
def test_arith_series_with_array(self, data, all_arithmetic_operators):
|
274 |
+
opname = all_arithmetic_operators
|
275 |
+
series_array_exc = None
|
276 |
+
if data.dtype.numpy_dtype == object and opname not in ["__add__", "__radd__"]:
|
277 |
+
series_array_exc = TypeError
|
278 |
+
self.series_array_exc = series_array_exc
|
279 |
+
super().test_arith_series_with_array(data, all_arithmetic_operators)
|
280 |
+
|
281 |
+
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
|
282 |
+
opname = all_arithmetic_operators
|
283 |
+
frame_scalar_exc = None
|
284 |
+
if data.dtype.numpy_dtype == object:
|
285 |
+
if opname in ["__mul__", "__rmul__"]:
|
286 |
+
mark = pytest.mark.xfail(
|
287 |
+
reason="the Series.combine step raises but not the Series method."
|
288 |
+
)
|
289 |
+
request.node.add_marker(mark)
|
290 |
+
frame_scalar_exc = TypeError
|
291 |
+
self.frame_scalar_exc = frame_scalar_exc
|
292 |
+
super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
|
293 |
+
|
294 |
+
def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
|
295 |
+
if ser.dtype.kind == "O":
|
296 |
+
return op_name in ["sum", "min", "max", "any", "all"]
|
297 |
+
return True
|
298 |
+
|
299 |
+
def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
|
300 |
+
res_op = getattr(ser, op_name)
|
301 |
+
# avoid coercing int -> float. Just cast to the actual numpy type.
|
302 |
+
# error: Item "ExtensionDtype" of "dtype[Any] | ExtensionDtype" has
|
303 |
+
# no attribute "numpy_dtype"
|
304 |
+
cmp_dtype = ser.dtype.numpy_dtype # type: ignore[union-attr]
|
305 |
+
alt = ser.astype(cmp_dtype)
|
306 |
+
exp_op = getattr(alt, op_name)
|
307 |
+
if op_name == "count":
|
308 |
+
result = res_op()
|
309 |
+
expected = exp_op()
|
310 |
+
else:
|
311 |
+
result = res_op(skipna=skipna)
|
312 |
+
expected = exp_op(skipna=skipna)
|
313 |
+
tm.assert_almost_equal(result, expected)
|
314 |
+
|
315 |
+
@pytest.mark.skip("TODO: tests not written yet")
|
316 |
+
@pytest.mark.parametrize("skipna", [True, False])
|
317 |
+
def test_reduce_frame(self, data, all_numeric_reductions, skipna):
|
318 |
+
pass
|
319 |
+
|
320 |
+
@skip_nested
|
321 |
+
def test_fillna_series(self, data_missing):
|
322 |
+
# Non-scalar "scalar" values.
|
323 |
+
super().test_fillna_series(data_missing)
|
324 |
+
|
325 |
+
@skip_nested
|
326 |
+
def test_fillna_frame(self, data_missing):
|
327 |
+
# Non-scalar "scalar" values.
|
328 |
+
super().test_fillna_frame(data_missing)
|
329 |
+
|
330 |
+
@skip_nested
|
331 |
+
def test_setitem_invalid(self, data, invalid_scalar):
|
332 |
+
# object dtype can hold anything, so doesn't raise
|
333 |
+
super().test_setitem_invalid(data, invalid_scalar)
|
334 |
+
|
335 |
+
@skip_nested
|
336 |
+
def test_setitem_sequence_broadcasts(self, data, box_in_series):
|
337 |
+
# ValueError: cannot set using a list-like indexer with a different
|
338 |
+
# length than the value
|
339 |
+
super().test_setitem_sequence_broadcasts(data, box_in_series)
|
340 |
+
|
341 |
+
@skip_nested
|
342 |
+
@pytest.mark.parametrize("setter", ["loc", None])
|
343 |
+
def test_setitem_mask_broadcast(self, data, setter):
|
344 |
+
# ValueError: cannot set using a list-like indexer with a different
|
345 |
+
# length than the value
|
346 |
+
super().test_setitem_mask_broadcast(data, setter)
|
347 |
+
|
348 |
+
@skip_nested
|
349 |
+
def test_setitem_scalar_key_sequence_raise(self, data):
|
350 |
+
# Failed: DID NOT RAISE <class 'ValueError'>
|
351 |
+
super().test_setitem_scalar_key_sequence_raise(data)
|
352 |
+
|
353 |
+
# TODO: there is some issue with NumpyExtensionArray, therefore,
|
354 |
+
# skip the setitem test for now, and fix it later (GH 31446)
|
355 |
+
|
356 |
+
@skip_nested
|
357 |
+
@pytest.mark.parametrize(
|
358 |
+
"mask",
|
359 |
+
[
|
360 |
+
np.array([True, True, True, False, False]),
|
361 |
+
pd.array([True, True, True, False, False], dtype="boolean"),
|
362 |
+
],
|
363 |
+
ids=["numpy-array", "boolean-array"],
|
364 |
+
)
|
365 |
+
def test_setitem_mask(self, data, mask, box_in_series):
|
366 |
+
super().test_setitem_mask(data, mask, box_in_series)
|
367 |
+
|
368 |
+
@skip_nested
|
369 |
+
@pytest.mark.parametrize(
|
370 |
+
"idx",
|
371 |
+
[[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
|
372 |
+
ids=["list", "integer-array", "numpy-array"],
|
373 |
+
)
|
374 |
+
def test_setitem_integer_array(self, data, idx, box_in_series):
|
375 |
+
super().test_setitem_integer_array(data, idx, box_in_series)
|
376 |
+
|
377 |
+
@pytest.mark.parametrize(
|
378 |
+
"idx, box_in_series",
|
379 |
+
[
|
380 |
+
([0, 1, 2, pd.NA], False),
|
381 |
+
pytest.param([0, 1, 2, pd.NA], True, marks=pytest.mark.xfail),
|
382 |
+
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
|
383 |
+
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
|
384 |
+
],
|
385 |
+
ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],
|
386 |
+
)
|
387 |
+
def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):
|
388 |
+
super().test_setitem_integer_with_missing_raises(data, idx, box_in_series)
|
389 |
+
|
390 |
+
@skip_nested
|
391 |
+
def test_setitem_slice(self, data, box_in_series):
|
392 |
+
super().test_setitem_slice(data, box_in_series)
|
393 |
+
|
394 |
+
@skip_nested
|
395 |
+
def test_setitem_loc_iloc_slice(self, data):
|
396 |
+
super().test_setitem_loc_iloc_slice(data)
|
397 |
+
|
398 |
+
def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):
|
399 |
+
# https://github.com/pandas-dev/pandas/issues/32395
|
400 |
+
df = expected = pd.DataFrame({"data": pd.Series(data)})
|
401 |
+
result = pd.DataFrame(index=df.index)
|
402 |
+
|
403 |
+
# because result has object dtype, the attempt to do setting inplace
|
404 |
+
# is successful, and object dtype is retained
|
405 |
+
key = full_indexer(df)
|
406 |
+
result.loc[key, "data"] = df["data"]
|
407 |
+
|
408 |
+
# base class method has expected = df; NumpyExtensionArray behaves oddly because
|
409 |
+
# we patch _typ for these tests.
|
410 |
+
if data.dtype.numpy_dtype != object:
|
411 |
+
if not isinstance(key, slice) or key != slice(None):
|
412 |
+
expected = pd.DataFrame({"data": data.to_numpy()})
|
413 |
+
tm.assert_frame_equal(result, expected, check_column_type=False)
|
414 |
+
|
415 |
+
@pytest.mark.xfail(reason="NumpyEADtype is unpacked")
|
416 |
+
def test_index_from_listlike_with_dtype(self, data):
|
417 |
+
super().test_index_from_listlike_with_dtype(data)
|
418 |
+
|
419 |
+
@skip_nested
|
420 |
+
@pytest.mark.parametrize("engine", ["c", "python"])
|
421 |
+
def test_EA_types(self, engine, data, request):
|
422 |
+
super().test_EA_types(engine, data, request)
|
423 |
+
|
424 |
+
|
425 |
+
class Test2DCompat(base.NDArrayBacked2DTests):
|
426 |
+
pass
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_period.py
ADDED
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
from __future__ import annotations
|
17 |
+
|
18 |
+
from typing import TYPE_CHECKING
|
19 |
+
|
20 |
+
import numpy as np
|
21 |
+
import pytest
|
22 |
+
|
23 |
+
from pandas._libs import (
|
24 |
+
Period,
|
25 |
+
iNaT,
|
26 |
+
)
|
27 |
+
from pandas.compat import is_platform_windows
|
28 |
+
from pandas.compat.numpy import np_version_gte1p24
|
29 |
+
|
30 |
+
from pandas.core.dtypes.dtypes import PeriodDtype
|
31 |
+
|
32 |
+
import pandas._testing as tm
|
33 |
+
from pandas.core.arrays import PeriodArray
|
34 |
+
from pandas.tests.extension import base
|
35 |
+
|
36 |
+
if TYPE_CHECKING:
|
37 |
+
import pandas as pd
|
38 |
+
|
39 |
+
|
40 |
+
@pytest.fixture(params=["D", "2D"])
|
41 |
+
def dtype(request):
|
42 |
+
return PeriodDtype(freq=request.param)
|
43 |
+
|
44 |
+
|
45 |
+
@pytest.fixture
|
46 |
+
def data(dtype):
|
47 |
+
return PeriodArray(np.arange(1970, 2070), dtype=dtype)
|
48 |
+
|
49 |
+
|
50 |
+
@pytest.fixture
|
51 |
+
def data_for_sorting(dtype):
|
52 |
+
return PeriodArray([2018, 2019, 2017], dtype=dtype)
|
53 |
+
|
54 |
+
|
55 |
+
@pytest.fixture
|
56 |
+
def data_missing(dtype):
|
57 |
+
return PeriodArray([iNaT, 2017], dtype=dtype)
|
58 |
+
|
59 |
+
|
60 |
+
@pytest.fixture
|
61 |
+
def data_missing_for_sorting(dtype):
|
62 |
+
return PeriodArray([2018, iNaT, 2017], dtype=dtype)
|
63 |
+
|
64 |
+
|
65 |
+
@pytest.fixture
|
66 |
+
def data_for_grouping(dtype):
|
67 |
+
B = 2018
|
68 |
+
NA = iNaT
|
69 |
+
A = 2017
|
70 |
+
C = 2019
|
71 |
+
return PeriodArray([B, B, NA, NA, A, A, B, C], dtype=dtype)
|
72 |
+
|
73 |
+
|
74 |
+
class TestPeriodArray(base.ExtensionTests):
|
75 |
+
def _get_expected_exception(self, op_name, obj, other):
|
76 |
+
if op_name in ("__sub__", "__rsub__"):
|
77 |
+
return None
|
78 |
+
return super()._get_expected_exception(op_name, obj, other)
|
79 |
+
|
80 |
+
def _supports_accumulation(self, ser, op_name: str) -> bool:
|
81 |
+
return op_name in ["cummin", "cummax"]
|
82 |
+
|
83 |
+
def _supports_reduction(self, obj, op_name: str) -> bool:
|
84 |
+
return op_name in ["min", "max", "median"]
|
85 |
+
|
86 |
+
def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
|
87 |
+
if op_name == "median":
|
88 |
+
res_op = getattr(ser, op_name)
|
89 |
+
|
90 |
+
alt = ser.astype("int64")
|
91 |
+
|
92 |
+
exp_op = getattr(alt, op_name)
|
93 |
+
result = res_op(skipna=skipna)
|
94 |
+
expected = exp_op(skipna=skipna)
|
95 |
+
# error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype" has no
|
96 |
+
# attribute "freq"
|
97 |
+
freq = ser.dtype.freq # type: ignore[union-attr]
|
98 |
+
expected = Period._from_ordinal(int(expected), freq=freq)
|
99 |
+
tm.assert_almost_equal(result, expected)
|
100 |
+
|
101 |
+
else:
|
102 |
+
return super().check_reduce(ser, op_name, skipna)
|
103 |
+
|
104 |
+
@pytest.mark.parametrize("periods", [1, -2])
|
105 |
+
def test_diff(self, data, periods):
|
106 |
+
if is_platform_windows() and np_version_gte1p24:
|
107 |
+
with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False):
|
108 |
+
super().test_diff(data, periods)
|
109 |
+
else:
|
110 |
+
super().test_diff(data, periods)
|
111 |
+
|
112 |
+
@pytest.mark.parametrize("na_action", [None, "ignore"])
|
113 |
+
def test_map(self, data, na_action):
|
114 |
+
result = data.map(lambda x: x, na_action=na_action)
|
115 |
+
tm.assert_extension_array_equal(result, data)
|
116 |
+
|
117 |
+
|
118 |
+
class Test2DCompat(base.NDArrayBacked2DTests):
|
119 |
+
pass
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_sparse.py
ADDED
@@ -0,0 +1,498 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
|
17 |
+
import numpy as np
|
18 |
+
import pytest
|
19 |
+
|
20 |
+
from pandas.errors import PerformanceWarning
|
21 |
+
|
22 |
+
import pandas as pd
|
23 |
+
from pandas import SparseDtype
|
24 |
+
import pandas._testing as tm
|
25 |
+
from pandas.arrays import SparseArray
|
26 |
+
from pandas.tests.extension import base
|
27 |
+
|
28 |
+
|
29 |
+
def make_data(fill_value):
|
30 |
+
rng = np.random.default_rng(2)
|
31 |
+
if np.isnan(fill_value):
|
32 |
+
data = rng.uniform(size=100)
|
33 |
+
else:
|
34 |
+
data = rng.integers(1, 100, size=100, dtype=int)
|
35 |
+
if data[0] == data[1]:
|
36 |
+
data[0] += 1
|
37 |
+
|
38 |
+
data[2::3] = fill_value
|
39 |
+
return data
|
40 |
+
|
41 |
+
|
42 |
+
@pytest.fixture
|
43 |
+
def dtype():
|
44 |
+
return SparseDtype()
|
45 |
+
|
46 |
+
|
47 |
+
@pytest.fixture(params=[0, np.nan])
|
48 |
+
def data(request):
|
49 |
+
"""Length-100 PeriodArray for semantics test."""
|
50 |
+
res = SparseArray(make_data(request.param), fill_value=request.param)
|
51 |
+
return res
|
52 |
+
|
53 |
+
|
54 |
+
@pytest.fixture
|
55 |
+
def data_for_twos():
|
56 |
+
return SparseArray(np.ones(100) * 2)
|
57 |
+
|
58 |
+
|
59 |
+
@pytest.fixture(params=[0, np.nan])
|
60 |
+
def data_missing(request):
|
61 |
+
"""Length 2 array with [NA, Valid]"""
|
62 |
+
return SparseArray([np.nan, 1], fill_value=request.param)
|
63 |
+
|
64 |
+
|
65 |
+
@pytest.fixture(params=[0, np.nan])
|
66 |
+
def data_repeated(request):
|
67 |
+
"""Return different versions of data for count times"""
|
68 |
+
|
69 |
+
def gen(count):
|
70 |
+
for _ in range(count):
|
71 |
+
yield SparseArray(make_data(request.param), fill_value=request.param)
|
72 |
+
|
73 |
+
yield gen
|
74 |
+
|
75 |
+
|
76 |
+
@pytest.fixture(params=[0, np.nan])
|
77 |
+
def data_for_sorting(request):
|
78 |
+
return SparseArray([2, 3, 1], fill_value=request.param)
|
79 |
+
|
80 |
+
|
81 |
+
@pytest.fixture(params=[0, np.nan])
|
82 |
+
def data_missing_for_sorting(request):
|
83 |
+
return SparseArray([2, np.nan, 1], fill_value=request.param)
|
84 |
+
|
85 |
+
|
86 |
+
@pytest.fixture
|
87 |
+
def na_cmp():
|
88 |
+
return lambda left, right: pd.isna(left) and pd.isna(right)
|
89 |
+
|
90 |
+
|
91 |
+
@pytest.fixture(params=[0, np.nan])
|
92 |
+
def data_for_grouping(request):
|
93 |
+
return SparseArray([1, 1, np.nan, np.nan, 2, 2, 1, 3], fill_value=request.param)
|
94 |
+
|
95 |
+
|
96 |
+
@pytest.fixture(params=[0, np.nan])
|
97 |
+
def data_for_compare(request):
|
98 |
+
return SparseArray([0, 0, np.nan, -2, -1, 4, 2, 3, 0, 0], fill_value=request.param)
|
99 |
+
|
100 |
+
|
101 |
+
class TestSparseArray(base.ExtensionTests):
|
102 |
+
def _supports_reduction(self, obj, op_name: str) -> bool:
|
103 |
+
return True
|
104 |
+
|
105 |
+
@pytest.mark.parametrize("skipna", [True, False])
|
106 |
+
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request):
|
107 |
+
if all_numeric_reductions in [
|
108 |
+
"prod",
|
109 |
+
"median",
|
110 |
+
"var",
|
111 |
+
"std",
|
112 |
+
"sem",
|
113 |
+
"skew",
|
114 |
+
"kurt",
|
115 |
+
]:
|
116 |
+
mark = pytest.mark.xfail(
|
117 |
+
reason="This should be viable but is not implemented"
|
118 |
+
)
|
119 |
+
request.node.add_marker(mark)
|
120 |
+
elif (
|
121 |
+
all_numeric_reductions in ["sum", "max", "min", "mean"]
|
122 |
+
and data.dtype.kind == "f"
|
123 |
+
and not skipna
|
124 |
+
):
|
125 |
+
mark = pytest.mark.xfail(reason="getting a non-nan float")
|
126 |
+
request.node.add_marker(mark)
|
127 |
+
|
128 |
+
super().test_reduce_series_numeric(data, all_numeric_reductions, skipna)
|
129 |
+
|
130 |
+
@pytest.mark.parametrize("skipna", [True, False])
|
131 |
+
def test_reduce_frame(self, data, all_numeric_reductions, skipna, request):
|
132 |
+
if all_numeric_reductions in [
|
133 |
+
"prod",
|
134 |
+
"median",
|
135 |
+
"var",
|
136 |
+
"std",
|
137 |
+
"sem",
|
138 |
+
"skew",
|
139 |
+
"kurt",
|
140 |
+
]:
|
141 |
+
mark = pytest.mark.xfail(
|
142 |
+
reason="This should be viable but is not implemented"
|
143 |
+
)
|
144 |
+
request.node.add_marker(mark)
|
145 |
+
elif (
|
146 |
+
all_numeric_reductions in ["sum", "max", "min", "mean"]
|
147 |
+
and data.dtype.kind == "f"
|
148 |
+
and not skipna
|
149 |
+
):
|
150 |
+
mark = pytest.mark.xfail(reason="ExtensionArray NA mask are different")
|
151 |
+
request.node.add_marker(mark)
|
152 |
+
|
153 |
+
super().test_reduce_frame(data, all_numeric_reductions, skipna)
|
154 |
+
|
155 |
+
def _check_unsupported(self, data):
|
156 |
+
if data.dtype == SparseDtype(int, 0):
|
157 |
+
pytest.skip("Can't store nan in int array.")
|
158 |
+
|
159 |
+
def test_concat_mixed_dtypes(self, data):
|
160 |
+
# https://github.com/pandas-dev/pandas/issues/20762
|
161 |
+
# This should be the same, aside from concat([sparse, float])
|
162 |
+
df1 = pd.DataFrame({"A": data[:3]})
|
163 |
+
df2 = pd.DataFrame({"A": [1, 2, 3]})
|
164 |
+
df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category")
|
165 |
+
dfs = [df1, df2, df3]
|
166 |
+
|
167 |
+
# dataframes
|
168 |
+
result = pd.concat(dfs)
|
169 |
+
expected = pd.concat(
|
170 |
+
[x.apply(lambda s: np.asarray(s).astype(object)) for x in dfs]
|
171 |
+
)
|
172 |
+
tm.assert_frame_equal(result, expected)
|
173 |
+
|
174 |
+
@pytest.mark.filterwarnings(
|
175 |
+
"ignore:The previous implementation of stack is deprecated"
|
176 |
+
)
|
177 |
+
@pytest.mark.parametrize(
|
178 |
+
"columns",
|
179 |
+
[
|
180 |
+
["A", "B"],
|
181 |
+
pd.MultiIndex.from_tuples(
|
182 |
+
[("A", "a"), ("A", "b")], names=["outer", "inner"]
|
183 |
+
),
|
184 |
+
],
|
185 |
+
)
|
186 |
+
@pytest.mark.parametrize("future_stack", [True, False])
|
187 |
+
def test_stack(self, data, columns, future_stack):
|
188 |
+
super().test_stack(data, columns, future_stack)
|
189 |
+
|
190 |
+
def test_concat_columns(self, data, na_value):
|
191 |
+
self._check_unsupported(data)
|
192 |
+
super().test_concat_columns(data, na_value)
|
193 |
+
|
194 |
+
def test_concat_extension_arrays_copy_false(self, data, na_value):
|
195 |
+
self._check_unsupported(data)
|
196 |
+
super().test_concat_extension_arrays_copy_false(data, na_value)
|
197 |
+
|
198 |
+
def test_align(self, data, na_value):
|
199 |
+
self._check_unsupported(data)
|
200 |
+
super().test_align(data, na_value)
|
201 |
+
|
202 |
+
def test_align_frame(self, data, na_value):
|
203 |
+
self._check_unsupported(data)
|
204 |
+
super().test_align_frame(data, na_value)
|
205 |
+
|
206 |
+
def test_align_series_frame(self, data, na_value):
|
207 |
+
self._check_unsupported(data)
|
208 |
+
super().test_align_series_frame(data, na_value)
|
209 |
+
|
210 |
+
def test_merge(self, data, na_value):
|
211 |
+
self._check_unsupported(data)
|
212 |
+
super().test_merge(data, na_value)
|
213 |
+
|
214 |
+
def test_get(self, data):
|
215 |
+
ser = pd.Series(data, index=[2 * i for i in range(len(data))])
|
216 |
+
if np.isnan(ser.values.fill_value):
|
217 |
+
assert np.isnan(ser.get(4)) and np.isnan(ser.iloc[2])
|
218 |
+
else:
|
219 |
+
assert ser.get(4) == ser.iloc[2]
|
220 |
+
assert ser.get(2) == ser.iloc[1]
|
221 |
+
|
222 |
+
def test_reindex(self, data, na_value):
|
223 |
+
self._check_unsupported(data)
|
224 |
+
super().test_reindex(data, na_value)
|
225 |
+
|
226 |
+
def test_isna(self, data_missing):
|
227 |
+
sarr = SparseArray(data_missing)
|
228 |
+
expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value))
|
229 |
+
expected = SparseArray([True, False], dtype=expected_dtype)
|
230 |
+
result = sarr.isna()
|
231 |
+
tm.assert_sp_array_equal(result, expected)
|
232 |
+
|
233 |
+
# test isna for arr without na
|
234 |
+
sarr = sarr.fillna(0)
|
235 |
+
expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value))
|
236 |
+
expected = SparseArray([False, False], fill_value=False, dtype=expected_dtype)
|
237 |
+
tm.assert_equal(sarr.isna(), expected)
|
238 |
+
|
239 |
+
    def test_fillna_limit_backfill(self, data_missing):
        # Sparse backfill emits a PerformanceWarning (densification) and the
        # base test path also raises a FutureWarning; tolerate both.
        warns = (PerformanceWarning, FutureWarning)
        with tm.assert_produces_warning(warns, check_stacklevel=False):
            super().test_fillna_limit_backfill(data_missing)
|
243 |
+
|
244 |
+
    def test_fillna_no_op_returns_copy(self, data, request):
        # For a NaN fill value the no-op fill still produces an array whose
        # fill value differs, so the base expectation cannot hold.
        if np.isnan(data.fill_value):
            request.applymarker(
                pytest.mark.xfail(reason="returns array with different fill value")
            )
        super().test_fillna_no_op_returns_copy(data)
|
250 |
+
|
251 |
+
    @pytest.mark.xfail(reason="Unsupported")
    def test_fillna_series(self, data_missing):
        # this one looks doable.
        # TODO: this fails bc we do not pass through data_missing. If we did,
        # the 0-fill case would xpass
        # NOTE: the missing argument is deliberate — it keeps every
        # parametrization failing so the unconditional xfail stays consistent.
        super().test_fillna_series()
|
257 |
+
|
258 |
+
    def test_fillna_frame(self, data_missing):
        # Have to override to specify that fill_value will change.
        fill_value = data_missing[1]

        result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value)

        # Filling a NaN-fill sparse column changes its dtype's fill value;
        # a non-NaN fill value is preserved as-is.
        if pd.isna(data_missing.fill_value):
            dtype = SparseDtype(data_missing.dtype, fill_value)
        else:
            dtype = data_missing.dtype

        expected = pd.DataFrame(
            {
                "A": data_missing._from_sequence([fill_value, fill_value], dtype=dtype),
                "B": [1, 2],
            }
        )

        tm.assert_frame_equal(result, expected)
|
277 |
+
|
278 |
+
_combine_le_expected_dtype = "Sparse[bool]"
|
279 |
+
|
280 |
+
    def test_fillna_copy_frame(self, data_missing, using_copy_on_write):
        """fillna on a DataFrame must not mutate the original sparse column."""
        arr = data_missing.take([1, 1])
        df = pd.DataFrame({"A": arr}, copy=False)

        filled_val = df.iloc[0, 0]
        result = df.fillna(filled_val)

        if hasattr(df._mgr, "blocks"):
            # Under copy-on-write the no-op fill may share the buffer lazily;
            # otherwise an eager copy is required.
            if using_copy_on_write:
                assert df.values.base is result.values.base
            else:
                assert df.values.base is not result.values.base
        # The original column must still be backed by the same dense data.
        assert df.A._values.to_dense() is arr.to_dense()
|
293 |
+
|
294 |
+
    def test_fillna_copy_series(self, data_missing, using_copy_on_write):
        """fillna on a Series must not mutate the original sparse array."""
        arr = data_missing.take([1, 1])
        ser = pd.Series(arr, copy=False)

        filled_val = ser[0]
        result = ser.fillna(filled_val)

        if using_copy_on_write:
            # No-op fill can return the same values lazily under CoW.
            assert ser._values is result._values

        else:
            assert ser._values is not result._values
            assert ser._values.to_dense() is arr.to_dense()
|
307 |
+
|
308 |
+
    @pytest.mark.xfail(reason="Not Applicable")
    def test_fillna_length_mismatch(self, data_missing):
        # Base-class length-mismatch check does not apply to sparse arrays.
        super().test_fillna_length_mismatch(data_missing)
|
311 |
+
|
312 |
+
    def test_where_series(self, data, na_value):
        """Series.where on sparse data, with and without an ``other`` fill."""
        assert data[0] != data[1]
        cls = type(data)
        a, b = data[:2]

        ser = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))

        cond = np.array([True, True, False, False])
        result = ser.where(cond)

        # Masked-out entries become na_value; the result dtype is the
        # float sparse dtype with fill value 0.0.
        new_dtype = SparseDtype("float", 0.0)
        expected = pd.Series(
            cls._from_sequence([a, a, na_value, na_value], dtype=new_dtype)
        )
        tm.assert_series_equal(result, expected)

        # With an explicit `other`, masked slots take the other's values
        # and the original dtype is preserved.
        other = cls._from_sequence([a, b, a, b], dtype=data.dtype)
        cond = np.array([True, False, True, True])
        result = ser.where(cond, other)
        expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype))
        tm.assert_series_equal(result, expected)
|
333 |
+
|
334 |
+
    def test_searchsorted(self, data_for_sorting, as_series):
        # searchsorted densifies the sparse array, which warns.
        with tm.assert_produces_warning(PerformanceWarning, check_stacklevel=False):
            super().test_searchsorted(data_for_sorting, as_series)
|
337 |
+
|
338 |
+
def test_shift_0_periods(self, data):
|
339 |
+
# GH#33856 shifting with periods=0 should return a copy, not same obj
|
340 |
+
result = data.shift(0)
|
341 |
+
|
342 |
+
data._sparse_values[0] = data._sparse_values[1]
|
343 |
+
assert result._sparse_values[0] != result._sparse_values[1]
|
344 |
+
|
345 |
+
    @pytest.mark.parametrize("method", ["argmax", "argmin"])
    def test_argmin_argmax_all_na(self, method, data, na_value):
        # overriding because Sparse[int64, 0] cannot handle na_value
        self._check_unsupported(data)
        super().test_argmin_argmax_all_na(method, data, na_value)
|
350 |
+
|
351 |
+
    @pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])
    def test_equals(self, data, na_value, as_series, box):
        # Skip unsupported dtype/na combinations, then run the base test.
        self._check_unsupported(data)
        super().test_equals(data, na_value, as_series, box)
|
355 |
+
|
356 |
+
    @pytest.mark.parametrize(
        "func, na_action, expected",
        [
            # identity map keeps values and NaN regardless of na_action
            (lambda x: x, None, SparseArray([1.0, np.nan])),
            (lambda x: x, "ignore", SparseArray([1.0, np.nan])),
            # str maps the NaN fill value too unless na_action="ignore"
            (str, None, SparseArray(["1.0", "nan"], fill_value="nan")),
            (str, "ignore", SparseArray(["1.0", np.nan])),
        ],
    )
    def test_map(self, func, na_action, expected):
        # GH52096
        data = SparseArray([1, np.nan])
        result = data.map(func, na_action=na_action)
        tm.assert_extension_array_equal(result, expected)
|
370 |
+
|
371 |
+
    @pytest.mark.parametrize("na_action", [None, "ignore"])
    def test_map_raises(self, data, na_action):
        # GH52096: mapping every element to the fill value is rejected.
        msg = "fill value in the sparse values not supported"
        with pytest.raises(ValueError, match=msg):
            data.map(lambda x: np.nan, na_action=na_action)
|
377 |
+
|
378 |
+
    @pytest.mark.xfail(raises=TypeError, reason="no sparse StringDtype")
    def test_astype_string(self, data, nullable_string_dtype):
        # TODO: this fails bc we do not pass through nullable_string_dtype;
        # If we did, the 0-cases would xpass
        super().test_astype_string(data)
|
383 |
+
|
384 |
+
    # Expected-exception hooks for the base arithmetic tests; None means the
    # operations are expected to succeed for sparse dtypes.
    series_scalar_exc = None
    frame_scalar_exc = None
    divmod_exc = None
    series_array_exc = None
|
388 |
+
|
389 |
+
def _skip_if_different_combine(self, data):
|
390 |
+
if data.fill_value == 0:
|
391 |
+
# arith ops call on dtype.fill_value so that the sparsity
|
392 |
+
# is maintained. Combine can't be called on a dtype in
|
393 |
+
# general, so we can't make the expected. This is tested elsewhere
|
394 |
+
pytest.skip("Incorrected expected from Series.combine and tested elsewhere")
|
395 |
+
|
396 |
+
    def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
        # Skip zero-fill data (combine expectation is wrong there).
        self._skip_if_different_combine(data)
        super().test_arith_series_with_scalar(data, all_arithmetic_operators)
|
399 |
+
|
400 |
+
    def test_arith_series_with_array(self, data, all_arithmetic_operators):
        # Skip zero-fill data (combine expectation is wrong there).
        self._skip_if_different_combine(data)
        super().test_arith_series_with_array(data, all_arithmetic_operators)
|
403 |
+
|
404 |
+
    def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
        if data.dtype.fill_value != 0:
            # Non-zero fill values are expected to work unchanged.
            pass
        elif all_arithmetic_operators.strip("_") not in [
            "mul",
            "rmul",
            "floordiv",
            "rfloordiv",
            "pow",
            "mod",
            "rmod",
        ]:
            # Remaining ops on zero-fill data produce a result whose
            # fill value disagrees with the base-class expectation.
            mark = pytest.mark.xfail(reason="result dtype.fill_value mismatch")
            request.applymarker(mark)
        super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
|
419 |
+
|
420 |
+
    def _compare_other(
        self, ser: pd.Series, data_for_compare: SparseArray, comparison_op, other
    ):
        """Run ``comparison_op`` on sparse data and check type, dtype and values.

        ``other`` may be a scalar, ndarray, SparseArray or Series; the
        expected result is built by comparing the densified values and
        deriving the fill value from comparing the fill values themselves.
        """
        op = comparison_op

        result = op(data_for_compare, other)
        if isinstance(other, pd.Series):
            assert isinstance(result, pd.Series)
            assert isinstance(result.dtype, SparseDtype)
        else:
            assert isinstance(result, SparseArray)
            assert result.dtype.subtype == np.bool_

        if isinstance(other, pd.Series):
            # Series case: the fill value of the result is the comparison
            # of the two operands' fill values.
            fill_value = op(data_for_compare.fill_value, other._values.fill_value)
            expected = SparseArray(
                op(data_for_compare.to_dense(), np.asarray(other)),
                fill_value=fill_value,
                dtype=np.bool_,
            )

        else:
            # Scalar/array case: reduce the fill-value comparison to a
            # single boolean via np.all (handles array-valued `other`).
            fill_value = np.all(
                op(np.asarray(data_for_compare.fill_value), np.asarray(other))
            )

            expected = SparseArray(
                op(data_for_compare.to_dense(), np.asarray(other)),
                fill_value=fill_value,
                dtype=np.bool_,
            )
        if isinstance(other, pd.Series):
            # error: Incompatible types in assignment
            expected = pd.Series(expected)  # type: ignore[assignment]
        tm.assert_equal(result, expected)
|
455 |
+
|
456 |
+
    def test_scalar(self, data_for_compare: SparseArray, comparison_op):
        # Compare against a few representative scalars, including NaN.
        ser = pd.Series(data_for_compare)
        self._compare_other(ser, data_for_compare, comparison_op, 0)
        self._compare_other(ser, data_for_compare, comparison_op, 1)
        self._compare_other(ser, data_for_compare, comparison_op, -1)
        self._compare_other(ser, data_for_compare, comparison_op, np.nan)
|
462 |
+
|
463 |
+
    def test_array(self, data_for_compare: SparseArray, comparison_op, request):
        # For zero-fill data these ops yield a result whose fill value
        # disagrees with the expected construction.
        if data_for_compare.dtype.fill_value == 0 and comparison_op.__name__ in [
            "eq",
            "ge",
            "le",
        ]:
            mark = pytest.mark.xfail(reason="Wrong fill_value")
            request.applymarker(mark)

        arr = np.linspace(-4, 5, 10)
        ser = pd.Series(data_for_compare)
        self._compare_other(ser, data_for_compare, comparison_op, arr)
|
475 |
+
|
476 |
+
    def test_sparse_array(self, data_for_compare: SparseArray, comparison_op, request):
        # All ops except gt mismatch on fill value for zero-fill data.
        if data_for_compare.dtype.fill_value == 0 and comparison_op.__name__ != "gt":
            mark = pytest.mark.xfail(reason="Wrong fill_value")
            request.applymarker(mark)

        ser = pd.Series(data_for_compare)
        arr = data_for_compare + 1
        self._compare_other(ser, data_for_compare, comparison_op, arr)
        arr = data_for_compare * 2
        self._compare_other(ser, data_for_compare, comparison_op, arr)
|
486 |
+
|
487 |
+
    @pytest.mark.xfail(reason="Different repr")
    def test_array_repr(self, data, size):
        # Sparse arrays render a custom repr that differs from the base
        # expectation.
        super().test_array_repr(data, size)
|
490 |
+
|
491 |
+
    @pytest.mark.xfail(reason="result does not match expected")
    @pytest.mark.parametrize("as_index", [True, False])
    def test_groupby_extension_agg(self, as_index, data_for_grouping):
        super().test_groupby_extension_agg(as_index, data_for_grouping)
|
495 |
+
|
496 |
+
|
497 |
+
def test_array_type_with_arg(dtype):
|
498 |
+
assert dtype.construct_array_type() is SparseArray
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/extension/test_string.py
ADDED
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
from __future__ import annotations
|
17 |
+
|
18 |
+
import string
|
19 |
+
from typing import cast
|
20 |
+
|
21 |
+
import numpy as np
|
22 |
+
import pytest
|
23 |
+
|
24 |
+
import pandas as pd
|
25 |
+
import pandas._testing as tm
|
26 |
+
from pandas.api.types import is_string_dtype
|
27 |
+
from pandas.core.arrays import ArrowStringArray
|
28 |
+
from pandas.core.arrays.string_ import StringDtype
|
29 |
+
from pandas.tests.extension import base
|
30 |
+
|
31 |
+
|
32 |
+
def maybe_split_array(arr, chunked):
    """Return *arr*, rechunked into exactly two pyarrow chunks when requested.

    Non-chunked requests and arrays not backed by pyarrow storage are
    returned unchanged.
    """
    if not chunked or arr.dtype.storage != "pyarrow":
        return arr

    pa = pytest.importorskip("pyarrow")

    backing = arr._pa_array
    half = len(backing) // 2
    rechunked = pa.chunked_array(
        [*backing[:half].chunks, *backing[half:].chunks]
    )
    assert rechunked.num_chunks == 2
    return type(arr)(rechunked)
|
47 |
+
|
48 |
+
|
49 |
+
@pytest.fixture(params=[True, False])
def chunked(request):
    # Whether pyarrow-backed arrays should be split into two chunks
    # (see maybe_split_array).
    return request.param
|
52 |
+
|
53 |
+
|
54 |
+
@pytest.fixture
def dtype(string_storage):
    # StringDtype parametrized over the available storage backends.
    return StringDtype(storage=string_storage)
|
57 |
+
|
58 |
+
|
59 |
+
@pytest.fixture
def data(dtype, chunked):
    """Length-100 string array whose first two elements differ."""
    strings = np.random.default_rng(2).choice(list(string.ascii_letters), size=100)
    while strings[0] == strings[1]:
        # NOTE(review): the generator is re-seeded with the same seed, so this
        # retry would loop forever if the condition were ever true — it relies
        # on seed 2 producing distinct first two draws.
        strings = np.random.default_rng(2).choice(list(string.ascii_letters), size=100)

    arr = dtype.construct_array_type()._from_sequence(strings, dtype=dtype)
    return maybe_split_array(arr, chunked)
|
67 |
+
|
68 |
+
|
69 |
+
@pytest.fixture
def data_missing(dtype, chunked):
    """Length 2 array with [NA, Valid]"""
    arr = dtype.construct_array_type()._from_sequence([pd.NA, "A"], dtype=dtype)
    return maybe_split_array(arr, chunked)
|
74 |
+
|
75 |
+
|
76 |
+
@pytest.fixture
def data_for_sorting(dtype, chunked):
    # Three distinct values in non-sorted order: B < C, A < B.
    arr = dtype.construct_array_type()._from_sequence(["B", "C", "A"], dtype=dtype)
    return maybe_split_array(arr, chunked)
|
80 |
+
|
81 |
+
|
82 |
+
@pytest.fixture
def data_missing_for_sorting(dtype, chunked):
    # Like data_for_sorting but with a missing value in the middle.
    arr = dtype.construct_array_type()._from_sequence(["B", pd.NA, "A"], dtype=dtype)
    return maybe_split_array(arr, chunked)
|
86 |
+
|
87 |
+
|
88 |
+
@pytest.fixture
def data_for_grouping(dtype, chunked):
    # Grouping layout expected by the base tests: duplicates, NAs, and a
    # singleton group.
    arr = dtype.construct_array_type()._from_sequence(
        ["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"], dtype=dtype
    )
    return maybe_split_array(arr, chunked)
|
94 |
+
|
95 |
+
|
96 |
+
class TestStringArray(base.ExtensionTests):
    """Extension-interface conformance tests for StringDtype-backed arrays.

    Overrides adapt the base suite to the storage backends ("python",
    "pyarrow", "pyarrow_numpy") and to operations string arrays reject.
    """

    def test_eq_with_str(self, dtype):
        # The dtype compares equal to its storage-qualified string alias.
        assert dtype == f"string[{dtype.storage}]"
        super().test_eq_with_str(dtype)

    def test_is_not_string_type(self, dtype):
        # Different from BaseDtypeTests.test_is_not_string_type
        # because StringDtype is a string type
        assert is_string_dtype(dtype)

    def test_view(self, data, request, arrow_string_storage):
        if data.dtype.storage in arrow_string_storage:
            pytest.skip(reason="2D support not implemented for ArrowStringArray")
        super().test_view(data)

    def test_from_dtype(self, data):
        # base test uses string representation of dtype
        pass

    def test_transpose(self, data, request, arrow_string_storage):
        if data.dtype.storage in arrow_string_storage:
            pytest.skip(reason="2D support not implemented for ArrowStringArray")
        super().test_transpose(data)

    def test_setitem_preserves_views(self, data, request, arrow_string_storage):
        if data.dtype.storage in arrow_string_storage:
            pytest.skip(reason="2D support not implemented for ArrowStringArray")
        super().test_setitem_preserves_views(data)

    def test_dropna_array(self, data_missing):
        # data_missing is [NA, valid]; dropna keeps only position 1.
        result = data_missing.dropna()
        expected = data_missing[[1]]
        tm.assert_extension_array_equal(result, expected)

    def test_fillna_no_op_returns_copy(self, data):
        # With no NAs present, fillna must still return a distinct object
        # that compares equal to the input.
        data = data[~data.isna()]

        valid = data[0]
        result = data.fillna(valid)
        assert result is not data
        tm.assert_extension_array_equal(result, data)

        result = data.fillna(method="backfill")
        assert result is not data
        tm.assert_extension_array_equal(result, data)

    def _get_expected_exception(
        self, op_name: str, obj, other
    ) -> type[Exception] | None:
        """Map an arithmetic op to the exception the base tests should expect.

        Returns None where the operation is expected to succeed.
        """
        if op_name in ["__divmod__", "__rdivmod__"]:
            if isinstance(obj, pd.Series) and cast(
                StringDtype, tm.get_dtype(obj)
            ).storage in [
                "pyarrow",
                "pyarrow_numpy",
            ]:
                # TODO: re-raise as TypeError?
                return NotImplementedError
            elif isinstance(other, pd.Series) and cast(
                StringDtype, tm.get_dtype(other)
            ).storage in [
                "pyarrow",
                "pyarrow_numpy",
            ]:
                # TODO: re-raise as TypeError?
                return NotImplementedError
            return TypeError
        elif op_name in ["__mod__", "__rmod__", "__pow__", "__rpow__"]:
            if cast(StringDtype, tm.get_dtype(obj)).storage in [
                "pyarrow",
                "pyarrow_numpy",
            ]:
                return NotImplementedError
            return TypeError
        elif op_name in ["__mul__", "__rmul__"]:
            # Can only multiply strings by integers
            return TypeError
        elif op_name in [
            "__truediv__",
            "__rtruediv__",
            "__floordiv__",
            "__rfloordiv__",
            "__sub__",
            "__rsub__",
        ]:
            if cast(StringDtype, tm.get_dtype(obj)).storage in [
                "pyarrow",
                "pyarrow_numpy",
            ]:
                import pyarrow as pa

                # TODO: better to re-raise as TypeError?
                return pa.ArrowNotImplementedError
            return TypeError

        return None

    def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
        # min/max work for all storages; any/all only for pyarrow_numpy.
        return (
            op_name in ["min", "max"]
            or ser.dtype.storage == "pyarrow_numpy"  # type: ignore[union-attr]
            and op_name in ("any", "all")
        )

    def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result):
        # Addition keeps the string dtype; comparisons produce a boolean
        # dtype matching the storage backend.
        dtype = cast(StringDtype, tm.get_dtype(obj))
        if op_name in ["__add__", "__radd__"]:
            cast_to = dtype
        elif dtype.storage == "pyarrow":
            cast_to = "boolean[pyarrow]"  # type: ignore[assignment]
        elif dtype.storage == "pyarrow_numpy":
            cast_to = np.bool_  # type: ignore[assignment]
        else:
            cast_to = "boolean"  # type: ignore[assignment]
        return pointwise_result.astype(cast_to)

    def test_compare_scalar(self, data, comparison_op):
        ser = pd.Series(data)
        self._compare_other(ser, data, comparison_op, "abc")

    @pytest.mark.filterwarnings("ignore:Falling back:pandas.errors.PerformanceWarning")
    def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
        super().test_groupby_extension_apply(data_for_grouping, groupby_apply_op)
|
219 |
+
|
220 |
+
|
221 |
+
class Test2DCompat(base.Dim2CompatTests):
    """2D-compatibility checks; skipped entirely for pyarrow-backed arrays."""

    @pytest.fixture(autouse=True)
    def arrow_not_supported(self, data):
        if isinstance(data, ArrowStringArray):
            pytest.skip(reason="2D support not implemented for ArrowStringArray")
|
226 |
+
|
227 |
+
|
228 |
+
def test_searchsorted_with_na_raises(data_for_sorting, as_series):
    # GH50447: searchsorted must refuse arrays containing NA, since no
    # total order exists then.
    b, c, a = data_for_sorting
    arr = data_for_sorting.take([2, 0, 1])  # to get [a, b, c]
    arr[-1] = pd.NA

    if as_series:
        arr = pd.Series(arr)

    msg = (
        "searchsorted requires array to be sorted, "
        "which is impossible with NAs present."
    )
    with pytest.raises(ValueError, match=msg):
        arr.searchsorted(b)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__init__.py
ADDED
File without changes
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (189 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_impl.cpython-310.pyc
ADDED
Binary file (17.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/interchange/__pycache__/test_spec_conformance.cpython-310.pyc
ADDED
Binary file (5.26 kB). View file
|
|