Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__init__.py +6 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__pycache__/__init__.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__pycache__/array.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__pycache__/test_array_with_attr.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/array.py +89 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/test_array_with_attr.py +33 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__init__.py +131 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/accumulate.py +39 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/base.py +2 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/constructors.py +142 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/dtype.py +123 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/groupby.py +174 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/index.py +19 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/interface.py +137 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/io.py +39 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/methods.py +720 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/missing.py +188 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/ops.py +299 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/printing.py +41 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/reduce.py +153 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/reshaping.py +379 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/setitem.py +451 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/conftest.py +230 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_arrow.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_categorical.py +200 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_common.py +105 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_datetime.py +144 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_extension.py +26 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_interval.py +98 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_masked.py +417 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_numpy.py +426 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_period.py +119 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_sparse.py +498 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_string.py +242 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__init__.py +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_almost_equal.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_attr_equal.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_categorical_equal.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_extension_array_equal.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_index_equal.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_interval_array_equal.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_numpy_array_equal.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_deprecate.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_deprecate_kwarg.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_deprecate_nonkeyword_arguments.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_doc.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_numba.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_rewrite_warning.cpython-310.pyc +0 -0
- llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_shares_memory.cpython-310.pyc +0 -0
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__init__.py
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pandas.tests.extension.array_with_attr.array import (
|
2 |
+
FloatAttrArray,
|
3 |
+
FloatAttrDtype,
|
4 |
+
)
|
5 |
+
|
6 |
+
__all__ = ["FloatAttrArray", "FloatAttrDtype"]
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (348 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__pycache__/array.cpython-310.pyc
ADDED
Binary file (3.48 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/__pycache__/test_array_with_attr.cpython-310.pyc
ADDED
Binary file (1.22 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/array.py
ADDED
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Test extension array that has custom attribute information (not stored on the dtype).
|
3 |
+
|
4 |
+
"""
|
5 |
+
from __future__ import annotations
|
6 |
+
|
7 |
+
import numbers
|
8 |
+
from typing import TYPE_CHECKING
|
9 |
+
|
10 |
+
import numpy as np
|
11 |
+
|
12 |
+
from pandas.core.dtypes.base import ExtensionDtype
|
13 |
+
|
14 |
+
import pandas as pd
|
15 |
+
from pandas.core.arrays import ExtensionArray
|
16 |
+
|
17 |
+
if TYPE_CHECKING:
|
18 |
+
from pandas._typing import type_t
|
19 |
+
|
20 |
+
|
21 |
+
class FloatAttrDtype(ExtensionDtype):
|
22 |
+
type = float
|
23 |
+
name = "float_attr"
|
24 |
+
na_value = np.nan
|
25 |
+
|
26 |
+
@classmethod
|
27 |
+
def construct_array_type(cls) -> type_t[FloatAttrArray]:
|
28 |
+
"""
|
29 |
+
Return the array type associated with this dtype.
|
30 |
+
|
31 |
+
Returns
|
32 |
+
-------
|
33 |
+
type
|
34 |
+
"""
|
35 |
+
return FloatAttrArray
|
36 |
+
|
37 |
+
|
38 |
+
class FloatAttrArray(ExtensionArray):
|
39 |
+
dtype = FloatAttrDtype()
|
40 |
+
__array_priority__ = 1000
|
41 |
+
|
42 |
+
def __init__(self, values, attr=None) -> None:
|
43 |
+
if not isinstance(values, np.ndarray):
|
44 |
+
raise TypeError("Need to pass a numpy array of float64 dtype as values")
|
45 |
+
if not values.dtype == "float64":
|
46 |
+
raise TypeError("Need to pass a numpy array of float64 dtype as values")
|
47 |
+
self.data = values
|
48 |
+
self.attr = attr
|
49 |
+
|
50 |
+
@classmethod
|
51 |
+
def _from_sequence(cls, scalars, *, dtype=None, copy=False):
|
52 |
+
if not copy:
|
53 |
+
data = np.asarray(scalars, dtype="float64")
|
54 |
+
else:
|
55 |
+
data = np.array(scalars, dtype="float64", copy=copy)
|
56 |
+
return cls(data)
|
57 |
+
|
58 |
+
def __getitem__(self, item):
|
59 |
+
if isinstance(item, numbers.Integral):
|
60 |
+
return self.data[item]
|
61 |
+
else:
|
62 |
+
# slice, list-like, mask
|
63 |
+
item = pd.api.indexers.check_array_indexer(self, item)
|
64 |
+
return type(self)(self.data[item], self.attr)
|
65 |
+
|
66 |
+
def __len__(self) -> int:
|
67 |
+
return len(self.data)
|
68 |
+
|
69 |
+
def isna(self):
|
70 |
+
return np.isnan(self.data)
|
71 |
+
|
72 |
+
def take(self, indexer, allow_fill=False, fill_value=None):
|
73 |
+
from pandas.api.extensions import take
|
74 |
+
|
75 |
+
data = self.data
|
76 |
+
if allow_fill and fill_value is None:
|
77 |
+
fill_value = self.dtype.na_value
|
78 |
+
|
79 |
+
result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill)
|
80 |
+
return type(self)(result, self.attr)
|
81 |
+
|
82 |
+
def copy(self):
|
83 |
+
return type(self)(self.data.copy(), self.attr)
|
84 |
+
|
85 |
+
@classmethod
|
86 |
+
def _concat_same_type(cls, to_concat):
|
87 |
+
data = np.concatenate([x.data for x in to_concat])
|
88 |
+
attr = to_concat[0].attr if len(to_concat) else None
|
89 |
+
return cls(data, attr)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/array_with_attr/test_array_with_attr.py
ADDED
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
|
3 |
+
import pandas as pd
|
4 |
+
import pandas._testing as tm
|
5 |
+
from pandas.tests.extension.array_with_attr import FloatAttrArray
|
6 |
+
|
7 |
+
|
8 |
+
def test_concat_with_all_na():
|
9 |
+
# https://github.com/pandas-dev/pandas/pull/47762
|
10 |
+
# ensure that attribute of the column array is preserved (when it gets
|
11 |
+
# preserved in reindexing the array) during merge/concat
|
12 |
+
arr = FloatAttrArray(np.array([np.nan, np.nan], dtype="float64"), attr="test")
|
13 |
+
|
14 |
+
df1 = pd.DataFrame({"col": arr, "key": [0, 1]})
|
15 |
+
df2 = pd.DataFrame({"key": [0, 1], "col2": [1, 2]})
|
16 |
+
result = pd.merge(df1, df2, on="key")
|
17 |
+
expected = pd.DataFrame({"col": arr, "key": [0, 1], "col2": [1, 2]})
|
18 |
+
tm.assert_frame_equal(result, expected)
|
19 |
+
assert result["col"].array.attr == "test"
|
20 |
+
|
21 |
+
df1 = pd.DataFrame({"col": arr, "key": [0, 1]})
|
22 |
+
df2 = pd.DataFrame({"key": [0, 2], "col2": [1, 2]})
|
23 |
+
result = pd.merge(df1, df2, on="key")
|
24 |
+
expected = pd.DataFrame({"col": arr.take([0]), "key": [0], "col2": [1]})
|
25 |
+
tm.assert_frame_equal(result, expected)
|
26 |
+
assert result["col"].array.attr == "test"
|
27 |
+
|
28 |
+
result = pd.concat([df1.set_index("key"), df2.set_index("key")], axis=1)
|
29 |
+
expected = pd.DataFrame(
|
30 |
+
{"col": arr.take([0, 1, -1]), "col2": [1, np.nan, 2], "key": [0, 1, 2]}
|
31 |
+
).set_index("key")
|
32 |
+
tm.assert_frame_equal(result, expected)
|
33 |
+
assert result["col"].array.attr == "test"
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__init__.py
ADDED
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Base test suite for extension arrays.
|
3 |
+
|
4 |
+
These tests are intended for third-party libraries to subclass to validate
|
5 |
+
that their extension arrays and dtypes satisfy the interface. Moving or
|
6 |
+
renaming the tests should not be done lightly.
|
7 |
+
|
8 |
+
Libraries are expected to implement a few pytest fixtures to provide data
|
9 |
+
for the tests. The fixtures may be located in either
|
10 |
+
|
11 |
+
* The same module as your test class.
|
12 |
+
* A ``conftest.py`` in the same directory as your test class.
|
13 |
+
|
14 |
+
The full list of fixtures may be found in the ``conftest.py`` next to this
|
15 |
+
file.
|
16 |
+
|
17 |
+
.. code-block:: python
|
18 |
+
|
19 |
+
import pytest
|
20 |
+
from pandas.tests.extension.base import BaseDtypeTests
|
21 |
+
|
22 |
+
|
23 |
+
@pytest.fixture
|
24 |
+
def dtype():
|
25 |
+
return MyDtype()
|
26 |
+
|
27 |
+
|
28 |
+
class TestMyDtype(BaseDtypeTests):
|
29 |
+
pass
|
30 |
+
|
31 |
+
|
32 |
+
Your class ``TestDtype`` will inherit all the tests defined on
|
33 |
+
``BaseDtypeTests``. pytest's fixture discover will supply your ``dtype``
|
34 |
+
wherever the test requires it. You're free to implement additional tests.
|
35 |
+
|
36 |
+
"""
|
37 |
+
from pandas.tests.extension.base.accumulate import BaseAccumulateTests
|
38 |
+
from pandas.tests.extension.base.casting import BaseCastingTests
|
39 |
+
from pandas.tests.extension.base.constructors import BaseConstructorsTests
|
40 |
+
from pandas.tests.extension.base.dim2 import ( # noqa: F401
|
41 |
+
Dim2CompatTests,
|
42 |
+
NDArrayBacked2DTests,
|
43 |
+
)
|
44 |
+
from pandas.tests.extension.base.dtype import BaseDtypeTests
|
45 |
+
from pandas.tests.extension.base.getitem import BaseGetitemTests
|
46 |
+
from pandas.tests.extension.base.groupby import BaseGroupbyTests
|
47 |
+
from pandas.tests.extension.base.index import BaseIndexTests
|
48 |
+
from pandas.tests.extension.base.interface import BaseInterfaceTests
|
49 |
+
from pandas.tests.extension.base.io import BaseParsingTests
|
50 |
+
from pandas.tests.extension.base.methods import BaseMethodsTests
|
51 |
+
from pandas.tests.extension.base.missing import BaseMissingTests
|
52 |
+
from pandas.tests.extension.base.ops import ( # noqa: F401
|
53 |
+
BaseArithmeticOpsTests,
|
54 |
+
BaseComparisonOpsTests,
|
55 |
+
BaseOpsUtil,
|
56 |
+
BaseUnaryOpsTests,
|
57 |
+
)
|
58 |
+
from pandas.tests.extension.base.printing import BasePrintingTests
|
59 |
+
from pandas.tests.extension.base.reduce import BaseReduceTests
|
60 |
+
from pandas.tests.extension.base.reshaping import BaseReshapingTests
|
61 |
+
from pandas.tests.extension.base.setitem import BaseSetitemTests
|
62 |
+
|
63 |
+
|
64 |
+
# One test class that you can inherit as an alternative to inheriting all the
|
65 |
+
# test classes above.
|
66 |
+
# Note 1) this excludes Dim2CompatTests and NDArrayBacked2DTests.
|
67 |
+
# Note 2) this uses BaseReduceTests and and _not_ BaseBooleanReduceTests,
|
68 |
+
# BaseNoReduceTests, or BaseNumericReduceTests
|
69 |
+
class ExtensionTests(
|
70 |
+
BaseAccumulateTests,
|
71 |
+
BaseCastingTests,
|
72 |
+
BaseConstructorsTests,
|
73 |
+
BaseDtypeTests,
|
74 |
+
BaseGetitemTests,
|
75 |
+
BaseGroupbyTests,
|
76 |
+
BaseIndexTests,
|
77 |
+
BaseInterfaceTests,
|
78 |
+
BaseParsingTests,
|
79 |
+
BaseMethodsTests,
|
80 |
+
BaseMissingTests,
|
81 |
+
BaseArithmeticOpsTests,
|
82 |
+
BaseComparisonOpsTests,
|
83 |
+
BaseUnaryOpsTests,
|
84 |
+
BasePrintingTests,
|
85 |
+
BaseReduceTests,
|
86 |
+
BaseReshapingTests,
|
87 |
+
BaseSetitemTests,
|
88 |
+
Dim2CompatTests,
|
89 |
+
):
|
90 |
+
pass
|
91 |
+
|
92 |
+
|
93 |
+
def __getattr__(name: str):
|
94 |
+
import warnings
|
95 |
+
|
96 |
+
if name == "BaseNoReduceTests":
|
97 |
+
warnings.warn(
|
98 |
+
"BaseNoReduceTests is deprecated and will be removed in a "
|
99 |
+
"future version. Use BaseReduceTests and override "
|
100 |
+
"`_supports_reduction` instead.",
|
101 |
+
FutureWarning,
|
102 |
+
)
|
103 |
+
from pandas.tests.extension.base.reduce import BaseNoReduceTests
|
104 |
+
|
105 |
+
return BaseNoReduceTests
|
106 |
+
|
107 |
+
elif name == "BaseNumericReduceTests":
|
108 |
+
warnings.warn(
|
109 |
+
"BaseNumericReduceTests is deprecated and will be removed in a "
|
110 |
+
"future version. Use BaseReduceTests and override "
|
111 |
+
"`_supports_reduction` instead.",
|
112 |
+
FutureWarning,
|
113 |
+
)
|
114 |
+
from pandas.tests.extension.base.reduce import BaseNumericReduceTests
|
115 |
+
|
116 |
+
return BaseNumericReduceTests
|
117 |
+
|
118 |
+
elif name == "BaseBooleanReduceTests":
|
119 |
+
warnings.warn(
|
120 |
+
"BaseBooleanReduceTests is deprecated and will be removed in a "
|
121 |
+
"future version. Use BaseReduceTests and override "
|
122 |
+
"`_supports_reduction` instead.",
|
123 |
+
FutureWarning,
|
124 |
+
)
|
125 |
+
from pandas.tests.extension.base.reduce import BaseBooleanReduceTests
|
126 |
+
|
127 |
+
return BaseBooleanReduceTests
|
128 |
+
|
129 |
+
raise AttributeError(
|
130 |
+
f"module 'pandas.tests.extension.base' has no attribute '{name}'"
|
131 |
+
)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/accumulate.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
|
3 |
+
import pandas as pd
|
4 |
+
import pandas._testing as tm
|
5 |
+
|
6 |
+
|
7 |
+
class BaseAccumulateTests:
|
8 |
+
"""
|
9 |
+
Accumulation specific tests. Generally these only
|
10 |
+
make sense for numeric/boolean operations.
|
11 |
+
"""
|
12 |
+
|
13 |
+
def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool:
|
14 |
+
# Do we expect this accumulation to be supported for this dtype?
|
15 |
+
# We default to assuming "no"; subclass authors should override here.
|
16 |
+
return False
|
17 |
+
|
18 |
+
def check_accumulate(self, ser: pd.Series, op_name: str, skipna: bool):
|
19 |
+
try:
|
20 |
+
alt = ser.astype("float64")
|
21 |
+
except TypeError:
|
22 |
+
# e.g. Period can't be cast to float64
|
23 |
+
alt = ser.astype(object)
|
24 |
+
|
25 |
+
result = getattr(ser, op_name)(skipna=skipna)
|
26 |
+
expected = getattr(alt, op_name)(skipna=skipna)
|
27 |
+
tm.assert_series_equal(result, expected, check_dtype=False)
|
28 |
+
|
29 |
+
@pytest.mark.parametrize("skipna", [True, False])
|
30 |
+
def test_accumulate_series(self, data, all_numeric_accumulations, skipna):
|
31 |
+
op_name = all_numeric_accumulations
|
32 |
+
ser = pd.Series(data)
|
33 |
+
|
34 |
+
if self._supports_accumulation(ser, op_name):
|
35 |
+
self.check_accumulate(ser, op_name, skipna)
|
36 |
+
else:
|
37 |
+
with pytest.raises((NotImplementedError, TypeError)):
|
38 |
+
# TODO: require TypeError for things that will _never_ work?
|
39 |
+
getattr(ser, op_name)(skipna=skipna)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/base.py
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
class BaseExtensionTests:
|
2 |
+
pass
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/constructors.py
ADDED
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
import pandas as pd
|
5 |
+
import pandas._testing as tm
|
6 |
+
from pandas.api.extensions import ExtensionArray
|
7 |
+
from pandas.core.internals.blocks import EABackedBlock
|
8 |
+
|
9 |
+
|
10 |
+
class BaseConstructorsTests:
|
11 |
+
def test_from_sequence_from_cls(self, data):
|
12 |
+
result = type(data)._from_sequence(data, dtype=data.dtype)
|
13 |
+
tm.assert_extension_array_equal(result, data)
|
14 |
+
|
15 |
+
data = data[:0]
|
16 |
+
result = type(data)._from_sequence(data, dtype=data.dtype)
|
17 |
+
tm.assert_extension_array_equal(result, data)
|
18 |
+
|
19 |
+
def test_array_from_scalars(self, data):
|
20 |
+
scalars = [data[0], data[1], data[2]]
|
21 |
+
result = data._from_sequence(scalars, dtype=data.dtype)
|
22 |
+
assert isinstance(result, type(data))
|
23 |
+
|
24 |
+
def test_series_constructor(self, data):
|
25 |
+
result = pd.Series(data, copy=False)
|
26 |
+
assert result.dtype == data.dtype
|
27 |
+
assert len(result) == len(data)
|
28 |
+
if hasattr(result._mgr, "blocks"):
|
29 |
+
assert isinstance(result._mgr.blocks[0], EABackedBlock)
|
30 |
+
assert result._mgr.array is data
|
31 |
+
|
32 |
+
# Series[EA] is unboxed / boxed correctly
|
33 |
+
result2 = pd.Series(result)
|
34 |
+
assert result2.dtype == data.dtype
|
35 |
+
if hasattr(result._mgr, "blocks"):
|
36 |
+
assert isinstance(result2._mgr.blocks[0], EABackedBlock)
|
37 |
+
|
38 |
+
def test_series_constructor_no_data_with_index(self, dtype, na_value):
|
39 |
+
result = pd.Series(index=[1, 2, 3], dtype=dtype)
|
40 |
+
expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)
|
41 |
+
tm.assert_series_equal(result, expected)
|
42 |
+
|
43 |
+
# GH 33559 - empty index
|
44 |
+
result = pd.Series(index=[], dtype=dtype)
|
45 |
+
expected = pd.Series([], index=pd.Index([], dtype="object"), dtype=dtype)
|
46 |
+
tm.assert_series_equal(result, expected)
|
47 |
+
|
48 |
+
def test_series_constructor_scalar_na_with_index(self, dtype, na_value):
|
49 |
+
result = pd.Series(na_value, index=[1, 2, 3], dtype=dtype)
|
50 |
+
expected = pd.Series([na_value] * 3, index=[1, 2, 3], dtype=dtype)
|
51 |
+
tm.assert_series_equal(result, expected)
|
52 |
+
|
53 |
+
def test_series_constructor_scalar_with_index(self, data, dtype):
|
54 |
+
scalar = data[0]
|
55 |
+
result = pd.Series(scalar, index=[1, 2, 3], dtype=dtype)
|
56 |
+
expected = pd.Series([scalar] * 3, index=[1, 2, 3], dtype=dtype)
|
57 |
+
tm.assert_series_equal(result, expected)
|
58 |
+
|
59 |
+
result = pd.Series(scalar, index=["foo"], dtype=dtype)
|
60 |
+
expected = pd.Series([scalar], index=["foo"], dtype=dtype)
|
61 |
+
tm.assert_series_equal(result, expected)
|
62 |
+
|
63 |
+
@pytest.mark.parametrize("from_series", [True, False])
|
64 |
+
def test_dataframe_constructor_from_dict(self, data, from_series):
|
65 |
+
if from_series:
|
66 |
+
data = pd.Series(data)
|
67 |
+
result = pd.DataFrame({"A": data})
|
68 |
+
assert result.dtypes["A"] == data.dtype
|
69 |
+
assert result.shape == (len(data), 1)
|
70 |
+
if hasattr(result._mgr, "blocks"):
|
71 |
+
assert isinstance(result._mgr.blocks[0], EABackedBlock)
|
72 |
+
assert isinstance(result._mgr.arrays[0], ExtensionArray)
|
73 |
+
|
74 |
+
def test_dataframe_from_series(self, data):
|
75 |
+
result = pd.DataFrame(pd.Series(data))
|
76 |
+
assert result.dtypes[0] == data.dtype
|
77 |
+
assert result.shape == (len(data), 1)
|
78 |
+
if hasattr(result._mgr, "blocks"):
|
79 |
+
assert isinstance(result._mgr.blocks[0], EABackedBlock)
|
80 |
+
assert isinstance(result._mgr.arrays[0], ExtensionArray)
|
81 |
+
|
82 |
+
def test_series_given_mismatched_index_raises(self, data):
|
83 |
+
msg = r"Length of values \(3\) does not match length of index \(5\)"
|
84 |
+
with pytest.raises(ValueError, match=msg):
|
85 |
+
pd.Series(data[:3], index=[0, 1, 2, 3, 4])
|
86 |
+
|
87 |
+
def test_from_dtype(self, data):
|
88 |
+
# construct from our dtype & string dtype
|
89 |
+
dtype = data.dtype
|
90 |
+
|
91 |
+
expected = pd.Series(data)
|
92 |
+
result = pd.Series(list(data), dtype=dtype)
|
93 |
+
tm.assert_series_equal(result, expected)
|
94 |
+
|
95 |
+
result = pd.Series(list(data), dtype=str(dtype))
|
96 |
+
tm.assert_series_equal(result, expected)
|
97 |
+
|
98 |
+
# gh-30280
|
99 |
+
|
100 |
+
expected = pd.DataFrame(data).astype(dtype)
|
101 |
+
result = pd.DataFrame(list(data), dtype=dtype)
|
102 |
+
tm.assert_frame_equal(result, expected)
|
103 |
+
|
104 |
+
result = pd.DataFrame(list(data), dtype=str(dtype))
|
105 |
+
tm.assert_frame_equal(result, expected)
|
106 |
+
|
107 |
+
def test_pandas_array(self, data):
|
108 |
+
# pd.array(extension_array) should be idempotent...
|
109 |
+
result = pd.array(data)
|
110 |
+
tm.assert_extension_array_equal(result, data)
|
111 |
+
|
112 |
+
def test_pandas_array_dtype(self, data):
|
113 |
+
# ... but specifying dtype will override idempotency
|
114 |
+
result = pd.array(data, dtype=np.dtype(object))
|
115 |
+
expected = pd.arrays.NumpyExtensionArray(np.asarray(data, dtype=object))
|
116 |
+
tm.assert_equal(result, expected)
|
117 |
+
|
118 |
+
def test_construct_empty_dataframe(self, dtype):
|
119 |
+
# GH 33623
|
120 |
+
result = pd.DataFrame(columns=["a"], dtype=dtype)
|
121 |
+
expected = pd.DataFrame(
|
122 |
+
{"a": pd.array([], dtype=dtype)}, index=pd.RangeIndex(0)
|
123 |
+
)
|
124 |
+
tm.assert_frame_equal(result, expected)
|
125 |
+
|
126 |
+
def test_empty(self, dtype):
|
127 |
+
cls = dtype.construct_array_type()
|
128 |
+
result = cls._empty((4,), dtype=dtype)
|
129 |
+
assert isinstance(result, cls)
|
130 |
+
assert result.dtype == dtype
|
131 |
+
assert result.shape == (4,)
|
132 |
+
|
133 |
+
# GH#19600 method on ExtensionDtype
|
134 |
+
result2 = dtype.empty((4,))
|
135 |
+
assert isinstance(result2, cls)
|
136 |
+
assert result2.dtype == dtype
|
137 |
+
assert result2.shape == (4,)
|
138 |
+
|
139 |
+
result2 = dtype.empty(4)
|
140 |
+
assert isinstance(result2, cls)
|
141 |
+
assert result2.dtype == dtype
|
142 |
+
assert result2.shape == (4,)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/dtype.py
ADDED
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
import pandas as pd
|
5 |
+
import pandas._testing as tm
|
6 |
+
from pandas.api.types import (
|
7 |
+
infer_dtype,
|
8 |
+
is_object_dtype,
|
9 |
+
is_string_dtype,
|
10 |
+
)
|
11 |
+
|
12 |
+
|
13 |
+
class BaseDtypeTests:
|
14 |
+
"""Base class for ExtensionDtype classes"""
|
15 |
+
|
16 |
+
def test_name(self, dtype):
|
17 |
+
assert isinstance(dtype.name, str)
|
18 |
+
|
19 |
+
def test_kind(self, dtype):
|
20 |
+
valid = set("biufcmMOSUV")
|
21 |
+
assert dtype.kind in valid
|
22 |
+
|
23 |
+
def test_is_dtype_from_name(self, dtype):
|
24 |
+
result = type(dtype).is_dtype(dtype.name)
|
25 |
+
assert result is True
|
26 |
+
|
27 |
+
def test_is_dtype_unboxes_dtype(self, data, dtype):
|
28 |
+
assert dtype.is_dtype(data) is True
|
29 |
+
|
30 |
+
def test_is_dtype_from_self(self, dtype):
|
31 |
+
result = type(dtype).is_dtype(dtype)
|
32 |
+
assert result is True
|
33 |
+
|
34 |
+
def test_is_dtype_other_input(self, dtype):
|
35 |
+
assert dtype.is_dtype([1, 2, 3]) is False
|
36 |
+
|
37 |
+
def test_is_not_string_type(self, dtype):
|
38 |
+
assert not is_string_dtype(dtype)
|
39 |
+
|
40 |
+
def test_is_not_object_type(self, dtype):
|
41 |
+
assert not is_object_dtype(dtype)
|
42 |
+
|
43 |
+
def test_eq_with_str(self, dtype):
|
44 |
+
assert dtype == dtype.name
|
45 |
+
assert dtype != dtype.name + "-suffix"
|
46 |
+
|
47 |
+
def test_eq_with_numpy_object(self, dtype):
|
48 |
+
assert dtype != np.dtype("object")
|
49 |
+
|
50 |
+
def test_eq_with_self(self, dtype):
|
51 |
+
assert dtype == dtype
|
52 |
+
assert dtype != object()
|
53 |
+
|
54 |
+
def test_array_type(self, data, dtype):
|
55 |
+
assert dtype.construct_array_type() is type(data)
|
56 |
+
|
57 |
+
def test_check_dtype(self, data):
|
58 |
+
dtype = data.dtype
|
59 |
+
|
60 |
+
# check equivalency for using .dtypes
|
61 |
+
df = pd.DataFrame(
|
62 |
+
{
|
63 |
+
"A": pd.Series(data, dtype=dtype),
|
64 |
+
"B": data,
|
65 |
+
"C": pd.Series(["foo"] * len(data), dtype=object),
|
66 |
+
"D": 1,
|
67 |
+
}
|
68 |
+
)
|
69 |
+
result = df.dtypes == str(dtype)
|
70 |
+
assert np.dtype("int64") != "Int64"
|
71 |
+
|
72 |
+
expected = pd.Series([True, True, False, False], index=list("ABCD"))
|
73 |
+
|
74 |
+
tm.assert_series_equal(result, expected)
|
75 |
+
|
76 |
+
expected = pd.Series([True, True, False, False], index=list("ABCD"))
|
77 |
+
result = df.dtypes.apply(str) == str(dtype)
|
78 |
+
tm.assert_series_equal(result, expected)
|
79 |
+
|
80 |
+
def test_hashable(self, dtype):
|
81 |
+
hash(dtype) # no error
|
82 |
+
|
83 |
+
def test_str(self, dtype):
|
84 |
+
assert str(dtype) == dtype.name
|
85 |
+
|
86 |
+
def test_eq(self, dtype):
|
87 |
+
assert dtype == dtype.name
|
88 |
+
assert dtype != "anonther_type"
|
89 |
+
|
90 |
+
def test_construct_from_string_own_name(self, dtype):
|
91 |
+
result = dtype.construct_from_string(dtype.name)
|
92 |
+
assert type(result) is type(dtype)
|
93 |
+
|
94 |
+
# check OK as classmethod
|
95 |
+
result = type(dtype).construct_from_string(dtype.name)
|
96 |
+
assert type(result) is type(dtype)
|
97 |
+
|
98 |
+
def test_construct_from_string_another_type_raises(self, dtype):
|
99 |
+
msg = f"Cannot construct a '{type(dtype).__name__}' from 'another_type'"
|
100 |
+
with pytest.raises(TypeError, match=msg):
|
101 |
+
type(dtype).construct_from_string("another_type")
|
102 |
+
|
103 |
+
def test_construct_from_string_wrong_type_raises(self, dtype):
|
104 |
+
with pytest.raises(
|
105 |
+
TypeError,
|
106 |
+
match="'construct_from_string' expects a string, got <class 'int'>",
|
107 |
+
):
|
108 |
+
type(dtype).construct_from_string(0)
|
109 |
+
|
110 |
+
def test_get_common_dtype(self, dtype):
|
111 |
+
# in practice we will not typically call this with a 1-length list
|
112 |
+
# (we shortcut to just use that dtype as the common dtype), but
|
113 |
+
# still testing as good practice to have this working (and it is the
|
114 |
+
# only case we can test in general)
|
115 |
+
assert dtype._get_common_dtype([dtype]) == dtype
|
116 |
+
|
117 |
+
@pytest.mark.parametrize("skipna", [True, False])
|
118 |
+
def test_infer_dtype(self, data, data_missing, skipna):
|
119 |
+
# only testing that this works without raising an error
|
120 |
+
res = infer_dtype(data, skipna=skipna)
|
121 |
+
assert isinstance(res, str)
|
122 |
+
res = infer_dtype(data_missing, skipna=skipna)
|
123 |
+
assert isinstance(res, str)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/groupby.py
ADDED
@@ -0,0 +1,174 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import re
|
2 |
+
|
3 |
+
import pytest
|
4 |
+
|
5 |
+
from pandas.core.dtypes.common import (
|
6 |
+
is_bool_dtype,
|
7 |
+
is_numeric_dtype,
|
8 |
+
is_object_dtype,
|
9 |
+
is_string_dtype,
|
10 |
+
)
|
11 |
+
|
12 |
+
import pandas as pd
|
13 |
+
import pandas._testing as tm
|
14 |
+
|
15 |
+
|
16 |
+
@pytest.mark.filterwarnings(
|
17 |
+
"ignore:The default of observed=False is deprecated:FutureWarning"
|
18 |
+
)
|
19 |
+
class BaseGroupbyTests:
|
20 |
+
"""Groupby-specific tests."""
|
21 |
+
|
22 |
+
def test_grouping_grouper(self, data_for_grouping):
|
23 |
+
df = pd.DataFrame(
|
24 |
+
{
|
25 |
+
"A": pd.Series(
|
26 |
+
["B", "B", None, None, "A", "A", "B", "C"], dtype=object
|
27 |
+
),
|
28 |
+
"B": data_for_grouping,
|
29 |
+
}
|
30 |
+
)
|
31 |
+
gr1 = df.groupby("A")._grouper.groupings[0]
|
32 |
+
gr2 = df.groupby("B")._grouper.groupings[0]
|
33 |
+
|
34 |
+
tm.assert_numpy_array_equal(gr1.grouping_vector, df.A.values)
|
35 |
+
tm.assert_extension_array_equal(gr2.grouping_vector, data_for_grouping)
|
36 |
+
|
37 |
+
@pytest.mark.parametrize("as_index", [True, False])
|
38 |
+
def test_groupby_extension_agg(self, as_index, data_for_grouping):
|
39 |
+
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
|
40 |
+
|
41 |
+
is_bool = data_for_grouping.dtype._is_boolean
|
42 |
+
if is_bool:
|
43 |
+
# only 2 unique values, and the final entry has c==b
|
44 |
+
# (see data_for_grouping docstring)
|
45 |
+
df = df.iloc[:-1]
|
46 |
+
|
47 |
+
result = df.groupby("B", as_index=as_index).A.mean()
|
48 |
+
_, uniques = pd.factorize(data_for_grouping, sort=True)
|
49 |
+
|
50 |
+
exp_vals = [3.0, 1.0, 4.0]
|
51 |
+
if is_bool:
|
52 |
+
exp_vals = exp_vals[:-1]
|
53 |
+
if as_index:
|
54 |
+
index = pd.Index(uniques, name="B")
|
55 |
+
expected = pd.Series(exp_vals, index=index, name="A")
|
56 |
+
tm.assert_series_equal(result, expected)
|
57 |
+
else:
|
58 |
+
expected = pd.DataFrame({"B": uniques, "A": exp_vals})
|
59 |
+
tm.assert_frame_equal(result, expected)
|
60 |
+
|
61 |
+
def test_groupby_agg_extension(self, data_for_grouping):
|
62 |
+
# GH#38980 groupby agg on extension type fails for non-numeric types
|
63 |
+
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
|
64 |
+
|
65 |
+
expected = df.iloc[[0, 2, 4, 7]]
|
66 |
+
expected = expected.set_index("A")
|
67 |
+
|
68 |
+
result = df.groupby("A").agg({"B": "first"})
|
69 |
+
tm.assert_frame_equal(result, expected)
|
70 |
+
|
71 |
+
result = df.groupby("A").agg("first")
|
72 |
+
tm.assert_frame_equal(result, expected)
|
73 |
+
|
74 |
+
result = df.groupby("A").first()
|
75 |
+
tm.assert_frame_equal(result, expected)
|
76 |
+
|
77 |
+
def test_groupby_extension_no_sort(self, data_for_grouping):
|
78 |
+
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
|
79 |
+
|
80 |
+
is_bool = data_for_grouping.dtype._is_boolean
|
81 |
+
if is_bool:
|
82 |
+
# only 2 unique values, and the final entry has c==b
|
83 |
+
# (see data_for_grouping docstring)
|
84 |
+
df = df.iloc[:-1]
|
85 |
+
|
86 |
+
result = df.groupby("B", sort=False).A.mean()
|
87 |
+
_, index = pd.factorize(data_for_grouping, sort=False)
|
88 |
+
|
89 |
+
index = pd.Index(index, name="B")
|
90 |
+
exp_vals = [1.0, 3.0, 4.0]
|
91 |
+
if is_bool:
|
92 |
+
exp_vals = exp_vals[:-1]
|
93 |
+
expected = pd.Series(exp_vals, index=index, name="A")
|
94 |
+
tm.assert_series_equal(result, expected)
|
95 |
+
|
96 |
+
def test_groupby_extension_transform(self, data_for_grouping):
|
97 |
+
is_bool = data_for_grouping.dtype._is_boolean
|
98 |
+
|
99 |
+
valid = data_for_grouping[~data_for_grouping.isna()]
|
100 |
+
df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4], "B": valid})
|
101 |
+
is_bool = data_for_grouping.dtype._is_boolean
|
102 |
+
if is_bool:
|
103 |
+
# only 2 unique values, and the final entry has c==b
|
104 |
+
# (see data_for_grouping docstring)
|
105 |
+
df = df.iloc[:-1]
|
106 |
+
|
107 |
+
result = df.groupby("B").A.transform(len)
|
108 |
+
expected = pd.Series([3, 3, 2, 2, 3, 1], name="A")
|
109 |
+
if is_bool:
|
110 |
+
expected = expected[:-1]
|
111 |
+
|
112 |
+
tm.assert_series_equal(result, expected)
|
113 |
+
|
114 |
+
def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
|
115 |
+
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
|
116 |
+
msg = "DataFrameGroupBy.apply operated on the grouping columns"
|
117 |
+
with tm.assert_produces_warning(DeprecationWarning, match=msg):
|
118 |
+
df.groupby("B", group_keys=False, observed=False).apply(groupby_apply_op)
|
119 |
+
df.groupby("B", group_keys=False, observed=False).A.apply(groupby_apply_op)
|
120 |
+
msg = "DataFrameGroupBy.apply operated on the grouping columns"
|
121 |
+
with tm.assert_produces_warning(DeprecationWarning, match=msg):
|
122 |
+
df.groupby("A", group_keys=False, observed=False).apply(groupby_apply_op)
|
123 |
+
df.groupby("A", group_keys=False, observed=False).B.apply(groupby_apply_op)
|
124 |
+
|
125 |
+
def test_groupby_apply_identity(self, data_for_grouping):
|
126 |
+
df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
|
127 |
+
result = df.groupby("A").B.apply(lambda x: x.array)
|
128 |
+
expected = pd.Series(
|
129 |
+
[
|
130 |
+
df.B.iloc[[0, 1, 6]].array,
|
131 |
+
df.B.iloc[[2, 3]].array,
|
132 |
+
df.B.iloc[[4, 5]].array,
|
133 |
+
df.B.iloc[[7]].array,
|
134 |
+
],
|
135 |
+
index=pd.Index([1, 2, 3, 4], name="A"),
|
136 |
+
name="B",
|
137 |
+
)
|
138 |
+
tm.assert_series_equal(result, expected)
|
139 |
+
|
140 |
+
def test_in_numeric_groupby(self, data_for_grouping):
|
141 |
+
df = pd.DataFrame(
|
142 |
+
{
|
143 |
+
"A": [1, 1, 2, 2, 3, 3, 1, 4],
|
144 |
+
"B": data_for_grouping,
|
145 |
+
"C": [1, 1, 1, 1, 1, 1, 1, 1],
|
146 |
+
}
|
147 |
+
)
|
148 |
+
|
149 |
+
dtype = data_for_grouping.dtype
|
150 |
+
if (
|
151 |
+
is_numeric_dtype(dtype)
|
152 |
+
or is_bool_dtype(dtype)
|
153 |
+
or dtype.name == "decimal"
|
154 |
+
or is_string_dtype(dtype)
|
155 |
+
or is_object_dtype(dtype)
|
156 |
+
or dtype.kind == "m" # in particular duration[*][pyarrow]
|
157 |
+
):
|
158 |
+
expected = pd.Index(["B", "C"])
|
159 |
+
result = df.groupby("A").sum().columns
|
160 |
+
else:
|
161 |
+
expected = pd.Index(["C"])
|
162 |
+
|
163 |
+
msg = "|".join(
|
164 |
+
[
|
165 |
+
# period/datetime
|
166 |
+
"does not support sum operations",
|
167 |
+
# all others
|
168 |
+
re.escape(f"agg function failed [how->sum,dtype->{dtype}"),
|
169 |
+
]
|
170 |
+
)
|
171 |
+
with pytest.raises(TypeError, match=msg):
|
172 |
+
df.groupby("A").sum()
|
173 |
+
result = df.groupby("A").sum(numeric_only=True).columns
|
174 |
+
tm.assert_index_equal(result, expected)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/index.py
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Tests for Indexes backed by arbitrary ExtensionArrays.
|
3 |
+
"""
|
4 |
+
import pandas as pd
|
5 |
+
|
6 |
+
|
7 |
+
class BaseIndexTests:
|
8 |
+
"""Tests for Index object backed by an ExtensionArray"""
|
9 |
+
|
10 |
+
def test_index_from_array(self, data):
|
11 |
+
idx = pd.Index(data)
|
12 |
+
assert data.dtype == idx.dtype
|
13 |
+
|
14 |
+
def test_index_from_listlike_with_dtype(self, data):
|
15 |
+
idx = pd.Index(data, dtype=data.dtype)
|
16 |
+
assert idx.dtype == data.dtype
|
17 |
+
|
18 |
+
idx = pd.Index(list(data), dtype=data.dtype)
|
19 |
+
assert idx.dtype == data.dtype
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/interface.py
ADDED
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
|
5 |
+
from pandas.core.dtypes.common import is_extension_array_dtype
|
6 |
+
from pandas.core.dtypes.dtypes import ExtensionDtype
|
7 |
+
|
8 |
+
import pandas as pd
|
9 |
+
import pandas._testing as tm
|
10 |
+
|
11 |
+
|
12 |
+
class BaseInterfaceTests:
|
13 |
+
"""Tests that the basic interface is satisfied."""
|
14 |
+
|
15 |
+
# ------------------------------------------------------------------------
|
16 |
+
# Interface
|
17 |
+
# ------------------------------------------------------------------------
|
18 |
+
|
19 |
+
def test_len(self, data):
|
20 |
+
assert len(data) == 100
|
21 |
+
|
22 |
+
def test_size(self, data):
|
23 |
+
assert data.size == 100
|
24 |
+
|
25 |
+
def test_ndim(self, data):
|
26 |
+
assert data.ndim == 1
|
27 |
+
|
28 |
+
def test_can_hold_na_valid(self, data):
|
29 |
+
# GH-20761
|
30 |
+
assert data._can_hold_na is True
|
31 |
+
|
32 |
+
def test_contains(self, data, data_missing):
|
33 |
+
# GH-37867
|
34 |
+
# Tests for membership checks. Membership checks for nan-likes is tricky and
|
35 |
+
# the settled on rule is: `nan_like in arr` is True if nan_like is
|
36 |
+
# arr.dtype.na_value and arr.isna().any() is True. Else the check returns False.
|
37 |
+
|
38 |
+
na_value = data.dtype.na_value
|
39 |
+
# ensure data without missing values
|
40 |
+
data = data[~data.isna()]
|
41 |
+
|
42 |
+
# first elements are non-missing
|
43 |
+
assert data[0] in data
|
44 |
+
assert data_missing[0] in data_missing
|
45 |
+
|
46 |
+
# check the presence of na_value
|
47 |
+
assert na_value in data_missing
|
48 |
+
assert na_value not in data
|
49 |
+
|
50 |
+
# the data can never contain other nan-likes than na_value
|
51 |
+
for na_value_obj in tm.NULL_OBJECTS:
|
52 |
+
if na_value_obj is na_value or type(na_value_obj) == type(na_value):
|
53 |
+
# type check for e.g. two instances of Decimal("NAN")
|
54 |
+
continue
|
55 |
+
assert na_value_obj not in data
|
56 |
+
assert na_value_obj not in data_missing
|
57 |
+
|
58 |
+
def test_memory_usage(self, data):
|
59 |
+
s = pd.Series(data)
|
60 |
+
result = s.memory_usage(index=False)
|
61 |
+
assert result == s.nbytes
|
62 |
+
|
63 |
+
def test_array_interface(self, data):
|
64 |
+
result = np.array(data)
|
65 |
+
assert result[0] == data[0]
|
66 |
+
|
67 |
+
result = np.array(data, dtype=object)
|
68 |
+
expected = np.array(list(data), dtype=object)
|
69 |
+
if expected.ndim > 1:
|
70 |
+
# nested data, explicitly construct as 1D
|
71 |
+
expected = construct_1d_object_array_from_listlike(list(data))
|
72 |
+
tm.assert_numpy_array_equal(result, expected)
|
73 |
+
|
74 |
+
def test_is_extension_array_dtype(self, data):
|
75 |
+
assert is_extension_array_dtype(data)
|
76 |
+
assert is_extension_array_dtype(data.dtype)
|
77 |
+
assert is_extension_array_dtype(pd.Series(data))
|
78 |
+
assert isinstance(data.dtype, ExtensionDtype)
|
79 |
+
|
80 |
+
def test_no_values_attribute(self, data):
|
81 |
+
# GH-20735: EA's with .values attribute give problems with internal
|
82 |
+
# code, disallowing this for now until solved
|
83 |
+
assert not hasattr(data, "values")
|
84 |
+
assert not hasattr(data, "_values")
|
85 |
+
|
86 |
+
def test_is_numeric_honored(self, data):
|
87 |
+
result = pd.Series(data)
|
88 |
+
if hasattr(result._mgr, "blocks"):
|
89 |
+
assert result._mgr.blocks[0].is_numeric is data.dtype._is_numeric
|
90 |
+
|
91 |
+
def test_isna_extension_array(self, data_missing):
|
92 |
+
# If your `isna` returns an ExtensionArray, you must also implement
|
93 |
+
# _reduce. At the *very* least, you must implement any and all
|
94 |
+
na = data_missing.isna()
|
95 |
+
if is_extension_array_dtype(na):
|
96 |
+
assert na._reduce("any")
|
97 |
+
assert na.any()
|
98 |
+
|
99 |
+
assert not na._reduce("all")
|
100 |
+
assert not na.all()
|
101 |
+
|
102 |
+
assert na.dtype._is_boolean
|
103 |
+
|
104 |
+
def test_copy(self, data):
|
105 |
+
# GH#27083 removing deep keyword from EA.copy
|
106 |
+
assert data[0] != data[1]
|
107 |
+
result = data.copy()
|
108 |
+
|
109 |
+
if data.dtype._is_immutable:
|
110 |
+
pytest.skip(f"test_copy assumes mutability and {data.dtype} is immutable")
|
111 |
+
|
112 |
+
data[1] = data[0]
|
113 |
+
assert result[1] != result[0]
|
114 |
+
|
115 |
+
def test_view(self, data):
|
116 |
+
# view with no dtype should return a shallow copy, *not* the same
|
117 |
+
# object
|
118 |
+
assert data[1] != data[0]
|
119 |
+
|
120 |
+
result = data.view()
|
121 |
+
assert result is not data
|
122 |
+
assert type(result) == type(data)
|
123 |
+
|
124 |
+
if data.dtype._is_immutable:
|
125 |
+
pytest.skip(f"test_view assumes mutability and {data.dtype} is immutable")
|
126 |
+
|
127 |
+
result[1] = result[0]
|
128 |
+
assert data[1] == data[0]
|
129 |
+
|
130 |
+
# check specifically that the `dtype` kwarg is accepted
|
131 |
+
data.view(dtype=None)
|
132 |
+
|
133 |
+
def test_tolist(self, data):
|
134 |
+
result = data.tolist()
|
135 |
+
expected = list(data)
|
136 |
+
assert isinstance(result, list)
|
137 |
+
assert result == expected
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/io.py
ADDED
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from io import StringIO
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
import pytest
|
5 |
+
|
6 |
+
import pandas as pd
|
7 |
+
import pandas._testing as tm
|
8 |
+
from pandas.core.arrays import ExtensionArray
|
9 |
+
|
10 |
+
|
11 |
+
class BaseParsingTests:
|
12 |
+
@pytest.mark.parametrize("engine", ["c", "python"])
|
13 |
+
def test_EA_types(self, engine, data, request):
|
14 |
+
if isinstance(data.dtype, pd.CategoricalDtype):
|
15 |
+
# in parsers.pyx _convert_with_dtype there is special-casing for
|
16 |
+
# Categorical that pre-empts _from_sequence_of_strings
|
17 |
+
pass
|
18 |
+
elif isinstance(data.dtype, pd.core.dtypes.dtypes.NumpyEADtype):
|
19 |
+
# These get unwrapped internally so are treated as numpy dtypes
|
20 |
+
# in the parsers.pyx code
|
21 |
+
pass
|
22 |
+
elif (
|
23 |
+
type(data)._from_sequence_of_strings.__func__
|
24 |
+
is ExtensionArray._from_sequence_of_strings.__func__
|
25 |
+
):
|
26 |
+
# i.e. the EA hasn't overridden _from_sequence_of_strings
|
27 |
+
mark = pytest.mark.xfail(
|
28 |
+
reason="_from_sequence_of_strings not implemented",
|
29 |
+
raises=NotImplementedError,
|
30 |
+
)
|
31 |
+
request.node.add_marker(mark)
|
32 |
+
|
33 |
+
df = pd.DataFrame({"with_dtype": pd.Series(data, dtype=str(data.dtype))})
|
34 |
+
csv_output = df.to_csv(index=False, na_rep=np.nan)
|
35 |
+
result = pd.read_csv(
|
36 |
+
StringIO(csv_output), dtype={"with_dtype": str(data.dtype)}, engine=engine
|
37 |
+
)
|
38 |
+
expected = df
|
39 |
+
tm.assert_frame_equal(result, expected)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/methods.py
ADDED
@@ -0,0 +1,720 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import inspect
|
2 |
+
import operator
|
3 |
+
|
4 |
+
import numpy as np
|
5 |
+
import pytest
|
6 |
+
|
7 |
+
from pandas._typing import Dtype
|
8 |
+
|
9 |
+
from pandas.core.dtypes.common import is_bool_dtype
|
10 |
+
from pandas.core.dtypes.dtypes import NumpyEADtype
|
11 |
+
from pandas.core.dtypes.missing import na_value_for_dtype
|
12 |
+
|
13 |
+
import pandas as pd
|
14 |
+
import pandas._testing as tm
|
15 |
+
from pandas.core.sorting import nargsort
|
16 |
+
|
17 |
+
|
18 |
+
class BaseMethodsTests:
|
19 |
+
"""Various Series and DataFrame methods."""
|
20 |
+
|
21 |
+
def test_hash_pandas_object(self, data):
|
22 |
+
# _hash_pandas_object should return a uint64 ndarray of the same length
|
23 |
+
# as the data
|
24 |
+
from pandas.core.util.hashing import _default_hash_key
|
25 |
+
|
26 |
+
res = data._hash_pandas_object(
|
27 |
+
encoding="utf-8", hash_key=_default_hash_key, categorize=False
|
28 |
+
)
|
29 |
+
assert res.dtype == np.uint64
|
30 |
+
assert res.shape == data.shape
|
31 |
+
|
32 |
+
def test_value_counts_default_dropna(self, data):
|
33 |
+
# make sure we have consistent default dropna kwarg
|
34 |
+
if not hasattr(data, "value_counts"):
|
35 |
+
pytest.skip(f"value_counts is not implemented for {type(data)}")
|
36 |
+
sig = inspect.signature(data.value_counts)
|
37 |
+
kwarg = sig.parameters["dropna"]
|
38 |
+
assert kwarg.default is True
|
39 |
+
|
40 |
+
@pytest.mark.parametrize("dropna", [True, False])
|
41 |
+
def test_value_counts(self, all_data, dropna):
|
42 |
+
all_data = all_data[:10]
|
43 |
+
if dropna:
|
44 |
+
other = all_data[~all_data.isna()]
|
45 |
+
else:
|
46 |
+
other = all_data
|
47 |
+
|
48 |
+
result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
|
49 |
+
expected = pd.Series(other).value_counts(dropna=dropna).sort_index()
|
50 |
+
|
51 |
+
tm.assert_series_equal(result, expected)
|
52 |
+
|
53 |
+
def test_value_counts_with_normalize(self, data):
|
54 |
+
# GH 33172
|
55 |
+
data = data[:10].unique()
|
56 |
+
values = np.array(data[~data.isna()])
|
57 |
+
ser = pd.Series(data, dtype=data.dtype)
|
58 |
+
|
59 |
+
result = ser.value_counts(normalize=True).sort_index()
|
60 |
+
|
61 |
+
if not isinstance(data, pd.Categorical):
|
62 |
+
expected = pd.Series(
|
63 |
+
[1 / len(values)] * len(values), index=result.index, name="proportion"
|
64 |
+
)
|
65 |
+
else:
|
66 |
+
expected = pd.Series(0.0, index=result.index, name="proportion")
|
67 |
+
expected[result > 0] = 1 / len(values)
|
68 |
+
|
69 |
+
if getattr(data.dtype, "storage", "") == "pyarrow" or isinstance(
|
70 |
+
data.dtype, pd.ArrowDtype
|
71 |
+
):
|
72 |
+
# TODO: avoid special-casing
|
73 |
+
expected = expected.astype("double[pyarrow]")
|
74 |
+
elif getattr(data.dtype, "storage", "") == "pyarrow_numpy":
|
75 |
+
# TODO: avoid special-casing
|
76 |
+
expected = expected.astype("float64")
|
77 |
+
elif na_value_for_dtype(data.dtype) is pd.NA:
|
78 |
+
# TODO(GH#44692): avoid special-casing
|
79 |
+
expected = expected.astype("Float64")
|
80 |
+
|
81 |
+
tm.assert_series_equal(result, expected)
|
82 |
+
|
83 |
+
def test_count(self, data_missing):
|
84 |
+
df = pd.DataFrame({"A": data_missing})
|
85 |
+
result = df.count(axis="columns")
|
86 |
+
expected = pd.Series([0, 1])
|
87 |
+
tm.assert_series_equal(result, expected)
|
88 |
+
|
89 |
+
def test_series_count(self, data_missing):
|
90 |
+
# GH#26835
|
91 |
+
ser = pd.Series(data_missing)
|
92 |
+
result = ser.count()
|
93 |
+
expected = 1
|
94 |
+
assert result == expected
|
95 |
+
|
96 |
+
def test_apply_simple_series(self, data):
|
97 |
+
result = pd.Series(data).apply(id)
|
98 |
+
assert isinstance(result, pd.Series)
|
99 |
+
|
100 |
+
@pytest.mark.parametrize("na_action", [None, "ignore"])
|
101 |
+
def test_map(self, data_missing, na_action):
|
102 |
+
result = data_missing.map(lambda x: x, na_action=na_action)
|
103 |
+
expected = data_missing.to_numpy()
|
104 |
+
tm.assert_numpy_array_equal(result, expected)
|
105 |
+
|
106 |
+
def test_argsort(self, data_for_sorting):
|
107 |
+
result = pd.Series(data_for_sorting).argsort()
|
108 |
+
# argsort result gets passed to take, so should be np.intp
|
109 |
+
expected = pd.Series(np.array([2, 0, 1], dtype=np.intp))
|
110 |
+
tm.assert_series_equal(result, expected)
|
111 |
+
|
112 |
+
def test_argsort_missing_array(self, data_missing_for_sorting):
|
113 |
+
result = data_missing_for_sorting.argsort()
|
114 |
+
# argsort result gets passed to take, so should be np.intp
|
115 |
+
expected = np.array([2, 0, 1], dtype=np.intp)
|
116 |
+
tm.assert_numpy_array_equal(result, expected)
|
117 |
+
|
118 |
+
def test_argsort_missing(self, data_missing_for_sorting):
|
119 |
+
msg = "The behavior of Series.argsort in the presence of NA values"
|
120 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
121 |
+
result = pd.Series(data_missing_for_sorting).argsort()
|
122 |
+
expected = pd.Series(np.array([1, -1, 0], dtype=np.intp))
|
123 |
+
tm.assert_series_equal(result, expected)
|
124 |
+
|
125 |
+
def test_argmin_argmax(self, data_for_sorting, data_missing_for_sorting, na_value):
|
126 |
+
# GH 24382
|
127 |
+
is_bool = data_for_sorting.dtype._is_boolean
|
128 |
+
|
129 |
+
exp_argmax = 1
|
130 |
+
exp_argmax_repeated = 3
|
131 |
+
if is_bool:
|
132 |
+
# See data_for_sorting docstring
|
133 |
+
exp_argmax = 0
|
134 |
+
exp_argmax_repeated = 1
|
135 |
+
|
136 |
+
# data_for_sorting -> [B, C, A] with A < B < C
|
137 |
+
assert data_for_sorting.argmax() == exp_argmax
|
138 |
+
assert data_for_sorting.argmin() == 2
|
139 |
+
|
140 |
+
# with repeated values -> first occurrence
|
141 |
+
data = data_for_sorting.take([2, 0, 0, 1, 1, 2])
|
142 |
+
assert data.argmax() == exp_argmax_repeated
|
143 |
+
assert data.argmin() == 0
|
144 |
+
|
145 |
+
# with missing values
|
146 |
+
# data_missing_for_sorting -> [B, NA, A] with A < B and NA missing.
|
147 |
+
assert data_missing_for_sorting.argmax() == 0
|
148 |
+
assert data_missing_for_sorting.argmin() == 2
|
149 |
+
|
150 |
+
@pytest.mark.parametrize("method", ["argmax", "argmin"])
|
151 |
+
def test_argmin_argmax_empty_array(self, method, data):
|
152 |
+
# GH 24382
|
153 |
+
err_msg = "attempt to get"
|
154 |
+
with pytest.raises(ValueError, match=err_msg):
|
155 |
+
getattr(data[:0], method)()
|
156 |
+
|
157 |
+
@pytest.mark.parametrize("method", ["argmax", "argmin"])
|
158 |
+
def test_argmin_argmax_all_na(self, method, data, na_value):
|
159 |
+
# all missing with skipna=True is the same as empty
|
160 |
+
err_msg = "attempt to get"
|
161 |
+
data_na = type(data)._from_sequence([na_value, na_value], dtype=data.dtype)
|
162 |
+
with pytest.raises(ValueError, match=err_msg):
|
163 |
+
getattr(data_na, method)()
|
164 |
+
|
165 |
+
@pytest.mark.parametrize(
|
166 |
+
"op_name, skipna, expected",
|
167 |
+
[
|
168 |
+
("idxmax", True, 0),
|
169 |
+
("idxmin", True, 2),
|
170 |
+
("argmax", True, 0),
|
171 |
+
("argmin", True, 2),
|
172 |
+
("idxmax", False, np.nan),
|
173 |
+
("idxmin", False, np.nan),
|
174 |
+
("argmax", False, -1),
|
175 |
+
("argmin", False, -1),
|
176 |
+
],
|
177 |
+
)
|
178 |
+
def test_argreduce_series(
|
179 |
+
self, data_missing_for_sorting, op_name, skipna, expected
|
180 |
+
):
|
181 |
+
# data_missing_for_sorting -> [B, NA, A] with A < B and NA missing.
|
182 |
+
warn = None
|
183 |
+
msg = "The behavior of Series.argmax/argmin"
|
184 |
+
if op_name.startswith("arg") and expected == -1:
|
185 |
+
warn = FutureWarning
|
186 |
+
if op_name.startswith("idx") and np.isnan(expected):
|
187 |
+
warn = FutureWarning
|
188 |
+
msg = f"The behavior of Series.{op_name}"
|
189 |
+
ser = pd.Series(data_missing_for_sorting)
|
190 |
+
with tm.assert_produces_warning(warn, match=msg):
|
191 |
+
result = getattr(ser, op_name)(skipna=skipna)
|
192 |
+
tm.assert_almost_equal(result, expected)
|
193 |
+
|
194 |
+
def test_argmax_argmin_no_skipna_notimplemented(self, data_missing_for_sorting):
|
195 |
+
# GH#38733
|
196 |
+
data = data_missing_for_sorting
|
197 |
+
|
198 |
+
with pytest.raises(NotImplementedError, match=""):
|
199 |
+
data.argmin(skipna=False)
|
200 |
+
|
201 |
+
with pytest.raises(NotImplementedError, match=""):
|
202 |
+
data.argmax(skipna=False)
|
203 |
+
|
204 |
+
@pytest.mark.parametrize(
|
205 |
+
"na_position, expected",
|
206 |
+
[
|
207 |
+
("last", np.array([2, 0, 1], dtype=np.dtype("intp"))),
|
208 |
+
("first", np.array([1, 2, 0], dtype=np.dtype("intp"))),
|
209 |
+
],
|
210 |
+
)
|
211 |
+
def test_nargsort(self, data_missing_for_sorting, na_position, expected):
|
212 |
+
# GH 25439
|
213 |
+
result = nargsort(data_missing_for_sorting, na_position=na_position)
|
214 |
+
tm.assert_numpy_array_equal(result, expected)
|
215 |
+
|
216 |
+
@pytest.mark.parametrize("ascending", [True, False])
|
217 |
+
def test_sort_values(self, data_for_sorting, ascending, sort_by_key):
|
218 |
+
ser = pd.Series(data_for_sorting)
|
219 |
+
result = ser.sort_values(ascending=ascending, key=sort_by_key)
|
220 |
+
expected = ser.iloc[[2, 0, 1]]
|
221 |
+
if not ascending:
|
222 |
+
# GH 35922. Expect stable sort
|
223 |
+
if ser.nunique() == 2:
|
224 |
+
expected = ser.iloc[[0, 1, 2]]
|
225 |
+
else:
|
226 |
+
expected = ser.iloc[[1, 0, 2]]
|
227 |
+
|
228 |
+
tm.assert_series_equal(result, expected)
|
229 |
+
|
230 |
+
@pytest.mark.parametrize("ascending", [True, False])
|
231 |
+
def test_sort_values_missing(
|
232 |
+
self, data_missing_for_sorting, ascending, sort_by_key
|
233 |
+
):
|
234 |
+
ser = pd.Series(data_missing_for_sorting)
|
235 |
+
result = ser.sort_values(ascending=ascending, key=sort_by_key)
|
236 |
+
if ascending:
|
237 |
+
expected = ser.iloc[[2, 0, 1]]
|
238 |
+
else:
|
239 |
+
expected = ser.iloc[[0, 2, 1]]
|
240 |
+
tm.assert_series_equal(result, expected)
|
241 |
+
|
242 |
+
@pytest.mark.parametrize("ascending", [True, False])
|
243 |
+
def test_sort_values_frame(self, data_for_sorting, ascending):
|
244 |
+
df = pd.DataFrame({"A": [1, 2, 1], "B": data_for_sorting})
|
245 |
+
result = df.sort_values(["A", "B"])
|
246 |
+
expected = pd.DataFrame(
|
247 |
+
{"A": [1, 1, 2], "B": data_for_sorting.take([2, 0, 1])}, index=[2, 0, 1]
|
248 |
+
)
|
249 |
+
tm.assert_frame_equal(result, expected)
|
250 |
+
|
251 |
+
@pytest.mark.parametrize("keep", ["first", "last", False])
|
252 |
+
def test_duplicated(self, data, keep):
|
253 |
+
arr = data.take([0, 1, 0, 1])
|
254 |
+
result = arr.duplicated(keep=keep)
|
255 |
+
if keep == "first":
|
256 |
+
expected = np.array([False, False, True, True])
|
257 |
+
elif keep == "last":
|
258 |
+
expected = np.array([True, True, False, False])
|
259 |
+
else:
|
260 |
+
expected = np.array([True, True, True, True])
|
261 |
+
tm.assert_numpy_array_equal(result, expected)
|
262 |
+
|
263 |
+
@pytest.mark.parametrize("box", [pd.Series, lambda x: x])
|
264 |
+
@pytest.mark.parametrize("method", [lambda x: x.unique(), pd.unique])
|
265 |
+
def test_unique(self, data, box, method):
|
266 |
+
duplicated = box(data._from_sequence([data[0], data[0]], dtype=data.dtype))
|
267 |
+
|
268 |
+
result = method(duplicated)
|
269 |
+
|
270 |
+
assert len(result) == 1
|
271 |
+
assert isinstance(result, type(data))
|
272 |
+
assert result[0] == duplicated[0]
|
273 |
+
|
274 |
+
def test_factorize(self, data_for_grouping):
|
275 |
+
codes, uniques = pd.factorize(data_for_grouping, use_na_sentinel=True)
|
276 |
+
|
277 |
+
is_bool = data_for_grouping.dtype._is_boolean
|
278 |
+
if is_bool:
|
279 |
+
# only 2 unique values
|
280 |
+
expected_codes = np.array([0, 0, -1, -1, 1, 1, 0, 0], dtype=np.intp)
|
281 |
+
expected_uniques = data_for_grouping.take([0, 4])
|
282 |
+
else:
|
283 |
+
expected_codes = np.array([0, 0, -1, -1, 1, 1, 0, 2], dtype=np.intp)
|
284 |
+
expected_uniques = data_for_grouping.take([0, 4, 7])
|
285 |
+
|
286 |
+
tm.assert_numpy_array_equal(codes, expected_codes)
|
287 |
+
tm.assert_extension_array_equal(uniques, expected_uniques)
|
288 |
+
|
289 |
+
def test_factorize_equivalence(self, data_for_grouping):
|
290 |
+
codes_1, uniques_1 = pd.factorize(data_for_grouping, use_na_sentinel=True)
|
291 |
+
codes_2, uniques_2 = data_for_grouping.factorize(use_na_sentinel=True)
|
292 |
+
|
293 |
+
tm.assert_numpy_array_equal(codes_1, codes_2)
|
294 |
+
tm.assert_extension_array_equal(uniques_1, uniques_2)
|
295 |
+
assert len(uniques_1) == len(pd.unique(uniques_1))
|
296 |
+
assert uniques_1.dtype == data_for_grouping.dtype
|
297 |
+
|
298 |
+
def test_factorize_empty(self, data):
|
299 |
+
codes, uniques = pd.factorize(data[:0])
|
300 |
+
expected_codes = np.array([], dtype=np.intp)
|
301 |
+
expected_uniques = type(data)._from_sequence([], dtype=data[:0].dtype)
|
302 |
+
|
303 |
+
tm.assert_numpy_array_equal(codes, expected_codes)
|
304 |
+
tm.assert_extension_array_equal(uniques, expected_uniques)
|
305 |
+
|
306 |
+
def test_fillna_copy_frame(self, data_missing):
|
307 |
+
arr = data_missing.take([1, 1])
|
308 |
+
df = pd.DataFrame({"A": arr})
|
309 |
+
df_orig = df.copy()
|
310 |
+
|
311 |
+
filled_val = df.iloc[0, 0]
|
312 |
+
result = df.fillna(filled_val)
|
313 |
+
|
314 |
+
result.iloc[0, 0] = filled_val
|
315 |
+
|
316 |
+
tm.assert_frame_equal(df, df_orig)
|
317 |
+
|
318 |
+
def test_fillna_copy_series(self, data_missing):
|
319 |
+
arr = data_missing.take([1, 1])
|
320 |
+
ser = pd.Series(arr, copy=False)
|
321 |
+
ser_orig = ser.copy()
|
322 |
+
|
323 |
+
filled_val = ser[0]
|
324 |
+
result = ser.fillna(filled_val)
|
325 |
+
result.iloc[0] = filled_val
|
326 |
+
|
327 |
+
tm.assert_series_equal(ser, ser_orig)
|
328 |
+
|
329 |
+
def test_fillna_length_mismatch(self, data_missing):
|
330 |
+
msg = "Length of 'value' does not match."
|
331 |
+
with pytest.raises(ValueError, match=msg):
|
332 |
+
data_missing.fillna(data_missing.take([1]))
|
333 |
+
|
334 |
+
# Subclasses can override if we expect e.g Sparse[bool], boolean, pyarrow[bool]
|
335 |
+
_combine_le_expected_dtype: Dtype = NumpyEADtype("bool")
|
336 |
+
|
337 |
+
    def test_combine_le(self, data_repeated):
        """Series.combine with a <= comparison matches the pointwise result."""
        # GH 20825
        # Test that combine works when doing a <= (le) comparison
        orig_data1, orig_data2 = data_repeated(2)
        s1 = pd.Series(orig_data1)
        s2 = pd.Series(orig_data2)
        result = s1.combine(s2, lambda x1, x2: x1 <= x2)
        expected = pd.Series(
            pd.array(
                [a <= b for (a, b) in zip(list(orig_data1), list(orig_data2))],
                dtype=self._combine_le_expected_dtype,
            )
        )
        tm.assert_series_equal(result, expected)

        # same check, but against a scalar other
        val = s1.iloc[0]
        result = s1.combine(val, lambda x1, x2: x1 <= x2)
        expected = pd.Series(
            pd.array(
                [a <= val for a in list(orig_data1)],
                dtype=self._combine_le_expected_dtype,
            )
        )
        tm.assert_series_equal(result, expected)
|
361 |
+
|
362 |
+
    def test_combine_add(self, data_repeated):
        """Series.combine with + matches the pointwise result (or both raise)."""
        # GH 20825
        orig_data1, orig_data2 = data_repeated(2)
        s1 = pd.Series(orig_data1)
        s2 = pd.Series(orig_data2)

        # Check if the operation is supported pointwise for our scalars. If not,
        # we will expect Series.combine to raise as well.
        try:
            with np.errstate(over="ignore"):
                expected = pd.Series(
                    orig_data1._from_sequence(
                        [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]
                    )
                )
        except TypeError:
            # If the operation is not supported pointwise for our scalars,
            # then Series.combine should also raise
            with pytest.raises(TypeError):
                s1.combine(s2, lambda x1, x2: x1 + x2)
            return

        result = s1.combine(s2, lambda x1, x2: x1 + x2)
        tm.assert_series_equal(result, expected)

        # same check, but against a scalar other
        val = s1.iloc[0]
        result = s1.combine(val, lambda x1, x2: x1 + x2)
        expected = pd.Series(
            orig_data1._from_sequence([a + val for a in list(orig_data1)])
        )
        tm.assert_series_equal(result, expected)
|
393 |
+
|
394 |
+
    def test_combine_first(self, data):
        """combine_first stitches two overlapping Series back into the original."""
        # https://github.com/pandas-dev/pandas/issues/24147
        a = pd.Series(data[:3])
        b = pd.Series(data[2:5], index=[2, 3, 4])
        result = a.combine_first(b)
        expected = pd.Series(data[:5])
        tm.assert_series_equal(result, expected)
|
401 |
+
|
402 |
+
    @pytest.mark.parametrize("frame", [True, False])
    @pytest.mark.parametrize(
        "periods, indices",
        [(-2, [2, 3, 4, -1, -1]), (0, [0, 1, 2, 3, 4]), (2, [-1, -1, 0, 1, 2])],
    )
    def test_container_shift(self, data, frame, periods, indices):
        """Series/DataFrame.shift matches take-with-fill on the underlying EA."""
        # https://github.com/pandas-dev/pandas/issues/22386
        subset = data[:5]
        data = pd.Series(subset, name="A")
        # `indices` encodes the expected shift as a take with -1 => fill/NA
        expected = pd.Series(subset.take(indices, allow_fill=True), name="A")

        if frame:
            result = data.to_frame(name="A").assign(B=1).shift(periods)
            expected = pd.concat(
                [expected, pd.Series([1] * 5, name="B").shift(periods)], axis=1
            )
            compare = tm.assert_frame_equal
        else:
            result = data.shift(periods)
            compare = tm.assert_series_equal

        compare(result, expected)
|
424 |
+
|
425 |
+
    def test_shift_0_periods(self, data):
        """shift(0) must return a copy, not a view of the same object."""
        # GH#33856 shifting with periods=0 should return a copy, not same obj
        result = data.shift(0)
        assert data[0] != data[1]  # otherwise below is invalid
        data[0] = data[1]
        assert result[0] != result[1]  # i.e. not the same object/view
|
431 |
+
|
432 |
+
    @pytest.mark.parametrize("periods", [1, -2])
    def test_diff(self, data, periods):
        """Series/DataFrame.diff matches op(data, data.shift(periods))."""
        data = data[:5]
        # bool dtypes diff via xor, everything else via subtraction
        if is_bool_dtype(data.dtype):
            op = operator.xor
        else:
            op = operator.sub
        try:
            # does this array implement ops?
            op(data, data)
        except Exception:
            pytest.skip(f"{type(data)} does not support diff")
        s = pd.Series(data)
        result = s.diff(periods)
        expected = pd.Series(op(data, data.shift(periods)))
        tm.assert_series_equal(result, expected)

        df = pd.DataFrame({"A": data, "B": [1.0] * 5})
        result = df.diff(periods)
        if periods == 1:
            b = [np.nan, 0, 0, 0, 0]
        else:
            b = [0, 0, 0, np.nan, np.nan]
        expected = pd.DataFrame({"A": expected, "B": b})
        tm.assert_frame_equal(result, expected)
|
457 |
+
|
458 |
+
    @pytest.mark.parametrize(
        "periods, indices",
        [[-4, [-1, -1]], [-1, [1, -1]], [0, [0, 1]], [1, [-1, 0]], [4, [-1, -1]]],
    )
    def test_shift_non_empty_array(self, data, periods, indices):
        """EA.shift on a 2-element array matches take-with-fill semantics."""
        # https://github.com/pandas-dev/pandas/issues/23911
        subset = data[:2]
        result = subset.shift(periods)
        expected = subset.take(indices, allow_fill=True)
        tm.assert_extension_array_equal(result, expected)
|
468 |
+
|
469 |
+
    @pytest.mark.parametrize("periods", [-4, -1, 0, 1, 4])
    def test_shift_empty_array(self, data, periods):
        """Shifting an empty array by any amount stays empty."""
        # https://github.com/pandas-dev/pandas/issues/23911
        empty = data[:0]
        result = empty.shift(periods)
        expected = empty
        tm.assert_extension_array_equal(result, expected)
|
476 |
+
|
477 |
+
    def test_shift_zero_copies(self, data):
        """shift never returns the identical object, even for no-op shifts."""
        # GH#31502
        result = data.shift(0)
        assert result is not data

        result = data[:0].shift(2)
        assert result is not data
|
484 |
+
|
485 |
+
    def test_shift_fill_value(self, data):
        """shift with an explicit fill_value uses it instead of NA."""
        arr = data[:4]
        fill_value = data[0]
        result = arr.shift(1, fill_value=fill_value)
        # fill_value == data[0], so the expected result is a plain take
        expected = data.take([0, 0, 1, 2])
        tm.assert_extension_array_equal(result, expected)

        result = arr.shift(-2, fill_value=fill_value)
        expected = data.take([2, 3, 0, 0])
        tm.assert_extension_array_equal(result, expected)
|
495 |
+
|
496 |
+
    def test_not_hashable(self, data):
        """Mutable extension arrays must not be hashable."""
        # We are in general mutable, so not hashable
        with pytest.raises(TypeError, match="unhashable type"):
            hash(data)
|
500 |
+
|
501 |
+
    def test_hash_pandas_object_works(self, data, as_frame):
        """hash_pandas_object runs and is deterministic for EA-backed objects."""
        # https://github.com/pandas-dev/pandas/issues/23066
        data = pd.Series(data)
        if as_frame:
            data = data.to_frame()
        a = pd.util.hash_pandas_object(data)
        b = pd.util.hash_pandas_object(data)
        tm.assert_equal(a, b)
|
509 |
+
|
510 |
+
def test_searchsorted(self, data_for_sorting, as_series):
|
511 |
+
if data_for_sorting.dtype._is_boolean:
|
512 |
+
return self._test_searchsorted_bool_dtypes(data_for_sorting, as_series)
|
513 |
+
|
514 |
+
b, c, a = data_for_sorting
|
515 |
+
arr = data_for_sorting.take([2, 0, 1]) # to get [a, b, c]
|
516 |
+
|
517 |
+
if as_series:
|
518 |
+
arr = pd.Series(arr)
|
519 |
+
assert arr.searchsorted(a) == 0
|
520 |
+
assert arr.searchsorted(a, side="right") == 1
|
521 |
+
|
522 |
+
assert arr.searchsorted(b) == 1
|
523 |
+
assert arr.searchsorted(b, side="right") == 2
|
524 |
+
|
525 |
+
assert arr.searchsorted(c) == 2
|
526 |
+
assert arr.searchsorted(c, side="right") == 3
|
527 |
+
|
528 |
+
result = arr.searchsorted(arr.take([0, 2]))
|
529 |
+
expected = np.array([0, 2], dtype=np.intp)
|
530 |
+
|
531 |
+
tm.assert_numpy_array_equal(result, expected)
|
532 |
+
|
533 |
+
# sorter
|
534 |
+
sorter = np.array([1, 2, 0])
|
535 |
+
assert data_for_sorting.searchsorted(a, sorter=sorter) == 0
|
536 |
+
|
537 |
+
    def _test_searchsorted_bool_dtypes(self, data_for_sorting, as_series):
        """searchsorted checks for 2-valued (boolean-like) dtypes."""
        # We call this from test_searchsorted in cases where we have a
        # boolean-like dtype. The non-bool test assumes we have more than 2
        # unique values.
        dtype = data_for_sorting.dtype
        data_for_sorting = pd.array([True, False], dtype=dtype)
        b, a = data_for_sorting
        arr = type(data_for_sorting)._from_sequence([a, b])

        if as_series:
            arr = pd.Series(arr)
        assert arr.searchsorted(a) == 0
        assert arr.searchsorted(a, side="right") == 1

        assert arr.searchsorted(b) == 1
        assert arr.searchsorted(b, side="right") == 2

        # array of search values
        result = arr.searchsorted(arr.take([0, 1]))
        expected = np.array([0, 1], dtype=np.intp)

        tm.assert_numpy_array_equal(result, expected)

        # sorter
        sorter = np.array([1, 0])
        assert data_for_sorting.searchsorted(a, sorter=sorter) == 0
|
562 |
+
|
563 |
+
    def test_where_series(self, data, na_value, as_frame):
        """where/mask on EA-backed Series/DataFrame, with scalar-NA and array other."""
        assert data[0] != data[1]
        cls = type(data)
        a, b = data[:2]

        orig = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))
        ser = orig.copy()
        cond = np.array([True, True, False, False])

        if as_frame:
            ser = ser.to_frame(name="a")
            cond = cond.reshape(-1, 1)

        # where with no `other` fills the False positions with NA
        result = ser.where(cond)
        expected = pd.Series(
            cls._from_sequence([a, a, na_value, na_value], dtype=data.dtype)
        )

        if as_frame:
            expected = expected.to_frame(name="a")
        tm.assert_equal(result, expected)

        # mask(~cond) in place must agree with where(cond)
        ser.mask(~cond, inplace=True)
        tm.assert_equal(ser, expected)

        # array other
        ser = orig.copy()
        if as_frame:
            ser = ser.to_frame(name="a")
        cond = np.array([True, False, True, True])
        other = cls._from_sequence([a, b, a, b], dtype=data.dtype)
        if as_frame:
            other = pd.DataFrame({"a": other})
            cond = pd.DataFrame({"a": cond})
        result = ser.where(cond, other)
        expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype))
        if as_frame:
            expected = expected.to_frame(name="a")
        tm.assert_equal(result, expected)

        ser.mask(~cond, other, inplace=True)
        tm.assert_equal(ser, expected)
|
605 |
+
|
606 |
+
    @pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]])
    def test_repeat(self, data, repeats, as_series, use_numpy):
        """repeat (method and np.repeat) matches an elementwise expansion."""
        arr = type(data)._from_sequence(data[:3], dtype=data.dtype)
        if as_series:
            arr = pd.Series(arr)

        result = np.repeat(arr, repeats) if use_numpy else arr.repeat(repeats)

        # normalize a scalar repeat count to a per-element list
        repeats = [repeats] * 3 if isinstance(repeats, int) else repeats
        expected = [x for x, n in zip(arr, repeats) for _ in range(n)]
        expected = type(data)._from_sequence(expected, dtype=data.dtype)
        if as_series:
            expected = pd.Series(expected, index=arr.index.repeat(repeats))

        tm.assert_equal(result, expected)
|
621 |
+
|
622 |
+
    @pytest.mark.parametrize(
        "repeats, kwargs, error, msg",
        [
            (2, {"axis": 1}, ValueError, "axis"),
            (-1, {}, ValueError, "negative"),
            ([1, 2], {}, ValueError, "shape"),
            (2, {"foo": "bar"}, TypeError, "'foo'"),
        ],
    )
    def test_repeat_raises(self, data, repeats, kwargs, error, msg, use_numpy):
        """Invalid repeat arguments raise the documented error types."""
        with pytest.raises(error, match=msg):
            if use_numpy:
                np.repeat(data, repeats, **kwargs)
            else:
                data.repeat(repeats, **kwargs)
|
637 |
+
|
638 |
+
def test_delete(self, data):
|
639 |
+
result = data.delete(0)
|
640 |
+
expected = data[1:]
|
641 |
+
tm.assert_extension_array_equal(result, expected)
|
642 |
+
|
643 |
+
result = data.delete([1, 3])
|
644 |
+
expected = data._concat_same_type([data[[0]], data[[2]], data[4:]])
|
645 |
+
tm.assert_extension_array_equal(result, expected)
|
646 |
+
|
647 |
+
    def test_insert(self, data):
        """EA.insert places an item at positive, negative, and middle positions."""
        # insert at the beginning
        result = data[1:].insert(0, data[0])
        tm.assert_extension_array_equal(result, data)

        # same insertion, addressed with a negative location
        result = data[1:].insert(-len(data[1:]), data[0])
        tm.assert_extension_array_equal(result, data)

        # insert at the middle
        result = data[:-1].insert(4, data[-1])

        # build the expected ordering as a take: last element moved to slot 4
        taker = np.arange(len(data))
        taker[5:] = taker[4:-1]
        taker[4] = len(data) - 1
        expected = data.take(taker)
        tm.assert_extension_array_equal(result, expected)
|
663 |
+
|
664 |
+
    def test_insert_invalid(self, data, invalid_scalar):
        """Inserting a scalar of an incompatible type raises, at any position."""
        item = invalid_scalar

        with pytest.raises((TypeError, ValueError)):
            data.insert(0, item)

        with pytest.raises((TypeError, ValueError)):
            data.insert(4, item)

        with pytest.raises((TypeError, ValueError)):
            data.insert(len(data) - 1, item)
|
675 |
+
|
676 |
+
    def test_insert_invalid_loc(self, data):
        """Out-of-bounds or non-integer insert locations raise like np.insert."""
        ub = len(data)

        with pytest.raises(IndexError):
            data.insert(ub + 1, data[0])

        with pytest.raises(IndexError):
            data.insert(-ub - 1, data[0])

        with pytest.raises(TypeError):
            # we expect TypeError here instead of IndexError to match np.insert
            data.insert(1.5, data[0])
|
688 |
+
|
689 |
+
    @pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])
    def test_equals(self, data, na_value, as_series, box):
        """equals returns real Python bools with the documented semantics."""
        data2 = type(data)._from_sequence([data[0]] * len(data), dtype=data.dtype)
        data_na = type(data)._from_sequence([na_value] * len(data), dtype=data.dtype)

        data = tm.box_expected(data, box, transpose=False)
        data2 = tm.box_expected(data2, box, transpose=False)
        data_na = tm.box_expected(data_na, box, transpose=False)

        # we are asserting with `is True/False` explicitly, to test that the
        # result is an actual Python bool, and not something "truthy"

        assert data.equals(data) is True
        assert data.equals(data.copy()) is True

        # unequal other data
        assert data.equals(data2) is False
        assert data.equals(data_na) is False

        # different length
        assert data[:2].equals(data[:3]) is False

        # empty are equal
        assert data[:0].equals(data[:0]) is True

        # other types
        assert data.equals(None) is False
        assert data[[0]].equals(data[0]) is False
|
717 |
+
|
718 |
+
    def test_equals_same_data_different_object(self, data):
        """Two distinct Series wrapping the same data compare equal."""
        # https://github.com/pandas-dev/pandas/issues/34660
        assert pd.Series(data).equals(pd.Series(data))
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/missing.py
ADDED
@@ -0,0 +1,188 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
import pandas as pd
|
5 |
+
import pandas._testing as tm
|
6 |
+
|
7 |
+
|
8 |
+
class BaseMissingTests:
    """Shared tests for NA handling (isna/dropna/fillna) on ExtensionArrays.

    The ``data_missing`` fixture is a length-2 array of ``[NA, valid]``;
    the ``data`` fixture is a longer array with no particular NA layout.
    """

    def test_isna(self, data_missing):
        # data_missing is [NA, valid], so the expected mask is fixed
        expected = np.array([True, False])

        result = pd.isna(data_missing)
        tm.assert_numpy_array_equal(result, expected)

        result = pd.Series(data_missing).isna()
        expected = pd.Series(expected)
        tm.assert_series_equal(result, expected)

        # GH 21189
        result = pd.Series(data_missing).drop([0, 1]).isna()
        expected = pd.Series([], dtype=bool)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("na_func", ["isna", "notna"])
    def test_isna_returns_copy(self, data_missing, na_func):
        # mutating the returned mask must not mutate the source series
        result = pd.Series(data_missing)
        expected = result.copy()
        mask = getattr(result, na_func)()
        if isinstance(mask.dtype, pd.SparseDtype):
            # sparse masks are not writable in place; densify first
            mask = np.array(mask)

        mask[:] = True
        tm.assert_series_equal(result, expected)

    def test_dropna_array(self, data_missing):
        result = data_missing.dropna()
        expected = data_missing[[1]]
        tm.assert_extension_array_equal(result, expected)

    def test_dropna_series(self, data_missing):
        ser = pd.Series(data_missing)
        result = ser.dropna()
        expected = ser.iloc[[1]]
        tm.assert_series_equal(result, expected)

    def test_dropna_frame(self, data_missing):
        df = pd.DataFrame({"A": data_missing}, columns=pd.Index(["A"], dtype=object))

        # defaults
        result = df.dropna()
        expected = df.iloc[[1]]
        tm.assert_frame_equal(result, expected)

        # axis = 1
        result = df.dropna(axis="columns")
        expected = pd.DataFrame(index=pd.RangeIndex(2), columns=pd.Index([]))
        tm.assert_frame_equal(result, expected)

        # multiple
        df = pd.DataFrame({"A": data_missing, "B": [1, np.nan]})
        result = df.dropna()
        expected = df.iloc[:0]
        tm.assert_frame_equal(result, expected)

    def test_fillna_scalar(self, data_missing):
        # NOTE(review): result and expected are computed identically, so this
        # only checks that fillna runs and is deterministic — presumably
        # intentional as a smoke test, but worth confirming upstream.
        valid = data_missing[1]
        result = data_missing.fillna(valid)
        expected = data_missing.fillna(valid)
        tm.assert_extension_array_equal(result, expected)

    @pytest.mark.filterwarnings(
        "ignore:Series.fillna with 'method' is deprecated:FutureWarning"
    )
    def test_fillna_limit_pad(self, data_missing):
        arr = data_missing.take([1, 0, 0, 0, 1])
        result = pd.Series(arr).ffill(limit=2)
        # only the first two NAs after a valid value are forward-filled
        expected = pd.Series(data_missing.take([1, 1, 1, 0, 1]))
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "limit_area, input_ilocs, expected_ilocs",
        [
            ("outside", [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]),
            ("outside", [1, 0, 1, 0, 1], [1, 0, 1, 0, 1]),
            ("outside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 1]),
            ("outside", [0, 1, 0, 1, 0], [0, 1, 0, 1, 1]),
            ("inside", [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]),
            ("inside", [1, 0, 1, 0, 1], [1, 1, 1, 1, 1]),
            ("inside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 0]),
            ("inside", [0, 1, 0, 1, 0], [0, 1, 1, 1, 0]),
        ],
    )
    def test_ffill_limit_area(
        self, data_missing, limit_area, input_ilocs, expected_ilocs
    ):
        # GH#56616
        arr = data_missing.take(input_ilocs)
        result = pd.Series(arr).ffill(limit_area=limit_area)
        expected = pd.Series(data_missing.take(expected_ilocs))
        tm.assert_series_equal(result, expected)

    @pytest.mark.filterwarnings(
        "ignore:Series.fillna with 'method' is deprecated:FutureWarning"
    )
    def test_fillna_limit_backfill(self, data_missing):
        arr = data_missing.take([1, 0, 0, 0, 1])
        result = pd.Series(arr).fillna(method="backfill", limit=2)
        expected = pd.Series(data_missing.take([1, 0, 1, 1, 1]))
        tm.assert_series_equal(result, expected)

    def test_fillna_no_op_returns_copy(self, data):
        # with no NAs present, fillna must still return a (equal) copy
        data = data[~data.isna()]

        valid = data[0]
        result = data.fillna(valid)
        assert result is not data
        tm.assert_extension_array_equal(result, data)

        result = data._pad_or_backfill(method="backfill")
        assert result is not data
        tm.assert_extension_array_equal(result, data)

    def test_fillna_series(self, data_missing):
        fill_value = data_missing[1]
        ser = pd.Series(data_missing)

        result = ser.fillna(fill_value)
        expected = pd.Series(
            data_missing._from_sequence(
                [fill_value, fill_value], dtype=data_missing.dtype
            )
        )
        tm.assert_series_equal(result, expected)

        # Fill with a series
        result = ser.fillna(expected)
        tm.assert_series_equal(result, expected)

        # Fill with a series not affecting the missing values
        result = ser.fillna(ser)
        tm.assert_series_equal(result, ser)

    def test_fillna_series_method(self, data_missing, fillna_method):
        fill_value = data_missing[1]

        # reverse so the valid value precedes the NA when forward-filling
        if fillna_method == "ffill":
            data_missing = data_missing[::-1]

        result = getattr(pd.Series(data_missing), fillna_method)()
        expected = pd.Series(
            data_missing._from_sequence(
                [fill_value, fill_value], dtype=data_missing.dtype
            )
        )

        tm.assert_series_equal(result, expected)

    def test_fillna_frame(self, data_missing):
        fill_value = data_missing[1]

        result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value)

        expected = pd.DataFrame(
            {
                "A": data_missing._from_sequence(
                    [fill_value, fill_value], dtype=data_missing.dtype
                ),
                "B": [1, 2],
            }
        )

        tm.assert_frame_equal(result, expected)

    def test_fillna_fill_other(self, data):
        # dict-valued fillna targets only column "B"; "A" is untouched
        result = pd.DataFrame({"A": data, "B": [np.nan] * len(data)}).fillna({"B": 0.0})

        expected = pd.DataFrame({"A": data, "B": [0.0] * len(result)})

        tm.assert_frame_equal(result, expected)

    def test_use_inf_as_na_no_effect(self, data_missing):
        # the deprecated option must not change isna for EA dtypes
        ser = pd.Series(data_missing)
        expected = ser.isna()
        msg = "use_inf_as_na option is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            with pd.option_context("mode.use_inf_as_na", True):
                result = ser.isna()
        tm.assert_series_equal(result, expected)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/ops.py
ADDED
@@ -0,0 +1,299 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from typing import final
|
4 |
+
|
5 |
+
import numpy as np
|
6 |
+
import pytest
|
7 |
+
|
8 |
+
from pandas._config import using_pyarrow_string_dtype
|
9 |
+
|
10 |
+
from pandas.core.dtypes.common import is_string_dtype
|
11 |
+
|
12 |
+
import pandas as pd
|
13 |
+
import pandas._testing as tm
|
14 |
+
from pandas.core import ops
|
15 |
+
|
16 |
+
|
17 |
+
class BaseOpsUtil:
    """Shared machinery for checking EA arithmetic/comparison ops.

    Subclasses declare which operations are expected to raise via the
    ``*_exc`` class attributes (``None`` means the op is supported).
    """

    series_scalar_exc: type[Exception] | None = TypeError
    frame_scalar_exc: type[Exception] | None = TypeError
    series_array_exc: type[Exception] | None = TypeError
    divmod_exc: type[Exception] | None = TypeError

    def _get_expected_exception(
        self, op_name: str, obj, other
    ) -> type[Exception] | None:
        # Find the Exception, if any we expect to raise calling
        # obj.__op_name__(other)

        # The self.obj_bar_exc pattern isn't great in part because it can depend
        # on op_name or dtypes, but we use it here for backward-compatibility.
        if op_name in ["__divmod__", "__rdivmod__"]:
            result = self.divmod_exc
        elif isinstance(obj, pd.Series) and isinstance(other, pd.Series):
            result = self.series_array_exc
        elif isinstance(obj, pd.Series):
            result = self.series_scalar_exc
        else:
            result = self.frame_scalar_exc

        if using_pyarrow_string_dtype() and result is not None:
            import pyarrow as pa

            # pyarrow-backed strings may surface different exception types
            result = (  # type: ignore[assignment]
                result,
                pa.lib.ArrowNotImplementedError,
                NotImplementedError,
            )
        return result

    def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result):
        # In _check_op we check that the result of a pointwise operation
        # (found via _combine) matches the result of the vectorized
        # operation obj.__op_name__(other).
        # In some cases pandas dtype inference on the scalar result may not
        # give a matching dtype even if both operations are behaving "correctly".
        # In these cases, do extra required casting here.
        return pointwise_result

    def get_op_from_name(self, op_name: str):
        return tm.get_op_from_name(op_name)

    # Subclasses are not expected to need to override check_opname, _check_op,
    # _check_divmod_op, or _combine.
    # Ideally any relevant overriding can be done in _cast_pointwise_result,
    # get_op_from_name, and the specification of `exc`. If you find a use
    # case that still requires overriding _check_op or _combine, please let
    # us know at github.com/pandas-dev/pandas/issues
    @final
    def check_opname(self, ser: pd.Series, op_name: str, other):
        exc = self._get_expected_exception(op_name, ser, other)
        op = self.get_op_from_name(op_name)

        self._check_op(ser, op, other, op_name, exc)

    # see comment on check_opname
    @final
    def _combine(self, obj, other, op):
        if isinstance(obj, pd.DataFrame):
            if len(obj.columns) != 1:
                raise NotImplementedError
            expected = obj.iloc[:, 0].combine(other, op).to_frame()
        else:
            expected = obj.combine(other, op)
        return expected

    # see comment on check_opname
    @final
    def _check_op(
        self, ser: pd.Series, op, other, op_name: str, exc=NotImplementedError
    ):
        # Check that the Series/DataFrame arithmetic/comparison method matches
        # the pointwise result from _combine.

        if exc is None:
            result = op(ser, other)
            expected = self._combine(ser, other, op)
            expected = self._cast_pointwise_result(op_name, ser, other, expected)
            assert isinstance(result, type(ser))
            tm.assert_equal(result, expected)
        else:
            with pytest.raises(exc):
                op(ser, other)

    # see comment on check_opname
    @final
    def _check_divmod_op(self, ser: pd.Series, op, other):
        # check that divmod behavior matches behavior of floordiv+mod
        if op is divmod:
            exc = self._get_expected_exception("__divmod__", ser, other)
        else:
            exc = self._get_expected_exception("__rdivmod__", ser, other)
        if exc is None:
            result_div, result_mod = op(ser, other)
            if op is divmod:
                expected_div, expected_mod = ser // other, ser % other
            else:
                expected_div, expected_mod = other // ser, other % ser
            tm.assert_series_equal(result_div, expected_div)
            tm.assert_series_equal(result_mod, expected_mod)
        else:
            with pytest.raises(exc):
                divmod(ser, other)
|
123 |
+
|
124 |
+
|
125 |
+
class BaseArithmeticOpsTests(BaseOpsUtil):
|
126 |
+
"""
|
127 |
+
Various Series and DataFrame arithmetic ops methods.
|
128 |
+
|
129 |
+
Subclasses supporting various ops should set the class variables
|
130 |
+
to indicate that they support ops of that kind
|
131 |
+
|
132 |
+
* series_scalar_exc = TypeError
|
133 |
+
* frame_scalar_exc = TypeError
|
134 |
+
* series_array_exc = TypeError
|
135 |
+
* divmod_exc = TypeError
|
136 |
+
"""
|
137 |
+
|
138 |
+
series_scalar_exc: type[Exception] | None = TypeError
|
139 |
+
frame_scalar_exc: type[Exception] | None = TypeError
|
140 |
+
series_array_exc: type[Exception] | None = TypeError
|
141 |
+
divmod_exc: type[Exception] | None = TypeError
|
142 |
+
|
143 |
+
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
|
144 |
+
# series & scalar
|
145 |
+
if all_arithmetic_operators == "__rmod__" and is_string_dtype(data.dtype):
|
146 |
+
pytest.skip("Skip testing Python string formatting")
|
147 |
+
|
148 |
+
op_name = all_arithmetic_operators
|
149 |
+
ser = pd.Series(data)
|
150 |
+
self.check_opname(ser, op_name, ser.iloc[0])
|
151 |
+
|
152 |
+
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators):
|
153 |
+
# frame & scalar
|
154 |
+
if all_arithmetic_operators == "__rmod__" and is_string_dtype(data.dtype):
|
155 |
+
pytest.skip("Skip testing Python string formatting")
|
156 |
+
|
157 |
+
op_name = all_arithmetic_operators
|
158 |
+
df = pd.DataFrame({"A": data})
|
159 |
+
self.check_opname(df, op_name, data[0])
|
160 |
+
|
161 |
+
def test_arith_series_with_array(self, data, all_arithmetic_operators):
|
162 |
+
# ndarray & other series
|
163 |
+
op_name = all_arithmetic_operators
|
164 |
+
ser = pd.Series(data)
|
165 |
+
self.check_opname(ser, op_name, pd.Series([ser.iloc[0]] * len(ser)))
|
166 |
+
|
167 |
+
def test_divmod(self, data):
|
168 |
+
ser = pd.Series(data)
|
169 |
+
self._check_divmod_op(ser, divmod, 1)
|
170 |
+
self._check_divmod_op(1, ops.rdivmod, ser)
|
171 |
+
|
172 |
+
def test_divmod_series_array(self, data, data_for_twos):
|
173 |
+
ser = pd.Series(data)
|
174 |
+
self._check_divmod_op(ser, divmod, data)
|
175 |
+
|
176 |
+
other = data_for_twos
|
177 |
+
self._check_divmod_op(other, ops.rdivmod, ser)
|
178 |
+
|
179 |
+
other = pd.Series(other)
|
180 |
+
self._check_divmod_op(other, ops.rdivmod, ser)
|
181 |
+
|
182 |
+
def test_add_series_with_extension_array(self, data):
|
183 |
+
# Check adding an ExtensionArray to a Series of the same dtype matches
|
184 |
+
# the behavior of adding the arrays directly and then wrapping in a
|
185 |
+
# Series.
|
186 |
+
|
187 |
+
ser = pd.Series(data)
|
188 |
+
|
189 |
+
exc = self._get_expected_exception("__add__", ser, data)
|
190 |
+
if exc is not None:
|
191 |
+
with pytest.raises(exc):
|
192 |
+
ser + data
|
193 |
+
return
|
194 |
+
|
195 |
+
result = ser + data
|
196 |
+
expected = pd.Series(data + data)
|
197 |
+
tm.assert_series_equal(result, expected)
|
198 |
+
|
199 |
+
    @pytest.mark.parametrize("box", [pd.Series, pd.DataFrame, pd.Index])
    @pytest.mark.parametrize(
        "op_name",
        [
            x
            for x in tm.arithmetic_dunder_methods + tm.comparison_dunder_methods
            if not x.startswith("__r")
        ],
    )
    def test_direct_arith_with_ndframe_returns_not_implemented(
        self, data, box, op_name
    ):
        # EAs should return NotImplemented for ops with Series/DataFrame/Index
        # Pandas takes care of unboxing the series and calling the EA's op.
        other = box(data)

        # Only forward (non-``__r*``) dunders are parametrized; reflected ops
        # never reach the EA because the NDFrame handles them first.
        if hasattr(data, op_name):
            result = getattr(data, op_name)(other)
            assert result is NotImplemented
|
218 |
+
|
219 |
+
|
220 |
+
class BaseComparisonOpsTests(BaseOpsUtil):
    """Various Series and DataFrame comparison ops methods."""

    def _compare_other(self, ser: pd.Series, data, op, other):
        # Compare ``op(ser, other)`` against the pointwise Series.combine
        # result, after dtype-casting via _cast_pointwise_result.
        if op.__name__ in ["eq", "ne"]:
            # comparison should match point-wise comparisons
            result = op(ser, other)
            expected = ser.combine(other, op)
            expected = self._cast_pointwise_result(op.__name__, ser, other, expected)
            tm.assert_series_equal(result, expected)

        else:
            # Ordering comparisons may legitimately raise (e.g. unordered
            # dtypes); capture the exception rather than asserting success.
            exc = None
            try:
                result = op(ser, other)
            except Exception as err:
                exc = err

            if exc is None:
                # Didn't error, then should match pointwise behavior
                expected = ser.combine(other, op)
                expected = self._cast_pointwise_result(
                    op.__name__, ser, other, expected
                )
                tm.assert_series_equal(result, expected)
            else:
                # If the vectorized op raised, the pointwise op should raise
                # the same exception type.
                with pytest.raises(type(exc)):
                    ser.combine(other, op)

    def test_compare_scalar(self, data, comparison_op):
        # comparison against a scalar (0)
        ser = pd.Series(data)
        self._compare_other(ser, data, comparison_op, 0)

    def test_compare_array(self, data, comparison_op):
        # comparison against a same-dtype, same-length Series
        ser = pd.Series(data)
        other = pd.Series([data[0]] * len(data), dtype=data.dtype)
        self._compare_other(ser, data, comparison_op, other)
|
257 |
+
|
258 |
+
|
259 |
+
class BaseUnaryOpsTests(BaseOpsUtil):
    """Unary op tests: ~ (invert) and the +/-/abs dunder-vs-ufunc equivalence."""

    def test_invert(self, data):
        ser = pd.Series(data, name="name")
        try:
            # 10 is an arbitrary choice here, just avoid iterating over
            # the whole array to trim test runtime
            [~x for x in data[:10]]
        except TypeError:
            # scalars don't support invert -> we don't expect the vectorized
            # operation to succeed
            with pytest.raises(TypeError):
                ~ser
            with pytest.raises(TypeError):
                ~data
        else:
            # Note we do not reuse the pointwise result to construct expected
            # because python semantics for negating bools are weird see GH#54569
            result = ~ser
            expected = pd.Series(~data, name="name")
            tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("ufunc", [np.positive, np.negative, np.abs])
    def test_unary_ufunc_dunder_equivalence(self, data, ufunc):
        # the dunder __pos__ works if and only if np.positive works,
        # same for __neg__/np.negative and __abs__/np.abs
        attr = {np.positive: "__pos__", np.negative: "__neg__", np.abs: "__abs__"}[
            ufunc
        ]

        exc = None
        try:
            result = getattr(data, attr)()
        except Exception as err:
            exc = err

            # if __pos__ raised, then so should the ufunc
            with pytest.raises((type(exc), TypeError)):
                ufunc(data)
        else:
            # dunder succeeded -> the ufunc must succeed and agree exactly
            alt = ufunc(data)
            tm.assert_extension_array_equal(result, alt)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/printing.py
ADDED
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import io
|
2 |
+
|
3 |
+
import pytest
|
4 |
+
|
5 |
+
import pandas as pd
|
6 |
+
|
7 |
+
|
8 |
+
class BasePrintingTests:
    """Tests checking the formatting of your EA when printed."""

    @pytest.mark.parametrize("size", ["big", "small"])
    def test_array_repr(self, data, size):
        # "small" reprs show everything; "big" reprs must truncate with "..."
        if size == "small":
            data = data[:5]
        else:
            data = type(data)._concat_same_type([data] * 5)

        result = repr(data)
        # repr must mention the class name, the length, and the dtype
        assert type(data).__name__ in result
        assert f"Length: {len(data)}" in result
        assert str(data.dtype) in result
        if size == "big":
            assert "..." in result

    def test_array_repr_unicode(self, data):
        # str() of the array must be a plain (unicode) str
        result = str(data)
        assert isinstance(result, str)

    def test_series_repr(self, data):
        # Series repr must include the dtype name
        ser = pd.Series(data)
        assert data.dtype.name in repr(ser)

    def test_dataframe_repr(self, data):
        # smoke test: DataFrame repr must not raise
        df = pd.DataFrame({"A": data})
        repr(df)

    def test_dtype_name_in_info(self, data):
        # DataFrame.info() output must include the dtype name
        buf = io.StringIO()
        pd.DataFrame({"A": data}).info(buf=buf)
        result = buf.getvalue()
        assert data.dtype.name in result
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/reduce.py
ADDED
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import final
|
2 |
+
|
3 |
+
import pytest
|
4 |
+
|
5 |
+
import pandas as pd
|
6 |
+
import pandas._testing as tm
|
7 |
+
from pandas.api.types import is_numeric_dtype
|
8 |
+
|
9 |
+
|
10 |
+
class BaseReduceTests:
    """
    Reduction specific tests. Generally these only
    make sense for numeric/boolean operations.
    """

    def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
        # Specify if we expect this reduction to succeed.
        return False

    def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
        # We perform the same operation on the np.float64 data and check
        # that the results match. Override if you need to cast to something
        # other than float64.
        res_op = getattr(ser, op_name)

        try:
            alt = ser.astype("float64")
        except (TypeError, ValueError):
            # e.g. Interval can't cast (TypeError), StringArray can't cast
            # (ValueError), so let's cast to object and do
            # the reduction pointwise
            alt = ser.astype(object)

        exp_op = getattr(alt, op_name)
        # ``count`` takes no skipna argument; every other reduction does.
        if op_name == "count":
            result = res_op()
            expected = exp_op()
        else:
            result = res_op(skipna=skipna)
            expected = exp_op(skipna=skipna)
        tm.assert_almost_equal(result, expected)

    def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool):
        # Find the expected dtype when the given reduction is done on a DataFrame
        # column with this array. The default assumes float64-like behavior,
        # i.e. retains the dtype.
        return arr.dtype

    # We anticipate that authors should not need to override check_reduce_frame,
    # but should be able to do any necessary overriding in
    # _get_expected_reduction_dtype. If you have a use case where this
    # does not hold, please let us know at github.com/pandas-dev/pandas/issues.
    @final
    def check_reduce_frame(self, ser: pd.Series, op_name: str, skipna: bool):
        # Check that the 2D reduction done in a DataFrame reduction "looks like"
        # a wrapped version of the 1D reduction done by Series.
        arr = ser.array
        df = pd.DataFrame({"a": arr})

        # var/std must be passed ddof=1 to match the Series default.
        kwargs = {"ddof": 1} if op_name in ["var", "std"] else {}

        cmp_dtype = self._get_expected_reduction_dtype(arr, op_name, skipna)

        # The DataFrame method just calls arr._reduce with keepdims=True,
        # so this first check is perfunctory.
        result1 = arr._reduce(op_name, skipna=skipna, keepdims=True, **kwargs)
        result2 = getattr(df, op_name)(skipna=skipna, **kwargs).array
        tm.assert_extension_array_equal(result1, result2)

        # Check that the 2D reduction looks like a wrapped version of the
        # 1D reduction
        if not skipna and ser.isna().any():
            expected = pd.array([pd.NA], dtype=cmp_dtype)
        else:
            exp_value = getattr(ser.dropna(), op_name)()
            expected = pd.array([exp_value], dtype=cmp_dtype)

        tm.assert_extension_array_equal(result1, expected)

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
        op_name = all_boolean_reductions
        ser = pd.Series(data)

        if not self._supports_reduction(ser, op_name):
            # TODO: the message being checked here isn't actually checking anything
            msg = (
                "[Cc]annot perform|Categorical is not ordered for operation|"
                "does not support reduction|"
            )

            with pytest.raises(TypeError, match=msg):
                getattr(ser, op_name)(skipna=skipna)

        else:
            self.check_reduce(ser, op_name, skipna)

    @pytest.mark.filterwarnings("ignore::RuntimeWarning")
    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna):
        op_name = all_numeric_reductions
        ser = pd.Series(data)

        if not self._supports_reduction(ser, op_name):
            # TODO: the message being checked here isn't actually checking anything
            msg = (
                "[Cc]annot perform|Categorical is not ordered for operation|"
                "does not support reduction|"
            )

            with pytest.raises(TypeError, match=msg):
                getattr(ser, op_name)(skipna=skipna)

        else:
            # min/max with empty produce numpy warnings
            self.check_reduce(ser, op_name, skipna)

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_frame(self, data, all_numeric_reductions, skipna):
        op_name = all_numeric_reductions
        ser = pd.Series(data)
        if not is_numeric_dtype(ser.dtype):
            pytest.skip(f"{ser.dtype} is not numeric dtype")

        if op_name in ["count", "kurt", "sem"]:
            pytest.skip(f"{op_name} not an array method")

        if not self._supports_reduction(ser, op_name):
            pytest.skip(f"Reduction {op_name} not supported for this dtype")

        self.check_reduce_frame(ser, op_name, skipna)
|
132 |
+
|
133 |
+
|
134 |
+
# TODO(3.0): remove BaseNoReduceTests, BaseNumericReduceTests,
|
135 |
+
# BaseBooleanReduceTests
|
136 |
+
# Kept for backward compatibility: inherits the default
# _supports_reduction -> False, so every reduction is expected to raise.
class BaseNoReduceTests(BaseReduceTests):
    """we don't define any reductions"""
|
138 |
+
|
139 |
+
|
140 |
+
class BaseNumericReduceTests(BaseReduceTests):
    # For backward compatibility only, this only runs the numeric reductions
    def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
        # any/all are boolean reductions; skip them here so they are only
        # exercised by BaseBooleanReduceTests.
        if op_name in ["any", "all"]:
            pytest.skip("These are tested in BaseBooleanReduceTests")
        return True
|
146 |
+
|
147 |
+
|
148 |
+
class BaseBooleanReduceTests(BaseReduceTests):
    # For backward compatibility only, this only runs the numeric reductions
    def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
        # only any/all belong here; everything else is a numeric reduction.
        if op_name not in ["any", "all"]:
            pytest.skip("These are tested in BaseNumericReduceTests")
        return True
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/reshaping.py
ADDED
@@ -0,0 +1,379 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import itertools
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
import pytest
|
5 |
+
|
6 |
+
import pandas as pd
|
7 |
+
import pandas._testing as tm
|
8 |
+
from pandas.api.extensions import ExtensionArray
|
9 |
+
from pandas.core.internals.blocks import EABackedBlock
|
10 |
+
|
11 |
+
|
12 |
+
class BaseReshapingTests:
    """Tests for reshaping and concatenation."""

    @pytest.mark.parametrize("in_frame", [True, False])
    def test_concat(self, data, in_frame):
        # Concatenating an EA with itself preserves length and dtype and
        # keeps the result backed by an ExtensionArray/EABackedBlock.
        wrapped = pd.Series(data)
        if in_frame:
            wrapped = pd.DataFrame(wrapped)
        result = pd.concat([wrapped, wrapped], ignore_index=True)

        assert len(result) == len(data) * 2

        if in_frame:
            dtype = result.dtypes[0]
        else:
            dtype = result.dtype

        assert dtype == data.dtype
        if hasattr(result._mgr, "blocks"):
            assert isinstance(result._mgr.blocks[0], EABackedBlock)
        assert isinstance(result._mgr.arrays[0], ExtensionArray)

    @pytest.mark.parametrize("in_frame", [True, False])
    def test_concat_all_na_block(self, data_missing, in_frame):
        # An all-NA block must not degrade the dtype when concatenated with
        # a valid block.
        valid_block = pd.Series(data_missing.take([1, 1]), index=[0, 1])
        na_block = pd.Series(data_missing.take([0, 0]), index=[2, 3])
        if in_frame:
            valid_block = pd.DataFrame({"a": valid_block})
            na_block = pd.DataFrame({"a": na_block})
        result = pd.concat([valid_block, na_block])
        if in_frame:
            expected = pd.DataFrame({"a": data_missing.take([1, 1, 0, 0])})
            tm.assert_frame_equal(result, expected)
        else:
            expected = pd.Series(data_missing.take([1, 1, 0, 0]))
            tm.assert_series_equal(result, expected)

    def test_concat_mixed_dtypes(self, data):
        # https://github.com/pandas-dev/pandas/issues/20762
        # Mixing the EA with int and categorical columns falls back to object.
        df1 = pd.DataFrame({"A": data[:3]})
        df2 = pd.DataFrame({"A": [1, 2, 3]})
        df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category")
        dfs = [df1, df2, df3]

        # dataframes
        result = pd.concat(dfs)
        expected = pd.concat([x.astype(object) for x in dfs])
        tm.assert_frame_equal(result, expected)

        # series
        result = pd.concat([x["A"] for x in dfs])
        expected = pd.concat([x["A"].astype(object) for x in dfs])
        tm.assert_series_equal(result, expected)

        # simple test for just EA and one other
        result = pd.concat([df1, df2.astype(object)])
        expected = pd.concat([df1.astype("object"), df2.astype("object")])
        tm.assert_frame_equal(result, expected)

        result = pd.concat([df1["A"], df2["A"].astype(object)])
        expected = pd.concat([df1["A"].astype("object"), df2["A"].astype("object")])
        tm.assert_series_equal(result, expected)

    def test_concat_columns(self, data, na_value):
        df1 = pd.DataFrame({"A": data[:3]})
        df2 = pd.DataFrame({"B": [1, 2, 3]})

        expected = pd.DataFrame({"A": data[:3], "B": [1, 2, 3]})
        result = pd.concat([df1, df2], axis=1)
        tm.assert_frame_equal(result, expected)
        result = pd.concat([df1["A"], df2["B"]], axis=1)
        tm.assert_frame_equal(result, expected)

        # non-aligned
        df2 = pd.DataFrame({"B": [1, 2, 3]}, index=[1, 2, 3])
        expected = pd.DataFrame(
            {
                "A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype),
                "B": [np.nan, 1, 2, 3],
            }
        )

        result = pd.concat([df1, df2], axis=1)
        tm.assert_frame_equal(result, expected)
        result = pd.concat([df1["A"], df2["B"]], axis=1)
        tm.assert_frame_equal(result, expected)

    def test_concat_extension_arrays_copy_false(self, data, na_value):
        # GH 20756
        df1 = pd.DataFrame({"A": data[:3]})
        df2 = pd.DataFrame({"B": data[3:7]})
        expected = pd.DataFrame(
            {
                "A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype),
                "B": data[3:7],
            }
        )
        result = pd.concat([df1, df2], axis=1, copy=False)
        tm.assert_frame_equal(result, expected)

    def test_concat_with_reindex(self, data):
        # GH-33027
        a = pd.DataFrame({"a": data[:5]})
        b = pd.DataFrame({"b": data[:5]})
        result = pd.concat([a, b], ignore_index=True)
        expected = pd.DataFrame(
            {
                "a": data.take(list(range(5)) + ([-1] * 5), allow_fill=True),
                "b": data.take(([-1] * 5) + list(range(5)), allow_fill=True),
            }
        )
        tm.assert_frame_equal(result, expected)

    def test_align(self, data, na_value):
        a = data[:3]
        b = data[2:5]
        r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3]))

        # Assumes that the ctor can take a list of scalars of the type
        e1 = pd.Series(data._from_sequence(list(a) + [na_value], dtype=data.dtype))
        e2 = pd.Series(data._from_sequence([na_value] + list(b), dtype=data.dtype))
        tm.assert_series_equal(r1, e1)
        tm.assert_series_equal(r2, e2)

    def test_align_frame(self, data, na_value):
        a = data[:3]
        b = data[2:5]
        r1, r2 = pd.DataFrame({"A": a}).align(pd.DataFrame({"A": b}, index=[1, 2, 3]))

        # Assumes that the ctor can take a list of scalars of the type
        e1 = pd.DataFrame(
            {"A": data._from_sequence(list(a) + [na_value], dtype=data.dtype)}
        )
        e2 = pd.DataFrame(
            {"A": data._from_sequence([na_value] + list(b), dtype=data.dtype)}
        )
        tm.assert_frame_equal(r1, e1)
        tm.assert_frame_equal(r2, e2)

    def test_align_series_frame(self, data, na_value):
        # https://github.com/pandas-dev/pandas/issues/20576
        ser = pd.Series(data, name="a")
        df = pd.DataFrame({"col": np.arange(len(ser) + 1)})
        r1, r2 = ser.align(df)

        e1 = pd.Series(
            data._from_sequence(list(data) + [na_value], dtype=data.dtype),
            name=ser.name,
        )

        tm.assert_series_equal(r1, e1)
        tm.assert_frame_equal(r2, df)

    def test_set_frame_expand_regular_with_extension(self, data):
        # Adding an EA column to a frame of regular dtype works.
        df = pd.DataFrame({"A": [1] * len(data)})
        df["B"] = data
        expected = pd.DataFrame({"A": [1] * len(data), "B": data})
        tm.assert_frame_equal(df, expected)

    def test_set_frame_expand_extension_with_regular(self, data):
        # Adding a regular column to an EA-backed frame works.
        df = pd.DataFrame({"A": data})
        df["B"] = [1] * len(data)
        expected = pd.DataFrame({"A": data, "B": [1] * len(data)})
        tm.assert_frame_equal(df, expected)

    def test_set_frame_overwrite_object(self, data):
        # https://github.com/pandas-dev/pandas/issues/20555
        # Overwriting an object column with an EA updates the dtype.
        df = pd.DataFrame({"A": [1] * len(data)}, dtype=object)
        df["A"] = data
        assert df.dtypes["A"] == data.dtype

    def test_merge(self, data, na_value):
        # GH-20743
        df1 = pd.DataFrame({"ext": data[:3], "int1": [1, 2, 3], "key": [0, 1, 2]})
        df2 = pd.DataFrame({"int2": [1, 2, 3, 4], "key": [0, 0, 1, 3]})

        res = pd.merge(df1, df2)
        exp = pd.DataFrame(
            {
                "int1": [1, 1, 2],
                "int2": [1, 2, 3],
                "key": [0, 0, 1],
                "ext": data._from_sequence(
                    [data[0], data[0], data[1]], dtype=data.dtype
                ),
            }
        )
        tm.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]])

        # outer merge introduces a missing value in the EA column
        res = pd.merge(df1, df2, how="outer")
        exp = pd.DataFrame(
            {
                "int1": [1, 1, 2, 3, np.nan],
                "int2": [1, 2, 3, np.nan, 4],
                "key": [0, 0, 1, 2, 3],
                "ext": data._from_sequence(
                    [data[0], data[0], data[1], data[2], na_value], dtype=data.dtype
                ),
            }
        )
        tm.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]])

    def test_merge_on_extension_array(self, data):
        # GH 23020
        a, b = data[:2]
        key = type(data)._from_sequence([a, b], dtype=data.dtype)

        df = pd.DataFrame({"key": key, "val": [1, 2]})
        result = pd.merge(df, df, on="key")
        expected = pd.DataFrame({"key": key, "val_x": [1, 2], "val_y": [1, 2]})
        tm.assert_frame_equal(result, expected)

        # order
        result = pd.merge(df.iloc[[1, 0]], df, on="key")
        expected = expected.iloc[[1, 0]].reset_index(drop=True)
        tm.assert_frame_equal(result, expected)

    def test_merge_on_extension_array_duplicates(self, data):
        # GH 23020
        a, b = data[:2]
        key = type(data)._from_sequence([a, b, a], dtype=data.dtype)
        df1 = pd.DataFrame({"key": key, "val": [1, 2, 3]})
        df2 = pd.DataFrame({"key": key, "val": [1, 2, 3]})

        result = pd.merge(df1, df2, on="key")
        expected = pd.DataFrame(
            {
                "key": key.take([0, 0, 1, 2, 2]),
                "val_x": [1, 1, 2, 3, 3],
                "val_y": [1, 3, 2, 1, 3],
            }
        )
        tm.assert_frame_equal(result, expected)

    @pytest.mark.filterwarnings(
        "ignore:The previous implementation of stack is deprecated"
    )
    @pytest.mark.parametrize(
        "columns",
        [
            ["A", "B"],
            pd.MultiIndex.from_tuples(
                [("A", "a"), ("A", "b")], names=["outer", "inner"]
            ),
        ],
    )
    @pytest.mark.parametrize("future_stack", [True, False])
    def test_stack(self, data, columns, future_stack):
        df = pd.DataFrame({"A": data[:5], "B": data[:5]})
        df.columns = columns
        result = df.stack(future_stack=future_stack)
        expected = df.astype(object).stack(future_stack=future_stack)
        # we need a second astype(object), in case the constructor inferred
        # object -> specialized, as is done for period.
        expected = expected.astype(object)

        if isinstance(expected, pd.Series):
            assert result.dtype == df.iloc[:, 0].dtype
        else:
            assert all(result.dtypes == df.iloc[:, 0].dtype)

        result = result.astype(object)
        tm.assert_equal(result, expected)

    @pytest.mark.parametrize(
        "index",
        [
            # Two levels, uniform.
            pd.MultiIndex.from_product(([["A", "B"], ["a", "b"]]), names=["a", "b"]),
            # non-uniform
            pd.MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "b")]),
            # three levels, non-uniform
            pd.MultiIndex.from_product([("A", "B"), ("a", "b", "c"), (0, 1, 2)]),
            pd.MultiIndex.from_tuples(
                [
                    ("A", "a", 1),
                    ("A", "b", 0),
                    ("A", "a", 0),
                    ("B", "a", 0),
                    ("B", "c", 1),
                ]
            ),
        ],
    )
    @pytest.mark.parametrize("obj", ["series", "frame"])
    def test_unstack(self, data, index, obj):
        data = data[: len(index)]
        if obj == "series":
            ser = pd.Series(data, index=index)
        else:
            ser = pd.DataFrame({"A": data, "B": data}, index=index)

        n = index.nlevels
        levels = list(range(n))
        # [0, 1, 2]
        # [(0,), (1,), (2,), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
        combinations = itertools.chain.from_iterable(
            itertools.permutations(levels, i) for i in range(1, n)
        )

        for level in combinations:
            result = ser.unstack(level=level)
            assert all(
                isinstance(result[col].array, type(data)) for col in result.columns
            )

            if obj == "series":
                # We should get the same result with to_frame+unstack+droplevel
                df = ser.to_frame()

                alt = df.unstack(level=level).droplevel(0, axis=1)
                tm.assert_frame_equal(result, alt)

            obj_ser = ser.astype(object)

            expected = obj_ser.unstack(level=level, fill_value=data.dtype.na_value)
            if obj == "series":
                assert (expected.dtypes == object).all()

            result = result.astype(object)
            tm.assert_frame_equal(result, expected)

    def test_ravel(self, data):
        # as long as EA is 1D-only, ravel is a no-op
        result = data.ravel()
        assert type(result) == type(data)

        if data.dtype._is_immutable:
            pytest.skip(f"test_ravel assumes mutability and {data.dtype} is immutable")

        # Check that we have a view, not a copy
        result[0] = result[1]
        assert data[0] == data[1]

    def test_transpose(self, data):
        result = data.transpose()
        assert type(result) == type(data)

        # check we get a new object
        assert result is not data

        # If we ever _did_ support 2D, shape should be reversed
        assert result.shape == data.shape[::-1]

        if data.dtype._is_immutable:
            pytest.skip(
                f"test_transpose assumes mutability and {data.dtype} is immutable"
            )

        # Check that we have a view, not a copy
        result[0] = result[1]
        assert data[0] == data[1]

    def test_transpose_frame(self, data):
        # Transposing an all-EA frame preserves the dtype in each new column.
        df = pd.DataFrame({"A": data[:4], "B": data[:4]}, index=["a", "b", "c", "d"])
        result = df.T
        expected = pd.DataFrame(
            {
                "a": type(data)._from_sequence([data[0]] * 2, dtype=data.dtype),
                "b": type(data)._from_sequence([data[1]] * 2, dtype=data.dtype),
                "c": type(data)._from_sequence([data[2]] * 2, dtype=data.dtype),
                "d": type(data)._from_sequence([data[3]] * 2, dtype=data.dtype),
            },
            index=["A", "B"],
        )
        tm.assert_frame_equal(result, expected)
        tm.assert_frame_equal(np.transpose(np.transpose(df)), df)
        tm.assert_frame_equal(np.transpose(np.transpose(df[["A"]])), df[["A"]])
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/setitem.py
ADDED
@@ -0,0 +1,451 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
import pandas as pd
|
5 |
+
import pandas._testing as tm
|
6 |
+
|
7 |
+
|
8 |
+
class BaseSetitemTests:
|
9 |
+
    @pytest.fixture(
        params=[
            lambda x: x.index,
            lambda x: list(x.index),
            lambda x: slice(None),
            lambda x: slice(0, len(x)),
            lambda x: range(len(x)),
            lambda x: list(range(len(x))),
            lambda x: np.ones(len(x), dtype=bool),
        ],
        ids=[
            "index",
            "list[index]",
            "null_slice",
            "full_slice",
            "range",
            "list(range)",
            "mask",
        ],
    )
    def full_indexer(self, request):
        """
        Fixture for an indexer to pass to obj.loc to get/set the full length of the
        object.

        In some cases, assumes that obj.index is the default RangeIndex.
        """
        # Each param is a callable taking the object and returning an indexer
        # that selects every row (index, list, slice, range, or boolean mask).
        return request.param
|
37 |
+
|
38 |
+
    @pytest.fixture(autouse=True)
    def skip_if_immutable(self, dtype, request):
        # Auto-used fixture: skip every setitem test for immutable dtypes,
        # except test_is_immutable which asserts the immutability itself.
        if dtype._is_immutable:
            node = request.node
            if node.name.split("[")[0] == "test_is_immutable":
                # This fixture is auto-used, but we want to not-skip
                # test_is_immutable.
                return

            # When BaseSetitemTests is mixed into ExtensionTests, we only
            # want this fixture to operate on the tests defined in this
            # class/file.
            defined_in = node.function.__qualname__.split(".")[0]
            if defined_in == "BaseSetitemTests":
                pytest.skip("__setitem__ test not applicable with immutable dtype")
|
53 |
+
|
54 |
+
def test_is_immutable(self, data):
|
55 |
+
if data.dtype._is_immutable:
|
56 |
+
with pytest.raises(TypeError):
|
57 |
+
data[0] = data[0]
|
58 |
+
else:
|
59 |
+
data[0] = data[1]
|
60 |
+
assert data[0] == data[1]
|
61 |
+
|
62 |
+
def test_setitem_scalar_series(self, data, box_in_series):
|
63 |
+
if box_in_series:
|
64 |
+
data = pd.Series(data)
|
65 |
+
data[0] = data[1]
|
66 |
+
assert data[0] == data[1]
|
67 |
+
|
68 |
+
def test_setitem_sequence(self, data, box_in_series):
|
69 |
+
if box_in_series:
|
70 |
+
data = pd.Series(data)
|
71 |
+
original = data.copy()
|
72 |
+
|
73 |
+
data[[0, 1]] = [data[1], data[0]]
|
74 |
+
assert data[0] == original[1]
|
75 |
+
assert data[1] == original[0]
|
76 |
+
|
77 |
+
def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
|
78 |
+
ser = pd.Series(data)
|
79 |
+
original = ser.copy()
|
80 |
+
value = [data[0]]
|
81 |
+
if as_array:
|
82 |
+
value = data._from_sequence(value, dtype=data.dtype)
|
83 |
+
|
84 |
+
xpr = "cannot set using a {} indexer with a different length"
|
85 |
+
with pytest.raises(ValueError, match=xpr.format("list-like")):
|
86 |
+
ser[[0, 1]] = value
|
87 |
+
# Ensure no modifications made before the exception
|
88 |
+
tm.assert_series_equal(ser, original)
|
89 |
+
|
90 |
+
with pytest.raises(ValueError, match=xpr.format("slice")):
|
91 |
+
ser[slice(3)] = value
|
92 |
+
tm.assert_series_equal(ser, original)
|
93 |
+
|
94 |
+
def test_setitem_empty_indexer(self, data, box_in_series):
|
95 |
+
if box_in_series:
|
96 |
+
data = pd.Series(data)
|
97 |
+
original = data.copy()
|
98 |
+
data[np.array([], dtype=int)] = []
|
99 |
+
tm.assert_equal(data, original)
|
100 |
+
|
101 |
+
def test_setitem_sequence_broadcasts(self, data, box_in_series):
|
102 |
+
if box_in_series:
|
103 |
+
data = pd.Series(data)
|
104 |
+
data[[0, 1]] = data[2]
|
105 |
+
assert data[0] == data[2]
|
106 |
+
assert data[1] == data[2]
|
107 |
+
|
108 |
+
@pytest.mark.parametrize("setter", ["loc", "iloc"])
|
109 |
+
def test_setitem_scalar(self, data, setter):
|
110 |
+
arr = pd.Series(data)
|
111 |
+
setter = getattr(arr, setter)
|
112 |
+
setter[0] = data[1]
|
113 |
+
assert arr[0] == data[1]
|
114 |
+
|
115 |
+
def test_setitem_loc_scalar_mixed(self, data):
|
116 |
+
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
|
117 |
+
df.loc[0, "B"] = data[1]
|
118 |
+
assert df.loc[0, "B"] == data[1]
|
119 |
+
|
120 |
+
def test_setitem_loc_scalar_single(self, data):
|
121 |
+
df = pd.DataFrame({"B": data})
|
122 |
+
df.loc[10, "B"] = data[1]
|
123 |
+
assert df.loc[10, "B"] == data[1]
|
124 |
+
|
125 |
+
def test_setitem_loc_scalar_multiple_homogoneous(self, data):
|
126 |
+
df = pd.DataFrame({"A": data, "B": data})
|
127 |
+
df.loc[10, "B"] = data[1]
|
128 |
+
assert df.loc[10, "B"] == data[1]
|
129 |
+
|
130 |
+
def test_setitem_iloc_scalar_mixed(self, data):
|
131 |
+
df = pd.DataFrame({"A": np.arange(len(data)), "B": data})
|
132 |
+
df.iloc[0, 1] = data[1]
|
133 |
+
assert df.loc[0, "B"] == data[1]
|
134 |
+
|
135 |
+
def test_setitem_iloc_scalar_single(self, data):
|
136 |
+
df = pd.DataFrame({"B": data})
|
137 |
+
df.iloc[10, 0] = data[1]
|
138 |
+
assert df.loc[10, "B"] == data[1]
|
139 |
+
|
140 |
+
def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
|
141 |
+
df = pd.DataFrame({"A": data, "B": data})
|
142 |
+
df.iloc[10, 1] = data[1]
|
143 |
+
assert df.loc[10, "B"] == data[1]
|
144 |
+
|
145 |
+
@pytest.mark.parametrize(
|
146 |
+
"mask",
|
147 |
+
[
|
148 |
+
np.array([True, True, True, False, False]),
|
149 |
+
pd.array([True, True, True, False, False], dtype="boolean"),
|
150 |
+
pd.array([True, True, True, pd.NA, pd.NA], dtype="boolean"),
|
151 |
+
],
|
152 |
+
ids=["numpy-array", "boolean-array", "boolean-array-na"],
|
153 |
+
)
|
154 |
+
def test_setitem_mask(self, data, mask, box_in_series):
|
155 |
+
arr = data[:5].copy()
|
156 |
+
expected = arr.take([0, 0, 0, 3, 4])
|
157 |
+
if box_in_series:
|
158 |
+
arr = pd.Series(arr)
|
159 |
+
expected = pd.Series(expected)
|
160 |
+
arr[mask] = data[0]
|
161 |
+
tm.assert_equal(expected, arr)
|
162 |
+
|
163 |
+
def test_setitem_mask_raises(self, data, box_in_series):
|
164 |
+
# wrong length
|
165 |
+
mask = np.array([True, False])
|
166 |
+
|
167 |
+
if box_in_series:
|
168 |
+
data = pd.Series(data)
|
169 |
+
|
170 |
+
with pytest.raises(IndexError, match="wrong length"):
|
171 |
+
data[mask] = data[0]
|
172 |
+
|
173 |
+
mask = pd.array(mask, dtype="boolean")
|
174 |
+
with pytest.raises(IndexError, match="wrong length"):
|
175 |
+
data[mask] = data[0]
|
176 |
+
|
177 |
+
def test_setitem_mask_boolean_array_with_na(self, data, box_in_series):
|
178 |
+
mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
|
179 |
+
mask[:3] = True
|
180 |
+
mask[3:5] = pd.NA
|
181 |
+
|
182 |
+
if box_in_series:
|
183 |
+
data = pd.Series(data)
|
184 |
+
|
185 |
+
data[mask] = data[0]
|
186 |
+
|
187 |
+
assert (data[:3] == data[0]).all()
|
188 |
+
|
189 |
+
@pytest.mark.parametrize(
|
190 |
+
"idx",
|
191 |
+
[[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
|
192 |
+
ids=["list", "integer-array", "numpy-array"],
|
193 |
+
)
|
194 |
+
def test_setitem_integer_array(self, data, idx, box_in_series):
|
195 |
+
arr = data[:5].copy()
|
196 |
+
expected = data.take([0, 0, 0, 3, 4])
|
197 |
+
|
198 |
+
if box_in_series:
|
199 |
+
arr = pd.Series(arr)
|
200 |
+
expected = pd.Series(expected)
|
201 |
+
|
202 |
+
arr[idx] = arr[0]
|
203 |
+
tm.assert_equal(arr, expected)
|
204 |
+
|
205 |
+
@pytest.mark.parametrize(
|
206 |
+
"idx, box_in_series",
|
207 |
+
[
|
208 |
+
([0, 1, 2, pd.NA], False),
|
209 |
+
pytest.param(
|
210 |
+
[0, 1, 2, pd.NA], True, marks=pytest.mark.xfail(reason="GH-31948")
|
211 |
+
),
|
212 |
+
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
|
213 |
+
(pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
|
214 |
+
],
|
215 |
+
ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],
|
216 |
+
)
|
217 |
+
def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):
|
218 |
+
arr = data.copy()
|
219 |
+
|
220 |
+
# TODO(xfail) this raises KeyError about labels not found (it tries label-based)
|
221 |
+
# for list of labels with Series
|
222 |
+
if box_in_series:
|
223 |
+
arr = pd.Series(data, index=[chr(100 + i) for i in range(len(data))])
|
224 |
+
|
225 |
+
msg = "Cannot index with an integer indexer containing NA values"
|
226 |
+
with pytest.raises(ValueError, match=msg):
|
227 |
+
arr[idx] = arr[0]
|
228 |
+
|
229 |
+
@pytest.mark.parametrize("as_callable", [True, False])
|
230 |
+
@pytest.mark.parametrize("setter", ["loc", None])
|
231 |
+
def test_setitem_mask_aligned(self, data, as_callable, setter):
|
232 |
+
ser = pd.Series(data)
|
233 |
+
mask = np.zeros(len(data), dtype=bool)
|
234 |
+
mask[:2] = True
|
235 |
+
|
236 |
+
if as_callable:
|
237 |
+
mask2 = lambda x: mask
|
238 |
+
else:
|
239 |
+
mask2 = mask
|
240 |
+
|
241 |
+
if setter:
|
242 |
+
# loc
|
243 |
+
target = getattr(ser, setter)
|
244 |
+
else:
|
245 |
+
# Series.__setitem__
|
246 |
+
target = ser
|
247 |
+
|
248 |
+
target[mask2] = data[5:7]
|
249 |
+
|
250 |
+
ser[mask2] = data[5:7]
|
251 |
+
assert ser[0] == data[5]
|
252 |
+
assert ser[1] == data[6]
|
253 |
+
|
254 |
+
@pytest.mark.parametrize("setter", ["loc", None])
|
255 |
+
def test_setitem_mask_broadcast(self, data, setter):
|
256 |
+
ser = pd.Series(data)
|
257 |
+
mask = np.zeros(len(data), dtype=bool)
|
258 |
+
mask[:2] = True
|
259 |
+
|
260 |
+
if setter: # loc
|
261 |
+
target = getattr(ser, setter)
|
262 |
+
else: # __setitem__
|
263 |
+
target = ser
|
264 |
+
|
265 |
+
target[mask] = data[10]
|
266 |
+
assert ser[0] == data[10]
|
267 |
+
assert ser[1] == data[10]
|
268 |
+
|
269 |
+
def test_setitem_expand_columns(self, data):
|
270 |
+
df = pd.DataFrame({"A": data})
|
271 |
+
result = df.copy()
|
272 |
+
result["B"] = 1
|
273 |
+
expected = pd.DataFrame({"A": data, "B": [1] * len(data)})
|
274 |
+
tm.assert_frame_equal(result, expected)
|
275 |
+
|
276 |
+
result = df.copy()
|
277 |
+
result.loc[:, "B"] = 1
|
278 |
+
tm.assert_frame_equal(result, expected)
|
279 |
+
|
280 |
+
# overwrite with new type
|
281 |
+
result["B"] = data
|
282 |
+
expected = pd.DataFrame({"A": data, "B": data})
|
283 |
+
tm.assert_frame_equal(result, expected)
|
284 |
+
|
285 |
+
def test_setitem_expand_with_extension(self, data):
|
286 |
+
df = pd.DataFrame({"A": [1] * len(data)})
|
287 |
+
result = df.copy()
|
288 |
+
result["B"] = data
|
289 |
+
expected = pd.DataFrame({"A": [1] * len(data), "B": data})
|
290 |
+
tm.assert_frame_equal(result, expected)
|
291 |
+
|
292 |
+
result = df.copy()
|
293 |
+
result.loc[:, "B"] = data
|
294 |
+
tm.assert_frame_equal(result, expected)
|
295 |
+
|
296 |
+
def test_setitem_frame_invalid_length(self, data):
|
297 |
+
df = pd.DataFrame({"A": [1] * len(data)})
|
298 |
+
xpr = (
|
299 |
+
rf"Length of values \({len(data[:5])}\) "
|
300 |
+
rf"does not match length of index \({len(df)}\)"
|
301 |
+
)
|
302 |
+
with pytest.raises(ValueError, match=xpr):
|
303 |
+
df["B"] = data[:5]
|
304 |
+
|
305 |
+
def test_setitem_tuple_index(self, data):
|
306 |
+
ser = pd.Series(data[:2], index=[(0, 0), (0, 1)])
|
307 |
+
expected = pd.Series(data.take([1, 1]), index=ser.index)
|
308 |
+
ser[(0, 0)] = data[1]
|
309 |
+
tm.assert_series_equal(ser, expected)
|
310 |
+
|
311 |
+
def test_setitem_slice(self, data, box_in_series):
|
312 |
+
arr = data[:5].copy()
|
313 |
+
expected = data.take([0, 0, 0, 3, 4])
|
314 |
+
if box_in_series:
|
315 |
+
arr = pd.Series(arr)
|
316 |
+
expected = pd.Series(expected)
|
317 |
+
|
318 |
+
arr[:3] = data[0]
|
319 |
+
tm.assert_equal(arr, expected)
|
320 |
+
|
321 |
+
def test_setitem_loc_iloc_slice(self, data):
|
322 |
+
arr = data[:5].copy()
|
323 |
+
s = pd.Series(arr, index=["a", "b", "c", "d", "e"])
|
324 |
+
expected = pd.Series(data.take([0, 0, 0, 3, 4]), index=s.index)
|
325 |
+
|
326 |
+
result = s.copy()
|
327 |
+
result.iloc[:3] = data[0]
|
328 |
+
tm.assert_equal(result, expected)
|
329 |
+
|
330 |
+
result = s.copy()
|
331 |
+
result.loc[:"c"] = data[0]
|
332 |
+
tm.assert_equal(result, expected)
|
333 |
+
|
334 |
+
def test_setitem_slice_mismatch_length_raises(self, data):
|
335 |
+
arr = data[:5]
|
336 |
+
with pytest.raises(ValueError):
|
337 |
+
arr[:1] = arr[:2]
|
338 |
+
|
339 |
+
def test_setitem_slice_array(self, data):
|
340 |
+
arr = data[:5].copy()
|
341 |
+
arr[:5] = data[-5:]
|
342 |
+
tm.assert_extension_array_equal(arr, data[-5:])
|
343 |
+
|
344 |
+
def test_setitem_scalar_key_sequence_raise(self, data):
|
345 |
+
arr = data[:5].copy()
|
346 |
+
with pytest.raises(ValueError):
|
347 |
+
arr[0] = arr[[0, 1]]
|
348 |
+
|
349 |
+
def test_setitem_preserves_views(self, data):
|
350 |
+
# GH#28150 setitem shouldn't swap the underlying data
|
351 |
+
view1 = data.view()
|
352 |
+
view2 = data[:]
|
353 |
+
|
354 |
+
data[0] = data[1]
|
355 |
+
assert view1[0] == data[1]
|
356 |
+
assert view2[0] == data[1]
|
357 |
+
|
358 |
+
def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):
|
359 |
+
# https://github.com/pandas-dev/pandas/issues/32395
|
360 |
+
df = expected = pd.DataFrame({0: pd.Series(data)})
|
361 |
+
result = pd.DataFrame(index=df.index)
|
362 |
+
|
363 |
+
key = full_indexer(df)
|
364 |
+
result.loc[key, 0] = df[0]
|
365 |
+
|
366 |
+
tm.assert_frame_equal(result, expected)
|
367 |
+
|
368 |
+
def test_setitem_with_expansion_row(self, data, na_value):
|
369 |
+
df = pd.DataFrame({"data": data[:1]})
|
370 |
+
|
371 |
+
df.loc[1, "data"] = data[1]
|
372 |
+
expected = pd.DataFrame({"data": data[:2]})
|
373 |
+
tm.assert_frame_equal(df, expected)
|
374 |
+
|
375 |
+
# https://github.com/pandas-dev/pandas/issues/47284
|
376 |
+
df.loc[2, "data"] = na_value
|
377 |
+
expected = pd.DataFrame(
|
378 |
+
{"data": pd.Series([data[0], data[1], na_value], dtype=data.dtype)}
|
379 |
+
)
|
380 |
+
tm.assert_frame_equal(df, expected)
|
381 |
+
|
382 |
+
def test_setitem_series(self, data, full_indexer):
|
383 |
+
# https://github.com/pandas-dev/pandas/issues/32395
|
384 |
+
ser = pd.Series(data, name="data")
|
385 |
+
result = pd.Series(index=ser.index, dtype=object, name="data")
|
386 |
+
|
387 |
+
# because result has object dtype, the attempt to do setting inplace
|
388 |
+
# is successful, and object dtype is retained
|
389 |
+
key = full_indexer(ser)
|
390 |
+
result.loc[key] = ser
|
391 |
+
|
392 |
+
expected = pd.Series(
|
393 |
+
data.astype(object), index=ser.index, name="data", dtype=object
|
394 |
+
)
|
395 |
+
tm.assert_series_equal(result, expected)
|
396 |
+
|
397 |
+
def test_setitem_frame_2d_values(self, data):
|
398 |
+
# GH#44514
|
399 |
+
df = pd.DataFrame({"A": data})
|
400 |
+
|
401 |
+
# Avoiding using_array_manager fixture
|
402 |
+
# https://github.com/pandas-dev/pandas/pull/44514#discussion_r754002410
|
403 |
+
using_array_manager = isinstance(df._mgr, pd.core.internals.ArrayManager)
|
404 |
+
using_copy_on_write = pd.options.mode.copy_on_write
|
405 |
+
|
406 |
+
blk_data = df._mgr.arrays[0]
|
407 |
+
|
408 |
+
orig = df.copy()
|
409 |
+
|
410 |
+
df.iloc[:] = df.copy()
|
411 |
+
tm.assert_frame_equal(df, orig)
|
412 |
+
|
413 |
+
df.iloc[:-1] = df.iloc[:-1].copy()
|
414 |
+
tm.assert_frame_equal(df, orig)
|
415 |
+
|
416 |
+
df.iloc[:] = df.values
|
417 |
+
tm.assert_frame_equal(df, orig)
|
418 |
+
if not using_array_manager and not using_copy_on_write:
|
419 |
+
# GH#33457 Check that this setting occurred in-place
|
420 |
+
# FIXME(ArrayManager): this should work there too
|
421 |
+
assert df._mgr.arrays[0] is blk_data
|
422 |
+
|
423 |
+
df.iloc[:-1] = df.values[:-1]
|
424 |
+
tm.assert_frame_equal(df, orig)
|
425 |
+
|
426 |
+
def test_delitem_series(self, data):
|
427 |
+
# GH#40763
|
428 |
+
ser = pd.Series(data, name="data")
|
429 |
+
|
430 |
+
taker = np.arange(len(ser))
|
431 |
+
taker = np.delete(taker, 1)
|
432 |
+
|
433 |
+
expected = ser[taker]
|
434 |
+
del ser[1]
|
435 |
+
tm.assert_series_equal(ser, expected)
|
436 |
+
|
437 |
+
def test_setitem_invalid(self, data, invalid_scalar):
|
438 |
+
msg = "" # messages vary by subclass, so we do not test it
|
439 |
+
with pytest.raises((ValueError, TypeError), match=msg):
|
440 |
+
data[0] = invalid_scalar
|
441 |
+
|
442 |
+
with pytest.raises((ValueError, TypeError), match=msg):
|
443 |
+
data[:] = invalid_scalar
|
444 |
+
|
445 |
+
def test_setitem_2d_values(self, data):
|
446 |
+
# GH50085
|
447 |
+
original = data.copy()
|
448 |
+
df = pd.DataFrame({"a": data, "b": data})
|
449 |
+
df.loc[[0, 1], :] = df.loc[[1, 0], :].values
|
450 |
+
assert (df.loc[0, :] == original[1]).all()
|
451 |
+
assert (df.loc[1, :] == original[0]).all()
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/conftest.py
ADDED
@@ -0,0 +1,230 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import operator
|
2 |
+
|
3 |
+
import pytest
|
4 |
+
|
5 |
+
from pandas._config.config import _get_option
|
6 |
+
|
7 |
+
from pandas import (
|
8 |
+
Series,
|
9 |
+
options,
|
10 |
+
)
|
11 |
+
|
12 |
+
|
13 |
+
@pytest.fixture
def dtype():
    """A fixture providing the ExtensionDtype to validate."""
    # Abstract stub: each concrete extension-test module overrides this.
    raise NotImplementedError
|
17 |
+
|
18 |
+
|
19 |
+
@pytest.fixture
def data():
    """
    Length-100 array for this type.

    * data[0] and data[1] should both be non missing
    * data[0] and data[1] should not be equal
    """
    # Abstract stub: each concrete extension-test module overrides this.
    raise NotImplementedError
|
28 |
+
|
29 |
+
|
30 |
+
@pytest.fixture
def data_for_twos(dtype):
    """
    Length-100 array in which all the elements are two.

    Call pytest.skip in your fixture if the dtype does not support divmod.
    """
    if not (dtype._is_numeric or dtype.kind == "m"):
        # Object-dtypes may want to allow this, but for the most part
        # only numeric and timedelta-like dtypes will need to implement this.
        pytest.skip(f"{dtype} is not a numeric dtype")

    # Numeric / timedelta dtype test modules must override and return data.
    raise NotImplementedError
|
43 |
+
|
44 |
+
|
45 |
+
@pytest.fixture
def data_missing():
    """Length-2 array with [NA, Valid]"""
    # Abstract stub: each concrete extension-test module overrides this.
    raise NotImplementedError
|
49 |
+
|
50 |
+
|
51 |
+
@pytest.fixture(params=["data", "data_missing"])
def all_data(request, data, data_missing):
    """Parametrized fixture giving 'data' and 'data_missing'"""
    # Dispatch on the fixture parameter name; an unrecognized parameter
    # falls through to None, matching the original if/elif chain.
    by_name = {"data": data, "data_missing": data_missing}
    return by_name.get(request.param)
|
58 |
+
|
59 |
+
|
60 |
+
@pytest.fixture
def data_repeated(data):
    """
    Generate many datasets.

    Parameters
    ----------
    data : fixture implementing `data`

    Returns
    -------
    Callable[[int], Generator]:
        A callable that takes a `count` argument and
        returns a generator yielding `count` datasets.
    """

    def gen(count):
        # Yield the same underlying data object `count` times.
        remaining = count
        while remaining > 0:
            yield data
            remaining -= 1

    return gen
|
81 |
+
|
82 |
+
|
83 |
+
@pytest.fixture
def data_for_sorting():
    """
    Length-3 array with a known sort order.

    This should be three items [B, C, A] with
    A < B < C

    For boolean dtypes (for which there are only 2 values available),
    set B=C=True
    """
    # Abstract stub: each concrete extension-test module overrides this.
    raise NotImplementedError
|
95 |
+
|
96 |
+
|
97 |
+
@pytest.fixture
|
98 |
+
def data_missing_for_sorting():
|
99 |
+
"""
|
100 |
+
Length-3 array with a known sort order.
|
101 |
+
|
102 |
+
This should be three items [B, NA, A] with
|
103 |
+
A < B and NA missing.
|
104 |
+
"""
|
105 |
+
raise NotImplementedError
|
106 |
+
|
107 |
+
|
108 |
+
@pytest.fixture
def na_cmp():
    """
    Binary operator for comparing NA values.

    Should return a function of two arguments that returns
    True if both arguments are (scalar) NA for your type.

    By default, uses ``operator.is_``
    """
    # Identity comparison works for singleton NA sentinels (None, pd.NA, ...);
    # dtypes with non-singleton NA (e.g. float NaN) override this fixture.
    return operator.is_
|
119 |
+
|
120 |
+
|
121 |
+
@pytest.fixture
def na_value(dtype):
    """
    The scalar missing value for this type. Default dtype.na_value.

    TODO: can be removed in 3.x (see https://github.com/pandas-dev/pandas/pull/54930)
    """
    return dtype.na_value
|
129 |
+
|
130 |
+
|
131 |
+
@pytest.fixture
def data_for_grouping():
    """
    Data for factorization, grouping, and unique tests.

    Expected to be like [B, B, NA, NA, A, A, B, C]

    Where A < B < C and NA is missing.

    If a dtype has _is_boolean = True, i.e. only 2 unique non-NA entries,
    then set C=B.
    """
    # Abstract stub: each concrete extension-test module overrides this.
    raise NotImplementedError
|
144 |
+
|
145 |
+
|
146 |
+
@pytest.fixture(params=[True, False])
def box_in_series(request) -> bool:
    """Whether to box the data in a Series"""
    return request.param
|
150 |
+
|
151 |
+
|
152 |
+
@pytest.fixture(
    params=[
        lambda x: 1,
        lambda x: [1] * len(x),
        lambda x: Series([1] * len(x)),
        lambda x: x,
    ],
    ids=["scalar", "list", "series", "object"],
)
def groupby_apply_op(request):
    """
    Functions to test groupby.apply().

    Each param maps a group to a scalar, a list, a Series, or the group
    itself, exercising the different return shapes apply() must handle.
    """
    return request.param
|
166 |
+
|
167 |
+
|
168 |
+
@pytest.fixture(params=[True, False])
def as_frame(request) -> bool:
    """
    Boolean fixture to support Series and Series.to_frame() comparison testing.
    """
    return request.param
|
174 |
+
|
175 |
+
|
176 |
+
@pytest.fixture(params=[True, False])
def as_series(request) -> bool:
    """
    Boolean fixture to support arr and Series(arr) comparison testing.
    """
    return request.param
|
182 |
+
|
183 |
+
|
184 |
+
@pytest.fixture(params=[True, False])
def use_numpy(request) -> bool:
    """
    Boolean fixture to support comparison testing of ExtensionDtype array
    and numpy array.
    """
    return request.param
|
191 |
+
|
192 |
+
|
193 |
+
@pytest.fixture(params=["ffill", "bfill"])
def fillna_method(request) -> str:
    """
    Parametrized fixture giving method parameters 'ffill' and 'bfill' for
    Series.fillna(method=<method>) testing.
    """
    return request.param
|
200 |
+
|
201 |
+
|
202 |
+
@pytest.fixture(params=[True, False])
def as_array(request) -> bool:
    """
    Boolean fixture to support ExtensionDtype _from_sequence method testing.
    """
    return request.param
|
208 |
+
|
209 |
+
|
210 |
+
@pytest.fixture
def invalid_scalar(data):
    """
    A scalar that *cannot* be held by this ExtensionArray.

    The default should work for most subclasses, but is not guaranteed.

    If the array can hold any item (i.e. object dtype), then use pytest.skip.
    """
    # A bare `object()` instance: no dtype-specific array should accept it.
    return object.__new__(object)
|
220 |
+
|
221 |
+
|
222 |
+
@pytest.fixture
def using_copy_on_write() -> bool:
    """
    Fixture to check if Copy-on-Write is enabled.
    """
    # CoW is only meaningful under the block manager; both conditions must hold.
    cow_enabled = options.mode.copy_on_write is True
    is_block_manager = _get_option("mode.data_manager", silent=True) == "block"
    return cow_enabled and is_block_manager
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_arrow.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_categorical.py
ADDED
@@ -0,0 +1,200 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
import string
|
17 |
+
|
18 |
+
import numpy as np
|
19 |
+
import pytest
|
20 |
+
|
21 |
+
from pandas._config import using_pyarrow_string_dtype
|
22 |
+
|
23 |
+
import pandas as pd
|
24 |
+
from pandas import Categorical
|
25 |
+
import pandas._testing as tm
|
26 |
+
from pandas.api.types import CategoricalDtype
|
27 |
+
from pandas.tests.extension import base
|
28 |
+
|
29 |
+
|
30 |
+
def make_data():
    """Return a deterministic length-100 array of ascii letters.

    The first two entries are guaranteed to differ, satisfying the
    requirements of the extension-test ``data`` fixture.
    """
    values = np.random.default_rng(2).choice(list(string.ascii_letters), size=100)
    # The seeded draw already meets the requirement; re-draw (with the same
    # seed, hence the same deterministic result) otherwise.
    while values[0] == values[1]:
        values = np.random.default_rng(2).choice(list(string.ascii_letters), size=100)
    return values
|
39 |
+
|
40 |
+
|
41 |
+
@pytest.fixture
def dtype():
    # Unordered, category-less CategoricalDtype; categories are inferred
    # from the data fixture.
    return CategoricalDtype()
|
44 |
+
|
45 |
+
|
46 |
+
@pytest.fixture
def data():
    """Length-100 array for this type.

    * data[0] and data[1] should both be non missing
    * data[0] and data[1] should not be equal
    """
    return Categorical(make_data())
|
54 |
+
|
55 |
+
|
56 |
+
@pytest.fixture
def data_missing():
    """Length 2 array with [NA, Valid]"""
    # np.nan is the Categorical missing-value sentinel.
    return Categorical([np.nan, "A"])
|
60 |
+
|
61 |
+
|
62 |
+
@pytest.fixture
def data_for_sorting():
    # [B, C, A] with A < B < C under the explicit ordered categories.
    return Categorical(["A", "B", "C"], categories=["C", "A", "B"], ordered=True)
|
65 |
+
|
66 |
+
|
67 |
+
@pytest.fixture
def data_missing_for_sorting():
    # [B, NA, A] with A < B under the explicit ordered categories.
    return Categorical(["A", None, "B"], categories=["B", "A"], ordered=True)
|
70 |
+
|
71 |
+
|
72 |
+
@pytest.fixture
def data_for_grouping():
    # Shape [B, B, NA, NA, A, A, B, C] expected by the grouping base tests.
    return Categorical(["a", "a", None, None, "b", "b", "a", "c"])
|
75 |
+
|
76 |
+
|
77 |
+
class TestCategorical(base.ExtensionTests):
    """Extension-interface compliance tests for Categorical.

    Inherits the shared suite and overrides only where Categorical's
    behavior legitimately differs (repr, comparisons, memory usage, ...).
    """

    @pytest.mark.xfail(reason="Memory usage doesn't match")
    def test_memory_usage(self, data):
        # TODO: Is this deliberate?
        super().test_memory_usage(data)

    def test_contains(self, data, data_missing):
        # GH-37867
        # na value handling in Categorical.__contains__ is deprecated.
        # See base.BaseInterFaceTests.test_contains for more details.

        na_value = data.dtype.na_value
        # ensure data without missing values
        data = data[~data.isna()]

        # first elements are non-missing
        assert data[0] in data
        assert data_missing[0] in data_missing

        # check the presence of na_value
        assert na_value in data_missing
        assert na_value not in data

        # Categoricals can contain other nan-likes than na_value
        for na_value_obj in tm.NULL_OBJECTS:
            if na_value_obj is na_value:
                continue
            assert na_value_obj not in data
            # this section suffers from super method
            if not using_pyarrow_string_dtype():
                assert na_value_obj in data_missing

    def test_empty(self, dtype):
        cls = dtype.construct_array_type()
        result = cls._empty((4,), dtype=dtype)

        assert isinstance(result, cls)
        # the dtype we passed is not initialized, so will not match the
        # dtype on our result.
        assert result.dtype == CategoricalDtype([])

    @pytest.mark.skip(reason="Backwards compatibility")
    def test_getitem_scalar(self, data):
        # CategoricalDtype.type isn't "correct" since it should
        # be a parent of the elements (object). But don't want
        # to break things by changing.
        super().test_getitem_scalar(data)

    @pytest.mark.xfail(reason="Unobserved categories included")
    def test_value_counts(self, all_data, dropna):
        return super().test_value_counts(all_data, dropna)

    def test_combine_add(self, data_repeated):
        # GH 20825
        # When adding categoricals in combine, result is a string
        orig_data1, orig_data2 = data_repeated(2)
        s1 = pd.Series(orig_data1)
        s2 = pd.Series(orig_data2)
        result = s1.combine(s2, lambda x1, x2: x1 + x2)
        expected = pd.Series(
            [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]
        )
        tm.assert_series_equal(result, expected)

        val = s1.iloc[0]
        result = s1.combine(val, lambda x1, x2: x1 + x2)
        expected = pd.Series([a + val for a in list(orig_data1)])
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize("na_action", [None, "ignore"])
    def test_map(self, data, na_action):
        # identity map must round-trip the array regardless of na_action
        result = data.map(lambda x: x, na_action=na_action)
        tm.assert_extension_array_equal(result, data)

    def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
        # frame & scalar
        op_name = all_arithmetic_operators
        if op_name == "__rmod__":
            request.applymarker(
                pytest.mark.xfail(
                    reason="rmod never called when string is first argument"
                )
            )
        super().test_arith_frame_with_scalar(data, op_name)

    def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request):
        op_name = all_arithmetic_operators
        if op_name == "__rmod__":
            request.applymarker(
                pytest.mark.xfail(
                    reason="rmod never called when string is first argument"
                )
            )
        super().test_arith_series_with_scalar(data, op_name)

    def _compare_other(self, ser: pd.Series, data, op, other):
        op_name = f"__{op.__name__}__"
        if op_name not in ["__eq__", "__ne__"]:
            # the fixtures here produce unordered Categoricals, for which
            # ordering comparisons are disallowed by design
            msg = "Unordered Categoricals can only compare equality or not"
            with pytest.raises(TypeError, match=msg):
                op(data, other)
        else:
            return super()._compare_other(ser, data, op, other)

    @pytest.mark.xfail(reason="Categorical overrides __repr__")
    @pytest.mark.parametrize("size", ["big", "small"])
    def test_array_repr(self, data, size):
        super().test_array_repr(data, size)

    @pytest.mark.xfail(reason="TBD")
    @pytest.mark.parametrize("as_index", [True, False])
    def test_groupby_extension_agg(self, as_index, data_for_grouping):
        super().test_groupby_extension_agg(as_index, data_for_grouping)
|
190 |
+
|
191 |
+
|
192 |
+
class Test2DCompat(base.NDArrayBacked2DTests):
    """2D-compatibility tests for Categorical (NDArrayBacked-based)."""

    def test_repr_2d(self, data):
        # Categorical __repr__ doesn't include "Categorical", so we need
        # to special-case
        res = repr(data.reshape(1, -1))
        assert res.count("\nCategories") == 1

        res = repr(data.reshape(-1, 1))
        assert res.count("\nCategories") == 1
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_common.py
ADDED
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
from pandas.core.dtypes import dtypes
|
5 |
+
from pandas.core.dtypes.common import is_extension_array_dtype
|
6 |
+
|
7 |
+
import pandas as pd
|
8 |
+
import pandas._testing as tm
|
9 |
+
from pandas.core.arrays import ExtensionArray
|
10 |
+
|
11 |
+
|
12 |
+
class DummyDtype(dtypes.ExtensionDtype):
    """Bare ExtensionDtype subclass; only its identity matters in these tests."""

    pass
|
14 |
+
|
15 |
+
|
16 |
+
class DummyArray(ExtensionArray):
    """Minimal ExtensionArray wrapping a raw numpy ndarray, for dtype tests."""

    def __init__(self, data) -> None:
        self.data = data

    def __array__(self, dtype=None, copy=None):
        # expose the wrapped ndarray directly to numpy
        return self.data

    @property
    def dtype(self):
        # every instance reports a fresh DummyDtype
        return DummyDtype()

    def astype(self, dtype, copy=True):
        # we don't support anything but a single dtype
        if isinstance(dtype, DummyDtype):
            return type(self)(self.data) if copy else self
        if copy:
            return np.array(self, dtype=dtype, copy=copy)
        return np.asarray(self, dtype=dtype)
|
37 |
+
|
38 |
+
|
39 |
+
class TestExtensionArrayDtype:
    """Checks for is_extension_array_dtype over arrays, dtype objects, and Series."""

    @pytest.mark.parametrize(
        "values",
        [
            pd.Categorical([]),
            pd.Categorical([]).dtype,
            pd.Series(pd.Categorical([])),
            DummyDtype(),
            DummyArray(np.array([1, 2])),
        ],
    )
    def test_is_extension_array_dtype(self, values):
        # EA instances, their dtype objects, and EA-backed Series all qualify
        assert is_extension_array_dtype(values)

    @pytest.mark.parametrize("values", [np.array([]), pd.Series(np.array([]))])
    def test_is_not_extension_array_dtype(self, values):
        # plain numpy-backed containers do not qualify
        assert not is_extension_array_dtype(values)
|
56 |
+
|
57 |
+
|
58 |
+
def test_astype():
    """astype to object (as a type or as its string alias) yields an object ndarray."""
    arr = DummyArray(np.array([1, 2, 3]))
    expected = np.array([1, 2, 3], dtype=object)

    # both spellings of the target dtype must behave identically
    for target in (object, "object"):
        tm.assert_numpy_array_equal(arr.astype(target), expected)
|
67 |
+
|
68 |
+
|
69 |
+
def test_astype_no_copy():
    """astype to the array's own dtype returns self only when copy=False."""
    arr = DummyArray(np.array([1, 2, 3], dtype=np.int64))

    # copy=False: identical object comes back
    assert arr.astype(arr.dtype, copy=False) is arr
    # default copy=True: a distinct wrapper is returned
    assert arr.astype(arr.dtype) is not arr
|
77 |
+
|
78 |
+
|
79 |
+
@pytest.mark.parametrize("dtype", [dtypes.CategoricalDtype(), dtypes.IntervalDtype()])
def test_is_extension_array_dtype(dtype):
    """Built-in pandas extension dtypes are ExtensionDtype and detected as such."""
    assert isinstance(dtype, dtypes.ExtensionDtype)
    assert is_extension_array_dtype(dtype)
|
83 |
+
|
84 |
+
|
85 |
+
class CapturingStringArray(pd.arrays.StringArray):
    """Extend StringArray to capture arguments to __getitem__"""

    def __getitem__(self, item):
        # stash the most recent indexer so tests can inspect what was passed
        self.last_item_arg = item
        return super().__getitem__(item)
|
91 |
+
|
92 |
+
|
93 |
+
def test_ellipsis_index():
    # GH#42430 1D slices over extension types turn into N-dimensional slices
    # over ExtensionArrays
    values = np.array(["hello", "world"], dtype=object)
    df = pd.DataFrame({"col1": CapturingStringArray(values)})
    _ = df.iloc[:1]

    # String comparison because there's no native way to compare slices.
    # Before the fix for GH#42430, last_item_arg would get set to the 2D slice
    # (Ellipsis, slice(None, 1, None))
    captured = df["col1"].array.last_item_arg
    assert str(captured) == "slice(None, 1, None)"
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_datetime.py
ADDED
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
import numpy as np
|
17 |
+
import pytest
|
18 |
+
|
19 |
+
from pandas.core.dtypes.dtypes import DatetimeTZDtype
|
20 |
+
|
21 |
+
import pandas as pd
|
22 |
+
import pandas._testing as tm
|
23 |
+
from pandas.core.arrays import DatetimeArray
|
24 |
+
from pandas.tests.extension import base
|
25 |
+
|
26 |
+
|
27 |
+
@pytest.fixture(params=["US/Central"])
|
28 |
+
def dtype(request):
|
29 |
+
return DatetimeTZDtype(unit="ns", tz=request.param)
|
30 |
+
|
31 |
+
|
32 |
+
@pytest.fixture
|
33 |
+
def data(dtype):
|
34 |
+
data = DatetimeArray._from_sequence(
|
35 |
+
pd.date_range("2000", periods=100, tz=dtype.tz), dtype=dtype
|
36 |
+
)
|
37 |
+
return data
|
38 |
+
|
39 |
+
|
40 |
+
@pytest.fixture
|
41 |
+
def data_missing(dtype):
|
42 |
+
return DatetimeArray._from_sequence(
|
43 |
+
np.array(["NaT", "2000-01-01"], dtype="datetime64[ns]"), dtype=dtype
|
44 |
+
)
|
45 |
+
|
46 |
+
|
47 |
+
@pytest.fixture
|
48 |
+
def data_for_sorting(dtype):
|
49 |
+
a = pd.Timestamp("2000-01-01")
|
50 |
+
b = pd.Timestamp("2000-01-02")
|
51 |
+
c = pd.Timestamp("2000-01-03")
|
52 |
+
return DatetimeArray._from_sequence(
|
53 |
+
np.array([b, c, a], dtype="datetime64[ns]"), dtype=dtype
|
54 |
+
)
|
55 |
+
|
56 |
+
|
57 |
+
@pytest.fixture
|
58 |
+
def data_missing_for_sorting(dtype):
|
59 |
+
a = pd.Timestamp("2000-01-01")
|
60 |
+
b = pd.Timestamp("2000-01-02")
|
61 |
+
return DatetimeArray._from_sequence(
|
62 |
+
np.array([b, "NaT", a], dtype="datetime64[ns]"), dtype=dtype
|
63 |
+
)
|
64 |
+
|
65 |
+
|
66 |
+
@pytest.fixture
|
67 |
+
def data_for_grouping(dtype):
|
68 |
+
"""
|
69 |
+
Expected to be like [B, B, NA, NA, A, A, B, C]
|
70 |
+
|
71 |
+
Where A < B < C and NA is missing
|
72 |
+
"""
|
73 |
+
a = pd.Timestamp("2000-01-01")
|
74 |
+
b = pd.Timestamp("2000-01-02")
|
75 |
+
c = pd.Timestamp("2000-01-03")
|
76 |
+
na = "NaT"
|
77 |
+
return DatetimeArray._from_sequence(
|
78 |
+
np.array([b, b, na, na, a, a, b, c], dtype="datetime64[ns]"), dtype=dtype
|
79 |
+
)
|
80 |
+
|
81 |
+
|
82 |
+
@pytest.fixture
|
83 |
+
def na_cmp():
|
84 |
+
def cmp(a, b):
|
85 |
+
return a is pd.NaT and a is b
|
86 |
+
|
87 |
+
return cmp
|
88 |
+
|
89 |
+
|
90 |
+
# ----------------------------------------------------------------------------
|
91 |
+
class TestDatetimeArray(base.ExtensionTests):
    """Extension-interface conformance tests for tz-aware DatetimeArray."""

    def _get_expected_exception(self, op_name, obj, other):
        # subtraction is supported between datetimes, so no exception is expected
        if op_name in ["__sub__", "__rsub__"]:
            return None
        return super()._get_expected_exception(op_name, obj, other)

    def _supports_accumulation(self, ser, op_name: str) -> bool:
        # only running min/max are supported accumulations here
        return op_name in ["cummin", "cummax"]

    def _supports_reduction(self, obj, op_name: str) -> bool:
        return op_name in ["min", "max", "median", "mean", "std", "any", "all"]

    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):
        """Boolean reductions on datetime64 currently emit a deprecation warning."""
        meth = all_boolean_reductions
        msg = f"'{meth}' with datetime64 dtypes is deprecated and will raise in"
        with tm.assert_produces_warning(
            FutureWarning, match=msg, check_stacklevel=False
        ):
            super().test_reduce_series_boolean(data, all_boolean_reductions, skipna)

    def test_series_constructor(self, data):
        # Series construction drops any .freq attr
        data = data._with_freq(None)
        super().test_series_constructor(data)

    @pytest.mark.parametrize("na_action", [None, "ignore"])
    def test_map(self, data, na_action):
        """Identity map round-trips the array unchanged for both na_action modes."""
        result = data.map(lambda x: x, na_action=na_action)
        tm.assert_extension_array_equal(result, data)

    def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
        """Compare datetime reductions against the same reduction on an int64 view."""
        if op_name in ["median", "mean", "std"]:
            alt = ser.astype("int64")

            res_op = getattr(ser, op_name)
            exp_op = getattr(alt, op_name)
            result = res_op(skipna=skipna)
            expected = exp_op(skipna=skipna)
            if op_name in ["mean", "median"]:
                # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype"
                # has no attribute "tz"
                tz = ser.dtype.tz  # type: ignore[union-attr]
                # mean/median of the int64 view converts back to a point in time
                expected = pd.Timestamp(expected, tz=tz)
            else:
                # std is a duration, not a point in time
                expected = pd.Timedelta(expected)
            tm.assert_almost_equal(result, expected)

        else:
            return super().check_reduce(ser, op_name, skipna)
|
141 |
+
|
142 |
+
|
143 |
+
class Test2DCompat(base.NDArrayBacked2DTests):
    """DatetimeArray needs no tweaks for the shared 2D-compatibility tests."""

    pass
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_extension.py
ADDED
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Tests for behavior if an author does *not* implement EA methods.
|
3 |
+
"""
|
4 |
+
import numpy as np
|
5 |
+
import pytest
|
6 |
+
|
7 |
+
from pandas.core.arrays import ExtensionArray
|
8 |
+
|
9 |
+
|
10 |
+
class MyEA(ExtensionArray):
    """ExtensionArray subclass that deliberately implements nothing beyond storage."""

    def __init__(self, values) -> None:
        self._values = values
|
13 |
+
|
14 |
+
|
15 |
+
@pytest.fixture
def data():
    """A MyEA wrapping the integers 0..9."""
    arr = np.arange(10)
    return MyEA(arr)
|
19 |
+
|
20 |
+
|
21 |
+
class TestExtensionArray:
    def test_errors(self, data, all_arithmetic_operators):
        """Arithmetic dunders are absent when the EA author does not define them."""
        # invalid ops
        op_name = all_arithmetic_operators
        with pytest.raises(AttributeError):
            getattr(data, op_name)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_interval.py
ADDED
@@ -0,0 +1,98 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
from __future__ import annotations
|
17 |
+
|
18 |
+
from typing import TYPE_CHECKING
|
19 |
+
|
20 |
+
import numpy as np
|
21 |
+
import pytest
|
22 |
+
|
23 |
+
from pandas.core.dtypes.dtypes import IntervalDtype
|
24 |
+
|
25 |
+
from pandas import Interval
|
26 |
+
from pandas.core.arrays import IntervalArray
|
27 |
+
from pandas.tests.extension import base
|
28 |
+
|
29 |
+
if TYPE_CHECKING:
|
30 |
+
import pandas as pd
|
31 |
+
|
32 |
+
|
33 |
+
def make_data():
    """Return 100 Intervals whose left endpoints strictly increase."""
    N = 100
    lefts = np.random.default_rng(2).uniform(size=N).cumsum()
    rights = lefts + np.random.default_rng(2).uniform(size=N)
    return [Interval(lo, hi) for lo, hi in zip(lefts, rights)]
|
38 |
+
|
39 |
+
|
40 |
+
@pytest.fixture
|
41 |
+
def dtype():
|
42 |
+
return IntervalDtype()
|
43 |
+
|
44 |
+
|
45 |
+
@pytest.fixture
|
46 |
+
def data():
|
47 |
+
"""Length-100 PeriodArray for semantics test."""
|
48 |
+
return IntervalArray(make_data())
|
49 |
+
|
50 |
+
|
51 |
+
@pytest.fixture
|
52 |
+
def data_missing():
|
53 |
+
"""Length 2 array with [NA, Valid]"""
|
54 |
+
return IntervalArray.from_tuples([None, (0, 1)])
|
55 |
+
|
56 |
+
|
57 |
+
@pytest.fixture
|
58 |
+
def data_for_twos():
|
59 |
+
pytest.skip("Interval is not a numeric dtype")
|
60 |
+
|
61 |
+
|
62 |
+
@pytest.fixture
|
63 |
+
def data_for_sorting():
|
64 |
+
return IntervalArray.from_tuples([(1, 2), (2, 3), (0, 1)])
|
65 |
+
|
66 |
+
|
67 |
+
@pytest.fixture
|
68 |
+
def data_missing_for_sorting():
|
69 |
+
return IntervalArray.from_tuples([(1, 2), None, (0, 1)])
|
70 |
+
|
71 |
+
|
72 |
+
@pytest.fixture
|
73 |
+
def data_for_grouping():
|
74 |
+
a = (0, 1)
|
75 |
+
b = (1, 2)
|
76 |
+
c = (2, 3)
|
77 |
+
return IntervalArray.from_tuples([b, b, None, None, a, a, b, c])
|
78 |
+
|
79 |
+
|
80 |
+
class TestIntervalArray(base.ExtensionTests):
    """Extension-interface conformance tests for IntervalArray."""

    # expected exception type when divmod is attempted on this array
    divmod_exc = TypeError

    def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
        # only min/max reductions are supported for intervals
        return op_name in ["min", "max"]

    @pytest.mark.xfail(
        reason="Raises with incorrect message bc it disallows *all* listlikes "
        "instead of just wrong-length listlikes"
    )
    def test_fillna_length_mismatch(self, data_missing):
        super().test_fillna_length_mismatch(data_missing)
|
92 |
+
|
93 |
+
|
94 |
+
# TODO: either belongs in tests.arrays.interval or move into base tests.
|
95 |
+
def test_fillna_non_scalar_raises(data_missing):
    """Listlike fill values are rejected by IntervalArray.fillna with TypeError."""
    err_msg = "can only insert Interval objects and NA into an IntervalArray"
    with pytest.raises(TypeError, match=err_msg):
        data_missing.fillna([1, 1])
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_masked.py
ADDED
@@ -0,0 +1,417 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
import warnings
|
17 |
+
|
18 |
+
import numpy as np
|
19 |
+
import pytest
|
20 |
+
|
21 |
+
from pandas.compat import (
|
22 |
+
IS64,
|
23 |
+
is_platform_windows,
|
24 |
+
)
|
25 |
+
from pandas.compat.numpy import np_version_gt2
|
26 |
+
|
27 |
+
from pandas.core.dtypes.common import (
|
28 |
+
is_float_dtype,
|
29 |
+
is_signed_integer_dtype,
|
30 |
+
is_unsigned_integer_dtype,
|
31 |
+
)
|
32 |
+
|
33 |
+
import pandas as pd
|
34 |
+
import pandas._testing as tm
|
35 |
+
from pandas.core.arrays.boolean import BooleanDtype
|
36 |
+
from pandas.core.arrays.floating import (
|
37 |
+
Float32Dtype,
|
38 |
+
Float64Dtype,
|
39 |
+
)
|
40 |
+
from pandas.core.arrays.integer import (
|
41 |
+
Int8Dtype,
|
42 |
+
Int16Dtype,
|
43 |
+
Int32Dtype,
|
44 |
+
Int64Dtype,
|
45 |
+
UInt8Dtype,
|
46 |
+
UInt16Dtype,
|
47 |
+
UInt32Dtype,
|
48 |
+
UInt64Dtype,
|
49 |
+
)
|
50 |
+
from pandas.tests.extension import base
|
51 |
+
|
52 |
+
is_windows_or_32bit = (is_platform_windows() and not np_version_gt2) or not IS64
|
53 |
+
|
54 |
+
pytestmark = [
|
55 |
+
pytest.mark.filterwarnings(
|
56 |
+
"ignore:invalid value encountered in divide:RuntimeWarning"
|
57 |
+
),
|
58 |
+
pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning"),
|
59 |
+
# overflow only relevant for Floating dtype cases cases
|
60 |
+
pytest.mark.filterwarnings("ignore:overflow encountered in reduce:RuntimeWarning"),
|
61 |
+
]
|
62 |
+
|
63 |
+
|
64 |
+
def make_data():
    """Length-100 list of ints with pd.NA inserted at positions 8 and 97."""
    values = [*range(1, 9), pd.NA, *range(10, 98), pd.NA, 99, 100]
    return values
|
66 |
+
|
67 |
+
|
68 |
+
def make_float_data():
    """Length-100 list of floats with pd.NA inserted at positions 8 and 97."""
    head = list(np.arange(0.1, 0.9, 0.1))
    middle = list(np.arange(1, 9.8, 0.1))
    return head + [pd.NA] + middle + [pd.NA] + [9.9, 10.0]
|
76 |
+
|
77 |
+
|
78 |
+
def make_bool_data():
    """Length-100 alternating booleans with np.nan at positions 8 and 97."""
    pair = [True, False]
    return pair * 4 + [np.nan] + pair * 44 + [np.nan] + pair
|
80 |
+
|
81 |
+
|
82 |
+
@pytest.fixture(
|
83 |
+
params=[
|
84 |
+
Int8Dtype,
|
85 |
+
Int16Dtype,
|
86 |
+
Int32Dtype,
|
87 |
+
Int64Dtype,
|
88 |
+
UInt8Dtype,
|
89 |
+
UInt16Dtype,
|
90 |
+
UInt32Dtype,
|
91 |
+
UInt64Dtype,
|
92 |
+
Float32Dtype,
|
93 |
+
Float64Dtype,
|
94 |
+
BooleanDtype,
|
95 |
+
]
|
96 |
+
)
|
97 |
+
def dtype(request):
|
98 |
+
return request.param()
|
99 |
+
|
100 |
+
|
101 |
+
@pytest.fixture
|
102 |
+
def data(dtype):
|
103 |
+
if dtype.kind == "f":
|
104 |
+
data = make_float_data()
|
105 |
+
elif dtype.kind == "b":
|
106 |
+
data = make_bool_data()
|
107 |
+
else:
|
108 |
+
data = make_data()
|
109 |
+
return pd.array(data, dtype=dtype)
|
110 |
+
|
111 |
+
|
112 |
+
@pytest.fixture
|
113 |
+
def data_for_twos(dtype):
|
114 |
+
if dtype.kind == "b":
|
115 |
+
return pd.array(np.ones(100), dtype=dtype)
|
116 |
+
return pd.array(np.ones(100) * 2, dtype=dtype)
|
117 |
+
|
118 |
+
|
119 |
+
@pytest.fixture
|
120 |
+
def data_missing(dtype):
|
121 |
+
if dtype.kind == "f":
|
122 |
+
return pd.array([pd.NA, 0.1], dtype=dtype)
|
123 |
+
elif dtype.kind == "b":
|
124 |
+
return pd.array([np.nan, True], dtype=dtype)
|
125 |
+
return pd.array([pd.NA, 1], dtype=dtype)
|
126 |
+
|
127 |
+
|
128 |
+
@pytest.fixture
|
129 |
+
def data_for_sorting(dtype):
|
130 |
+
if dtype.kind == "f":
|
131 |
+
return pd.array([0.1, 0.2, 0.0], dtype=dtype)
|
132 |
+
elif dtype.kind == "b":
|
133 |
+
return pd.array([True, True, False], dtype=dtype)
|
134 |
+
return pd.array([1, 2, 0], dtype=dtype)
|
135 |
+
|
136 |
+
|
137 |
+
@pytest.fixture
|
138 |
+
def data_missing_for_sorting(dtype):
|
139 |
+
if dtype.kind == "f":
|
140 |
+
return pd.array([0.1, pd.NA, 0.0], dtype=dtype)
|
141 |
+
elif dtype.kind == "b":
|
142 |
+
return pd.array([True, np.nan, False], dtype=dtype)
|
143 |
+
return pd.array([1, pd.NA, 0], dtype=dtype)
|
144 |
+
|
145 |
+
|
146 |
+
@pytest.fixture
|
147 |
+
def na_cmp():
|
148 |
+
# we are pd.NA
|
149 |
+
return lambda x, y: x is pd.NA and y is pd.NA
|
150 |
+
|
151 |
+
|
152 |
+
@pytest.fixture
|
153 |
+
def data_for_grouping(dtype):
|
154 |
+
if dtype.kind == "f":
|
155 |
+
b = 0.1
|
156 |
+
a = 0.0
|
157 |
+
c = 0.2
|
158 |
+
elif dtype.kind == "b":
|
159 |
+
b = True
|
160 |
+
a = False
|
161 |
+
c = b
|
162 |
+
else:
|
163 |
+
b = 1
|
164 |
+
a = 0
|
165 |
+
c = 2
|
166 |
+
|
167 |
+
na = pd.NA
|
168 |
+
return pd.array([b, b, na, na, a, a, b, c], dtype=dtype)
|
169 |
+
|
170 |
+
|
171 |
+
class TestMaskedArrays(base.ExtensionTests):
|
172 |
+
@pytest.mark.parametrize("na_action", [None, "ignore"])
|
173 |
+
def test_map(self, data_missing, na_action):
|
174 |
+
result = data_missing.map(lambda x: x, na_action=na_action)
|
175 |
+
if data_missing.dtype == Float32Dtype():
|
176 |
+
# map roundtrips through objects, which converts to float64
|
177 |
+
expected = data_missing.to_numpy(dtype="float64", na_value=np.nan)
|
178 |
+
else:
|
179 |
+
expected = data_missing.to_numpy()
|
180 |
+
tm.assert_numpy_array_equal(result, expected)
|
181 |
+
|
182 |
+
def test_map_na_action_ignore(self, data_missing_for_sorting):
|
183 |
+
zero = data_missing_for_sorting[2]
|
184 |
+
result = data_missing_for_sorting.map(lambda x: zero, na_action="ignore")
|
185 |
+
if data_missing_for_sorting.dtype.kind == "b":
|
186 |
+
expected = np.array([False, pd.NA, False], dtype=object)
|
187 |
+
else:
|
188 |
+
expected = np.array([zero, np.nan, zero])
|
189 |
+
tm.assert_numpy_array_equal(result, expected)
|
190 |
+
|
191 |
+
def _get_expected_exception(self, op_name, obj, other):
|
192 |
+
try:
|
193 |
+
dtype = tm.get_dtype(obj)
|
194 |
+
except AttributeError:
|
195 |
+
# passed arguments reversed
|
196 |
+
dtype = tm.get_dtype(other)
|
197 |
+
|
198 |
+
if dtype.kind == "b":
|
199 |
+
if op_name.strip("_").lstrip("r") in ["pow", "truediv", "floordiv"]:
|
200 |
+
# match behavior with non-masked bool dtype
|
201 |
+
return NotImplementedError
|
202 |
+
elif op_name in ["__sub__", "__rsub__"]:
|
203 |
+
# exception message would include "numpy boolean subtract""
|
204 |
+
return TypeError
|
205 |
+
return None
|
206 |
+
return None
|
207 |
+
|
208 |
+
def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result):
|
209 |
+
sdtype = tm.get_dtype(obj)
|
210 |
+
expected = pointwise_result
|
211 |
+
|
212 |
+
if op_name in ("eq", "ne", "le", "ge", "lt", "gt"):
|
213 |
+
return expected.astype("boolean")
|
214 |
+
|
215 |
+
if sdtype.kind in "iu":
|
216 |
+
if op_name in ("__rtruediv__", "__truediv__", "__div__"):
|
217 |
+
with warnings.catch_warnings():
|
218 |
+
warnings.filterwarnings(
|
219 |
+
"ignore",
|
220 |
+
"Downcasting object dtype arrays",
|
221 |
+
category=FutureWarning,
|
222 |
+
)
|
223 |
+
filled = expected.fillna(np.nan)
|
224 |
+
expected = filled.astype("Float64")
|
225 |
+
else:
|
226 |
+
# combine method result in 'biggest' (int64) dtype
|
227 |
+
expected = expected.astype(sdtype)
|
228 |
+
elif sdtype.kind == "b":
|
229 |
+
if op_name in (
|
230 |
+
"__floordiv__",
|
231 |
+
"__rfloordiv__",
|
232 |
+
"__pow__",
|
233 |
+
"__rpow__",
|
234 |
+
"__mod__",
|
235 |
+
"__rmod__",
|
236 |
+
):
|
237 |
+
# combine keeps boolean type
|
238 |
+
expected = expected.astype("Int8")
|
239 |
+
|
240 |
+
elif op_name in ("__truediv__", "__rtruediv__"):
|
241 |
+
# combine with bools does not generate the correct result
|
242 |
+
# (numpy behaviour for div is to regard the bools as numeric)
|
243 |
+
op = self.get_op_from_name(op_name)
|
244 |
+
expected = self._combine(obj.astype(float), other, op)
|
245 |
+
expected = expected.astype("Float64")
|
246 |
+
|
247 |
+
if op_name == "__rpow__":
|
248 |
+
# for rpow, combine does not propagate NaN
|
249 |
+
result = getattr(obj, op_name)(other)
|
250 |
+
expected[result.isna()] = np.nan
|
251 |
+
else:
|
252 |
+
# combine method result in 'biggest' (float64) dtype
|
253 |
+
expected = expected.astype(sdtype)
|
254 |
+
return expected
|
255 |
+
|
256 |
+
def test_divmod_series_array(self, data, data_for_twos, request):
|
257 |
+
if data.dtype.kind == "b":
|
258 |
+
mark = pytest.mark.xfail(
|
259 |
+
reason="Inconsistency between floordiv and divmod; we raise for "
|
260 |
+
"floordiv but not for divmod. This matches what we do for "
|
261 |
+
"non-masked bool dtype."
|
262 |
+
)
|
263 |
+
request.applymarker(mark)
|
264 |
+
super().test_divmod_series_array(data, data_for_twos)
|
265 |
+
|
266 |
+
def test_combine_le(self, data_repeated):
|
267 |
+
# TODO: patching self is a bad pattern here
|
268 |
+
orig_data1, orig_data2 = data_repeated(2)
|
269 |
+
if orig_data1.dtype.kind == "b":
|
270 |
+
self._combine_le_expected_dtype = "boolean"
|
271 |
+
else:
|
272 |
+
# TODO: can we make this boolean?
|
273 |
+
self._combine_le_expected_dtype = object
|
274 |
+
super().test_combine_le(data_repeated)
|
275 |
+
|
276 |
+
def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
|
277 |
+
if op_name in ["any", "all"] and ser.dtype.kind != "b":
|
278 |
+
pytest.skip(reason="Tested in tests/reductions/test_reductions.py")
|
279 |
+
return True
|
280 |
+
|
281 |
+
def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
|
282 |
+
# overwrite to ensure pd.NA is tested instead of np.nan
|
283 |
+
# https://github.com/pandas-dev/pandas/issues/30958
|
284 |
+
|
285 |
+
cmp_dtype = "int64"
|
286 |
+
if ser.dtype.kind == "f":
|
287 |
+
# Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has
|
288 |
+
# no attribute "numpy_dtype"
|
289 |
+
cmp_dtype = ser.dtype.numpy_dtype # type: ignore[union-attr]
|
290 |
+
elif ser.dtype.kind == "b":
|
291 |
+
if op_name in ["min", "max"]:
|
292 |
+
cmp_dtype = "bool"
|
293 |
+
|
294 |
+
# TODO: prod with integer dtypes does *not* match the result we would
|
295 |
+
# get if we used object for cmp_dtype. In that cae the object result
|
296 |
+
# is a large integer while the non-object case overflows and returns 0
|
297 |
+
alt = ser.dropna().astype(cmp_dtype)
|
298 |
+
if op_name == "count":
|
299 |
+
result = getattr(ser, op_name)()
|
300 |
+
expected = getattr(alt, op_name)()
|
301 |
+
else:
|
302 |
+
result = getattr(ser, op_name)(skipna=skipna)
|
303 |
+
expected = getattr(alt, op_name)(skipna=skipna)
|
304 |
+
if not skipna and ser.isna().any() and op_name not in ["any", "all"]:
|
305 |
+
expected = pd.NA
|
306 |
+
tm.assert_almost_equal(result, expected)
|
307 |
+
|
308 |
+
def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool):
|
309 |
+
if is_float_dtype(arr.dtype):
|
310 |
+
cmp_dtype = arr.dtype.name
|
311 |
+
elif op_name in ["mean", "median", "var", "std", "skew"]:
|
312 |
+
cmp_dtype = "Float64"
|
313 |
+
elif op_name in ["max", "min"]:
|
314 |
+
cmp_dtype = arr.dtype.name
|
315 |
+
elif arr.dtype in ["Int64", "UInt64"]:
|
316 |
+
cmp_dtype = arr.dtype.name
|
317 |
+
elif is_signed_integer_dtype(arr.dtype):
|
318 |
+
# TODO: Why does Window Numpy 2.0 dtype depend on skipna?
|
319 |
+
cmp_dtype = (
|
320 |
+
"Int32"
|
321 |
+
if (is_platform_windows() and (not np_version_gt2 or not skipna))
|
322 |
+
or not IS64
|
323 |
+
else "Int64"
|
324 |
+
)
|
325 |
+
elif is_unsigned_integer_dtype(arr.dtype):
|
326 |
+
cmp_dtype = (
|
327 |
+
"UInt32"
|
328 |
+
if (is_platform_windows() and (not np_version_gt2 or not skipna))
|
329 |
+
or not IS64
|
330 |
+
else "UInt64"
|
331 |
+
)
|
332 |
+
elif arr.dtype.kind == "b":
|
333 |
+
if op_name in ["mean", "median", "var", "std", "skew"]:
|
334 |
+
cmp_dtype = "Float64"
|
335 |
+
elif op_name in ["min", "max"]:
|
336 |
+
cmp_dtype = "boolean"
|
337 |
+
elif op_name in ["sum", "prod"]:
|
338 |
+
cmp_dtype = (
|
339 |
+
"Int32"
|
340 |
+
if (is_platform_windows() and (not np_version_gt2 or not skipna))
|
341 |
+
or not IS64
|
342 |
+
else "Int64"
|
343 |
+
)
|
344 |
+
else:
|
345 |
+
raise TypeError("not supposed to reach this")
|
346 |
+
else:
|
347 |
+
raise TypeError("not supposed to reach this")
|
348 |
+
return cmp_dtype
|
349 |
+
|
350 |
+
def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool:
|
351 |
+
return True
|
352 |
+
|
353 |
+
def check_accumulate(self, ser: pd.Series, op_name: str, skipna: bool):
|
354 |
+
# overwrite to ensure pd.NA is tested instead of np.nan
|
355 |
+
# https://github.com/pandas-dev/pandas/issues/30958
|
356 |
+
length = 64
|
357 |
+
if is_windows_or_32bit:
|
358 |
+
# Item "ExtensionDtype" of "Union[dtype[Any], ExtensionDtype]" has
|
359 |
+
# no attribute "itemsize"
|
360 |
+
if not ser.dtype.itemsize == 8: # type: ignore[union-attr]
|
361 |
+
length = 32
|
362 |
+
|
363 |
+
if ser.dtype.name.startswith("U"):
|
364 |
+
expected_dtype = f"UInt{length}"
|
365 |
+
elif ser.dtype.name.startswith("I"):
|
366 |
+
expected_dtype = f"Int{length}"
|
367 |
+
elif ser.dtype.name.startswith("F"):
|
368 |
+
# Incompatible types in assignment (expression has type
|
369 |
+
# "Union[dtype[Any], ExtensionDtype]", variable has type "str")
|
370 |
+
expected_dtype = ser.dtype # type: ignore[assignment]
|
371 |
+
elif ser.dtype.kind == "b":
|
372 |
+
if op_name in ("cummin", "cummax"):
|
373 |
+
expected_dtype = "boolean"
|
374 |
+
else:
|
375 |
+
expected_dtype = f"Int{length}"
|
376 |
+
|
377 |
+
if expected_dtype == "Float32" and op_name == "cumprod" and skipna:
|
378 |
+
# TODO: xfail?
|
379 |
+
pytest.skip(
|
380 |
+
f"Float32 precision lead to large differences with op {op_name} "
|
381 |
+
f"and skipna={skipna}"
|
382 |
+
)
|
383 |
+
|
384 |
+
if op_name == "cumsum":
|
385 |
+
result = getattr(ser, op_name)(skipna=skipna)
|
386 |
+
expected = pd.Series(
|
387 |
+
pd.array(
|
388 |
+
getattr(ser.astype("float64"), op_name)(skipna=skipna),
|
389 |
+
dtype=expected_dtype,
|
390 |
+
)
|
391 |
+
)
|
392 |
+
tm.assert_series_equal(result, expected)
|
393 |
+
elif op_name in ["cummax", "cummin"]:
|
394 |
+
result = getattr(ser, op_name)(skipna=skipna)
|
395 |
+
expected = pd.Series(
|
396 |
+
pd.array(
|
397 |
+
getattr(ser.astype("float64"), op_name)(skipna=skipna),
|
398 |
+
dtype=ser.dtype,
|
399 |
+
)
|
400 |
+
)
|
401 |
+
tm.assert_series_equal(result, expected)
|
402 |
+
elif op_name == "cumprod":
|
403 |
+
result = getattr(ser[:12], op_name)(skipna=skipna)
|
404 |
+
expected = pd.Series(
|
405 |
+
pd.array(
|
406 |
+
getattr(ser[:12].astype("float64"), op_name)(skipna=skipna),
|
407 |
+
dtype=expected_dtype,
|
408 |
+
)
|
409 |
+
)
|
410 |
+
tm.assert_series_equal(result, expected)
|
411 |
+
|
412 |
+
else:
|
413 |
+
raise NotImplementedError(f"{op_name} not supported")
|
414 |
+
|
415 |
+
|
416 |
+
class Test2DCompat(base.Dim2CompatTests):
    """Masked arrays need no tweaks for the shared 2D-compatibility tests."""

    pass
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_numpy.py
ADDED
@@ -0,0 +1,426 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
Note: we do not bother with base.BaseIndexTests because NumpyExtensionArray
|
16 |
+
will never be held in an Index.
|
17 |
+
"""
|
18 |
+
import numpy as np
|
19 |
+
import pytest
|
20 |
+
|
21 |
+
from pandas.core.dtypes.dtypes import NumpyEADtype
|
22 |
+
|
23 |
+
import pandas as pd
|
24 |
+
import pandas._testing as tm
|
25 |
+
from pandas.api.types import is_object_dtype
|
26 |
+
from pandas.core.arrays.numpy_ import NumpyExtensionArray
|
27 |
+
from pandas.tests.extension import base
|
28 |
+
|
29 |
+
orig_assert_attr_equal = tm.assert_attr_equal
|
30 |
+
|
31 |
+
|
32 |
+
def _assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
    """
    Patched replacement for tm.assert_attr_equal that treats
    NumpyEADtype("object") as close enough to np.dtype("object").

    When exactly one side carries a NumpyEADtype, that side is cast down to
    its underlying numpy dtype before delegating to the original asserter.
    """
    if attr == "dtype":
        left_dtype = getattr(left, "dtype", None)
        right_dtype = getattr(right, "dtype", None)
        left_is_ea = isinstance(left_dtype, NumpyEADtype)
        right_is_ea = isinstance(right_dtype, NumpyEADtype)
        if left_is_ea and not right_is_ea:
            left = left.astype(left_dtype.numpy_dtype)
        elif right_is_ea and not left_is_ea:
            right = right.astype(right_dtype.numpy_dtype)

    orig_assert_attr_equal(attr, left, right, obj)
|
46 |
+
|
47 |
+
|
48 |
+
@pytest.fixture(params=["float", "object"])
def dtype(request):
    # Wrap a plain numpy dtype in the pandas NumpyEADtype extension shim.
    return NumpyEADtype(np.dtype(request.param))
|
51 |
+
|
52 |
+
|
53 |
+
@pytest.fixture
def allow_in_pandas(monkeypatch):
    """
    A monkeypatch that tells pandas to let us in.

    By default, passing a NumpyExtensionArray to an index / series / frame
    constructor will unbox that NumpyExtensionArray to an ndarray, and treat
    it as a non-EA column. We don't want people using EAs without
    reason.

    The mechanism for this is a check against ABCNumpyExtensionArray
    in each constructor.

    But, for testing, we need to allow them in pandas. So we patch
    the _typ of NumpyExtensionArray, so that we evade the ABCNumpyExtensionArray
    check.
    """
    with monkeypatch.context() as m:
        m.setattr(NumpyExtensionArray, "_typ", "extension")
        # Also patch the attr asserter so NumpyEADtype compares loosely.
        m.setattr(tm.asserters, "assert_attr_equal", _assert_attr_equal)
        yield
|
74 |
+
|
75 |
+
|
76 |
+
@pytest.fixture
def data(allow_in_pandas, dtype):
    # object dtype: 100 one-element tuples (nested data); otherwise 1..100
    # of the requested numpy dtype.
    if dtype.numpy_dtype == "object":
        return pd.Series([(i,) for i in range(100)]).array
    return NumpyExtensionArray(np.arange(1, 101, dtype=dtype._dtype))
|
81 |
+
|
82 |
+
|
83 |
+
@pytest.fixture
def data_missing(allow_in_pandas, dtype):
    # Length-2 array with [NA, valid] for missing-value tests.
    if dtype.numpy_dtype == "object":
        return NumpyExtensionArray(np.array([np.nan, (1,)], dtype=object))
    return NumpyExtensionArray(np.array([np.nan, 1.0]))
|
88 |
+
|
89 |
+
|
90 |
+
@pytest.fixture
def na_cmp():
    # NA comparison used by the base suite: both sides must be NaN.
    def cmp(a, b):
        return np.isnan(a) and np.isnan(b)

    return cmp
|
96 |
+
|
97 |
+
|
98 |
+
@pytest.fixture
def data_for_sorting(allow_in_pandas, dtype):
    """Length-3 array with a known sort order.

    This should be three items [B, C, A] with
    A < B < C
    """
    if dtype.numpy_dtype == "object":
        # Use an empty tuple for first element, then remove,
        # to disable np.array's shape inference.
        return NumpyExtensionArray(np.array([(), (2,), (3,), (1,)], dtype=object)[1:])
    return NumpyExtensionArray(np.array([1, 2, 0]))
|
110 |
+
|
111 |
+
|
112 |
+
@pytest.fixture
def data_missing_for_sorting(allow_in_pandas, dtype):
    """Length-3 array with a known sort order.

    This should be three items [B, NA, A] with
    A < B and NA missing.
    """
    if dtype.numpy_dtype == "object":
        return NumpyExtensionArray(np.array([(1,), np.nan, (0,)], dtype=object))
    return NumpyExtensionArray(np.array([1, np.nan, 0]))
|
122 |
+
|
123 |
+
|
124 |
+
@pytest.fixture
def data_for_grouping(allow_in_pandas, dtype):
    """Data for factorization, grouping, and unique tests.

    Expected to be like [B, B, NA, NA, A, A, B, C]

    Where A < B < C and NA is missing
    """
    if dtype.numpy_dtype == "object":
        a, b, c = (1,), (2,), (3,)
    else:
        a, b, c = np.arange(3)
    return NumpyExtensionArray(
        np.array([b, b, np.nan, np.nan, a, a, b, c], dtype=dtype.numpy_dtype)
    )
|
139 |
+
|
140 |
+
|
141 |
+
@pytest.fixture
def data_for_twos(dtype):
    # Arithmetic tests need an all-2s array; only meaningful for numeric dtypes.
    if dtype.kind == "O":
        pytest.skip(f"{dtype} is not a numeric dtype")
    arr = np.ones(100) * 2
    return NumpyExtensionArray._from_sequence(arr, dtype=dtype)
|
147 |
+
|
148 |
+
|
149 |
+
@pytest.fixture
def skip_numpy_object(dtype, request):
    """
    Tests for NumpyExtensionArray with nested data. Users typically won't create
    these objects via `pd.array`, but they can show up through `.array`
    on a Series with nested data. Many of the base tests fail, as they aren't
    appropriate for nested data.

    This fixture allows these tests to be skipped when used as a usefixtures
    marker to either an individual test or a test class.
    """
    if dtype == "object":
        mark = pytest.mark.xfail(reason="Fails for object dtype")
        request.applymarker(mark)


# Shorthand decorator: xfail the decorated test when running with object dtype.
skip_nested = pytest.mark.usefixtures("skip_numpy_object")
|
166 |
+
|
167 |
+
|
168 |
+
class TestNumpyExtensionArray(base.ExtensionTests):
    """Extension-interface compliance tests for NumpyExtensionArray.

    Overrides mostly mark known failures for the nested/object-dtype case
    and set the expected-exception attributes used by the arithmetic tests.
    """

    @pytest.mark.skip(reason="We don't register our dtype")
    # We don't want to register. This test should probably be split in two.
    def test_from_dtype(self, data):
        pass

    @skip_nested
    def test_series_constructor_scalar_with_index(self, data, dtype):
        # ValueError: Length of passed values is 1, index implies 3.
        super().test_series_constructor_scalar_with_index(data, dtype)

    def test_check_dtype(self, data, request, using_infer_string):
        if data.dtype.numpy_dtype == "object":
            request.applymarker(
                pytest.mark.xfail(
                    reason=f"NumpyExtensionArray expectedly clashes with a "
                    f"NumPy name: {data.dtype.numpy_dtype}"
                )
            )
        super().test_check_dtype(data)

    def test_is_not_object_type(self, dtype, request):
        if dtype.numpy_dtype == "object":
            # Different from BaseDtypeTests.test_is_not_object_type
            # because NumpyEADtype(object) is an object type
            assert is_object_dtype(dtype)
        else:
            super().test_is_not_object_type(dtype)

    @skip_nested
    def test_getitem_scalar(self, data):
        # AssertionError
        super().test_getitem_scalar(data)

    @skip_nested
    def test_shift_fill_value(self, data):
        # np.array shape inference. Shift implementation fails.
        super().test_shift_fill_value(data)

    @skip_nested
    def test_fillna_copy_frame(self, data_missing):
        # The "scalar" for this array isn't a scalar.
        super().test_fillna_copy_frame(data_missing)

    @skip_nested
    def test_fillna_copy_series(self, data_missing):
        # The "scalar" for this array isn't a scalar.
        super().test_fillna_copy_series(data_missing)

    @skip_nested
    def test_searchsorted(self, data_for_sorting, as_series):
        # TODO: NumpyExtensionArray.searchsorted calls ndarray.searchsorted which
        # isn't quite what we want in nested data cases. Instead we need to
        # adapt something like libindex._bin_search.
        super().test_searchsorted(data_for_sorting, as_series)

    @pytest.mark.xfail(reason="NumpyExtensionArray.diff may fail on dtype")
    def test_diff(self, data, periods):
        return super().test_diff(data, periods)

    def test_insert(self, data, request):
        if data.dtype.numpy_dtype == object:
            mark = pytest.mark.xfail(reason="Dimension mismatch in np.concatenate")
            request.applymarker(mark)

        super().test_insert(data)

    @skip_nested
    def test_insert_invalid(self, data, invalid_scalar):
        # NumpyExtensionArray[object] can hold anything, so skip
        super().test_insert_invalid(data, invalid_scalar)

    # Expected-exception defaults consumed by the base arithmetic tests;
    # the overrides below adjust them per-dtype before delegating.
    divmod_exc = None
    series_scalar_exc = None
    frame_scalar_exc = None
    series_array_exc = None

    def test_divmod(self, data):
        divmod_exc = None
        if data.dtype.kind == "O":
            divmod_exc = TypeError
        self.divmod_exc = divmod_exc
        super().test_divmod(data)

    def test_divmod_series_array(self, data):
        ser = pd.Series(data)
        exc = None
        if data.dtype.kind == "O":
            exc = TypeError
        self.divmod_exc = exc
        self._check_divmod_op(ser, divmod, data)

    def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request):
        opname = all_arithmetic_operators
        series_scalar_exc = None
        if data.dtype.numpy_dtype == object:
            if opname in ["__mul__", "__rmul__"]:
                mark = pytest.mark.xfail(
                    reason="the Series.combine step raises but not the Series method."
                )
                # use applymarker (non-deprecated) for consistency with the
                # other overrides in this class
                request.applymarker(mark)
            series_scalar_exc = TypeError
        self.series_scalar_exc = series_scalar_exc
        super().test_arith_series_with_scalar(data, all_arithmetic_operators)

    def test_arith_series_with_array(self, data, all_arithmetic_operators):
        opname = all_arithmetic_operators
        series_array_exc = None
        if data.dtype.numpy_dtype == object and opname not in ["__add__", "__radd__"]:
            series_array_exc = TypeError
        self.series_array_exc = series_array_exc
        super().test_arith_series_with_array(data, all_arithmetic_operators)

    def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
        opname = all_arithmetic_operators
        frame_scalar_exc = None
        if data.dtype.numpy_dtype == object:
            if opname in ["__mul__", "__rmul__"]:
                mark = pytest.mark.xfail(
                    reason="the Series.combine step raises but not the Series method."
                )
                # use applymarker (non-deprecated) for consistency with the
                # other overrides in this class
                request.applymarker(mark)
            frame_scalar_exc = TypeError
        self.frame_scalar_exc = frame_scalar_exc
        super().test_arith_frame_with_scalar(data, all_arithmetic_operators)

    def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
        if ser.dtype.kind == "O":
            return op_name in ["sum", "min", "max", "any", "all"]
        return True

    def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
        res_op = getattr(ser, op_name)
        # avoid coercing int -> float. Just cast to the actual numpy type.
        # error: Item "ExtensionDtype" of "dtype[Any] | ExtensionDtype" has
        # no attribute "numpy_dtype"
        cmp_dtype = ser.dtype.numpy_dtype  # type: ignore[union-attr]
        alt = ser.astype(cmp_dtype)
        exp_op = getattr(alt, op_name)
        if op_name == "count":
            result = res_op()
            expected = exp_op()
        else:
            result = res_op(skipna=skipna)
            expected = exp_op(skipna=skipna)
        tm.assert_almost_equal(result, expected)

    @pytest.mark.skip("TODO: tests not written yet")
    @pytest.mark.parametrize("skipna", [True, False])
    def test_reduce_frame(self, data, all_numeric_reductions, skipna):
        pass

    @skip_nested
    def test_fillna_series(self, data_missing):
        # Non-scalar "scalar" values.
        super().test_fillna_series(data_missing)

    @skip_nested
    def test_fillna_frame(self, data_missing):
        # Non-scalar "scalar" values.
        super().test_fillna_frame(data_missing)

    @skip_nested
    def test_setitem_invalid(self, data, invalid_scalar):
        # object dtype can hold anything, so doesn't raise
        super().test_setitem_invalid(data, invalid_scalar)

    @skip_nested
    def test_setitem_sequence_broadcasts(self, data, box_in_series):
        # ValueError: cannot set using a list-like indexer with a different
        # length than the value
        super().test_setitem_sequence_broadcasts(data, box_in_series)

    @skip_nested
    @pytest.mark.parametrize("setter", ["loc", None])
    def test_setitem_mask_broadcast(self, data, setter):
        # ValueError: cannot set using a list-like indexer with a different
        # length than the value
        super().test_setitem_mask_broadcast(data, setter)

    @skip_nested
    def test_setitem_scalar_key_sequence_raise(self, data):
        # Failed: DID NOT RAISE <class 'ValueError'>
        super().test_setitem_scalar_key_sequence_raise(data)

    # TODO: there is some issue with NumpyExtensionArray, therefore,
    # skip the setitem test for now, and fix it later (GH 31446)

    @skip_nested
    @pytest.mark.parametrize(
        "mask",
        [
            np.array([True, True, True, False, False]),
            pd.array([True, True, True, False, False], dtype="boolean"),
        ],
        ids=["numpy-array", "boolean-array"],
    )
    def test_setitem_mask(self, data, mask, box_in_series):
        super().test_setitem_mask(data, mask, box_in_series)

    @skip_nested
    @pytest.mark.parametrize(
        "idx",
        [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
        ids=["list", "integer-array", "numpy-array"],
    )
    def test_setitem_integer_array(self, data, idx, box_in_series):
        super().test_setitem_integer_array(data, idx, box_in_series)

    # NOTE(review): the last two params are identical (both box_in_series=False)
    # while the id says "integer-array-True" — looks like a copy-paste slip;
    # confirm intent before changing, as flipping it may expose a failure.
    @pytest.mark.parametrize(
        "idx, box_in_series",
        [
            ([0, 1, 2, pd.NA], False),
            pytest.param([0, 1, 2, pd.NA], True, marks=pytest.mark.xfail),
            (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
            (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),
        ],
        ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],
    )
    def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):
        super().test_setitem_integer_with_missing_raises(data, idx, box_in_series)

    @skip_nested
    def test_setitem_slice(self, data, box_in_series):
        super().test_setitem_slice(data, box_in_series)

    @skip_nested
    def test_setitem_loc_iloc_slice(self, data):
        super().test_setitem_loc_iloc_slice(data)

    def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):
        # https://github.com/pandas-dev/pandas/issues/32395
        df = expected = pd.DataFrame({"data": pd.Series(data)})
        result = pd.DataFrame(index=df.index)

        # because result has object dtype, the attempt to do setting inplace
        # is successful, and object dtype is retained
        key = full_indexer(df)
        result.loc[key, "data"] = df["data"]

        # base class method has expected = df; NumpyExtensionArray behaves oddly because
        # we patch _typ for these tests.
        if data.dtype.numpy_dtype != object:
            if not isinstance(key, slice) or key != slice(None):
                expected = pd.DataFrame({"data": data.to_numpy()})
        tm.assert_frame_equal(result, expected, check_column_type=False)

    @pytest.mark.xfail(reason="NumpyEADtype is unpacked")
    def test_index_from_listlike_with_dtype(self, data):
        super().test_index_from_listlike_with_dtype(data)

    @skip_nested
    @pytest.mark.parametrize("engine", ["c", "python"])
    def test_EA_types(self, engine, data, request):
        super().test_EA_types(engine, data, request)
|
423 |
+
|
424 |
+
|
425 |
+
# Inherit the ndarray-backed 2D test suite unchanged; no overrides needed.
class Test2DCompat(base.NDArrayBacked2DTests):
    pass
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_period.py
ADDED
@@ -0,0 +1,119 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
from __future__ import annotations
|
17 |
+
|
18 |
+
from typing import TYPE_CHECKING
|
19 |
+
|
20 |
+
import numpy as np
|
21 |
+
import pytest
|
22 |
+
|
23 |
+
from pandas._libs import (
|
24 |
+
Period,
|
25 |
+
iNaT,
|
26 |
+
)
|
27 |
+
from pandas.compat import is_platform_windows
|
28 |
+
from pandas.compat.numpy import np_version_gte1p24
|
29 |
+
|
30 |
+
from pandas.core.dtypes.dtypes import PeriodDtype
|
31 |
+
|
32 |
+
import pandas._testing as tm
|
33 |
+
from pandas.core.arrays import PeriodArray
|
34 |
+
from pandas.tests.extension import base
|
35 |
+
|
36 |
+
if TYPE_CHECKING:
|
37 |
+
import pandas as pd
|
38 |
+
|
39 |
+
|
40 |
+
@pytest.fixture(params=["D", "2D"])
def dtype(request):
    # Exercise both daily and 2-day Period frequencies.
    return PeriodDtype(freq=request.param)
|
43 |
+
|
44 |
+
|
45 |
+
@pytest.fixture
def data(dtype):
    # Length-100 PeriodArray built from consecutive ordinals.
    return PeriodArray(np.arange(1970, 2070), dtype=dtype)
|
48 |
+
|
49 |
+
|
50 |
+
@pytest.fixture
def data_for_sorting(dtype):
    # [B, C, A] with A < B < C, per the base-suite contract.
    return PeriodArray([2018, 2019, 2017], dtype=dtype)
|
53 |
+
|
54 |
+
|
55 |
+
@pytest.fixture
def data_missing(dtype):
    # [NA, valid]; iNaT is the sentinel ordinal for missing periods.
    return PeriodArray([iNaT, 2017], dtype=dtype)
|
58 |
+
|
59 |
+
|
60 |
+
@pytest.fixture
def data_missing_for_sorting(dtype):
    # [B, NA, A] with A < B and NA missing, per the base-suite contract.
    return PeriodArray([2018, iNaT, 2017], dtype=dtype)
|
63 |
+
|
64 |
+
|
65 |
+
@pytest.fixture
def data_for_grouping(dtype):
    # [B, B, NA, NA, A, A, B, C] with A < B < C and NA missing.
    B = 2018
    NA = iNaT
    A = 2017
    C = 2019
    return PeriodArray([B, B, NA, NA, A, A, B, C], dtype=dtype)
|
72 |
+
|
73 |
+
|
74 |
+
class TestPeriodArray(base.ExtensionTests):
    """Extension-interface compliance tests for PeriodArray."""

    def _get_expected_exception(self, op_name, obj, other):
        # Period subtraction is well-defined (yields offsets), so no error.
        if op_name in ("__sub__", "__rsub__"):
            return None
        return super()._get_expected_exception(op_name, obj, other)

    def _supports_accumulation(self, ser, op_name: str) -> bool:
        return op_name in ["cummin", "cummax"]

    def _supports_reduction(self, obj, op_name: str) -> bool:
        return op_name in ["min", "max", "median"]

    def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
        if op_name == "median":
            # Compare against the median of the int64 ordinals, re-wrapped
            # as a Period with the series' frequency.
            res_op = getattr(ser, op_name)

            alt = ser.astype("int64")

            exp_op = getattr(alt, op_name)
            result = res_op(skipna=skipna)
            expected = exp_op(skipna=skipna)
            # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype" has no
            # attribute "freq"
            freq = ser.dtype.freq  # type: ignore[union-attr]
            expected = Period._from_ordinal(int(expected), freq=freq)
            tm.assert_almost_equal(result, expected)

        else:
            return super().check_reduce(ser, op_name, skipna)

    @pytest.mark.parametrize("periods", [1, -2])
    def test_diff(self, data, periods):
        # numpy >= 1.24 on Windows emits a RuntimeWarning here; tolerate it.
        if is_platform_windows() and np_version_gte1p24:
            with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False):
                super().test_diff(data, periods)
        else:
            super().test_diff(data, periods)

    @pytest.mark.parametrize("na_action", [None, "ignore"])
    def test_map(self, data, na_action):
        # Identity map must round-trip the array for both na_action modes.
        result = data.map(lambda x: x, na_action=na_action)
        tm.assert_extension_array_equal(result, data)
|
116 |
+
|
117 |
+
|
118 |
+
# Inherit the ndarray-backed 2D test suite unchanged; no overrides needed.
class Test2DCompat(base.NDArrayBacked2DTests):
    pass
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_sparse.py
ADDED
@@ -0,0 +1,498 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
|
17 |
+
import numpy as np
|
18 |
+
import pytest
|
19 |
+
|
20 |
+
from pandas.errors import PerformanceWarning
|
21 |
+
|
22 |
+
import pandas as pd
|
23 |
+
from pandas import SparseDtype
|
24 |
+
import pandas._testing as tm
|
25 |
+
from pandas.arrays import SparseArray
|
26 |
+
from pandas.tests.extension import base
|
27 |
+
|
28 |
+
|
29 |
+
def make_data(fill_value):
    """Build a length-100 array whose every third element (from index 2) is
    *fill_value*.

    A NaN fill value yields uniform floats in [0, 1); anything else yields
    random ints in [1, 100) with the first two entries forced to differ.
    Seeded, so the output is deterministic.
    """
    rng = np.random.default_rng(2)
    if np.isnan(fill_value):
        values = rng.uniform(size=100)
    else:
        values = rng.integers(1, 100, size=100, dtype=int)
        if values[0] == values[1]:
            # keep the first two entries distinct
            values[0] += 1

    values[2::3] = fill_value
    return values
|
40 |
+
|
41 |
+
|
42 |
+
@pytest.fixture
def dtype():
    # Default SparseDtype (float64 subtype, NaN fill value).
    return SparseDtype()
|
45 |
+
|
46 |
+
|
47 |
+
@pytest.fixture(params=[0, np.nan])
def data(request):
    """Length-100 SparseArray for semantics test."""
    res = SparseArray(make_data(request.param), fill_value=request.param)
    return res
|
52 |
+
|
53 |
+
|
54 |
+
@pytest.fixture
def data_for_twos():
    # All-2s array used by the arithmetic tests.
    return SparseArray(np.ones(100) * 2)
|
57 |
+
|
58 |
+
|
59 |
+
@pytest.fixture(params=[0, np.nan])
def data_missing(request):
    """Length 2 array with [NA, Valid]"""
    return SparseArray([np.nan, 1], fill_value=request.param)
|
63 |
+
|
64 |
+
|
65 |
+
@pytest.fixture(params=[0, np.nan])
def data_repeated(request):
    """Return different versions of data for count times"""

    def gen(count):
        # Yield `count` freshly-constructed arrays (same seeded contents).
        for _ in range(count):
            yield SparseArray(make_data(request.param), fill_value=request.param)

    yield gen
|
74 |
+
|
75 |
+
|
76 |
+
@pytest.fixture(params=[0, np.nan])
def data_for_sorting(request):
    # [B, C, A] with A < B < C, per the base-suite contract.
    return SparseArray([2, 3, 1], fill_value=request.param)
|
79 |
+
|
80 |
+
|
81 |
+
@pytest.fixture(params=[0, np.nan])
def data_missing_for_sorting(request):
    # [B, NA, A] with A < B and NA missing, per the base-suite contract.
    return SparseArray([2, np.nan, 1], fill_value=request.param)
|
84 |
+
|
85 |
+
|
86 |
+
@pytest.fixture
def na_cmp():
    # NA comparison: both sides must be NA per pandas' isna.
    return lambda left, right: pd.isna(left) and pd.isna(right)
|
89 |
+
|
90 |
+
|
91 |
+
@pytest.fixture(params=[0, np.nan])
def data_for_grouping(request):
    # [B, B, NA, NA, A, A, B, C] pattern with A=2? NOTE(review): here the
    # pattern is [1, 1, NA, NA, 2, 2, 1, 3]; confirm against the base-suite
    # contract, which describes A < B < C.
    return SparseArray([1, 1, np.nan, np.nan, 2, 2, 1, 3], fill_value=request.param)
|
94 |
+
|
95 |
+
|
96 |
+
@pytest.fixture(params=[0, np.nan])
def data_for_compare(request):
    # Mixed positives/negatives/NA/fill-value runs for comparison-op tests.
    return SparseArray([0, 0, np.nan, -2, -1, 4, 2, 3, 0, 0], fill_value=request.param)
|
99 |
+
|
100 |
+
|
101 |
+
class TestSparseArray(base.ExtensionTests):
|
102 |
+
def _supports_reduction(self, obj, op_name: str) -> bool:
|
103 |
+
return True
|
104 |
+
|
105 |
+
@pytest.mark.parametrize("skipna", [True, False])
|
106 |
+
def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request):
|
107 |
+
if all_numeric_reductions in [
|
108 |
+
"prod",
|
109 |
+
"median",
|
110 |
+
"var",
|
111 |
+
"std",
|
112 |
+
"sem",
|
113 |
+
"skew",
|
114 |
+
"kurt",
|
115 |
+
]:
|
116 |
+
mark = pytest.mark.xfail(
|
117 |
+
reason="This should be viable but is not implemented"
|
118 |
+
)
|
119 |
+
request.node.add_marker(mark)
|
120 |
+
elif (
|
121 |
+
all_numeric_reductions in ["sum", "max", "min", "mean"]
|
122 |
+
and data.dtype.kind == "f"
|
123 |
+
and not skipna
|
124 |
+
):
|
125 |
+
mark = pytest.mark.xfail(reason="getting a non-nan float")
|
126 |
+
request.node.add_marker(mark)
|
127 |
+
|
128 |
+
super().test_reduce_series_numeric(data, all_numeric_reductions, skipna)
|
129 |
+
|
130 |
+
@pytest.mark.parametrize("skipna", [True, False])
|
131 |
+
def test_reduce_frame(self, data, all_numeric_reductions, skipna, request):
|
132 |
+
if all_numeric_reductions in [
|
133 |
+
"prod",
|
134 |
+
"median",
|
135 |
+
"var",
|
136 |
+
"std",
|
137 |
+
"sem",
|
138 |
+
"skew",
|
139 |
+
"kurt",
|
140 |
+
]:
|
141 |
+
mark = pytest.mark.xfail(
|
142 |
+
reason="This should be viable but is not implemented"
|
143 |
+
)
|
144 |
+
request.node.add_marker(mark)
|
145 |
+
elif (
|
146 |
+
all_numeric_reductions in ["sum", "max", "min", "mean"]
|
147 |
+
and data.dtype.kind == "f"
|
148 |
+
and not skipna
|
149 |
+
):
|
150 |
+
mark = pytest.mark.xfail(reason="ExtensionArray NA mask are different")
|
151 |
+
request.node.add_marker(mark)
|
152 |
+
|
153 |
+
super().test_reduce_frame(data, all_numeric_reductions, skipna)
|
154 |
+
|
155 |
+
def _check_unsupported(self, data):
|
156 |
+
if data.dtype == SparseDtype(int, 0):
|
157 |
+
pytest.skip("Can't store nan in int array.")
|
158 |
+
|
159 |
+
def test_concat_mixed_dtypes(self, data):
|
160 |
+
# https://github.com/pandas-dev/pandas/issues/20762
|
161 |
+
# This should be the same, aside from concat([sparse, float])
|
162 |
+
df1 = pd.DataFrame({"A": data[:3]})
|
163 |
+
df2 = pd.DataFrame({"A": [1, 2, 3]})
|
164 |
+
df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category")
|
165 |
+
dfs = [df1, df2, df3]
|
166 |
+
|
167 |
+
# dataframes
|
168 |
+
result = pd.concat(dfs)
|
169 |
+
expected = pd.concat(
|
170 |
+
[x.apply(lambda s: np.asarray(s).astype(object)) for x in dfs]
|
171 |
+
)
|
172 |
+
tm.assert_frame_equal(result, expected)
|
173 |
+
|
174 |
+
@pytest.mark.filterwarnings(
|
175 |
+
"ignore:The previous implementation of stack is deprecated"
|
176 |
+
)
|
177 |
+
@pytest.mark.parametrize(
|
178 |
+
"columns",
|
179 |
+
[
|
180 |
+
["A", "B"],
|
181 |
+
pd.MultiIndex.from_tuples(
|
182 |
+
[("A", "a"), ("A", "b")], names=["outer", "inner"]
|
183 |
+
),
|
184 |
+
],
|
185 |
+
)
|
186 |
+
@pytest.mark.parametrize("future_stack", [True, False])
|
187 |
+
def test_stack(self, data, columns, future_stack):
|
188 |
+
super().test_stack(data, columns, future_stack)
|
189 |
+
|
190 |
+
def test_concat_columns(self, data, na_value):
|
191 |
+
self._check_unsupported(data)
|
192 |
+
super().test_concat_columns(data, na_value)
|
193 |
+
|
194 |
+
def test_concat_extension_arrays_copy_false(self, data, na_value):
|
195 |
+
self._check_unsupported(data)
|
196 |
+
super().test_concat_extension_arrays_copy_false(data, na_value)
|
197 |
+
|
198 |
+
def test_align(self, data, na_value):
|
199 |
+
self._check_unsupported(data)
|
200 |
+
super().test_align(data, na_value)
|
201 |
+
|
202 |
+
def test_align_frame(self, data, na_value):
|
203 |
+
self._check_unsupported(data)
|
204 |
+
super().test_align_frame(data, na_value)
|
205 |
+
|
206 |
+
def test_align_series_frame(self, data, na_value):
|
207 |
+
self._check_unsupported(data)
|
208 |
+
super().test_align_series_frame(data, na_value)
|
209 |
+
|
210 |
+
def test_merge(self, data, na_value):
|
211 |
+
self._check_unsupported(data)
|
212 |
+
super().test_merge(data, na_value)
|
213 |
+
|
214 |
+
def test_get(self, data):
|
215 |
+
ser = pd.Series(data, index=[2 * i for i in range(len(data))])
|
216 |
+
if np.isnan(ser.values.fill_value):
|
217 |
+
assert np.isnan(ser.get(4)) and np.isnan(ser.iloc[2])
|
218 |
+
else:
|
219 |
+
assert ser.get(4) == ser.iloc[2]
|
220 |
+
assert ser.get(2) == ser.iloc[1]
|
221 |
+
|
222 |
+
def test_reindex(self, data, na_value):
|
223 |
+
self._check_unsupported(data)
|
224 |
+
super().test_reindex(data, na_value)
|
225 |
+
|
226 |
+
def test_isna(self, data_missing):
|
227 |
+
sarr = SparseArray(data_missing)
|
228 |
+
expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value))
|
229 |
+
expected = SparseArray([True, False], dtype=expected_dtype)
|
230 |
+
result = sarr.isna()
|
231 |
+
tm.assert_sp_array_equal(result, expected)
|
232 |
+
|
233 |
+
# test isna for arr without na
|
234 |
+
sarr = sarr.fillna(0)
|
235 |
+
expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value))
|
236 |
+
expected = SparseArray([False, False], fill_value=False, dtype=expected_dtype)
|
237 |
+
tm.assert_equal(sarr.isna(), expected)
|
238 |
+
|
239 |
+
def test_fillna_limit_backfill(self, data_missing):
|
240 |
+
warns = (PerformanceWarning, FutureWarning)
|
241 |
+
with tm.assert_produces_warning(warns, check_stacklevel=False):
|
242 |
+
super().test_fillna_limit_backfill(data_missing)
|
243 |
+
|
244 |
+
def test_fillna_no_op_returns_copy(self, data, request):
|
245 |
+
if np.isnan(data.fill_value):
|
246 |
+
request.applymarker(
|
247 |
+
pytest.mark.xfail(reason="returns array with different fill value")
|
248 |
+
)
|
249 |
+
super().test_fillna_no_op_returns_copy(data)
|
250 |
+
|
251 |
+
@pytest.mark.xfail(reason="Unsupported")
|
252 |
+
def test_fillna_series(self, data_missing):
|
253 |
+
# this one looks doable.
|
254 |
+
# TODO: this fails bc we do not pass through data_missing. If we did,
|
255 |
+
# the 0-fill case would xpass
|
256 |
+
super().test_fillna_series()
|
257 |
+
|
258 |
+
def test_fillna_frame(self, data_missing):
|
259 |
+
# Have to override to specify that fill_value will change.
|
260 |
+
fill_value = data_missing[1]
|
261 |
+
|
262 |
+
result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value)
|
263 |
+
|
264 |
+
if pd.isna(data_missing.fill_value):
|
265 |
+
dtype = SparseDtype(data_missing.dtype, fill_value)
|
266 |
+
else:
|
267 |
+
dtype = data_missing.dtype
|
268 |
+
|
269 |
+
expected = pd.DataFrame(
|
270 |
+
{
|
271 |
+
"A": data_missing._from_sequence([fill_value, fill_value], dtype=dtype),
|
272 |
+
"B": [1, 2],
|
273 |
+
}
|
274 |
+
)
|
275 |
+
|
276 |
+
tm.assert_frame_equal(result, expected)
|
277 |
+
|
278 |
+
_combine_le_expected_dtype = "Sparse[bool]"
|
279 |
+
|
280 |
+
def test_fillna_copy_frame(self, data_missing, using_copy_on_write):
|
281 |
+
arr = data_missing.take([1, 1])
|
282 |
+
df = pd.DataFrame({"A": arr}, copy=False)
|
283 |
+
|
284 |
+
filled_val = df.iloc[0, 0]
|
285 |
+
result = df.fillna(filled_val)
|
286 |
+
|
287 |
+
if hasattr(df._mgr, "blocks"):
|
288 |
+
if using_copy_on_write:
|
289 |
+
assert df.values.base is result.values.base
|
290 |
+
else:
|
291 |
+
assert df.values.base is not result.values.base
|
292 |
+
assert df.A._values.to_dense() is arr.to_dense()
|
293 |
+
|
294 |
+
def test_fillna_copy_series(self, data_missing, using_copy_on_write):
|
295 |
+
arr = data_missing.take([1, 1])
|
296 |
+
ser = pd.Series(arr, copy=False)
|
297 |
+
|
298 |
+
filled_val = ser[0]
|
299 |
+
result = ser.fillna(filled_val)
|
300 |
+
|
301 |
+
if using_copy_on_write:
|
302 |
+
assert ser._values is result._values
|
303 |
+
|
304 |
+
else:
|
305 |
+
assert ser._values is not result._values
|
306 |
+
assert ser._values.to_dense() is arr.to_dense()
|
307 |
+
|
308 |
+
@pytest.mark.xfail(reason="Not Applicable")
|
309 |
+
def test_fillna_length_mismatch(self, data_missing):
|
310 |
+
super().test_fillna_length_mismatch(data_missing)
|
311 |
+
|
312 |
+
def test_where_series(self, data, na_value):
|
313 |
+
assert data[0] != data[1]
|
314 |
+
cls = type(data)
|
315 |
+
a, b = data[:2]
|
316 |
+
|
317 |
+
ser = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))
|
318 |
+
|
319 |
+
cond = np.array([True, True, False, False])
|
320 |
+
result = ser.where(cond)
|
321 |
+
|
322 |
+
new_dtype = SparseDtype("float", 0.0)
|
323 |
+
expected = pd.Series(
|
324 |
+
cls._from_sequence([a, a, na_value, na_value], dtype=new_dtype)
|
325 |
+
)
|
326 |
+
tm.assert_series_equal(result, expected)
|
327 |
+
|
328 |
+
other = cls._from_sequence([a, b, a, b], dtype=data.dtype)
|
329 |
+
cond = np.array([True, False, True, True])
|
330 |
+
result = ser.where(cond, other)
|
331 |
+
expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype))
|
332 |
+
tm.assert_series_equal(result, expected)
|
333 |
+
|
334 |
+
def test_searchsorted(self, data_for_sorting, as_series):
|
335 |
+
with tm.assert_produces_warning(PerformanceWarning, check_stacklevel=False):
|
336 |
+
super().test_searchsorted(data_for_sorting, as_series)
|
337 |
+
|
338 |
+
def test_shift_0_periods(self, data):
|
339 |
+
# GH#33856 shifting with periods=0 should return a copy, not same obj
|
340 |
+
result = data.shift(0)
|
341 |
+
|
342 |
+
data._sparse_values[0] = data._sparse_values[1]
|
343 |
+
assert result._sparse_values[0] != result._sparse_values[1]
|
344 |
+
|
345 |
+
@pytest.mark.parametrize("method", ["argmax", "argmin"])
|
346 |
+
def test_argmin_argmax_all_na(self, method, data, na_value):
|
347 |
+
# overriding because Sparse[int64, 0] cannot handle na_value
|
348 |
+
self._check_unsupported(data)
|
349 |
+
super().test_argmin_argmax_all_na(method, data, na_value)
|
350 |
+
|
351 |
+
@pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])
|
352 |
+
def test_equals(self, data, na_value, as_series, box):
|
353 |
+
self._check_unsupported(data)
|
354 |
+
super().test_equals(data, na_value, as_series, box)
|
355 |
+
|
356 |
+
@pytest.mark.parametrize(
|
357 |
+
"func, na_action, expected",
|
358 |
+
[
|
359 |
+
(lambda x: x, None, SparseArray([1.0, np.nan])),
|
360 |
+
(lambda x: x, "ignore", SparseArray([1.0, np.nan])),
|
361 |
+
(str, None, SparseArray(["1.0", "nan"], fill_value="nan")),
|
362 |
+
(str, "ignore", SparseArray(["1.0", np.nan])),
|
363 |
+
],
|
364 |
+
)
|
365 |
+
def test_map(self, func, na_action, expected):
|
366 |
+
# GH52096
|
367 |
+
data = SparseArray([1, np.nan])
|
368 |
+
result = data.map(func, na_action=na_action)
|
369 |
+
tm.assert_extension_array_equal(result, expected)
|
370 |
+
|
371 |
+
@pytest.mark.parametrize("na_action", [None, "ignore"])
|
372 |
+
def test_map_raises(self, data, na_action):
|
373 |
+
# GH52096
|
374 |
+
msg = "fill value in the sparse values not supported"
|
375 |
+
with pytest.raises(ValueError, match=msg):
|
376 |
+
data.map(lambda x: np.nan, na_action=na_action)
|
377 |
+
|
378 |
+
@pytest.mark.xfail(raises=TypeError, reason="no sparse StringDtype")
|
379 |
+
def test_astype_string(self, data, nullable_string_dtype):
|
380 |
+
# TODO: this fails bc we do not pass through nullable_string_dtype;
|
381 |
+
# If we did, the 0-cases would xpass
|
382 |
+
super().test_astype_string(data)
|
383 |
+
|
384 |
+
series_scalar_exc = None
|
385 |
+
frame_scalar_exc = None
|
386 |
+
divmod_exc = None
|
387 |
+
series_array_exc = None
|
388 |
+
|
389 |
+
def _skip_if_different_combine(self, data):
|
390 |
+
if data.fill_value == 0:
|
391 |
+
# arith ops call on dtype.fill_value so that the sparsity
|
392 |
+
# is maintained. Combine can't be called on a dtype in
|
393 |
+
# general, so we can't make the expected. This is tested elsewhere
|
394 |
+
pytest.skip("Incorrected expected from Series.combine and tested elsewhere")
|
395 |
+
|
396 |
+
def test_arith_series_with_scalar(self, data, all_arithmetic_operators):
|
397 |
+
self._skip_if_different_combine(data)
|
398 |
+
super().test_arith_series_with_scalar(data, all_arithmetic_operators)
|
399 |
+
|
400 |
+
def test_arith_series_with_array(self, data, all_arithmetic_operators):
|
401 |
+
self._skip_if_different_combine(data)
|
402 |
+
super().test_arith_series_with_array(data, all_arithmetic_operators)
|
403 |
+
|
404 |
+
def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):
|
405 |
+
if data.dtype.fill_value != 0:
|
406 |
+
pass
|
407 |
+
elif all_arithmetic_operators.strip("_") not in [
|
408 |
+
"mul",
|
409 |
+
"rmul",
|
410 |
+
"floordiv",
|
411 |
+
"rfloordiv",
|
412 |
+
"pow",
|
413 |
+
"mod",
|
414 |
+
"rmod",
|
415 |
+
]:
|
416 |
+
mark = pytest.mark.xfail(reason="result dtype.fill_value mismatch")
|
417 |
+
request.applymarker(mark)
|
418 |
+
super().test_arith_frame_with_scalar(data, all_arithmetic_operators)
|
419 |
+
|
420 |
+
def _compare_other(
|
421 |
+
self, ser: pd.Series, data_for_compare: SparseArray, comparison_op, other
|
422 |
+
):
|
423 |
+
op = comparison_op
|
424 |
+
|
425 |
+
result = op(data_for_compare, other)
|
426 |
+
if isinstance(other, pd.Series):
|
427 |
+
assert isinstance(result, pd.Series)
|
428 |
+
assert isinstance(result.dtype, SparseDtype)
|
429 |
+
else:
|
430 |
+
assert isinstance(result, SparseArray)
|
431 |
+
assert result.dtype.subtype == np.bool_
|
432 |
+
|
433 |
+
if isinstance(other, pd.Series):
|
434 |
+
fill_value = op(data_for_compare.fill_value, other._values.fill_value)
|
435 |
+
expected = SparseArray(
|
436 |
+
op(data_for_compare.to_dense(), np.asarray(other)),
|
437 |
+
fill_value=fill_value,
|
438 |
+
dtype=np.bool_,
|
439 |
+
)
|
440 |
+
|
441 |
+
else:
|
442 |
+
fill_value = np.all(
|
443 |
+
op(np.asarray(data_for_compare.fill_value), np.asarray(other))
|
444 |
+
)
|
445 |
+
|
446 |
+
expected = SparseArray(
|
447 |
+
op(data_for_compare.to_dense(), np.asarray(other)),
|
448 |
+
fill_value=fill_value,
|
449 |
+
dtype=np.bool_,
|
450 |
+
)
|
451 |
+
if isinstance(other, pd.Series):
|
452 |
+
# error: Incompatible types in assignment
|
453 |
+
expected = pd.Series(expected) # type: ignore[assignment]
|
454 |
+
tm.assert_equal(result, expected)
|
455 |
+
|
456 |
+
def test_scalar(self, data_for_compare: SparseArray, comparison_op):
|
457 |
+
ser = pd.Series(data_for_compare)
|
458 |
+
self._compare_other(ser, data_for_compare, comparison_op, 0)
|
459 |
+
self._compare_other(ser, data_for_compare, comparison_op, 1)
|
460 |
+
self._compare_other(ser, data_for_compare, comparison_op, -1)
|
461 |
+
self._compare_other(ser, data_for_compare, comparison_op, np.nan)
|
462 |
+
|
463 |
+
def test_array(self, data_for_compare: SparseArray, comparison_op, request):
|
464 |
+
if data_for_compare.dtype.fill_value == 0 and comparison_op.__name__ in [
|
465 |
+
"eq",
|
466 |
+
"ge",
|
467 |
+
"le",
|
468 |
+
]:
|
469 |
+
mark = pytest.mark.xfail(reason="Wrong fill_value")
|
470 |
+
request.applymarker(mark)
|
471 |
+
|
472 |
+
arr = np.linspace(-4, 5, 10)
|
473 |
+
ser = pd.Series(data_for_compare)
|
474 |
+
self._compare_other(ser, data_for_compare, comparison_op, arr)
|
475 |
+
|
476 |
+
def test_sparse_array(self, data_for_compare: SparseArray, comparison_op, request):
|
477 |
+
if data_for_compare.dtype.fill_value == 0 and comparison_op.__name__ != "gt":
|
478 |
+
mark = pytest.mark.xfail(reason="Wrong fill_value")
|
479 |
+
request.applymarker(mark)
|
480 |
+
|
481 |
+
ser = pd.Series(data_for_compare)
|
482 |
+
arr = data_for_compare + 1
|
483 |
+
self._compare_other(ser, data_for_compare, comparison_op, arr)
|
484 |
+
arr = data_for_compare * 2
|
485 |
+
self._compare_other(ser, data_for_compare, comparison_op, arr)
|
486 |
+
|
487 |
+
@pytest.mark.xfail(reason="Different repr")
|
488 |
+
def test_array_repr(self, data, size):
|
489 |
+
super().test_array_repr(data, size)
|
490 |
+
|
491 |
+
@pytest.mark.xfail(reason="result does not match expected")
|
492 |
+
@pytest.mark.parametrize("as_index", [True, False])
|
493 |
+
def test_groupby_extension_agg(self, as_index, data_for_grouping):
|
494 |
+
super().test_groupby_extension_agg(as_index, data_for_grouping)
|
495 |
+
|
496 |
+
|
497 |
+
def test_array_type_with_arg(dtype):
|
498 |
+
assert dtype.construct_array_type() is SparseArray
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/test_string.py
ADDED
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file contains a minimal set of tests for compliance with the extension
|
3 |
+
array interface test suite, and should contain no other tests.
|
4 |
+
The test suite for the full functionality of the array is located in
|
5 |
+
`pandas/tests/arrays/`.
|
6 |
+
|
7 |
+
The tests in this file are inherited from the BaseExtensionTests, and only
|
8 |
+
minimal tweaks should be applied to get the tests passing (by overwriting a
|
9 |
+
parent method).
|
10 |
+
|
11 |
+
Additional tests should either be added to one of the BaseExtensionTests
|
12 |
+
classes (if they are relevant for the extension interface for all dtypes), or
|
13 |
+
be added to the array-specific tests in `pandas/tests/arrays/`.
|
14 |
+
|
15 |
+
"""
|
16 |
+
from __future__ import annotations
|
17 |
+
|
18 |
+
import string
|
19 |
+
from typing import cast
|
20 |
+
|
21 |
+
import numpy as np
|
22 |
+
import pytest
|
23 |
+
|
24 |
+
import pandas as pd
|
25 |
+
import pandas._testing as tm
|
26 |
+
from pandas.api.types import is_string_dtype
|
27 |
+
from pandas.core.arrays import ArrowStringArray
|
28 |
+
from pandas.core.arrays.string_ import StringDtype
|
29 |
+
from pandas.tests.extension import base
|
30 |
+
|
31 |
+
|
32 |
+
def maybe_split_array(arr, chunked):
|
33 |
+
if not chunked:
|
34 |
+
return arr
|
35 |
+
elif arr.dtype.storage != "pyarrow":
|
36 |
+
return arr
|
37 |
+
|
38 |
+
pa = pytest.importorskip("pyarrow")
|
39 |
+
|
40 |
+
arrow_array = arr._pa_array
|
41 |
+
split = len(arrow_array) // 2
|
42 |
+
arrow_array = pa.chunked_array(
|
43 |
+
[*arrow_array[:split].chunks, *arrow_array[split:].chunks]
|
44 |
+
)
|
45 |
+
assert arrow_array.num_chunks == 2
|
46 |
+
return type(arr)(arrow_array)
|
47 |
+
|
48 |
+
|
49 |
+
@pytest.fixture(params=[True, False])
|
50 |
+
def chunked(request):
|
51 |
+
return request.param
|
52 |
+
|
53 |
+
|
54 |
+
@pytest.fixture
|
55 |
+
def dtype(string_storage):
|
56 |
+
return StringDtype(storage=string_storage)
|
57 |
+
|
58 |
+
|
59 |
+
@pytest.fixture
|
60 |
+
def data(dtype, chunked):
|
61 |
+
strings = np.random.default_rng(2).choice(list(string.ascii_letters), size=100)
|
62 |
+
while strings[0] == strings[1]:
|
63 |
+
strings = np.random.default_rng(2).choice(list(string.ascii_letters), size=100)
|
64 |
+
|
65 |
+
arr = dtype.construct_array_type()._from_sequence(strings, dtype=dtype)
|
66 |
+
return maybe_split_array(arr, chunked)
|
67 |
+
|
68 |
+
|
69 |
+
@pytest.fixture
|
70 |
+
def data_missing(dtype, chunked):
|
71 |
+
"""Length 2 array with [NA, Valid]"""
|
72 |
+
arr = dtype.construct_array_type()._from_sequence([pd.NA, "A"], dtype=dtype)
|
73 |
+
return maybe_split_array(arr, chunked)
|
74 |
+
|
75 |
+
|
76 |
+
@pytest.fixture
|
77 |
+
def data_for_sorting(dtype, chunked):
|
78 |
+
arr = dtype.construct_array_type()._from_sequence(["B", "C", "A"], dtype=dtype)
|
79 |
+
return maybe_split_array(arr, chunked)
|
80 |
+
|
81 |
+
|
82 |
+
@pytest.fixture
|
83 |
+
def data_missing_for_sorting(dtype, chunked):
|
84 |
+
arr = dtype.construct_array_type()._from_sequence(["B", pd.NA, "A"], dtype=dtype)
|
85 |
+
return maybe_split_array(arr, chunked)
|
86 |
+
|
87 |
+
|
88 |
+
@pytest.fixture
|
89 |
+
def data_for_grouping(dtype, chunked):
|
90 |
+
arr = dtype.construct_array_type()._from_sequence(
|
91 |
+
["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"], dtype=dtype
|
92 |
+
)
|
93 |
+
return maybe_split_array(arr, chunked)
|
94 |
+
|
95 |
+
|
96 |
+
class TestStringArray(base.ExtensionTests):
|
97 |
+
def test_eq_with_str(self, dtype):
|
98 |
+
assert dtype == f"string[{dtype.storage}]"
|
99 |
+
super().test_eq_with_str(dtype)
|
100 |
+
|
101 |
+
def test_is_not_string_type(self, dtype):
|
102 |
+
# Different from BaseDtypeTests.test_is_not_string_type
|
103 |
+
# because StringDtype is a string type
|
104 |
+
assert is_string_dtype(dtype)
|
105 |
+
|
106 |
+
def test_view(self, data, request, arrow_string_storage):
|
107 |
+
if data.dtype.storage in arrow_string_storage:
|
108 |
+
pytest.skip(reason="2D support not implemented for ArrowStringArray")
|
109 |
+
super().test_view(data)
|
110 |
+
|
111 |
+
def test_from_dtype(self, data):
|
112 |
+
# base test uses string representation of dtype
|
113 |
+
pass
|
114 |
+
|
115 |
+
def test_transpose(self, data, request, arrow_string_storage):
|
116 |
+
if data.dtype.storage in arrow_string_storage:
|
117 |
+
pytest.skip(reason="2D support not implemented for ArrowStringArray")
|
118 |
+
super().test_transpose(data)
|
119 |
+
|
120 |
+
def test_setitem_preserves_views(self, data, request, arrow_string_storage):
|
121 |
+
if data.dtype.storage in arrow_string_storage:
|
122 |
+
pytest.skip(reason="2D support not implemented for ArrowStringArray")
|
123 |
+
super().test_setitem_preserves_views(data)
|
124 |
+
|
125 |
+
def test_dropna_array(self, data_missing):
|
126 |
+
result = data_missing.dropna()
|
127 |
+
expected = data_missing[[1]]
|
128 |
+
tm.assert_extension_array_equal(result, expected)
|
129 |
+
|
130 |
+
def test_fillna_no_op_returns_copy(self, data):
|
131 |
+
data = data[~data.isna()]
|
132 |
+
|
133 |
+
valid = data[0]
|
134 |
+
result = data.fillna(valid)
|
135 |
+
assert result is not data
|
136 |
+
tm.assert_extension_array_equal(result, data)
|
137 |
+
|
138 |
+
result = data.fillna(method="backfill")
|
139 |
+
assert result is not data
|
140 |
+
tm.assert_extension_array_equal(result, data)
|
141 |
+
|
142 |
+
def _get_expected_exception(
|
143 |
+
self, op_name: str, obj, other
|
144 |
+
) -> type[Exception] | None:
|
145 |
+
if op_name in ["__divmod__", "__rdivmod__"]:
|
146 |
+
if isinstance(obj, pd.Series) and cast(
|
147 |
+
StringDtype, tm.get_dtype(obj)
|
148 |
+
).storage in [
|
149 |
+
"pyarrow",
|
150 |
+
"pyarrow_numpy",
|
151 |
+
]:
|
152 |
+
# TODO: re-raise as TypeError?
|
153 |
+
return NotImplementedError
|
154 |
+
elif isinstance(other, pd.Series) and cast(
|
155 |
+
StringDtype, tm.get_dtype(other)
|
156 |
+
).storage in [
|
157 |
+
"pyarrow",
|
158 |
+
"pyarrow_numpy",
|
159 |
+
]:
|
160 |
+
# TODO: re-raise as TypeError?
|
161 |
+
return NotImplementedError
|
162 |
+
return TypeError
|
163 |
+
elif op_name in ["__mod__", "__rmod__", "__pow__", "__rpow__"]:
|
164 |
+
if cast(StringDtype, tm.get_dtype(obj)).storage in [
|
165 |
+
"pyarrow",
|
166 |
+
"pyarrow_numpy",
|
167 |
+
]:
|
168 |
+
return NotImplementedError
|
169 |
+
return TypeError
|
170 |
+
elif op_name in ["__mul__", "__rmul__"]:
|
171 |
+
# Can only multiply strings by integers
|
172 |
+
return TypeError
|
173 |
+
elif op_name in [
|
174 |
+
"__truediv__",
|
175 |
+
"__rtruediv__",
|
176 |
+
"__floordiv__",
|
177 |
+
"__rfloordiv__",
|
178 |
+
"__sub__",
|
179 |
+
"__rsub__",
|
180 |
+
]:
|
181 |
+
if cast(StringDtype, tm.get_dtype(obj)).storage in [
|
182 |
+
"pyarrow",
|
183 |
+
"pyarrow_numpy",
|
184 |
+
]:
|
185 |
+
import pyarrow as pa
|
186 |
+
|
187 |
+
# TODO: better to re-raise as TypeError?
|
188 |
+
return pa.ArrowNotImplementedError
|
189 |
+
return TypeError
|
190 |
+
|
191 |
+
return None
|
192 |
+
|
193 |
+
def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
|
194 |
+
return (
|
195 |
+
op_name in ["min", "max"]
|
196 |
+
or ser.dtype.storage == "pyarrow_numpy" # type: ignore[union-attr]
|
197 |
+
and op_name in ("any", "all")
|
198 |
+
)
|
199 |
+
|
200 |
+
def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result):
|
201 |
+
dtype = cast(StringDtype, tm.get_dtype(obj))
|
202 |
+
if op_name in ["__add__", "__radd__"]:
|
203 |
+
cast_to = dtype
|
204 |
+
elif dtype.storage == "pyarrow":
|
205 |
+
cast_to = "boolean[pyarrow]" # type: ignore[assignment]
|
206 |
+
elif dtype.storage == "pyarrow_numpy":
|
207 |
+
cast_to = np.bool_ # type: ignore[assignment]
|
208 |
+
else:
|
209 |
+
cast_to = "boolean" # type: ignore[assignment]
|
210 |
+
return pointwise_result.astype(cast_to)
|
211 |
+
|
212 |
+
def test_compare_scalar(self, data, comparison_op):
|
213 |
+
ser = pd.Series(data)
|
214 |
+
self._compare_other(ser, data, comparison_op, "abc")
|
215 |
+
|
216 |
+
@pytest.mark.filterwarnings("ignore:Falling back:pandas.errors.PerformanceWarning")
|
217 |
+
def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
|
218 |
+
super().test_groupby_extension_apply(data_for_grouping, groupby_apply_op)
|
219 |
+
|
220 |
+
|
221 |
+
class Test2DCompat(base.Dim2CompatTests):
|
222 |
+
@pytest.fixture(autouse=True)
|
223 |
+
def arrow_not_supported(self, data):
|
224 |
+
if isinstance(data, ArrowStringArray):
|
225 |
+
pytest.skip(reason="2D support not implemented for ArrowStringArray")
|
226 |
+
|
227 |
+
|
228 |
+
def test_searchsorted_with_na_raises(data_for_sorting, as_series):
|
229 |
+
# GH50447
|
230 |
+
b, c, a = data_for_sorting
|
231 |
+
arr = data_for_sorting.take([2, 0, 1]) # to get [a, b, c]
|
232 |
+
arr[-1] = pd.NA
|
233 |
+
|
234 |
+
if as_series:
|
235 |
+
arr = pd.Series(arr)
|
236 |
+
|
237 |
+
msg = (
|
238 |
+
"searchsorted requires array to be sorted, "
|
239 |
+
"which is impossible with NAs present."
|
240 |
+
)
|
241 |
+
with pytest.raises(ValueError, match=msg):
|
242 |
+
arr.searchsorted(b)
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__init__.py
ADDED
File without changes
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_almost_equal.cpython-310.pyc
ADDED
Binary file (15.6 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_attr_equal.cpython-310.pyc
ADDED
Binary file (1.04 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_categorical_equal.cpython-310.pyc
ADDED
Binary file (2.98 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_extension_array_equal.cpython-310.pyc
ADDED
Binary file (3.78 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_index_equal.cpython-310.pyc
ADDED
Binary file (9.55 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_interval_array_equal.cpython-310.pyc
ADDED
Binary file (2.17 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_assert_numpy_array_equal.cpython-310.pyc
ADDED
Binary file (6.63 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_deprecate.cpython-310.pyc
ADDED
Binary file (2.18 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_deprecate_kwarg.cpython-310.pyc
ADDED
Binary file (3.13 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_deprecate_nonkeyword_arguments.cpython-310.pyc
ADDED
Binary file (5.45 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_doc.cpython-310.pyc
ADDED
Binary file (1.98 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_numba.cpython-310.pyc
ADDED
Binary file (698 Bytes). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_rewrite_warning.cpython-310.pyc
ADDED
Binary file (1.1 kB). View file
|
|
llmeval-env/lib/python3.10/site-packages/pandas/tests/util/__pycache__/test_shares_memory.cpython-310.pyc
ADDED
Binary file (1.06 kB). View file
|
|