Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- ckpts/llama-3b/global_step100/bf16_zero_pp_rank_122_mp_rank_00_optim_states.pt +3 -0
- venv/lib/python3.10/site-packages/pandas/tests/construction/__init__.py +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/test_extract_array.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/construction/test_extract_array.py +18 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/accumulate.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/base.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/casting.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/constructors.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dim2.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/getitem.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/groupby.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/index.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/interface.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/io.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/methods.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/missing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/ops.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/printing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reduce.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reshaping.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/setitem.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/base.py +2 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/casting.py +87 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/groupby.py +174 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/index.py +19 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/io.py +39 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/missing.py +188 -0
- venv/lib/python3.10/site-packages/pandas/tests/extension/base/reshaping.py +379 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/__init__.py +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_duplicate_labels.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_finalize.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_frame.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_generic.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_label_or_level_utils.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_series.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_to_xarray.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/test_duplicate_labels.py +413 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/test_finalize.py +767 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/test_frame.py +209 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/test_generic.py +504 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/test_label_or_level_utils.py +336 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/test_series.py +159 -0
- venv/lib/python3.10/site-packages/pandas/tests/generic/test_to_xarray.py +130 -0
- venv/lib/python3.10/site-packages/pandas/tests/tseries/__init__.py +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/tseries/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/__init__.py +0 -0
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_122_mp_rank_00_optim_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7e6812571c227fed711e7cad368af9a4c26e6b16ab7341eeda7d2f5939e338cf
+size 41830212

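The checkpoint weights themselves are not stored in the repository; the three lines above are a Git LFS pointer recording the object's SHA-256 and size. A minimal sketch of how a downloaded copy could be checked against such a pointer (the path, oid, and size are copied from the hunk above; the verify_lfs_object helper is illustrative and not part of this commit):

import hashlib
from pathlib import Path


def verify_lfs_object(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a downloaded file against the oid/size recorded in a Git LFS pointer."""
    blob = Path(path)
    if blob.stat().st_size != expected_size:
        return False
    digest = hashlib.sha256()
    with blob.open("rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):  # read in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid


# Values copied from the pointer file above.
verify_lfs_object(
    "ckpts/llama-3b/global_step100/bf16_zero_pp_rank_122_mp_rank_00_optim_states.pt",
    "7e6812571c227fed711e7cad368af9a4c26e6b16ab7341eeda7d2f5939e338cf",
    41830212,
)
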
venv/lib/python3.10/site-packages/pandas/tests/construction/__init__.py
ADDED
File without changes
venv/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (193 Bytes)
venv/lib/python3.10/site-packages/pandas/tests/construction/__pycache__/test_extract_array.cpython-310.pyc
ADDED
Binary file (739 Bytes)
venv/lib/python3.10/site-packages/pandas/tests/construction/test_extract_array.py
ADDED
@@ -0,0 +1,18 @@
+from pandas import Index
+import pandas._testing as tm
+from pandas.core.construction import extract_array
+
+
+def test_extract_array_rangeindex():
+    ri = Index(range(5))
+
+    expected = ri._values
+    res = extract_array(ri, extract_numpy=True, extract_range=True)
+    tm.assert_numpy_array_equal(res, expected)
+    res = extract_array(ri, extract_numpy=False, extract_range=True)
+    tm.assert_numpy_array_equal(res, expected)
+
+    res = extract_array(ri, extract_numpy=True, extract_range=False)
+    tm.assert_index_equal(res, ri)
+    res = extract_array(ri, extract_numpy=False, extract_range=False)
+    tm.assert_index_equal(res, ri)

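For reference, extract_array is the internal pandas helper these assertions exercise: with extract_range=True a RangeIndex is reduced to its underlying numpy values, while with extract_range=False the Index object passes through unchanged. A rough illustration of the behaviour the test above asserts (illustrative only, not part of the diff):

from pandas import Index
from pandas.core.construction import extract_array

ri = Index(range(5))

# RangeIndex unwrapped to its backing ndarray when extract_range=True
print(extract_array(ri, extract_numpy=True, extract_range=True))   # [0 1 2 3 4]

# Without extract_range the Index itself is returned unchanged
print(extract_array(ri, extract_numpy=True, extract_range=False))  # RangeIndex(start=0, stop=5, step=1)
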
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.8 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/accumulate.cpython-310.pyc
ADDED
Binary file (1.65 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/base.cpython-310.pyc
ADDED
Binary file (353 Bytes)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/casting.cpython-310.pyc
ADDED
Binary file (3.69 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/constructors.cpython-310.pyc
ADDED
Binary file (5.75 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dim2.cpython-310.pyc
ADDED
Binary file (9.38 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc
ADDED
Binary file (5.34 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/getitem.cpython-310.pyc
ADDED
Binary file (14.6 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/groupby.cpython-310.pyc
ADDED
Binary file (5.56 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/index.cpython-310.pyc
ADDED
Binary file (994 Bytes)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/interface.cpython-310.pyc
ADDED
Binary file (4.35 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/io.cpython-310.pyc
ADDED
Binary file (1.39 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/methods.cpython-310.pyc
ADDED
Binary file (23.6 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/missing.cpython-310.pyc
ADDED
Binary file (6.13 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/ops.cpython-310.pyc
ADDED
Binary file (8.95 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/printing.cpython-310.pyc
ADDED
Binary file (1.86 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reduce.cpython-310.pyc
ADDED
Binary file (4.38 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reshaping.cpython-310.pyc
ADDED
Binary file (11.6 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/setitem.cpython-310.pyc
ADDED
Binary file (15.9 kB)
venv/lib/python3.10/site-packages/pandas/tests/extension/base/base.py
ADDED
@@ -0,0 +1,2 @@
+class BaseExtensionTests:
+    pass

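BaseExtensionTests is an empty marker class; the actual checks live in the per-topic classes added below (casting, groupby, index, io, missing, reshaping, ...). A third-party ExtensionArray author typically consumes them by subclassing and supplying the pytest fixtures the tests expect, roughly as sketched here (my_package.DecimalArray is a hypothetical array used only for illustration; it is not part of this commit):

import pytest

from pandas.tests.extension.base.casting import BaseCastingTests
from pandas.tests.extension.base.missing import BaseMissingTests

# Hypothetical ExtensionArray under test; substitute your own implementation.
from my_package.decimal_array import DecimalArray, DecimalDtype


@pytest.fixture
def dtype():
    return DecimalDtype()


@pytest.fixture
def data():
    # 100 valid scalars of the array's scalar type.
    return DecimalArray([1, 2] * 50)


@pytest.fixture
def data_missing():
    # Convention used by the base tests: [missing, valid].
    return DecimalArray([None, 1])


class TestCasting(BaseCastingTests):
    pass


class TestMissing(BaseMissingTests):
    pass
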
venv/lib/python3.10/site-packages/pandas/tests/extension/base/casting.py
ADDED
@@ -0,0 +1,87 @@
+import numpy as np
+import pytest
+
+import pandas.util._test_decorators as td
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.core.internals.blocks import NumpyBlock
+
+
+class BaseCastingTests:
+    """Casting to and from ExtensionDtypes"""
+
+    def test_astype_object_series(self, all_data):
+        ser = pd.Series(all_data, name="A")
+        result = ser.astype(object)
+        assert result.dtype == np.dtype(object)
+        if hasattr(result._mgr, "blocks"):
+            blk = result._mgr.blocks[0]
+            assert isinstance(blk, NumpyBlock)
+            assert blk.is_object
+        assert isinstance(result._mgr.array, np.ndarray)
+        assert result._mgr.array.dtype == np.dtype(object)
+
+    def test_astype_object_frame(self, all_data):
+        df = pd.DataFrame({"A": all_data})
+
+        result = df.astype(object)
+        if hasattr(result._mgr, "blocks"):
+            blk = result._mgr.blocks[0]
+            assert isinstance(blk, NumpyBlock), type(blk)
+            assert blk.is_object
+        assert isinstance(result._mgr.arrays[0], np.ndarray)
+        assert result._mgr.arrays[0].dtype == np.dtype(object)
+
+        # check that we can compare the dtypes
+        comp = result.dtypes == df.dtypes
+        assert not comp.any()
+
+    def test_tolist(self, data):
+        result = pd.Series(data).tolist()
+        expected = list(data)
+        assert result == expected
+
+    def test_astype_str(self, data):
+        result = pd.Series(data[:5]).astype(str)
+        expected = pd.Series([str(x) for x in data[:5]], dtype=str)
+        tm.assert_series_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "nullable_string_dtype",
+        [
+            "string[python]",
+            pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")),
+        ],
+    )
+    def test_astype_string(self, data, nullable_string_dtype):
+        # GH-33465, GH#45326 as of 2.0 we decode bytes instead of calling str(obj)
+        result = pd.Series(data[:5]).astype(nullable_string_dtype)
+        expected = pd.Series(
+            [str(x) if not isinstance(x, bytes) else x.decode() for x in data[:5]],
+            dtype=nullable_string_dtype,
+        )
+        tm.assert_series_equal(result, expected)
+
+    def test_to_numpy(self, data):
+        expected = np.asarray(data)
+
+        result = data.to_numpy()
+        tm.assert_equal(result, expected)
+
+        result = pd.Series(data).to_numpy()
+        tm.assert_equal(result, expected)
+
+    def test_astype_empty_dataframe(self, dtype):
+        # https://github.com/pandas-dev/pandas/issues/33113
+        df = pd.DataFrame()
+        result = df.astype(dtype)
+        tm.assert_frame_equal(result, df)
+
+    @pytest.mark.parametrize("copy", [True, False])
+    def test_astype_own_type(self, data, copy):
+        # ensure that astype returns the original object for equal dtype and copy=False
+        # https://github.com/pandas-dev/pandas/issues/28488
+        result = data.astype(data.dtype, copy=copy)
+        assert (result is data) is (not copy)
+        tm.assert_extension_array_equal(result, data)

venv/lib/python3.10/site-packages/pandas/tests/extension/base/groupby.py
ADDED
@@ -0,0 +1,174 @@
+import re
+
+import pytest
+
+from pandas.core.dtypes.common import (
+    is_bool_dtype,
+    is_numeric_dtype,
+    is_object_dtype,
+    is_string_dtype,
+)
+
+import pandas as pd
+import pandas._testing as tm
+
+
+@pytest.mark.filterwarnings(
+    "ignore:The default of observed=False is deprecated:FutureWarning"
+)
+class BaseGroupbyTests:
+    """Groupby-specific tests."""
+
+    def test_grouping_grouper(self, data_for_grouping):
+        df = pd.DataFrame(
+            {
+                "A": pd.Series(
+                    ["B", "B", None, None, "A", "A", "B", "C"], dtype=object
+                ),
+                "B": data_for_grouping,
+            }
+        )
+        gr1 = df.groupby("A")._grouper.groupings[0]
+        gr2 = df.groupby("B")._grouper.groupings[0]
+
+        tm.assert_numpy_array_equal(gr1.grouping_vector, df.A.values)
+        tm.assert_extension_array_equal(gr2.grouping_vector, data_for_grouping)
+
+    @pytest.mark.parametrize("as_index", [True, False])
+    def test_groupby_extension_agg(self, as_index, data_for_grouping):
+        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
+
+        is_bool = data_for_grouping.dtype._is_boolean
+        if is_bool:
+            # only 2 unique values, and the final entry has c==b
+            # (see data_for_grouping docstring)
+            df = df.iloc[:-1]
+
+        result = df.groupby("B", as_index=as_index).A.mean()
+        _, uniques = pd.factorize(data_for_grouping, sort=True)
+
+        exp_vals = [3.0, 1.0, 4.0]
+        if is_bool:
+            exp_vals = exp_vals[:-1]
+        if as_index:
+            index = pd.Index(uniques, name="B")
+            expected = pd.Series(exp_vals, index=index, name="A")
+            tm.assert_series_equal(result, expected)
+        else:
+            expected = pd.DataFrame({"B": uniques, "A": exp_vals})
+            tm.assert_frame_equal(result, expected)
+
+    def test_groupby_agg_extension(self, data_for_grouping):
+        # GH#38980 groupby agg on extension type fails for non-numeric types
+        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
+
+        expected = df.iloc[[0, 2, 4, 7]]
+        expected = expected.set_index("A")
+
+        result = df.groupby("A").agg({"B": "first"})
+        tm.assert_frame_equal(result, expected)
+
+        result = df.groupby("A").agg("first")
+        tm.assert_frame_equal(result, expected)
+
+        result = df.groupby("A").first()
+        tm.assert_frame_equal(result, expected)
+
+    def test_groupby_extension_no_sort(self, data_for_grouping):
+        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
+
+        is_bool = data_for_grouping.dtype._is_boolean
+        if is_bool:
+            # only 2 unique values, and the final entry has c==b
+            # (see data_for_grouping docstring)
+            df = df.iloc[:-1]
+
+        result = df.groupby("B", sort=False).A.mean()
+        _, index = pd.factorize(data_for_grouping, sort=False)
+
+        index = pd.Index(index, name="B")
+        exp_vals = [1.0, 3.0, 4.0]
+        if is_bool:
+            exp_vals = exp_vals[:-1]
+        expected = pd.Series(exp_vals, index=index, name="A")
+        tm.assert_series_equal(result, expected)
+
+    def test_groupby_extension_transform(self, data_for_grouping):
+        is_bool = data_for_grouping.dtype._is_boolean
+
+        valid = data_for_grouping[~data_for_grouping.isna()]
+        df = pd.DataFrame({"A": [1, 1, 3, 3, 1, 4], "B": valid})
+        is_bool = data_for_grouping.dtype._is_boolean
+        if is_bool:
+            # only 2 unique values, and the final entry has c==b
+            # (see data_for_grouping docstring)
+            df = df.iloc[:-1]
+
+        result = df.groupby("B").A.transform(len)
+        expected = pd.Series([3, 3, 2, 2, 3, 1], name="A")
+        if is_bool:
+            expected = expected[:-1]
+
+        tm.assert_series_equal(result, expected)
+
+    def test_groupby_extension_apply(self, data_for_grouping, groupby_apply_op):
+        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
+        msg = "DataFrameGroupBy.apply operated on the grouping columns"
+        with tm.assert_produces_warning(DeprecationWarning, match=msg):
+            df.groupby("B", group_keys=False, observed=False).apply(groupby_apply_op)
+        df.groupby("B", group_keys=False, observed=False).A.apply(groupby_apply_op)
+        msg = "DataFrameGroupBy.apply operated on the grouping columns"
+        with tm.assert_produces_warning(DeprecationWarning, match=msg):
+            df.groupby("A", group_keys=False, observed=False).apply(groupby_apply_op)
+        df.groupby("A", group_keys=False, observed=False).B.apply(groupby_apply_op)
+
+    def test_groupby_apply_identity(self, data_for_grouping):
+        df = pd.DataFrame({"A": [1, 1, 2, 2, 3, 3, 1, 4], "B": data_for_grouping})
+        result = df.groupby("A").B.apply(lambda x: x.array)
+        expected = pd.Series(
+            [
+                df.B.iloc[[0, 1, 6]].array,
+                df.B.iloc[[2, 3]].array,
+                df.B.iloc[[4, 5]].array,
+                df.B.iloc[[7]].array,
+            ],
+            index=pd.Index([1, 2, 3, 4], name="A"),
+            name="B",
+        )
+        tm.assert_series_equal(result, expected)
+
+    def test_in_numeric_groupby(self, data_for_grouping):
+        df = pd.DataFrame(
+            {
+                "A": [1, 1, 2, 2, 3, 3, 1, 4],
+                "B": data_for_grouping,
+                "C": [1, 1, 1, 1, 1, 1, 1, 1],
+            }
+        )
+
+        dtype = data_for_grouping.dtype
+        if (
+            is_numeric_dtype(dtype)
+            or is_bool_dtype(dtype)
+            or dtype.name == "decimal"
+            or is_string_dtype(dtype)
+            or is_object_dtype(dtype)
+            or dtype.kind == "m"  # in particular duration[*][pyarrow]
+        ):
+            expected = pd.Index(["B", "C"])
+            result = df.groupby("A").sum().columns
+        else:
+            expected = pd.Index(["C"])
+
+            msg = "|".join(
+                [
+                    # period/datetime
+                    "does not support sum operations",
+                    # all others
+                    re.escape(f"agg function failed [how->sum,dtype->{dtype}"),
+                ]
+            )
+            with pytest.raises(TypeError, match=msg):
+                df.groupby("A").sum()
+            result = df.groupby("A").sum(numeric_only=True).columns
+        tm.assert_index_equal(result, expected)

venv/lib/python3.10/site-packages/pandas/tests/extension/base/index.py
ADDED
@@ -0,0 +1,19 @@
+"""
+Tests for Indexes backed by arbitrary ExtensionArrays.
+"""
+import pandas as pd
+
+
+class BaseIndexTests:
+    """Tests for Index object backed by an ExtensionArray"""
+
+    def test_index_from_array(self, data):
+        idx = pd.Index(data)
+        assert data.dtype == idx.dtype
+
+    def test_index_from_listlike_with_dtype(self, data):
+        idx = pd.Index(data, dtype=data.dtype)
+        assert idx.dtype == data.dtype
+
+        idx = pd.Index(list(data), dtype=data.dtype)
+        assert idx.dtype == data.dtype

venv/lib/python3.10/site-packages/pandas/tests/extension/base/io.py
ADDED
@@ -0,0 +1,39 @@
+from io import StringIO
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.core.arrays import ExtensionArray
+
+
+class BaseParsingTests:
+    @pytest.mark.parametrize("engine", ["c", "python"])
+    def test_EA_types(self, engine, data, request):
+        if isinstance(data.dtype, pd.CategoricalDtype):
+            # in parsers.pyx _convert_with_dtype there is special-casing for
+            # Categorical that pre-empts _from_sequence_of_strings
+            pass
+        elif isinstance(data.dtype, pd.core.dtypes.dtypes.NumpyEADtype):
+            # These get unwrapped internally so are treated as numpy dtypes
+            # in the parsers.pyx code
+            pass
+        elif (
+            type(data)._from_sequence_of_strings.__func__
+            is ExtensionArray._from_sequence_of_strings.__func__
+        ):
+            # i.e. the EA hasn't overridden _from_sequence_of_strings
+            mark = pytest.mark.xfail(
+                reason="_from_sequence_of_strings not implemented",
+                raises=NotImplementedError,
+            )
+            request.node.add_marker(mark)
+
+        df = pd.DataFrame({"with_dtype": pd.Series(data, dtype=str(data.dtype))})
+        csv_output = df.to_csv(index=False, na_rep=np.nan)
+        result = pd.read_csv(
+            StringIO(csv_output), dtype={"with_dtype": str(data.dtype)}, engine=engine
+        )
+        expected = df
+        tm.assert_frame_equal(result, expected)

venv/lib/python3.10/site-packages/pandas/tests/extension/base/missing.py
ADDED
@@ -0,0 +1,188 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+
+class BaseMissingTests:
+    def test_isna(self, data_missing):
+        expected = np.array([True, False])
+
+        result = pd.isna(data_missing)
+        tm.assert_numpy_array_equal(result, expected)
+
+        result = pd.Series(data_missing).isna()
+        expected = pd.Series(expected)
+        tm.assert_series_equal(result, expected)
+
+        # GH 21189
+        result = pd.Series(data_missing).drop([0, 1]).isna()
+        expected = pd.Series([], dtype=bool)
+        tm.assert_series_equal(result, expected)
+
+    @pytest.mark.parametrize("na_func", ["isna", "notna"])
+    def test_isna_returns_copy(self, data_missing, na_func):
+        result = pd.Series(data_missing)
+        expected = result.copy()
+        mask = getattr(result, na_func)()
+        if isinstance(mask.dtype, pd.SparseDtype):
+            mask = np.array(mask)
+
+        mask[:] = True
+        tm.assert_series_equal(result, expected)
+
+    def test_dropna_array(self, data_missing):
+        result = data_missing.dropna()
+        expected = data_missing[[1]]
+        tm.assert_extension_array_equal(result, expected)
+
+    def test_dropna_series(self, data_missing):
+        ser = pd.Series(data_missing)
+        result = ser.dropna()
+        expected = ser.iloc[[1]]
+        tm.assert_series_equal(result, expected)
+
+    def test_dropna_frame(self, data_missing):
+        df = pd.DataFrame({"A": data_missing}, columns=pd.Index(["A"], dtype=object))
+
+        # defaults
+        result = df.dropna()
+        expected = df.iloc[[1]]
+        tm.assert_frame_equal(result, expected)
+
+        # axis = 1
+        result = df.dropna(axis="columns")
+        expected = pd.DataFrame(index=pd.RangeIndex(2), columns=pd.Index([]))
+        tm.assert_frame_equal(result, expected)
+
+        # multiple
+        df = pd.DataFrame({"A": data_missing, "B": [1, np.nan]})
+        result = df.dropna()
+        expected = df.iloc[:0]
+        tm.assert_frame_equal(result, expected)
+
+    def test_fillna_scalar(self, data_missing):
+        valid = data_missing[1]
+        result = data_missing.fillna(valid)
+        expected = data_missing.fillna(valid)
+        tm.assert_extension_array_equal(result, expected)
+
+    @pytest.mark.filterwarnings(
+        "ignore:Series.fillna with 'method' is deprecated:FutureWarning"
+    )
+    def test_fillna_limit_pad(self, data_missing):
+        arr = data_missing.take([1, 0, 0, 0, 1])
+        result = pd.Series(arr).ffill(limit=2)
+        expected = pd.Series(data_missing.take([1, 1, 1, 0, 1]))
+        tm.assert_series_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "limit_area, input_ilocs, expected_ilocs",
+        [
+            ("outside", [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]),
+            ("outside", [1, 0, 1, 0, 1], [1, 0, 1, 0, 1]),
+            ("outside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 1]),
+            ("outside", [0, 1, 0, 1, 0], [0, 1, 0, 1, 1]),
+            ("inside", [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]),
+            ("inside", [1, 0, 1, 0, 1], [1, 1, 1, 1, 1]),
+            ("inside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 0]),
+            ("inside", [0, 1, 0, 1, 0], [0, 1, 1, 1, 0]),
+        ],
+    )
+    def test_ffill_limit_area(
+        self, data_missing, limit_area, input_ilocs, expected_ilocs
+    ):
+        # GH#56616
+        arr = data_missing.take(input_ilocs)
+        result = pd.Series(arr).ffill(limit_area=limit_area)
+        expected = pd.Series(data_missing.take(expected_ilocs))
+        tm.assert_series_equal(result, expected)
+
+    @pytest.mark.filterwarnings(
+        "ignore:Series.fillna with 'method' is deprecated:FutureWarning"
+    )
+    def test_fillna_limit_backfill(self, data_missing):
+        arr = data_missing.take([1, 0, 0, 0, 1])
+        result = pd.Series(arr).fillna(method="backfill", limit=2)
+        expected = pd.Series(data_missing.take([1, 0, 1, 1, 1]))
+        tm.assert_series_equal(result, expected)
+
+    def test_fillna_no_op_returns_copy(self, data):
+        data = data[~data.isna()]
+
+        valid = data[0]
+        result = data.fillna(valid)
+        assert result is not data
+        tm.assert_extension_array_equal(result, data)
+
+        result = data._pad_or_backfill(method="backfill")
+        assert result is not data
+        tm.assert_extension_array_equal(result, data)
+
+    def test_fillna_series(self, data_missing):
+        fill_value = data_missing[1]
+        ser = pd.Series(data_missing)
+
+        result = ser.fillna(fill_value)
+        expected = pd.Series(
+            data_missing._from_sequence(
+                [fill_value, fill_value], dtype=data_missing.dtype
+            )
+        )
+        tm.assert_series_equal(result, expected)
+
+        # Fill with a series
+        result = ser.fillna(expected)
+        tm.assert_series_equal(result, expected)
+
+        # Fill with a series not affecting the missing values
+        result = ser.fillna(ser)
+        tm.assert_series_equal(result, ser)
+
+    def test_fillna_series_method(self, data_missing, fillna_method):
+        fill_value = data_missing[1]
+
+        if fillna_method == "ffill":
+            data_missing = data_missing[::-1]
+
+        result = getattr(pd.Series(data_missing), fillna_method)()
+        expected = pd.Series(
+            data_missing._from_sequence(
+                [fill_value, fill_value], dtype=data_missing.dtype
+            )
+        )
+
+        tm.assert_series_equal(result, expected)
+
+    def test_fillna_frame(self, data_missing):
+        fill_value = data_missing[1]
+
+        result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value)
+
+        expected = pd.DataFrame(
+            {
+                "A": data_missing._from_sequence(
+                    [fill_value, fill_value], dtype=data_missing.dtype
+                ),
+                "B": [1, 2],
+            }
+        )
+
+        tm.assert_frame_equal(result, expected)
+
+    def test_fillna_fill_other(self, data):
+        result = pd.DataFrame({"A": data, "B": [np.nan] * len(data)}).fillna({"B": 0.0})
+
+        expected = pd.DataFrame({"A": data, "B": [0.0] * len(result)})
+
+        tm.assert_frame_equal(result, expected)
+
+    def test_use_inf_as_na_no_effect(self, data_missing):
+        ser = pd.Series(data_missing)
+        expected = ser.isna()
+        msg = "use_inf_as_na option is deprecated"
+        with tm.assert_produces_warning(FutureWarning, match=msg):
+            with pd.option_context("mode.use_inf_as_na", True):
+                result = ser.isna()
+        tm.assert_series_equal(result, expected)

venv/lib/python3.10/site-packages/pandas/tests/extension/base/reshaping.py
ADDED
@@ -0,0 +1,379 @@
+import itertools
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.api.extensions import ExtensionArray
+from pandas.core.internals.blocks import EABackedBlock
+
+
+class BaseReshapingTests:
+    """Tests for reshaping and concatenation."""
+
+    @pytest.mark.parametrize("in_frame", [True, False])
+    def test_concat(self, data, in_frame):
+        wrapped = pd.Series(data)
+        if in_frame:
+            wrapped = pd.DataFrame(wrapped)
+        result = pd.concat([wrapped, wrapped], ignore_index=True)
+
+        assert len(result) == len(data) * 2
+
+        if in_frame:
+            dtype = result.dtypes[0]
+        else:
+            dtype = result.dtype
+
+        assert dtype == data.dtype
+        if hasattr(result._mgr, "blocks"):
+            assert isinstance(result._mgr.blocks[0], EABackedBlock)
+        assert isinstance(result._mgr.arrays[0], ExtensionArray)
+
+    @pytest.mark.parametrize("in_frame", [True, False])
+    def test_concat_all_na_block(self, data_missing, in_frame):
+        valid_block = pd.Series(data_missing.take([1, 1]), index=[0, 1])
+        na_block = pd.Series(data_missing.take([0, 0]), index=[2, 3])
+        if in_frame:
+            valid_block = pd.DataFrame({"a": valid_block})
+            na_block = pd.DataFrame({"a": na_block})
+        result = pd.concat([valid_block, na_block])
+        if in_frame:
+            expected = pd.DataFrame({"a": data_missing.take([1, 1, 0, 0])})
+            tm.assert_frame_equal(result, expected)
+        else:
+            expected = pd.Series(data_missing.take([1, 1, 0, 0]))
+            tm.assert_series_equal(result, expected)
+
+    def test_concat_mixed_dtypes(self, data):
+        # https://github.com/pandas-dev/pandas/issues/20762
+        df1 = pd.DataFrame({"A": data[:3]})
+        df2 = pd.DataFrame({"A": [1, 2, 3]})
+        df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category")
+        dfs = [df1, df2, df3]
+
+        # dataframes
+        result = pd.concat(dfs)
+        expected = pd.concat([x.astype(object) for x in dfs])
+        tm.assert_frame_equal(result, expected)
+
+        # series
+        result = pd.concat([x["A"] for x in dfs])
+        expected = pd.concat([x["A"].astype(object) for x in dfs])
+        tm.assert_series_equal(result, expected)
+
+        # simple test for just EA and one other
+        result = pd.concat([df1, df2.astype(object)])
+        expected = pd.concat([df1.astype("object"), df2.astype("object")])
+        tm.assert_frame_equal(result, expected)
+
+        result = pd.concat([df1["A"], df2["A"].astype(object)])
+        expected = pd.concat([df1["A"].astype("object"), df2["A"].astype("object")])
+        tm.assert_series_equal(result, expected)
+
+    def test_concat_columns(self, data, na_value):
+        df1 = pd.DataFrame({"A": data[:3]})
+        df2 = pd.DataFrame({"B": [1, 2, 3]})
+
+        expected = pd.DataFrame({"A": data[:3], "B": [1, 2, 3]})
+        result = pd.concat([df1, df2], axis=1)
+        tm.assert_frame_equal(result, expected)
+        result = pd.concat([df1["A"], df2["B"]], axis=1)
+        tm.assert_frame_equal(result, expected)
+
+        # non-aligned
+        df2 = pd.DataFrame({"B": [1, 2, 3]}, index=[1, 2, 3])
+        expected = pd.DataFrame(
+            {
+                "A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype),
+                "B": [np.nan, 1, 2, 3],
+            }
+        )
+
+        result = pd.concat([df1, df2], axis=1)
+        tm.assert_frame_equal(result, expected)
+        result = pd.concat([df1["A"], df2["B"]], axis=1)
+        tm.assert_frame_equal(result, expected)
+
+    def test_concat_extension_arrays_copy_false(self, data, na_value):
+        # GH 20756
+        df1 = pd.DataFrame({"A": data[:3]})
+        df2 = pd.DataFrame({"B": data[3:7]})
+        expected = pd.DataFrame(
+            {
+                "A": data._from_sequence(list(data[:3]) + [na_value], dtype=data.dtype),
+                "B": data[3:7],
+            }
+        )
+        result = pd.concat([df1, df2], axis=1, copy=False)
+        tm.assert_frame_equal(result, expected)
+
+    def test_concat_with_reindex(self, data):
+        # GH-33027
+        a = pd.DataFrame({"a": data[:5]})
+        b = pd.DataFrame({"b": data[:5]})
+        result = pd.concat([a, b], ignore_index=True)
+        expected = pd.DataFrame(
+            {
+                "a": data.take(list(range(5)) + ([-1] * 5), allow_fill=True),
+                "b": data.take(([-1] * 5) + list(range(5)), allow_fill=True),
+            }
+        )
+        tm.assert_frame_equal(result, expected)
+
+    def test_align(self, data, na_value):
+        a = data[:3]
+        b = data[2:5]
+        r1, r2 = pd.Series(a).align(pd.Series(b, index=[1, 2, 3]))
+
+        # Assumes that the ctor can take a list of scalars of the type
+        e1 = pd.Series(data._from_sequence(list(a) + [na_value], dtype=data.dtype))
+        e2 = pd.Series(data._from_sequence([na_value] + list(b), dtype=data.dtype))
+        tm.assert_series_equal(r1, e1)
+        tm.assert_series_equal(r2, e2)
+
+    def test_align_frame(self, data, na_value):
+        a = data[:3]
+        b = data[2:5]
+        r1, r2 = pd.DataFrame({"A": a}).align(pd.DataFrame({"A": b}, index=[1, 2, 3]))
+
+        # Assumes that the ctor can take a list of scalars of the type
+        e1 = pd.DataFrame(
+            {"A": data._from_sequence(list(a) + [na_value], dtype=data.dtype)}
+        )
+        e2 = pd.DataFrame(
+            {"A": data._from_sequence([na_value] + list(b), dtype=data.dtype)}
+        )
+        tm.assert_frame_equal(r1, e1)
+        tm.assert_frame_equal(r2, e2)
+
+    def test_align_series_frame(self, data, na_value):
+        # https://github.com/pandas-dev/pandas/issues/20576
+        ser = pd.Series(data, name="a")
+        df = pd.DataFrame({"col": np.arange(len(ser) + 1)})
+        r1, r2 = ser.align(df)
+
+        e1 = pd.Series(
+            data._from_sequence(list(data) + [na_value], dtype=data.dtype),
+            name=ser.name,
+        )
+
+        tm.assert_series_equal(r1, e1)
+        tm.assert_frame_equal(r2, df)
+
+    def test_set_frame_expand_regular_with_extension(self, data):
+        df = pd.DataFrame({"A": [1] * len(data)})
+        df["B"] = data
+        expected = pd.DataFrame({"A": [1] * len(data), "B": data})
+        tm.assert_frame_equal(df, expected)
+
+    def test_set_frame_expand_extension_with_regular(self, data):
+        df = pd.DataFrame({"A": data})
+        df["B"] = [1] * len(data)
+        expected = pd.DataFrame({"A": data, "B": [1] * len(data)})
+        tm.assert_frame_equal(df, expected)
+
+    def test_set_frame_overwrite_object(self, data):
+        # https://github.com/pandas-dev/pandas/issues/20555
+        df = pd.DataFrame({"A": [1] * len(data)}, dtype=object)
+        df["A"] = data
+        assert df.dtypes["A"] == data.dtype
+
+    def test_merge(self, data, na_value):
+        # GH-20743
+        df1 = pd.DataFrame({"ext": data[:3], "int1": [1, 2, 3], "key": [0, 1, 2]})
+        df2 = pd.DataFrame({"int2": [1, 2, 3, 4], "key": [0, 0, 1, 3]})
+
+        res = pd.merge(df1, df2)
+        exp = pd.DataFrame(
+            {
+                "int1": [1, 1, 2],
+                "int2": [1, 2, 3],
+                "key": [0, 0, 1],
+                "ext": data._from_sequence(
+                    [data[0], data[0], data[1]], dtype=data.dtype
+                ),
+            }
+        )
+        tm.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]])
+
+        res = pd.merge(df1, df2, how="outer")
+        exp = pd.DataFrame(
+            {
+                "int1": [1, 1, 2, 3, np.nan],
+                "int2": [1, 2, 3, np.nan, 4],
+                "key": [0, 0, 1, 2, 3],
+                "ext": data._from_sequence(
+                    [data[0], data[0], data[1], data[2], na_value], dtype=data.dtype
+                ),
+            }
+        )
+        tm.assert_frame_equal(res, exp[["ext", "int1", "key", "int2"]])
+
+    def test_merge_on_extension_array(self, data):
+        # GH 23020
+        a, b = data[:2]
+        key = type(data)._from_sequence([a, b], dtype=data.dtype)
+
+        df = pd.DataFrame({"key": key, "val": [1, 2]})
+        result = pd.merge(df, df, on="key")
+        expected = pd.DataFrame({"key": key, "val_x": [1, 2], "val_y": [1, 2]})
+        tm.assert_frame_equal(result, expected)
+
+        # order
+        result = pd.merge(df.iloc[[1, 0]], df, on="key")
+        expected = expected.iloc[[1, 0]].reset_index(drop=True)
+        tm.assert_frame_equal(result, expected)
+
+    def test_merge_on_extension_array_duplicates(self, data):
+        # GH 23020
+        a, b = data[:2]
+        key = type(data)._from_sequence([a, b, a], dtype=data.dtype)
+        df1 = pd.DataFrame({"key": key, "val": [1, 2, 3]})
+        df2 = pd.DataFrame({"key": key, "val": [1, 2, 3]})
+
+        result = pd.merge(df1, df2, on="key")
+        expected = pd.DataFrame(
+            {
+                "key": key.take([0, 0, 1, 2, 2]),
+                "val_x": [1, 1, 2, 3, 3],
+                "val_y": [1, 3, 2, 1, 3],
+            }
+        )
+        tm.assert_frame_equal(result, expected)
+
+    @pytest.mark.filterwarnings(
+        "ignore:The previous implementation of stack is deprecated"
+    )
+    @pytest.mark.parametrize(
+        "columns",
+        [
+            ["A", "B"],
+            pd.MultiIndex.from_tuples(
+                [("A", "a"), ("A", "b")], names=["outer", "inner"]
+            ),
+        ],
+    )
+    @pytest.mark.parametrize("future_stack", [True, False])
+    def test_stack(self, data, columns, future_stack):
+        df = pd.DataFrame({"A": data[:5], "B": data[:5]})
+        df.columns = columns
+        result = df.stack(future_stack=future_stack)
+        expected = df.astype(object).stack(future_stack=future_stack)
+        # we need a second astype(object), in case the constructor inferred
+        # object -> specialized, as is done for period.
+        expected = expected.astype(object)
+
+        if isinstance(expected, pd.Series):
+            assert result.dtype == df.iloc[:, 0].dtype
+        else:
+            assert all(result.dtypes == df.iloc[:, 0].dtype)
+
+        result = result.astype(object)
+        tm.assert_equal(result, expected)
+
+    @pytest.mark.parametrize(
+        "index",
+        [
+            # Two levels, uniform.
+            pd.MultiIndex.from_product(([["A", "B"], ["a", "b"]]), names=["a", "b"]),
+            # non-uniform
+            pd.MultiIndex.from_tuples([("A", "a"), ("A", "b"), ("B", "b")]),
+            # three levels, non-uniform
+            pd.MultiIndex.from_product([("A", "B"), ("a", "b", "c"), (0, 1, 2)]),
+            pd.MultiIndex.from_tuples(
+                [
+                    ("A", "a", 1),
+                    ("A", "b", 0),
+                    ("A", "a", 0),
+                    ("B", "a", 0),
+                    ("B", "c", 1),
+                ]
+            ),
+        ],
+    )
+    @pytest.mark.parametrize("obj", ["series", "frame"])
+    def test_unstack(self, data, index, obj):
+        data = data[: len(index)]
+        if obj == "series":
+            ser = pd.Series(data, index=index)
+        else:
+            ser = pd.DataFrame({"A": data, "B": data}, index=index)
+
+        n = index.nlevels
+        levels = list(range(n))
+        # [0, 1, 2]
+        # [(0,), (1,), (2,), (0, 1), (0, 2), (1, 0), (1, 2), (2, 0), (2, 1)]
+        combinations = itertools.chain.from_iterable(
+            itertools.permutations(levels, i) for i in range(1, n)
+        )
+
+        for level in combinations:
+            result = ser.unstack(level=level)
+            assert all(
+                isinstance(result[col].array, type(data)) for col in result.columns
+            )
+
+            if obj == "series":
+                # We should get the same result with to_frame+unstack+droplevel
+                df = ser.to_frame()
+
+                alt = df.unstack(level=level).droplevel(0, axis=1)
+                tm.assert_frame_equal(result, alt)
+
+            obj_ser = ser.astype(object)
+
+            expected = obj_ser.unstack(level=level, fill_value=data.dtype.na_value)
+            if obj == "series":
+                assert (expected.dtypes == object).all()
+
+            result = result.astype(object)
+            tm.assert_frame_equal(result, expected)
+
+    def test_ravel(self, data):
+        # as long as EA is 1D-only, ravel is a no-op
+        result = data.ravel()
+        assert type(result) == type(data)
+
+        if data.dtype._is_immutable:
+            pytest.skip(f"test_ravel assumes mutability and {data.dtype} is immutable")
+
+        # Check that we have a view, not a copy
+        result[0] = result[1]
+        assert data[0] == data[1]
+
+    def test_transpose(self, data):
+        result = data.transpose()
+        assert type(result) == type(data)
+
+        # check we get a new object
+        assert result is not data
+
+        # If we ever _did_ support 2D, shape should be reversed
+        assert result.shape == data.shape[::-1]
+
+        if data.dtype._is_immutable:
+            pytest.skip(
+                f"test_transpose assumes mutability and {data.dtype} is immutable"
+            )
+
+        # Check that we have a view, not a copy
+        result[0] = result[1]
+        assert data[0] == data[1]
+
+    def test_transpose_frame(self, data):
+        df = pd.DataFrame({"A": data[:4], "B": data[:4]}, index=["a", "b", "c", "d"])
+        result = df.T
+        expected = pd.DataFrame(
+            {
+                "a": type(data)._from_sequence([data[0]] * 2, dtype=data.dtype),
+                "b": type(data)._from_sequence([data[1]] * 2, dtype=data.dtype),
+                "c": type(data)._from_sequence([data[2]] * 2, dtype=data.dtype),
+                "d": type(data)._from_sequence([data[3]] * 2, dtype=data.dtype),
+            },
+            index=["A", "B"],
+        )
+        tm.assert_frame_equal(result, expected)
+        tm.assert_frame_equal(np.transpose(np.transpose(df)), df)
+        tm.assert_frame_equal(np.transpose(np.transpose(df[["A"]])), df[["A"]])

venv/lib/python3.10/site-packages/pandas/tests/generic/__init__.py
ADDED
File without changes
venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (188 Bytes)
venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_duplicate_labels.cpython-310.pyc
ADDED
Binary file (11.9 kB)
venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_finalize.cpython-310.pyc
ADDED
Binary file (18.1 kB)
venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_frame.cpython-310.pyc
ADDED
Binary file (7.46 kB)
venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_generic.cpython-310.pyc
ADDED
Binary file (15.8 kB)
venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_label_or_level_utils.cpython-310.pyc
ADDED
Binary file (7.43 kB)
venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_series.cpython-310.pyc
ADDED
Binary file (5.95 kB)
venv/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_to_xarray.cpython-310.pyc
ADDED
Binary file (4.25 kB)
venv/lib/python3.10/site-packages/pandas/tests/generic/test_duplicate_labels.py
ADDED
@@ -0,0 +1,413 @@
+"""Tests dealing with the NDFrame.allows_duplicates."""
+import operator
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+not_implemented = pytest.mark.xfail(reason="Not implemented.")
+
+# ----------------------------------------------------------------------------
+# Preservation
+
+
+class TestPreserves:
+    @pytest.mark.parametrize(
+        "cls, data",
+        [
+            (pd.Series, np.array([])),
+            (pd.Series, [1, 2]),
+            (pd.DataFrame, {}),
+            (pd.DataFrame, {"A": [1, 2]}),
+        ],
+    )
+    def test_construction_ok(self, cls, data):
+        result = cls(data)
+        assert result.flags.allows_duplicate_labels is True
+
+        result = cls(data).set_flags(allows_duplicate_labels=False)
+        assert result.flags.allows_duplicate_labels is False
+
+    @pytest.mark.parametrize(
+        "func",
+        [
+            operator.itemgetter(["a"]),
+            operator.methodcaller("add", 1),
+            operator.methodcaller("rename", str.upper),
+            operator.methodcaller("rename", "name"),
+            operator.methodcaller("abs"),
+            np.abs,
+        ],
+    )
+    def test_preserved_series(self, func):
+        s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False)
+        assert func(s).flags.allows_duplicate_labels is False
+
+    @pytest.mark.parametrize(
+        "other", [pd.Series(0, index=["a", "b", "c"]), pd.Series(0, index=["a", "b"])]
+    )
+    # TODO: frame
+    @not_implemented
+    def test_align(self, other):
+        s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False)
+        a, b = s.align(other)
+        assert a.flags.allows_duplicate_labels is False
+        assert b.flags.allows_duplicate_labels is False
+
+    def test_preserved_frame(self):
+        df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags(
+            allows_duplicate_labels=False
+        )
+        assert df.loc[["a"]].flags.allows_duplicate_labels is False
+        assert df.loc[:, ["A", "B"]].flags.allows_duplicate_labels is False
+
+    def test_to_frame(self):
+        ser = pd.Series(dtype=float).set_flags(allows_duplicate_labels=False)
+        assert ser.to_frame().flags.allows_duplicate_labels is False
+
+    @pytest.mark.parametrize("func", ["add", "sub"])
+    @pytest.mark.parametrize("frame", [False, True])
+    @pytest.mark.parametrize("other", [1, pd.Series([1, 2], name="A")])
+    def test_binops(self, func, other, frame):
+        df = pd.Series([1, 2], name="A", index=["a", "b"]).set_flags(
+            allows_duplicate_labels=False
+        )
+        if frame:
+            df = df.to_frame()
+        if isinstance(other, pd.Series) and frame:
+            other = other.to_frame()
+        func = operator.methodcaller(func, other)
+        assert df.flags.allows_duplicate_labels is False
+        assert func(df).flags.allows_duplicate_labels is False
+
+    def test_preserve_getitem(self):
+        df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False)
+        assert df[["A"]].flags.allows_duplicate_labels is False
+        assert df["A"].flags.allows_duplicate_labels is False
+        assert df.loc[0].flags.allows_duplicate_labels is False
+        assert df.loc[[0]].flags.allows_duplicate_labels is False
+        assert df.loc[0, ["A"]].flags.allows_duplicate_labels is False
+
+    def test_ndframe_getitem_caching_issue(
+        self, request, using_copy_on_write, warn_copy_on_write
+    ):
+        if not (using_copy_on_write or warn_copy_on_write):
+            request.applymarker(pytest.mark.xfail(reason="Unclear behavior."))
+        # NDFrame.__getitem__ will cache the first df['A']. May need to
+        # invalidate that cache? Update the cached entries?
+        df = pd.DataFrame({"A": [0]}).set_flags(allows_duplicate_labels=False)
+        assert df["A"].flags.allows_duplicate_labels is False
+        df.flags.allows_duplicate_labels = True
+        assert df["A"].flags.allows_duplicate_labels is True
+
+    @pytest.mark.parametrize(
+        "objs, kwargs",
+        [
+            # Series
+            (
+                [
+                    pd.Series(1, index=["a", "b"]),
+                    pd.Series(2, index=["c", "d"]),
+                ],
+                {},
+            ),
+            (
+                [
+                    pd.Series(1, index=["a", "b"]),
+                    pd.Series(2, index=["a", "b"]),
+                ],
+                {"ignore_index": True},
+            ),
+            (
+                [
+                    pd.Series(1, index=["a", "b"]),
+                    pd.Series(2, index=["a", "b"]),
+                ],
+                {"axis": 1},
+            ),
+            # Frame
+            (
+                [
+                    pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),
+                    pd.DataFrame({"A": [1, 2]}, index=["c", "d"]),
+                ],
+                {},
+            ),
+            (
+                [
+                    pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),
+                    pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),
+                ],
+                {"ignore_index": True},
+            ),
+            (
+                [
+                    pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),
+                    pd.DataFrame({"B": [1, 2]}, index=["a", "b"]),
+                ],
+                {"axis": 1},
+            ),
+            # Series / Frame
+            (
+                [
+                    pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),
+                    pd.Series([1, 2], index=["a", "b"], name="B"),
+                ],
+                {"axis": 1},
+            ),
+        ],
+    )
+    def test_concat(self, objs, kwargs):
+        objs = [x.set_flags(allows_duplicate_labels=False) for x in objs]
+        result = pd.concat(objs, **kwargs)
+        assert result.flags.allows_duplicate_labels is False
+
+    @pytest.mark.parametrize(
+        "left, right, expected",
+        [
+            # false false false
+            pytest.param(
+                pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags(
+                    allows_duplicate_labels=False
+                ),
+                pd.DataFrame({"B": [0, 1]}, index=["a", "d"]).set_flags(
+                    allows_duplicate_labels=False
+                ),
+                False,
+                marks=not_implemented,
+            ),
+            # false true false
+            pytest.param(
+                pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags(
+                    allows_duplicate_labels=False
+                ),
+                pd.DataFrame({"B": [0, 1]}, index=["a", "d"]),
+                False,
+                marks=not_implemented,
+            ),
+            # true true true
+            (
+                pd.DataFrame({"A": [0, 1]}, index=["a", "b"]),
+                pd.DataFrame({"B": [0, 1]}, index=["a", "d"]),
+                True,
+            ),
+        ],
+    )
+    def test_merge(self, left, right, expected):
+        result = pd.merge(left, right, left_index=True, right_index=True)
+        assert result.flags.allows_duplicate_labels is expected
+
+    @not_implemented
+    def test_groupby(self):
+        # XXX: This is under tested
+        # TODO:
+        # - apply
+        # - transform
+        # - Should passing a grouper that disallows duplicates propagate?
+        df = pd.DataFrame({"A": [1, 2, 3]}).set_flags(allows_duplicate_labels=False)
+        result = df.groupby([0, 0, 1]).agg("count")
+        assert result.flags.allows_duplicate_labels is False
+
+    @pytest.mark.parametrize("frame", [True, False])
+    @not_implemented
+    def test_window(self, frame):
+        df = pd.Series(
+            1,
+            index=pd.date_range("2000", periods=12),
+            name="A",
+            allows_duplicate_labels=False,
+        )
+        if frame:
+            df = df.to_frame()
+        assert df.rolling(3).mean().flags.allows_duplicate_labels is False
+        assert df.ewm(3).mean().flags.allows_duplicate_labels is False
+        assert df.expanding(3).mean().flags.allows_duplicate_labels is False
+
+
+# ----------------------------------------------------------------------------
+# Raises
+
+
+class TestRaises:
+    @pytest.mark.parametrize(
+        "cls, axes",
+        [
+            (pd.Series, {"index": ["a", "a"], "dtype": float}),
+            (pd.DataFrame, {"index": ["a", "a"]}),
+            (pd.DataFrame, {"index": ["a", "a"], "columns": ["b", "b"]}),
+            (pd.DataFrame, {"columns": ["b", "b"]}),
+        ],
+    )
+    def test_set_flags_with_duplicates(self, cls, axes):
+        result = cls(**axes)
+        assert result.flags.allows_duplicate_labels is True
+
+        msg = "Index has duplicates."
+        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
+            cls(**axes).set_flags(allows_duplicate_labels=False)
+
+    @pytest.mark.parametrize(
+        "data",
+        [
+            pd.Series(index=[0, 0], dtype=float),
+            pd.DataFrame(index=[0, 0]),
+            pd.DataFrame(columns=[0, 0]),
+        ],
+    )
+    def test_setting_allows_duplicate_labels_raises(self, data):
+        msg = "Index has duplicates."
+        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
+            data.flags.allows_duplicate_labels = False
+
+        assert data.flags.allows_duplicate_labels is True
+
+    def test_series_raises(self):
+        a = pd.Series(0, index=["a", "b"])
+        b = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False)
+        msg = "Index has duplicates."
+        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
+            pd.concat([a, b])
+
+    @pytest.mark.parametrize(
+        "getter, target",
+        [
+            (operator.itemgetter(["A", "A"]), None),
+            # loc
+            (operator.itemgetter(["a", "a"]), "loc"),
+            pytest.param(operator.itemgetter(("a", ["A", "A"])), "loc"),
|
280 |
+
(operator.itemgetter((["a", "a"], "A")), "loc"),
|
281 |
+
# iloc
|
282 |
+
(operator.itemgetter([0, 0]), "iloc"),
|
283 |
+
pytest.param(operator.itemgetter((0, [0, 0])), "iloc"),
|
284 |
+
pytest.param(operator.itemgetter(([0, 0], 0)), "iloc"),
|
285 |
+
],
|
286 |
+
)
|
287 |
+
def test_getitem_raises(self, getter, target):
|
288 |
+
df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags(
|
289 |
+
allows_duplicate_labels=False
|
290 |
+
)
|
291 |
+
if target:
|
292 |
+
# df, df.loc, or df.iloc
|
293 |
+
target = getattr(df, target)
|
294 |
+
else:
|
295 |
+
target = df
|
296 |
+
|
297 |
+
msg = "Index has duplicates."
|
298 |
+
with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
|
299 |
+
getter(target)
|
300 |
+
|
301 |
+
@pytest.mark.parametrize(
|
302 |
+
"objs, kwargs",
|
303 |
+
[
|
304 |
+
(
|
305 |
+
[
|
306 |
+
pd.Series(1, index=[0, 1], name="a"),
|
307 |
+
pd.Series(2, index=[0, 1], name="a"),
|
308 |
+
],
|
309 |
+
{"axis": 1},
|
310 |
+
)
|
311 |
+
],
|
312 |
+
)
|
313 |
+
def test_concat_raises(self, objs, kwargs):
|
314 |
+
objs = [x.set_flags(allows_duplicate_labels=False) for x in objs]
|
315 |
+
msg = "Index has duplicates."
|
316 |
+
with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
|
317 |
+
pd.concat(objs, **kwargs)
|
318 |
+
|
319 |
+
@not_implemented
|
320 |
+
def test_merge_raises(self):
|
321 |
+
a = pd.DataFrame({"A": [0, 1, 2]}, index=["a", "b", "c"]).set_flags(
|
322 |
+
allows_duplicate_labels=False
|
323 |
+
)
|
324 |
+
b = pd.DataFrame({"B": [0, 1, 2]}, index=["a", "b", "b"])
|
325 |
+
msg = "Index has duplicates."
|
326 |
+
with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
|
327 |
+
pd.merge(a, b, left_index=True, right_index=True)
|
328 |
+
|
329 |
+
|
330 |
+
@pytest.mark.parametrize(
|
331 |
+
"idx",
|
332 |
+
[
|
333 |
+
pd.Index([1, 1]),
|
334 |
+
pd.Index(["a", "a"]),
|
335 |
+
pd.Index([1.1, 1.1]),
|
336 |
+
pd.PeriodIndex([pd.Period("2000", "D")] * 2),
|
337 |
+
pd.DatetimeIndex([pd.Timestamp("2000")] * 2),
|
338 |
+
pd.TimedeltaIndex([pd.Timedelta("1D")] * 2),
|
339 |
+
pd.CategoricalIndex(["a", "a"]),
|
340 |
+
pd.IntervalIndex([pd.Interval(0, 1)] * 2),
|
341 |
+
pd.MultiIndex.from_tuples([("a", 1), ("a", 1)]),
|
342 |
+
],
|
343 |
+
ids=lambda x: type(x).__name__,
|
344 |
+
)
|
345 |
+
def test_raises_basic(idx):
|
346 |
+
msg = "Index has duplicates."
|
347 |
+
with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
|
348 |
+
pd.Series(1, index=idx).set_flags(allows_duplicate_labels=False)
|
349 |
+
|
350 |
+
with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
|
351 |
+
pd.DataFrame({"A": [1, 1]}, index=idx).set_flags(allows_duplicate_labels=False)
|
352 |
+
|
353 |
+
with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
|
354 |
+
pd.DataFrame([[1, 2]], columns=idx).set_flags(allows_duplicate_labels=False)
|
355 |
+
|
356 |
+
|
357 |
+
def test_format_duplicate_labels_message():
|
358 |
+
idx = pd.Index(["a", "b", "a", "b", "c"])
|
359 |
+
result = idx._format_duplicate_message()
|
360 |
+
expected = pd.DataFrame(
|
361 |
+
{"positions": [[0, 2], [1, 3]]}, index=pd.Index(["a", "b"], name="label")
|
362 |
+
)
|
363 |
+
tm.assert_frame_equal(result, expected)
|
364 |
+
|
365 |
+
|
366 |
+
def test_format_duplicate_labels_message_multi():
|
367 |
+
idx = pd.MultiIndex.from_product([["A"], ["a", "b", "a", "b", "c"]])
|
368 |
+
result = idx._format_duplicate_message()
|
369 |
+
expected = pd.DataFrame(
|
370 |
+
{"positions": [[0, 2], [1, 3]]},
|
371 |
+
index=pd.MultiIndex.from_product([["A"], ["a", "b"]]),
|
372 |
+
)
|
373 |
+
tm.assert_frame_equal(result, expected)
|
374 |
+
|
375 |
+
|
376 |
+
def test_dataframe_insert_raises():
|
377 |
+
df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False)
|
378 |
+
msg = "Cannot specify"
|
379 |
+
with pytest.raises(ValueError, match=msg):
|
380 |
+
df.insert(0, "A", [3, 4], allow_duplicates=True)
|
381 |
+
|
382 |
+
|
383 |
+
@pytest.mark.parametrize(
|
384 |
+
"method, frame_only",
|
385 |
+
[
|
386 |
+
(operator.methodcaller("set_index", "A", inplace=True), True),
|
387 |
+
(operator.methodcaller("reset_index", inplace=True), True),
|
388 |
+
(operator.methodcaller("rename", lambda x: x, inplace=True), False),
|
389 |
+
],
|
390 |
+
)
|
391 |
+
def test_inplace_raises(method, frame_only):
|
392 |
+
df = pd.DataFrame({"A": [0, 0], "B": [1, 2]}).set_flags(
|
393 |
+
allows_duplicate_labels=False
|
394 |
+
)
|
395 |
+
s = df["A"]
|
396 |
+
s.flags.allows_duplicate_labels = False
|
397 |
+
msg = "Cannot specify"
|
398 |
+
|
399 |
+
with pytest.raises(ValueError, match=msg):
|
400 |
+
method(df)
|
401 |
+
if not frame_only:
|
402 |
+
with pytest.raises(ValueError, match=msg):
|
403 |
+
method(s)
|
404 |
+
|
405 |
+
|
406 |
+
def test_pickle():
|
407 |
+
a = pd.Series([1, 2]).set_flags(allows_duplicate_labels=False)
|
408 |
+
b = tm.round_trip_pickle(a)
|
409 |
+
tm.assert_series_equal(a, b)
|
410 |
+
|
411 |
+
a = pd.DataFrame({"A": []}).set_flags(allows_duplicate_labels=False)
|
412 |
+
b = tm.round_trip_pickle(a)
|
413 |
+
tm.assert_frame_equal(a, b)
|
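The tests in the file above all revolve around the `allows_duplicate_labels` flag set via `set_flags`. As a hedged, self-contained illustration of the behaviour they assert (a minimal sketch drawn from the cases above, not part of the vendored file):

import pandas as pd

# A frame flagged to disallow duplicate labels keeps that flag through indexing
# (mirrors test_preserve_getitem above).
df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False)
assert df[["A"]].flags.allows_duplicate_labels is False

# Combining objects so that duplicate labels would appear raises
# (mirrors test_series_raises above).
a = pd.Series(0, index=["a", "b"])
b = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False)
try:
    pd.concat([a, b])
except pd.errors.DuplicateLabelError as err:
    print(err)  # message starts with "Index has duplicates."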
venv/lib/python3.10/site-packages/pandas/tests/generic/test_finalize.py
ADDED
@@ -0,0 +1,767 @@
+"""
+An exhaustive list of pandas methods exercising NDFrame.__finalize__.
+"""
+import operator
+import re
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+# TODO:
+# * Binary methods (mul, div, etc.)
+# * Binary outputs (align, etc.)
+# * top-level methods (concat, merge, get_dummies, etc.)
+# * window
+# * cumulative reductions
+
+not_implemented_mark = pytest.mark.xfail(reason="not implemented")
+
+mi = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=["A", "B"])
+
+frame_data = ({"A": [1]},)
+frame_mi_data = ({"A": [1, 2, 3, 4]}, mi)
+
+
+# Tuple of
+# - Callable: Constructor (Series, DataFrame)
+# - Tuple: Constructor args
+# - Callable: pass the constructed value with attrs set to this.
+
+_all_methods = [
+    (pd.Series, ([0],), operator.methodcaller("take", [])),
+    (pd.Series, ([0],), operator.methodcaller("__getitem__", [True])),
+    (pd.Series, ([0],), operator.methodcaller("repeat", 2)),
+    (pd.Series, ([0],), operator.methodcaller("reset_index")),
+    (pd.Series, ([0],), operator.methodcaller("reset_index", drop=True)),
+    (pd.Series, ([0],), operator.methodcaller("to_frame")),
+    (pd.Series, ([0, 0],), operator.methodcaller("drop_duplicates")),
+    (pd.Series, ([0, 0],), operator.methodcaller("duplicated")),
+    (pd.Series, ([0, 0],), operator.methodcaller("round")),
+    (pd.Series, ([0, 0],), operator.methodcaller("rename", lambda x: x + 1)),
+    (pd.Series, ([0, 0],), operator.methodcaller("rename", "name")),
+    (pd.Series, ([0, 0],), operator.methodcaller("set_axis", ["a", "b"])),
+    (pd.Series, ([0, 0],), operator.methodcaller("reindex", [1, 0])),
+    (pd.Series, ([0, 0],), operator.methodcaller("drop", [0])),
+    (pd.Series, (pd.array([0, pd.NA]),), operator.methodcaller("fillna", 0)),
+    (pd.Series, ([0, 0],), operator.methodcaller("replace", {0: 1})),
+    (pd.Series, ([0, 0],), operator.methodcaller("shift")),
+    (pd.Series, ([0, 0],), operator.methodcaller("isin", [0, 1])),
+    (pd.Series, ([0, 0],), operator.methodcaller("between", 0, 2)),
+    (pd.Series, ([0, 0],), operator.methodcaller("isna")),
+    (pd.Series, ([0, 0],), operator.methodcaller("isnull")),
+    (pd.Series, ([0, 0],), operator.methodcaller("notna")),
+    (pd.Series, ([0, 0],), operator.methodcaller("notnull")),
+    (pd.Series, ([1],), operator.methodcaller("add", pd.Series([1]))),
+    # TODO: mul, div, etc.
+    (
+        pd.Series,
+        ([0], pd.period_range("2000", periods=1)),
+        operator.methodcaller("to_timestamp"),
+    ),
+    (
+        pd.Series,
+        ([0], pd.date_range("2000", periods=1)),
+        operator.methodcaller("to_period"),
+    ),
+    pytest.param(
+        (
+            pd.DataFrame,
+            frame_data,
+            operator.methodcaller("dot", pd.DataFrame(index=["A"])),
+        ),
+        marks=pytest.mark.xfail(reason="Implement binary finalize"),
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("transpose")),
+    (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", "A")),
+    (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", ["A"])),
+    (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", np.array([True]))),
+    (pd.DataFrame, ({("A", "a"): [1]},), operator.methodcaller("__getitem__", ["A"])),
+    (pd.DataFrame, frame_data, operator.methodcaller("query", "A == 1")),
+    (pd.DataFrame, frame_data, operator.methodcaller("eval", "A + 1", engine="python")),
+    (pd.DataFrame, frame_data, operator.methodcaller("select_dtypes", include="int")),
+    (pd.DataFrame, frame_data, operator.methodcaller("assign", b=1)),
+    (pd.DataFrame, frame_data, operator.methodcaller("set_axis", ["A"])),
+    (pd.DataFrame, frame_data, operator.methodcaller("reindex", [0, 1])),
+    (pd.DataFrame, frame_data, operator.methodcaller("drop", columns=["A"])),
+    (pd.DataFrame, frame_data, operator.methodcaller("drop", index=[0])),
+    (pd.DataFrame, frame_data, operator.methodcaller("rename", columns={"A": "a"})),
+    (pd.DataFrame, frame_data, operator.methodcaller("rename", index=lambda x: x)),
+    (pd.DataFrame, frame_data, operator.methodcaller("fillna", "A")),
+    (pd.DataFrame, frame_data, operator.methodcaller("fillna", method="ffill")),
+    (pd.DataFrame, frame_data, operator.methodcaller("set_index", "A")),
+    (pd.DataFrame, frame_data, operator.methodcaller("reset_index")),
+    (pd.DataFrame, frame_data, operator.methodcaller("isna")),
+    (pd.DataFrame, frame_data, operator.methodcaller("isnull")),
+    (pd.DataFrame, frame_data, operator.methodcaller("notna")),
+    (pd.DataFrame, frame_data, operator.methodcaller("notnull")),
+    (pd.DataFrame, frame_data, operator.methodcaller("dropna")),
+    (pd.DataFrame, frame_data, operator.methodcaller("drop_duplicates")),
+    (pd.DataFrame, frame_data, operator.methodcaller("duplicated")),
+    (pd.DataFrame, frame_data, operator.methodcaller("sort_values", by="A")),
+    (pd.DataFrame, frame_data, operator.methodcaller("sort_index")),
+    (pd.DataFrame, frame_data, operator.methodcaller("nlargest", 1, "A")),
+    (pd.DataFrame, frame_data, operator.methodcaller("nsmallest", 1, "A")),
+    (pd.DataFrame, frame_mi_data, operator.methodcaller("swaplevel")),
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("add", pd.DataFrame(*frame_data)),
+    ),
+    # TODO: div, mul, etc.
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("combine", pd.DataFrame(*frame_data), operator.add),
+    ),
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("combine_first", pd.DataFrame(*frame_data)),
+    ),
+    pytest.param(
+        (
+            pd.DataFrame,
+            frame_data,
+            operator.methodcaller("update", pd.DataFrame(*frame_data)),
+        ),
+        marks=not_implemented_mark,
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("pivot", columns="A")),
+    (
+        pd.DataFrame,
+        ({"A": [1], "B": [1]},),
+        operator.methodcaller("pivot_table", columns="A"),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [1], "B": [1]},),
+        operator.methodcaller("pivot_table", columns="A", aggfunc=["mean", "sum"]),
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("stack")),
+    (pd.DataFrame, frame_data, operator.methodcaller("explode", "A")),
+    (pd.DataFrame, frame_mi_data, operator.methodcaller("unstack")),
+    (
+        pd.DataFrame,
+        ({"A": ["a", "b", "c"], "B": [1, 3, 5], "C": [2, 4, 6]},),
+        operator.methodcaller("melt", id_vars=["A"], value_vars=["B"]),
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("map", lambda x: x)),
+    pytest.param(
+        (
+            pd.DataFrame,
+            frame_data,
+            operator.methodcaller("merge", pd.DataFrame({"A": [1]})),
+        ),
+        marks=not_implemented_mark,
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("round", 2)),
+    (pd.DataFrame, frame_data, operator.methodcaller("corr")),
+    pytest.param(
+        (pd.DataFrame, frame_data, operator.methodcaller("cov")),
+        marks=[
+            pytest.mark.filterwarnings("ignore::RuntimeWarning"),
+        ],
+    ),
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("corrwith", pd.DataFrame(*frame_data)),
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("count")),
+    (pd.DataFrame, frame_data, operator.methodcaller("nunique")),
+    (pd.DataFrame, frame_data, operator.methodcaller("idxmin")),
+    (pd.DataFrame, frame_data, operator.methodcaller("idxmax")),
+    (pd.DataFrame, frame_data, operator.methodcaller("mode")),
+    (pd.Series, [0], operator.methodcaller("mode")),
+    (pd.DataFrame, frame_data, operator.methodcaller("median")),
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("quantile", numeric_only=True),
+    ),
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("quantile", q=[0.25, 0.75], numeric_only=True),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [pd.Timedelta(days=1), pd.Timedelta(days=2)]},),
+        operator.methodcaller("quantile", numeric_only=False),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [np.datetime64("2022-01-01"), np.datetime64("2022-01-02")]},),
+        operator.methodcaller("quantile", numeric_only=True),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [1]}, [pd.Period("2000", "D")]),
+        operator.methodcaller("to_timestamp"),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [1]}, [pd.Timestamp("2000")]),
+        operator.methodcaller("to_period", freq="D"),
+    ),
+    (pd.DataFrame, frame_mi_data, operator.methodcaller("isin", [1])),
+    (pd.DataFrame, frame_mi_data, operator.methodcaller("isin", pd.Series([1]))),
+    (
+        pd.DataFrame,
+        frame_mi_data,
+        operator.methodcaller("isin", pd.DataFrame({"A": [1]})),
+    ),
+    (pd.DataFrame, frame_mi_data, operator.methodcaller("droplevel", "A")),
+    (pd.DataFrame, frame_data, operator.methodcaller("pop", "A")),
+    # Squeeze on columns, otherwise we'll end up with a scalar
+    (pd.DataFrame, frame_data, operator.methodcaller("squeeze", axis="columns")),
+    (pd.Series, ([1, 2],), operator.methodcaller("squeeze")),
+    (pd.Series, ([1, 2],), operator.methodcaller("rename_axis", index="a")),
+    (pd.DataFrame, frame_data, operator.methodcaller("rename_axis", columns="a")),
+    # Unary ops
+    (pd.DataFrame, frame_data, operator.neg),
+    (pd.Series, [1], operator.neg),
+    (pd.DataFrame, frame_data, operator.pos),
+    (pd.Series, [1], operator.pos),
+    (pd.DataFrame, frame_data, operator.inv),
+    (pd.Series, [1], operator.inv),
+    (pd.DataFrame, frame_data, abs),
+    (pd.Series, [1], abs),
+    (pd.DataFrame, frame_data, round),
+    (pd.Series, [1], round),
+    (pd.DataFrame, frame_data, operator.methodcaller("take", [0, 0])),
+    (pd.DataFrame, frame_mi_data, operator.methodcaller("xs", "a")),
+    (pd.Series, (1, mi), operator.methodcaller("xs", "a")),
+    (pd.DataFrame, frame_data, operator.methodcaller("get", "A")),
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("reindex_like", pd.DataFrame({"A": [1, 2, 3]})),
+    ),
+    (
+        pd.Series,
+        frame_data,
+        operator.methodcaller("reindex_like", pd.Series([0, 1, 2])),
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("add_prefix", "_")),
+    (pd.DataFrame, frame_data, operator.methodcaller("add_suffix", "_")),
+    (pd.Series, (1, ["a", "b"]), operator.methodcaller("add_prefix", "_")),
+    (pd.Series, (1, ["a", "b"]), operator.methodcaller("add_suffix", "_")),
+    (pd.Series, ([3, 2],), operator.methodcaller("sort_values")),
+    (pd.Series, ([1] * 10,), operator.methodcaller("head")),
+    (pd.DataFrame, ({"A": [1] * 10},), operator.methodcaller("head")),
+    (pd.Series, ([1] * 10,), operator.methodcaller("tail")),
+    (pd.DataFrame, ({"A": [1] * 10},), operator.methodcaller("tail")),
+    (pd.Series, ([1, 2],), operator.methodcaller("sample", n=2, replace=True)),
+    (pd.DataFrame, (frame_data,), operator.methodcaller("sample", n=2, replace=True)),
+    (pd.Series, ([1, 2],), operator.methodcaller("astype", float)),
+    (pd.DataFrame, frame_data, operator.methodcaller("astype", float)),
+    (pd.Series, ([1, 2],), operator.methodcaller("copy")),
+    (pd.DataFrame, frame_data, operator.methodcaller("copy")),
+    (pd.Series, ([1, 2], None, object), operator.methodcaller("infer_objects")),
+    (
+        pd.DataFrame,
+        ({"A": np.array([1, 2], dtype=object)},),
+        operator.methodcaller("infer_objects"),
+    ),
+    (pd.Series, ([1, 2],), operator.methodcaller("convert_dtypes")),
+    (pd.DataFrame, frame_data, operator.methodcaller("convert_dtypes")),
+    (pd.Series, ([1, None, 3],), operator.methodcaller("interpolate")),
+    (pd.DataFrame, ({"A": [1, None, 3]},), operator.methodcaller("interpolate")),
+    (pd.Series, ([1, 2],), operator.methodcaller("clip", lower=1)),
+    (pd.DataFrame, frame_data, operator.methodcaller("clip", lower=1)),
+    (
+        pd.Series,
+        (1, pd.date_range("2000", periods=4)),
+        operator.methodcaller("asfreq", "h"),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
+        operator.methodcaller("asfreq", "h"),
+    ),
+    (
+        pd.Series,
+        (1, pd.date_range("2000", periods=4)),
+        operator.methodcaller("at_time", "12:00"),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
+        operator.methodcaller("at_time", "12:00"),
+    ),
+    (
+        pd.Series,
+        (1, pd.date_range("2000", periods=4)),
+        operator.methodcaller("between_time", "12:00", "13:00"),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
+        operator.methodcaller("between_time", "12:00", "13:00"),
+    ),
+    (
+        pd.Series,
+        (1, pd.date_range("2000", periods=4)),
+        operator.methodcaller("last", "3D"),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
+        operator.methodcaller("last", "3D"),
+    ),
+    (pd.Series, ([1, 2],), operator.methodcaller("rank")),
+    (pd.DataFrame, frame_data, operator.methodcaller("rank")),
+    (pd.Series, ([1, 2],), operator.methodcaller("where", np.array([True, False]))),
+    (pd.DataFrame, frame_data, operator.methodcaller("where", np.array([[True]]))),
+    (pd.Series, ([1, 2],), operator.methodcaller("mask", np.array([True, False]))),
+    (pd.DataFrame, frame_data, operator.methodcaller("mask", np.array([[True]]))),
+    (pd.Series, ([1, 2],), operator.methodcaller("truncate", before=0)),
+    (pd.DataFrame, frame_data, operator.methodcaller("truncate", before=0)),
+    (
+        pd.Series,
+        (1, pd.date_range("2000", periods=4, tz="UTC")),
+        operator.methodcaller("tz_convert", "CET"),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4, tz="UTC")),
+        operator.methodcaller("tz_convert", "CET"),
+    ),
+    (
+        pd.Series,
+        (1, pd.date_range("2000", periods=4)),
+        operator.methodcaller("tz_localize", "CET"),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
+        operator.methodcaller("tz_localize", "CET"),
+    ),
+    (pd.Series, ([1, 2],), operator.methodcaller("describe")),
+    (pd.DataFrame, frame_data, operator.methodcaller("describe")),
+    (pd.Series, ([1, 2],), operator.methodcaller("pct_change")),
+    (pd.DataFrame, frame_data, operator.methodcaller("pct_change")),
+    (pd.Series, ([1],), operator.methodcaller("transform", lambda x: x - x.min())),
+    (
+        pd.DataFrame,
+        frame_mi_data,
+        operator.methodcaller("transform", lambda x: x - x.min()),
+    ),
+    (pd.Series, ([1],), operator.methodcaller("apply", lambda x: x)),
+    (pd.DataFrame, frame_mi_data, operator.methodcaller("apply", lambda x: x)),
+    # Cumulative reductions
+    (pd.Series, ([1],), operator.methodcaller("cumsum")),
+    (pd.DataFrame, frame_data, operator.methodcaller("cumsum")),
+    (pd.Series, ([1],), operator.methodcaller("cummin")),
+    (pd.DataFrame, frame_data, operator.methodcaller("cummin")),
+    (pd.Series, ([1],), operator.methodcaller("cummax")),
+    (pd.DataFrame, frame_data, operator.methodcaller("cummax")),
+    (pd.Series, ([1],), operator.methodcaller("cumprod")),
+    (pd.DataFrame, frame_data, operator.methodcaller("cumprod")),
+    # Reductions
+    (pd.DataFrame, frame_data, operator.methodcaller("any")),
+    (pd.DataFrame, frame_data, operator.methodcaller("all")),
+    (pd.DataFrame, frame_data, operator.methodcaller("min")),
+    (pd.DataFrame, frame_data, operator.methodcaller("max")),
+    (pd.DataFrame, frame_data, operator.methodcaller("sum")),
+    (pd.DataFrame, frame_data, operator.methodcaller("std")),
+    (pd.DataFrame, frame_data, operator.methodcaller("mean")),
+    (pd.DataFrame, frame_data, operator.methodcaller("prod")),
+    (pd.DataFrame, frame_data, operator.methodcaller("sem")),
+    (pd.DataFrame, frame_data, operator.methodcaller("skew")),
+    (pd.DataFrame, frame_data, operator.methodcaller("kurt")),
+]
+
+
+def idfn(x):
+    xpr = re.compile(r"'(.*)?'")
+    m = xpr.search(str(x))
+    if m:
+        return m.group(1)
+    else:
+        return str(x)
+
+
+@pytest.fixture(params=_all_methods, ids=lambda x: idfn(x[-1]))
+def ndframe_method(request):
+    """
+    An NDFrame method returning an NDFrame.
+    """
+    return request.param
+
+
+@pytest.mark.filterwarnings(
+    "ignore:DataFrame.fillna with 'method' is deprecated:FutureWarning",
+    "ignore:last is deprecated:FutureWarning",
+)
+def test_finalize_called(ndframe_method):
+    cls, init_args, method = ndframe_method
+    ndframe = cls(*init_args)
+
+    ndframe.attrs = {"a": 1}
+    result = method(ndframe)
+
+    assert result.attrs == {"a": 1}
+
+
+@pytest.mark.parametrize(
+    "data",
+    [
+        pd.Series(1, pd.date_range("2000", periods=4)),
+        pd.DataFrame({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
+    ],
+)
+def test_finalize_first(data):
+    deprecated_msg = "first is deprecated"
+
+    data.attrs = {"a": 1}
+    with tm.assert_produces_warning(FutureWarning, match=deprecated_msg):
+        result = data.first("3D")
+        assert result.attrs == {"a": 1}
+
+
+@pytest.mark.parametrize(
+    "data",
+    [
+        pd.Series(1, pd.date_range("2000", periods=4)),
+        pd.DataFrame({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
+    ],
+)
+def test_finalize_last(data):
+    # GH 53710
+    deprecated_msg = "last is deprecated"
+
+    data.attrs = {"a": 1}
+    with tm.assert_produces_warning(FutureWarning, match=deprecated_msg):
+        result = data.last("3D")
+        assert result.attrs == {"a": 1}
+
+
+@not_implemented_mark
+def test_finalize_called_eval_numexpr():
+    pytest.importorskip("numexpr")
+    df = pd.DataFrame({"A": [1, 2]})
+    df.attrs["A"] = 1
+    result = df.eval("A + 1", engine="numexpr")
+    assert result.attrs == {"A": 1}
+
+
+# ----------------------------------------------------------------------------
+# Binary operations
+
+
+@pytest.mark.parametrize("annotate", ["left", "right", "both"])
+@pytest.mark.parametrize(
+    "args",
+    [
+        (1, pd.Series([1])),
+        (1, pd.DataFrame({"A": [1]})),
+        (pd.Series([1]), 1),
+        (pd.DataFrame({"A": [1]}), 1),
+        (pd.Series([1]), pd.Series([1])),
+        (pd.DataFrame({"A": [1]}), pd.DataFrame({"A": [1]})),
+        (pd.Series([1]), pd.DataFrame({"A": [1]})),
+        (pd.DataFrame({"A": [1]}), pd.Series([1])),
+    ],
+    ids=lambda x: f"({type(x[0]).__name__},{type(x[1]).__name__})",
+)
+def test_binops(request, args, annotate, all_binary_operators):
+    # This generates 624 tests... Is that needed?
+    left, right = args
+    if isinstance(left, (pd.DataFrame, pd.Series)):
+        left.attrs = {}
+    if isinstance(right, (pd.DataFrame, pd.Series)):
+        right.attrs = {}
+
+    if annotate == "left" and isinstance(left, int):
+        pytest.skip("left is an int and doesn't support .attrs")
+    if annotate == "right" and isinstance(right, int):
+        pytest.skip("right is an int and doesn't support .attrs")
+
+    if not (isinstance(left, int) or isinstance(right, int)) and annotate != "both":
+        if not all_binary_operators.__name__.startswith("r"):
+            if annotate == "right" and isinstance(left, type(right)):
+                request.applymarker(
+                    pytest.mark.xfail(
+                        reason=f"{all_binary_operators} doesn't work when right has "
+                        f"attrs and both are {type(left)}"
+                    )
+                )
+            if not isinstance(left, type(right)):
+                if annotate == "left" and isinstance(left, pd.Series):
+                    request.applymarker(
+                        pytest.mark.xfail(
+                            reason=f"{all_binary_operators} doesn't work when the "
+                            "objects are different Series has attrs"
+                        )
+                    )
+                elif annotate == "right" and isinstance(right, pd.Series):
+                    request.applymarker(
+                        pytest.mark.xfail(
+                            reason=f"{all_binary_operators} doesn't work when the "
+                            "objects are different Series has attrs"
+                        )
+                    )
+        else:
+            if annotate == "left" and isinstance(left, type(right)):
+                request.applymarker(
+                    pytest.mark.xfail(
+                        reason=f"{all_binary_operators} doesn't work when left has "
+                        f"attrs and both are {type(left)}"
+                    )
+                )
+            if not isinstance(left, type(right)):
+                if annotate == "right" and isinstance(right, pd.Series):
+                    request.applymarker(
+                        pytest.mark.xfail(
+                            reason=f"{all_binary_operators} doesn't work when the "
+                            "objects are different Series has attrs"
+                        )
+                    )
+                elif annotate == "left" and isinstance(left, pd.Series):
+                    request.applymarker(
+                        pytest.mark.xfail(
+                            reason=f"{all_binary_operators} doesn't work when the "
+                            "objects are different Series has attrs"
+                        )
+                    )
+    if annotate in {"left", "both"} and not isinstance(left, int):
+        left.attrs = {"a": 1}
+    if annotate in {"right", "both"} and not isinstance(right, int):
+        right.attrs = {"a": 1}
+
+    is_cmp = all_binary_operators in [
+        operator.eq,
+        operator.ne,
+        operator.gt,
+        operator.ge,
+        operator.lt,
+        operator.le,
+    ]
+    if is_cmp and isinstance(left, pd.DataFrame) and isinstance(right, pd.Series):
+        # in 2.0 silent alignment on comparisons was removed xref GH#28759
+        left, right = left.align(right, axis=1, copy=False)
+    elif is_cmp and isinstance(left, pd.Series) and isinstance(right, pd.DataFrame):
+        right, left = right.align(left, axis=1, copy=False)
+
+    result = all_binary_operators(left, right)
+    assert result.attrs == {"a": 1}
+
+
+# ----------------------------------------------------------------------------
+# Accessors
+
+
+@pytest.mark.parametrize(
+    "method",
+    [
+        operator.methodcaller("capitalize"),
+        operator.methodcaller("casefold"),
+        operator.methodcaller("cat", ["a"]),
+        operator.methodcaller("contains", "a"),
+        operator.methodcaller("count", "a"),
+        operator.methodcaller("encode", "utf-8"),
+        operator.methodcaller("endswith", "a"),
+        operator.methodcaller("extract", r"(\w)(\d)"),
+        operator.methodcaller("extract", r"(\w)(\d)", expand=False),
+        operator.methodcaller("find", "a"),
+        operator.methodcaller("findall", "a"),
+        operator.methodcaller("get", 0),
+        operator.methodcaller("index", "a"),
+        operator.methodcaller("len"),
+        operator.methodcaller("ljust", 4),
+        operator.methodcaller("lower"),
+        operator.methodcaller("lstrip"),
+        operator.methodcaller("match", r"\w"),
+        operator.methodcaller("normalize", "NFC"),
+        operator.methodcaller("pad", 4),
+        operator.methodcaller("partition", "a"),
+        operator.methodcaller("repeat", 2),
+        operator.methodcaller("replace", "a", "b"),
+        operator.methodcaller("rfind", "a"),
+        operator.methodcaller("rindex", "a"),
+        operator.methodcaller("rjust", 4),
+        operator.methodcaller("rpartition", "a"),
+        operator.methodcaller("rstrip"),
+        operator.methodcaller("slice", 4),
+        operator.methodcaller("slice_replace", 1, repl="a"),
+        operator.methodcaller("startswith", "a"),
+        operator.methodcaller("strip"),
+        operator.methodcaller("swapcase"),
+        operator.methodcaller("translate", {"a": "b"}),
+        operator.methodcaller("upper"),
+        operator.methodcaller("wrap", 4),
+        operator.methodcaller("zfill", 4),
+        operator.methodcaller("isalnum"),
+        operator.methodcaller("isalpha"),
+        operator.methodcaller("isdigit"),
+        operator.methodcaller("isspace"),
+        operator.methodcaller("islower"),
+        operator.methodcaller("isupper"),
+        operator.methodcaller("istitle"),
+        operator.methodcaller("isnumeric"),
+        operator.methodcaller("isdecimal"),
+        operator.methodcaller("get_dummies"),
+    ],
+    ids=idfn,
+)
+def test_string_method(method):
+    s = pd.Series(["a1"])
+    s.attrs = {"a": 1}
+    result = method(s.str)
+    assert result.attrs == {"a": 1}
+
+
+@pytest.mark.parametrize(
+    "method",
+    [
+        operator.methodcaller("to_period"),
+        operator.methodcaller("tz_localize", "CET"),
+        operator.methodcaller("normalize"),
+        operator.methodcaller("strftime", "%Y"),
+        operator.methodcaller("round", "h"),
+        operator.methodcaller("floor", "h"),
+        operator.methodcaller("ceil", "h"),
+        operator.methodcaller("month_name"),
+        operator.methodcaller("day_name"),
+    ],
+    ids=idfn,
+)
+def test_datetime_method(method):
+    s = pd.Series(pd.date_range("2000", periods=4))
+    s.attrs = {"a": 1}
+    result = method(s.dt)
+    assert result.attrs == {"a": 1}
+
+
+@pytest.mark.parametrize(
+    "attr",
+    [
+        "date",
+        "time",
+        "timetz",
+        "year",
+        "month",
+        "day",
+        "hour",
+        "minute",
+        "second",
+        "microsecond",
+        "nanosecond",
+        "dayofweek",
+        "day_of_week",
+        "dayofyear",
+        "day_of_year",
+        "quarter",
+        "is_month_start",
+        "is_month_end",
+        "is_quarter_start",
+        "is_quarter_end",
+        "is_year_start",
+        "is_year_end",
+        "is_leap_year",
+        "daysinmonth",
+        "days_in_month",
+    ],
+)
+def test_datetime_property(attr):
+    s = pd.Series(pd.date_range("2000", periods=4))
+    s.attrs = {"a": 1}
+    result = getattr(s.dt, attr)
+    assert result.attrs == {"a": 1}
+
+
+@pytest.mark.parametrize(
+    "attr", ["days", "seconds", "microseconds", "nanoseconds", "components"]
+)
+def test_timedelta_property(attr):
+    s = pd.Series(pd.timedelta_range("2000", periods=4))
+    s.attrs = {"a": 1}
+    result = getattr(s.dt, attr)
+    assert result.attrs == {"a": 1}
+
+
+@pytest.mark.parametrize("method", [operator.methodcaller("total_seconds")])
+def test_timedelta_methods(method):
+    s = pd.Series(pd.timedelta_range("2000", periods=4))
+    s.attrs = {"a": 1}
+    result = method(s.dt)
+    assert result.attrs == {"a": 1}
+
+
+@pytest.mark.parametrize(
+    "method",
+    [
+        operator.methodcaller("add_categories", ["c"]),
+        operator.methodcaller("as_ordered"),
+        operator.methodcaller("as_unordered"),
+        lambda x: getattr(x, "codes"),
+        operator.methodcaller("remove_categories", "a"),
+        operator.methodcaller("remove_unused_categories"),
+        operator.methodcaller("rename_categories", {"a": "A", "b": "B"}),
+        operator.methodcaller("reorder_categories", ["b", "a"]),
+        operator.methodcaller("set_categories", ["A", "B"]),
+    ],
+)
+@not_implemented_mark
+def test_categorical_accessor(method):
+    s = pd.Series(["a", "b"], dtype="category")
+    s.attrs = {"a": 1}
+    result = method(s.cat)
+    assert result.attrs == {"a": 1}
+
+
+# ----------------------------------------------------------------------------
+# Groupby
+
+
+@pytest.mark.parametrize(
+    "obj", [pd.Series([0, 0]), pd.DataFrame({"A": [0, 1], "B": [1, 2]})]
+)
+@pytest.mark.parametrize(
+    "method",
+    [
+        operator.methodcaller("sum"),
+        lambda x: x.apply(lambda y: y),
+        lambda x: x.agg("sum"),
+        lambda x: x.agg("mean"),
+        lambda x: x.agg("median"),
+    ],
+)
+def test_groupby_finalize(obj, method):
+    obj.attrs = {"a": 1}
+    result = method(obj.groupby([0, 0], group_keys=False))
+    assert result.attrs == {"a": 1}
+
+
+@pytest.mark.parametrize(
+    "obj", [pd.Series([0, 0]), pd.DataFrame({"A": [0, 1], "B": [1, 2]})]
+)
+@pytest.mark.parametrize(
+    "method",
+    [
+        lambda x: x.agg(["sum", "count"]),
+        lambda x: x.agg("std"),
+        lambda x: x.agg("var"),
+        lambda x: x.agg("sem"),
+        lambda x: x.agg("size"),
+        lambda x: x.agg("ohlc"),
+    ],
+)
+@not_implemented_mark
+def test_groupby_finalize_not_implemented(obj, method):
+    obj.attrs = {"a": 1}
+    result = method(obj.groupby([0, 0]))
+    assert result.attrs == {"a": 1}
+
+
+def test_finalize_frame_series_name():
+    # https://github.com/pandas-dev/pandas/pull/37186/files#r506978889
+    # ensure we don't copy the column `name` to the Series.
+    df = pd.DataFrame({"name": [1, 2]})
+    result = pd.Series([1, 2]).__finalize__(df)
+    assert result.name is None
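For orientation, the file above enumerates methods whose outputs are expected to carry `attrs` forward via `NDFrame.__finalize__`. A small, hedged sketch of that propagation (the `"source"` key is an arbitrary example value, not taken from the diff):

import pandas as pd

df = pd.DataFrame({"A": [1, None, 3]})
df.attrs = {"source": "example"}  # arbitrary metadata attached to the object

# Methods listed in _all_methods above (e.g. dropna, rename) are expected to
# propagate attrs to their results via NDFrame.__finalize__.
result = df.dropna().rename(columns=str.lower)
assert result.attrs == {"source": "example"}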
venv/lib/python3.10/site-packages/pandas/tests/generic/test_frame.py
ADDED
@@ -0,0 +1,209 @@
+from copy import deepcopy
+from operator import methodcaller
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    MultiIndex,
+    Series,
+    date_range,
+)
+import pandas._testing as tm
+
+
+class TestDataFrame:
+    @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"])
+    def test_set_axis_name(self, func):
+        df = DataFrame([[1, 2], [3, 4]])
+
+        result = methodcaller(func, "foo")(df)
+        assert df.index.name is None
+        assert result.index.name == "foo"
+
+        result = methodcaller(func, "cols", axis=1)(df)
+        assert df.columns.name is None
+        assert result.columns.name == "cols"
+
+    @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"])
+    def test_set_axis_name_mi(self, func):
+        df = DataFrame(
+            np.empty((3, 3)),
+            index=MultiIndex.from_tuples([("A", x) for x in list("aBc")]),
+            columns=MultiIndex.from_tuples([("C", x) for x in list("xyz")]),
+        )
+
+        level_names = ["L1", "L2"]
+
+        result = methodcaller(func, level_names)(df)
+        assert result.index.names == level_names
+        assert result.columns.names == [None, None]
+
+        result = methodcaller(func, level_names, axis=1)(df)
+        assert result.columns.names == ["L1", "L2"]
+        assert result.index.names == [None, None]
+
+    def test_nonzero_single_element(self):
+        # allow single item via bool method
+        msg_warn = (
+            "DataFrame.bool is now deprecated and will be removed "
+            "in future version of pandas"
+        )
+        df = DataFrame([[True]])
+        df1 = DataFrame([[False]])
+        with tm.assert_produces_warning(FutureWarning, match=msg_warn):
+            assert df.bool()
+
+        with tm.assert_produces_warning(FutureWarning, match=msg_warn):
+            assert not df1.bool()
+
+        df = DataFrame([[False, False]])
+        msg_err = "The truth value of a DataFrame is ambiguous"
+        with pytest.raises(ValueError, match=msg_err):
+            bool(df)
+
+        with tm.assert_produces_warning(FutureWarning, match=msg_warn):
+            with pytest.raises(ValueError, match=msg_err):
+                df.bool()
+
+    def test_metadata_propagation_indiv_groupby(self):
+        # groupby
+        df = DataFrame(
+            {
+                "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
+                "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
+                "C": np.random.default_rng(2).standard_normal(8),
+                "D": np.random.default_rng(2).standard_normal(8),
+            }
+        )
+        result = df.groupby("A").sum()
+        tm.assert_metadata_equivalent(df, result)
+
+    def test_metadata_propagation_indiv_resample(self):
+        # resample
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((1000, 2)),
+            index=date_range("20130101", periods=1000, freq="s"),
+        )
+        result = df.resample("1min")
+        tm.assert_metadata_equivalent(df, result)
+
+    def test_metadata_propagation_indiv(self, monkeypatch):
+        # merging with override
+        # GH 6923
+
+        def finalize(self, other, method=None, **kwargs):
+            for name in self._metadata:
+                if method == "merge":
+                    left, right = other.left, other.right
+                    value = getattr(left, name, "") + "|" + getattr(right, name, "")
+                    object.__setattr__(self, name, value)
+                elif method == "concat":
+                    value = "+".join(
+                        [getattr(o, name) for o in other.objs if getattr(o, name, None)]
+                    )
+                    object.__setattr__(self, name, value)
+                else:
+                    object.__setattr__(self, name, getattr(other, name, ""))
+
+            return self
+
+        with monkeypatch.context() as m:
+            m.setattr(DataFrame, "_metadata", ["filename"])
+            m.setattr(DataFrame, "__finalize__", finalize)
+
+            df1 = DataFrame(
+                np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["a", "b"]
+            )
+            df2 = DataFrame(
+                np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["c", "d"]
+            )
+            DataFrame._metadata = ["filename"]
+            df1.filename = "fname1.csv"
+            df2.filename = "fname2.csv"
+
+            result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner")
+            assert result.filename == "fname1.csv|fname2.csv"
+
+            # concat
+            # GH#6927
+            df1 = DataFrame(
+                np.random.default_rng(2).integers(0, 4, (3, 2)), columns=list("ab")
+            )
+            df1.filename = "foo"
+
+            result = pd.concat([df1, df1])
+            assert result.filename == "foo+foo"
+
+    def test_set_attribute(self):
+        # Test for consistent setattr behavior when an attribute and a column
+        # have the same name (Issue #8994)
+        df = DataFrame({"x": [1, 2, 3]})
+
+        df.y = 2
+        df["y"] = [2, 4, 6]
+        df.y = 5
+
+        assert df.y == 5
+        tm.assert_series_equal(df["y"], Series([2, 4, 6], name="y"))
+
+    def test_deepcopy_empty(self):
+        # This test covers empty frame copying with non-empty column sets
+        # as reported in issue GH15370
+        empty_frame = DataFrame(data=[], index=[], columns=["A"])
+        empty_frame_copy = deepcopy(empty_frame)
+
+        tm.assert_frame_equal(empty_frame_copy, empty_frame)
+
+
+# formerly in Generic but only test DataFrame
+class TestDataFrame2:
+    @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
+    def test_validate_bool_args(self, value):
+        df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
+
+        msg = 'For argument "inplace" expected type bool, received type'
+        with pytest.raises(ValueError, match=msg):
+            df.copy().rename_axis(mapper={"a": "x", "b": "y"}, axis=1, inplace=value)
+
+        with pytest.raises(ValueError, match=msg):
+            df.copy().drop("a", axis=1, inplace=value)
+
+        with pytest.raises(ValueError, match=msg):
+            df.copy().fillna(value=0, inplace=value)
+
+        with pytest.raises(ValueError, match=msg):
+            df.copy().replace(to_replace=1, value=7, inplace=value)
+
+        with pytest.raises(ValueError, match=msg):
+            df.copy().interpolate(inplace=value)
+
+        with pytest.raises(ValueError, match=msg):
+            df.copy()._where(cond=df.a > 2, inplace=value)
+
+        with pytest.raises(ValueError, match=msg):
+            df.copy().mask(cond=df.a > 2, inplace=value)
+
+    def test_unexpected_keyword(self):
+        # GH8597
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((5, 2)), columns=["jim", "joe"]
+        )
+        ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
+        ts = df["joe"].copy()
+        ts[2] = np.nan
+
+        msg = "unexpected keyword"
+        with pytest.raises(TypeError, match=msg):
+            df.drop("joe", axis=1, in_place=True)
+
+        with pytest.raises(TypeError, match=msg):
+            df.reindex([1, 0], inplace=True)
+
+        with pytest.raises(TypeError, match=msg):
+            ca.fillna(0, inplace=True)
+
+        with pytest.raises(TypeError, match=msg):
+            ts.fillna(0, in_place=True)
venv/lib/python3.10/site-packages/pandas/tests/generic/test_generic.py
ADDED
@@ -0,0 +1,504 @@
from copy import (
    copy,
    deepcopy,
)

import numpy as np
import pytest

from pandas.core.dtypes.common import is_scalar

from pandas import (
    DataFrame,
    Index,
    Series,
    date_range,
)
import pandas._testing as tm

# ----------------------------------------------------------------------
# Generic types test cases


def construct(box, shape, value=None, dtype=None, **kwargs):
    """
    construct an object for the given shape
    if value is specified use that if its a scalar
    if value is an array, repeat it as needed
    """
    if isinstance(shape, int):
        shape = tuple([shape] * box._AXIS_LEN)
    if value is not None:
        if is_scalar(value):
            if value == "empty":
                arr = None
                dtype = np.float64

                # remove the info axis
                kwargs.pop(box._info_axis_name, None)
            else:
                arr = np.empty(shape, dtype=dtype)
                arr.fill(value)
        else:
            fshape = np.prod(shape)
            arr = value.ravel()
            new_shape = fshape / arr.shape[0]
            if fshape % arr.shape[0] != 0:
                raise Exception("invalid value passed in construct")

            arr = np.repeat(arr, new_shape).reshape(shape)
    else:
        arr = np.random.default_rng(2).standard_normal(shape)
    return box(arr, dtype=dtype, **kwargs)


class TestGeneric:
    @pytest.mark.parametrize(
        "func",
        [
            str.lower,
            {x: x.lower() for x in list("ABCD")},
            Series({x: x.lower() for x in list("ABCD")}),
        ],
    )
    def test_rename(self, frame_or_series, func):
        # single axis
        idx = list("ABCD")

        for axis in frame_or_series._AXIS_ORDERS:
            kwargs = {axis: idx}
            obj = construct(frame_or_series, 4, **kwargs)

            # rename a single axis
            result = obj.rename(**{axis: func})
            expected = obj.copy()
            setattr(expected, axis, list("abcd"))
            tm.assert_equal(result, expected)

    def test_get_numeric_data(self, frame_or_series):
        n = 4
        kwargs = {
            frame_or_series._get_axis_name(i): list(range(n))
            for i in range(frame_or_series._AXIS_LEN)
        }

        # get the numeric data
        o = construct(frame_or_series, n, **kwargs)
        result = o._get_numeric_data()
        tm.assert_equal(result, o)

        # non-inclusion
        result = o._get_bool_data()
        expected = construct(frame_or_series, n, value="empty", **kwargs)
        if isinstance(o, DataFrame):
            # preserve columns dtype
            expected.columns = o.columns[:0]
        # https://github.com/pandas-dev/pandas/issues/50862
        tm.assert_equal(result.reset_index(drop=True), expected)

        # get the bool data
        arr = np.array([True, True, False, True])
        o = construct(frame_or_series, n, value=arr, **kwargs)
        result = o._get_numeric_data()
        tm.assert_equal(result, o)

    def test_nonzero(self, frame_or_series):
        # GH 4633
        # look at the boolean/nonzero behavior for objects
        obj = construct(frame_or_series, shape=4)
        msg = f"The truth value of a {frame_or_series.__name__} is ambiguous"
        with pytest.raises(ValueError, match=msg):
            bool(obj == 0)
        with pytest.raises(ValueError, match=msg):
            bool(obj == 1)
        with pytest.raises(ValueError, match=msg):
            bool(obj)

        obj = construct(frame_or_series, shape=4, value=1)
        with pytest.raises(ValueError, match=msg):
            bool(obj == 0)
        with pytest.raises(ValueError, match=msg):
            bool(obj == 1)
        with pytest.raises(ValueError, match=msg):
            bool(obj)

        obj = construct(frame_or_series, shape=4, value=np.nan)
        with pytest.raises(ValueError, match=msg):
            bool(obj == 0)
        with pytest.raises(ValueError, match=msg):
            bool(obj == 1)
        with pytest.raises(ValueError, match=msg):
            bool(obj)

        # empty
        obj = construct(frame_or_series, shape=0)
        with pytest.raises(ValueError, match=msg):
            bool(obj)

        # invalid behaviors

        obj1 = construct(frame_or_series, shape=4, value=1)
        obj2 = construct(frame_or_series, shape=4, value=1)

        with pytest.raises(ValueError, match=msg):
            if obj1:
                pass

        with pytest.raises(ValueError, match=msg):
            obj1 and obj2
        with pytest.raises(ValueError, match=msg):
            obj1 or obj2
        with pytest.raises(ValueError, match=msg):
            not obj1

    def test_frame_or_series_compound_dtypes(self, frame_or_series):
        # see gh-5191
        # Compound dtypes should raise NotImplementedError.

        def f(dtype):
            return construct(frame_or_series, shape=3, value=1, dtype=dtype)

        msg = (
            "compound dtypes are not implemented "
            f"in the {frame_or_series.__name__} constructor"
        )

        with pytest.raises(NotImplementedError, match=msg):
            f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")])

        # these work (though results may be unexpected)
        f("int64")
        f("float64")
        f("M8[ns]")

    def test_metadata_propagation(self, frame_or_series):
        # check that the metadata matches up on the resulting ops

        o = construct(frame_or_series, shape=3)
        o.name = "foo"
        o2 = construct(frame_or_series, shape=3)
        o2.name = "bar"

        # ----------
        # preserving
        # ----------

        # simple ops with scalars
        for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
            result = getattr(o, op)(1)
            tm.assert_metadata_equivalent(o, result)

        # ops with like
        for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
            result = getattr(o, op)(o)
            tm.assert_metadata_equivalent(o, result)

        # simple boolean
        for op in ["__eq__", "__le__", "__ge__"]:
            v1 = getattr(o, op)(o)
            tm.assert_metadata_equivalent(o, v1)
            tm.assert_metadata_equivalent(o, v1 & v1)
            tm.assert_metadata_equivalent(o, v1 | v1)

        # combine_first
        result = o.combine_first(o2)
        tm.assert_metadata_equivalent(o, result)

        # ---------------------------
        # non-preserving (by default)
        # ---------------------------

        # add non-like
        result = o + o2
        tm.assert_metadata_equivalent(result)

        # simple boolean
        for op in ["__eq__", "__le__", "__ge__"]:
            # this is a name matching op
            v1 = getattr(o, op)(o)
            v2 = getattr(o, op)(o2)
            tm.assert_metadata_equivalent(v2)
            tm.assert_metadata_equivalent(v1 & v2)
            tm.assert_metadata_equivalent(v1 | v2)

    def test_size_compat(self, frame_or_series):
        # GH8846
        # size property should be defined

        o = construct(frame_or_series, shape=10)
        assert o.size == np.prod(o.shape)
        assert o.size == 10 ** len(o.axes)

    def test_split_compat(self, frame_or_series):
        # xref GH8846
        o = construct(frame_or_series, shape=10)
        with tm.assert_produces_warning(
            FutureWarning, match=".swapaxes' is deprecated", check_stacklevel=False
        ):
            assert len(np.array_split(o, 5)) == 5
            assert len(np.array_split(o, 2)) == 2

    # See gh-12301
    def test_stat_unexpected_keyword(self, frame_or_series):
        obj = construct(frame_or_series, 5)
        starwars = "Star Wars"
        errmsg = "unexpected keyword"

        with pytest.raises(TypeError, match=errmsg):
            obj.max(epic=starwars)  # stat_function
        with pytest.raises(TypeError, match=errmsg):
            obj.var(epic=starwars)  # stat_function_ddof
        with pytest.raises(TypeError, match=errmsg):
            obj.sum(epic=starwars)  # cum_function
        with pytest.raises(TypeError, match=errmsg):
            obj.any(epic=starwars)  # logical_function

    @pytest.mark.parametrize("func", ["sum", "cumsum", "any", "var"])
    def test_api_compat(self, func, frame_or_series):
        # GH 12021
        # compat for __name__, __qualname__

        obj = construct(frame_or_series, 5)
        f = getattr(obj, func)
        assert f.__name__ == func
        assert f.__qualname__.endswith(func)

    def test_stat_non_defaults_args(self, frame_or_series):
        obj = construct(frame_or_series, 5)
        out = np.array([0])
        errmsg = "the 'out' parameter is not supported"

        with pytest.raises(ValueError, match=errmsg):
            obj.max(out=out)  # stat_function
        with pytest.raises(ValueError, match=errmsg):
            obj.var(out=out)  # stat_function_ddof
        with pytest.raises(ValueError, match=errmsg):
            obj.sum(out=out)  # cum_function
        with pytest.raises(ValueError, match=errmsg):
            obj.any(out=out)  # logical_function

    def test_truncate_out_of_bounds(self, frame_or_series):
        # GH11382

        # small
        shape = [2000] + ([1] * (frame_or_series._AXIS_LEN - 1))
        small = construct(frame_or_series, shape, dtype="int8", value=1)
        tm.assert_equal(small.truncate(), small)
        tm.assert_equal(small.truncate(before=0, after=3e3), small)
        tm.assert_equal(small.truncate(before=-1, after=2e3), small)

        # big
        shape = [2_000_000] + ([1] * (frame_or_series._AXIS_LEN - 1))
        big = construct(frame_or_series, shape, dtype="int8", value=1)
        tm.assert_equal(big.truncate(), big)
        tm.assert_equal(big.truncate(before=0, after=3e6), big)
        tm.assert_equal(big.truncate(before=-1, after=2e6), big)

    @pytest.mark.parametrize(
        "func",
        [copy, deepcopy, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
    )
    @pytest.mark.parametrize("shape", [0, 1, 2])
    def test_copy_and_deepcopy(self, frame_or_series, shape, func):
        # GH 15444
        obj = construct(frame_or_series, shape)
        obj_copy = func(obj)
        assert obj_copy is not obj
        tm.assert_equal(obj_copy, obj)

    def test_data_deprecated(self, frame_or_series):
        obj = frame_or_series()
        msg = "(Series|DataFrame)._data is deprecated"
        with tm.assert_produces_warning(DeprecationWarning, match=msg):
            mgr = obj._data
        assert mgr is obj._mgr


class TestNDFrame:
    # tests that don't fit elsewhere

    @pytest.mark.parametrize(
        "ser",
        [
            Series(range(10), dtype=np.float64),
            Series([str(i) for i in range(10)], dtype=object),
        ],
    )
    def test_squeeze_series_noop(self, ser):
        # noop
        tm.assert_series_equal(ser.squeeze(), ser)

    def test_squeeze_frame_noop(self):
        # noop
        df = DataFrame(np.eye(2))
        tm.assert_frame_equal(df.squeeze(), df)

    def test_squeeze_frame_reindex(self):
        # squeezing
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=10, freq="B"),
        ).reindex(columns=["A"])
        tm.assert_series_equal(df.squeeze(), df["A"])

    def test_squeeze_0_len_dim(self):
        # don't fail with 0 length dimensions GH11229 & GH8999
        empty_series = Series([], name="five", dtype=np.float64)
        empty_frame = DataFrame([empty_series])
        tm.assert_series_equal(empty_series, empty_series.squeeze())
        tm.assert_series_equal(empty_series, empty_frame.squeeze())

    def test_squeeze_axis(self):
        # axis argument
        df = DataFrame(
            np.random.default_rng(2).standard_normal((1, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=1, freq="B"),
        ).iloc[:, :1]
        assert df.shape == (1, 1)
        tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0])
        tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0])
        tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0])
        tm.assert_series_equal(df.squeeze(axis="columns"), df.iloc[:, 0])
        assert df.squeeze() == df.iloc[0, 0]
        msg = "No axis named 2 for object type DataFrame"
        with pytest.raises(ValueError, match=msg):
            df.squeeze(axis=2)
        msg = "No axis named x for object type DataFrame"
        with pytest.raises(ValueError, match=msg):
            df.squeeze(axis="x")

    def test_squeeze_axis_len_3(self):
        df = DataFrame(
            np.random.default_rng(2).standard_normal((3, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=3, freq="B"),
        )
        tm.assert_frame_equal(df.squeeze(axis=0), df)

    def test_numpy_squeeze(self):
        s = Series(range(2), dtype=np.float64)
        tm.assert_series_equal(np.squeeze(s), s)

        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=10, freq="B"),
        ).reindex(columns=["A"])
        tm.assert_series_equal(np.squeeze(df), df["A"])

    @pytest.mark.parametrize(
        "ser",
        [
            Series(range(10), dtype=np.float64),
            Series([str(i) for i in range(10)], dtype=object),
        ],
    )
    def test_transpose_series(self, ser):
        # calls implementation in pandas/core/base.py
        tm.assert_series_equal(ser.transpose(), ser)

    def test_transpose_frame(self):
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=10, freq="B"),
        )
        tm.assert_frame_equal(df.transpose().transpose(), df)

    def test_numpy_transpose(self, frame_or_series):
        obj = DataFrame(
            np.random.default_rng(2).standard_normal((10, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=10, freq="B"),
        )
        obj = tm.get_obj(obj, frame_or_series)

        if frame_or_series is Series:
            # 1D -> np.transpose is no-op
            tm.assert_series_equal(np.transpose(obj), obj)

        # round-trip preserved
        tm.assert_equal(np.transpose(np.transpose(obj)), obj)

        msg = "the 'axes' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            np.transpose(obj, axes=1)

    @pytest.mark.parametrize(
        "ser",
        [
            Series(range(10), dtype=np.float64),
            Series([str(i) for i in range(10)], dtype=object),
        ],
    )
    def test_take_series(self, ser):
        indices = [1, 5, -2, 6, 3, -1]
        out = ser.take(indices)
        expected = Series(
            data=ser.values.take(indices),
            index=ser.index.take(indices),
            dtype=ser.dtype,
        )
        tm.assert_series_equal(out, expected)

    def test_take_frame(self):
        indices = [1, 5, -2, 6, 3, -1]
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=10, freq="B"),
        )
        out = df.take(indices)
        expected = DataFrame(
            data=df.values.take(indices, axis=0),
            index=df.index.take(indices),
            columns=df.columns,
        )
        tm.assert_frame_equal(out, expected)

    def test_take_invalid_kwargs(self, frame_or_series):
        indices = [-3, 2, 0, 1]

        obj = DataFrame(range(5))
        obj = tm.get_obj(obj, frame_or_series)

        msg = r"take\(\) got an unexpected keyword argument 'foo'"
        with pytest.raises(TypeError, match=msg):
            obj.take(indices, foo=2)

        msg = "the 'out' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            obj.take(indices, out=indices)

        msg = "the 'mode' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            obj.take(indices, mode="clip")

    def test_axis_classmethods(self, frame_or_series):
        box = frame_or_series
        obj = box(dtype=object)
        values = box._AXIS_TO_AXIS_NUMBER.keys()
        for v in values:
            assert obj._get_axis_number(v) == box._get_axis_number(v)
            assert obj._get_axis_name(v) == box._get_axis_name(v)
            assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v)

    def test_flags_identity(self, frame_or_series):
        obj = Series([1, 2])
        if frame_or_series is DataFrame:
            obj = obj.to_frame()

        assert obj.flags is obj.flags
        obj2 = obj.copy()
        assert obj2.flags is not obj.flags

    def test_bool_dep(self) -> None:
        # GH-51749
        msg_warn = (
            "DataFrame.bool is now deprecated and will be removed "
            "in future version of pandas"
        )
        with tm.assert_produces_warning(FutureWarning, match=msg_warn):
            DataFrame({"col": [False]}).bool()
venv/lib/python3.10/site-packages/pandas/tests/generic/test_label_or_level_utils.py
ADDED
@@ -0,0 +1,336 @@
import pytest

from pandas.core.dtypes.missing import array_equivalent

import pandas as pd


# Fixtures
# ========
@pytest.fixture
def df():
    """DataFrame with columns 'L1', 'L2', and 'L3'"""
    return pd.DataFrame({"L1": [1, 2, 3], "L2": [11, 12, 13], "L3": ["A", "B", "C"]})


@pytest.fixture(params=[[], ["L1"], ["L1", "L2"], ["L1", "L2", "L3"]])
def df_levels(request, df):
    """DataFrame with columns or index levels 'L1', 'L2', and 'L3'"""
    levels = request.param

    if levels:
        df = df.set_index(levels)

    return df


@pytest.fixture
def df_ambig(df):
    """DataFrame with levels 'L1' and 'L2' and labels 'L1' and 'L3'"""
    df = df.set_index(["L1", "L2"])

    df["L1"] = df["L3"]

    return df


@pytest.fixture
def df_duplabels(df):
    """DataFrame with level 'L1' and labels 'L2', 'L3', and 'L2'"""
    df = df.set_index(["L1"])
    df = pd.concat([df, df["L2"]], axis=1)

    return df


# Test is label/level reference
# =============================
def get_labels_levels(df_levels):
    expected_labels = list(df_levels.columns)
    expected_levels = [name for name in df_levels.index.names if name is not None]
    return expected_labels, expected_levels


def assert_label_reference(frame, labels, axis):
    for label in labels:
        assert frame._is_label_reference(label, axis=axis)
        assert not frame._is_level_reference(label, axis=axis)
        assert frame._is_label_or_level_reference(label, axis=axis)


def assert_level_reference(frame, levels, axis):
    for level in levels:
        assert frame._is_level_reference(level, axis=axis)
        assert not frame._is_label_reference(level, axis=axis)
        assert frame._is_label_or_level_reference(level, axis=axis)


# DataFrame
# ---------
def test_is_level_or_label_reference_df_simple(df_levels, axis):
    axis = df_levels._get_axis_number(axis)
    # Compute expected labels and levels
    expected_labels, expected_levels = get_labels_levels(df_levels)

    # Transpose frame if axis == 1
    if axis == 1:
        df_levels = df_levels.T

    # Perform checks
    assert_level_reference(df_levels, expected_levels, axis=axis)
    assert_label_reference(df_levels, expected_labels, axis=axis)


def test_is_level_reference_df_ambig(df_ambig, axis):
    axis = df_ambig._get_axis_number(axis)

    # Transpose frame if axis == 1
    if axis == 1:
        df_ambig = df_ambig.T

    # df has both an on-axis level and off-axis label named L1
    # Therefore L1 should reference the label, not the level
    assert_label_reference(df_ambig, ["L1"], axis=axis)

    # df has an on-axis level named L2 and it is not ambiguous
    # Therefore L2 is an level reference
    assert_level_reference(df_ambig, ["L2"], axis=axis)

    # df has a column named L3 and it not an level reference
    assert_label_reference(df_ambig, ["L3"], axis=axis)


# Series
# ------
def test_is_level_reference_series_simple_axis0(df):
    # Make series with L1 as index
    s = df.set_index("L1").L2
    assert_level_reference(s, ["L1"], axis=0)
    assert not s._is_level_reference("L2")

    # Make series with L1 and L2 as index
    s = df.set_index(["L1", "L2"]).L3
    assert_level_reference(s, ["L1", "L2"], axis=0)
    assert not s._is_level_reference("L3")


def test_is_level_reference_series_axis1_error(df):
    # Make series with L1 as index
    s = df.set_index("L1").L2

    with pytest.raises(ValueError, match="No axis named 1"):
        s._is_level_reference("L1", axis=1)


# Test _check_label_or_level_ambiguity_df
# =======================================


# DataFrame
# ---------
def test_check_label_or_level_ambiguity_df(df_ambig, axis):
    axis = df_ambig._get_axis_number(axis)
    # Transpose frame if axis == 1
    if axis == 1:
        df_ambig = df_ambig.T
        msg = "'L1' is both a column level and an index label"

    else:
        msg = "'L1' is both an index level and a column label"
    # df_ambig has both an on-axis level and off-axis label named L1
    # Therefore, L1 is ambiguous.
    with pytest.raises(ValueError, match=msg):
        df_ambig._check_label_or_level_ambiguity("L1", axis=axis)

    # df_ambig has an on-axis level named L2,, and it is not ambiguous.
    df_ambig._check_label_or_level_ambiguity("L2", axis=axis)

    # df_ambig has an off-axis label named L3, and it is not ambiguous
    assert not df_ambig._check_label_or_level_ambiguity("L3", axis=axis)


# Series
# ------
def test_check_label_or_level_ambiguity_series(df):
    # A series has no columns and therefore references are never ambiguous

    # Make series with L1 as index
    s = df.set_index("L1").L2
    s._check_label_or_level_ambiguity("L1", axis=0)
    s._check_label_or_level_ambiguity("L2", axis=0)

    # Make series with L1 and L2 as index
    s = df.set_index(["L1", "L2"]).L3
    s._check_label_or_level_ambiguity("L1", axis=0)
    s._check_label_or_level_ambiguity("L2", axis=0)
    s._check_label_or_level_ambiguity("L3", axis=0)


def test_check_label_or_level_ambiguity_series_axis1_error(df):
    # Make series with L1 as index
    s = df.set_index("L1").L2

    with pytest.raises(ValueError, match="No axis named 1"):
        s._check_label_or_level_ambiguity("L1", axis=1)


# Test _get_label_or_level_values
# ===============================
def assert_label_values(frame, labels, axis):
    axis = frame._get_axis_number(axis)
    for label in labels:
        if axis == 0:
            expected = frame[label]._values
        else:
            expected = frame.loc[label]._values

        result = frame._get_label_or_level_values(label, axis=axis)
        assert array_equivalent(expected, result)


def assert_level_values(frame, levels, axis):
    axis = frame._get_axis_number(axis)
    for level in levels:
        if axis == 0:
            expected = frame.index.get_level_values(level=level)._values
        else:
            expected = frame.columns.get_level_values(level=level)._values

        result = frame._get_label_or_level_values(level, axis=axis)
        assert array_equivalent(expected, result)


# DataFrame
# ---------
def test_get_label_or_level_values_df_simple(df_levels, axis):
    # Compute expected labels and levels
    expected_labels, expected_levels = get_labels_levels(df_levels)

    axis = df_levels._get_axis_number(axis)
    # Transpose frame if axis == 1
    if axis == 1:
        df_levels = df_levels.T

    # Perform checks
    assert_label_values(df_levels, expected_labels, axis=axis)
    assert_level_values(df_levels, expected_levels, axis=axis)


def test_get_label_or_level_values_df_ambig(df_ambig, axis):
    axis = df_ambig._get_axis_number(axis)
    # Transpose frame if axis == 1
    if axis == 1:
        df_ambig = df_ambig.T

    # df has an on-axis level named L2, and it is not ambiguous.
    assert_level_values(df_ambig, ["L2"], axis=axis)

    # df has an off-axis label named L3, and it is not ambiguous.
    assert_label_values(df_ambig, ["L3"], axis=axis)


def test_get_label_or_level_values_df_duplabels(df_duplabels, axis):
    axis = df_duplabels._get_axis_number(axis)
    # Transpose frame if axis == 1
    if axis == 1:
        df_duplabels = df_duplabels.T

    # df has unambiguous level 'L1'
    assert_level_values(df_duplabels, ["L1"], axis=axis)

    # df has unique label 'L3'
    assert_label_values(df_duplabels, ["L3"], axis=axis)

    # df has duplicate labels 'L2'
    if axis == 0:
        expected_msg = "The column label 'L2' is not unique"
    else:
        expected_msg = "The index label 'L2' is not unique"

    with pytest.raises(ValueError, match=expected_msg):
        assert_label_values(df_duplabels, ["L2"], axis=axis)


# Series
# ------
def test_get_label_or_level_values_series_axis0(df):
    # Make series with L1 as index
    s = df.set_index("L1").L2
    assert_level_values(s, ["L1"], axis=0)

    # Make series with L1 and L2 as index
    s = df.set_index(["L1", "L2"]).L3
    assert_level_values(s, ["L1", "L2"], axis=0)


def test_get_label_or_level_values_series_axis1_error(df):
    # Make series with L1 as index
    s = df.set_index("L1").L2

    with pytest.raises(ValueError, match="No axis named 1"):
        s._get_label_or_level_values("L1", axis=1)


# Test _drop_labels_or_levels
# ===========================
def assert_labels_dropped(frame, labels, axis):
    axis = frame._get_axis_number(axis)
    for label in labels:
        df_dropped = frame._drop_labels_or_levels(label, axis=axis)

        if axis == 0:
            assert label in frame.columns
            assert label not in df_dropped.columns
        else:
            assert label in frame.index
            assert label not in df_dropped.index


def assert_levels_dropped(frame, levels, axis):
    axis = frame._get_axis_number(axis)
    for level in levels:
        df_dropped = frame._drop_labels_or_levels(level, axis=axis)

        if axis == 0:
            assert level in frame.index.names
            assert level not in df_dropped.index.names
        else:
            assert level in frame.columns.names
            assert level not in df_dropped.columns.names


# DataFrame
# ---------
def test_drop_labels_or_levels_df(df_levels, axis):
    # Compute expected labels and levels
    expected_labels, expected_levels = get_labels_levels(df_levels)

    axis = df_levels._get_axis_number(axis)
    # Transpose frame if axis == 1
    if axis == 1:
        df_levels = df_levels.T

    # Perform checks
    assert_labels_dropped(df_levels, expected_labels, axis=axis)
    assert_levels_dropped(df_levels, expected_levels, axis=axis)

    with pytest.raises(ValueError, match="not valid labels or levels"):
        df_levels._drop_labels_or_levels("L4", axis=axis)


# Series
# ------
def test_drop_labels_or_levels_series(df):
    # Make series with L1 as index
    s = df.set_index("L1").L2
    assert_levels_dropped(s, ["L1"], axis=0)

    with pytest.raises(ValueError, match="not valid labels or levels"):
        s._drop_labels_or_levels("L4", axis=0)

    # Make series with L1 and L2 as index
    s = df.set_index(["L1", "L2"]).L3
    assert_levels_dropped(s, ["L1", "L2"], axis=0)

    with pytest.raises(ValueError, match="not valid labels or levels"):
        s._drop_labels_or_levels("L4", axis=0)
venv/lib/python3.10/site-packages/pandas/tests/generic/test_series.py
ADDED
@@ -0,0 +1,159 @@
from operator import methodcaller

import numpy as np
import pytest

import pandas as pd
from pandas import (
    MultiIndex,
    Series,
    date_range,
)
import pandas._testing as tm


class TestSeries:
    @pytest.mark.parametrize("func", ["rename_axis", "_set_axis_name"])
    def test_set_axis_name_mi(self, func):
        ser = Series(
            [11, 21, 31],
            index=MultiIndex.from_tuples(
                [("A", x) for x in ["a", "B", "c"]], names=["l1", "l2"]
            ),
        )

        result = methodcaller(func, ["L1", "L2"])(ser)
        assert ser.index.name is None
        assert ser.index.names == ["l1", "l2"]
        assert result.index.name is None
        assert result.index.names, ["L1", "L2"]

    def test_set_axis_name_raises(self):
        ser = Series([1])
        msg = "No axis named 1 for object type Series"
        with pytest.raises(ValueError, match=msg):
            ser._set_axis_name(name="a", axis=1)

    def test_get_bool_data_preserve_dtype(self):
        ser = Series([True, False, True])
        result = ser._get_bool_data()
        tm.assert_series_equal(result, ser)

    def test_nonzero_single_element(self):
        # allow single item via bool method
        msg_warn = (
            "Series.bool is now deprecated and will be removed "
            "in future version of pandas"
        )
        ser = Series([True])
        ser1 = Series([False])
        with tm.assert_produces_warning(FutureWarning, match=msg_warn):
            assert ser.bool()
        with tm.assert_produces_warning(FutureWarning, match=msg_warn):
            assert not ser1.bool()

    @pytest.mark.parametrize("data", [np.nan, pd.NaT, True, False])
    def test_nonzero_single_element_raise_1(self, data):
        # single item nan to raise
        series = Series([data])

        msg = "The truth value of a Series is ambiguous"
        with pytest.raises(ValueError, match=msg):
            bool(series)

    @pytest.mark.parametrize("data", [np.nan, pd.NaT])
    def test_nonzero_single_element_raise_2(self, data):
        msg_warn = (
            "Series.bool is now deprecated and will be removed "
            "in future version of pandas"
        )
        msg_err = "bool cannot act on a non-boolean single element Series"
        series = Series([data])
        with tm.assert_produces_warning(FutureWarning, match=msg_warn):
            with pytest.raises(ValueError, match=msg_err):
                series.bool()

    @pytest.mark.parametrize("data", [(True, True), (False, False)])
    def test_nonzero_multiple_element_raise(self, data):
        # multiple bool are still an error
        msg_warn = (
            "Series.bool is now deprecated and will be removed "
            "in future version of pandas"
        )
        msg_err = "The truth value of a Series is ambiguous"
        series = Series([data])
        with pytest.raises(ValueError, match=msg_err):
            bool(series)
        with tm.assert_produces_warning(FutureWarning, match=msg_warn):
            with pytest.raises(ValueError, match=msg_err):
                series.bool()

    @pytest.mark.parametrize("data", [1, 0, "a", 0.0])
    def test_nonbool_single_element_raise(self, data):
        # single non-bool are an error
        msg_warn = (
            "Series.bool is now deprecated and will be removed "
            "in future version of pandas"
        )
        msg_err1 = "The truth value of a Series is ambiguous"
        msg_err2 = "bool cannot act on a non-boolean single element Series"
        series = Series([data])
        with pytest.raises(ValueError, match=msg_err1):
            bool(series)
        with tm.assert_produces_warning(FutureWarning, match=msg_warn):
            with pytest.raises(ValueError, match=msg_err2):
                series.bool()

    def test_metadata_propagation_indiv_resample(self):
        # resample
        ts = Series(
            np.random.default_rng(2).random(1000),
            index=date_range("20130101", periods=1000, freq="s"),
            name="foo",
        )
        result = ts.resample("1min").mean()
        tm.assert_metadata_equivalent(ts, result)

        result = ts.resample("1min").min()
        tm.assert_metadata_equivalent(ts, result)

        result = ts.resample("1min").apply(lambda x: x.sum())
        tm.assert_metadata_equivalent(ts, result)

    def test_metadata_propagation_indiv(self, monkeypatch):
        # check that the metadata matches up on the resulting ops

        ser = Series(range(3), range(3))
        ser.name = "foo"
        ser2 = Series(range(3), range(3))
        ser2.name = "bar"

        result = ser.T
        tm.assert_metadata_equivalent(ser, result)

        def finalize(self, other, method=None, **kwargs):
            for name in self._metadata:
                if method == "concat" and name == "filename":
                    value = "+".join(
                        [
                            getattr(obj, name)
                            for obj in other.objs
                            if getattr(obj, name, None)
                        ]
                    )
                    object.__setattr__(self, name, value)
                else:
                    object.__setattr__(self, name, getattr(other, name, None))

            return self

        with monkeypatch.context() as m:
            m.setattr(Series, "_metadata", ["name", "filename"])
            m.setattr(Series, "__finalize__", finalize)

            ser.filename = "foo"
            ser2.filename = "bar"

            result = pd.concat([ser, ser2])
            assert result.filename == "foo+bar"
            assert result.name is None
venv/lib/python3.10/site-packages/pandas/tests/generic/test_to_xarray.py
ADDED
@@ -0,0 +1,130 @@
import numpy as np
import pytest

from pandas import (
    Categorical,
    DataFrame,
    MultiIndex,
    Series,
    date_range,
)
import pandas._testing as tm

pytest.importorskip("xarray")


class TestDataFrameToXArray:
    @pytest.fixture
    def df(self):
        return DataFrame(
            {
                "a": list("abcd"),
                "b": list(range(1, 5)),
                "c": np.arange(3, 7).astype("u1"),
                "d": np.arange(4.0, 8.0, dtype="float64"),
                "e": [True, False, True, False],
                "f": Categorical(list("abcd")),
                "g": date_range("20130101", periods=4),
                "h": date_range("20130101", periods=4, tz="US/Eastern"),
            }
        )

    def test_to_xarray_index_types(self, index_flat, df, using_infer_string):
        index = index_flat
        # MultiIndex is tested in test_to_xarray_with_multiindex
        if len(index) == 0:
            pytest.skip("Test doesn't make sense for empty index")

        from xarray import Dataset

        df.index = index[:4]
        df.index.name = "foo"
        df.columns.name = "bar"
        result = df.to_xarray()
        assert result.sizes["foo"] == 4
        assert len(result.coords) == 1
        assert len(result.data_vars) == 8
        tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
        assert isinstance(result, Dataset)

        # idempotency
        # datetimes w/tz are preserved
        # column names are lost
        expected = df.copy()
        expected["f"] = expected["f"].astype(
            object if not using_infer_string else "string[pyarrow_numpy]"
        )
        expected.columns.name = None
        tm.assert_frame_equal(result.to_dataframe(), expected)

    def test_to_xarray_empty(self, df):
        from xarray import Dataset

        df.index.name = "foo"
        result = df[0:0].to_xarray()
        assert result.sizes["foo"] == 0
        assert isinstance(result, Dataset)

    def test_to_xarray_with_multiindex(self, df, using_infer_string):
        from xarray import Dataset

        # MultiIndex
        df.index = MultiIndex.from_product([["a"], range(4)], names=["one", "two"])
        result = df.to_xarray()
        assert result.sizes["one"] == 1
        assert result.sizes["two"] == 4
        assert len(result.coords) == 2
        assert len(result.data_vars) == 8
        tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
        assert isinstance(result, Dataset)

        result = result.to_dataframe()
        expected = df.copy()
        expected["f"] = expected["f"].astype(
            object if not using_infer_string else "string[pyarrow_numpy]"
        )
        expected.columns.name = None
        tm.assert_frame_equal(result, expected)


class TestSeriesToXArray:
    def test_to_xarray_index_types(self, index_flat):
        index = index_flat
        # MultiIndex is tested in test_to_xarray_with_multiindex

        from xarray import DataArray

        ser = Series(range(len(index)), index=index, dtype="int64")
        ser.index.name = "foo"
        result = ser.to_xarray()
        repr(result)
        assert len(result) == len(index)
        assert len(result.coords) == 1
        tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
        assert isinstance(result, DataArray)

        # idempotency
        tm.assert_series_equal(result.to_series(), ser)

    def test_to_xarray_empty(self):
        from xarray import DataArray

        ser = Series([], dtype=object)
        ser.index.name = "foo"
        result = ser.to_xarray()
        assert len(result) == 0
        assert len(result.coords) == 1
        tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
        assert isinstance(result, DataArray)

    def test_to_xarray_with_multiindex(self):
        from xarray import DataArray

        mi = MultiIndex.from_product([["a", "b"], range(3)], names=["one", "two"])
        ser = Series(range(6), dtype="int64", index=mi)
        result = ser.to_xarray()
        assert len(result) == 2
        tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
        assert isinstance(result, DataArray)
        res = result.to_series()
        tm.assert_series_equal(res, ser)
venv/lib/python3.10/site-packages/pandas/tests/tseries/__init__.py
ADDED
File without changes
|
venv/lib/python3.10/site-packages/pandas/tests/tseries/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (188 Bytes)
|
venv/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/__init__.py
ADDED
File without changes
|