Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes. See raw diff.
- env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/common.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_constructors.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_conversion.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_fillna.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_misc.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_transpose.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_unique.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_value_counts.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/base/common.py +9 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_conversion.py +562 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_fillna.py +60 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_misc.py +191 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_astype.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_chained_assignment_deprecation.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_constructors.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_core_functionalities.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_indexing.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_internals.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_interp_fillna.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_methods.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_setitem.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/util.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_array.py +190 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_astype.py +260 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_chained_assignment_deprecation.py +174 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_clip.py +101 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_constructors.py +382 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_core_functionalities.py +106 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_functions.py +396 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_indexing.py +1266 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_internals.py +151 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_interp_fillna.py +432 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_methods.py +2055 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_replace.py +481 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_setitem.py +156 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_util.py +14 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/util.py +30 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__init__.py +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_store.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_subclass.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/common.py +50 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_append.py +986 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_complex.py +195 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_read.py +412 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_retain_attributes.py +92 -0
- env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_subclass.py +52 -0
env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (182 Bytes)

env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/common.cpython-310.pyc
ADDED
Binary file (526 Bytes)

env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_constructors.cpython-310.pyc
ADDED
Binary file (5.6 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_conversion.cpython-310.pyc
ADDED
Binary file (14.3 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_fillna.cpython-310.pyc
ADDED
Binary file (1.56 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_misc.cpython-310.pyc
ADDED
Binary file (5.04 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_transpose.cpython-310.pyc
ADDED
Binary file (1.82 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_unique.cpython-310.pyc
ADDED
Binary file (3.68 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__pycache__/test_value_counts.cpython-310.pyc
ADDED
Binary file (8.1 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/base/common.py
ADDED
@@ -0,0 +1,9 @@
from typing import Any

from pandas import Index


def allow_na_ops(obj: Any) -> bool:
    """Whether to skip test cases including NaN"""
    is_bool_index = isinstance(obj, Index) and obj.inferred_type == "boolean"
    return not is_bool_index and obj._can_hold_na

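The helper above is used by the base tests to decide whether a fixture object can hold missing values. A minimal illustrative sketch of how it behaves (the example values are not from the diff, and it assumes the pandas test modules are importable, as they are in a standard install):

```python
from pandas import Index, Series
from pandas.tests.base.common import allow_na_ops

# A float Series can hold NaN, so NA-related cases are allowed.
assert allow_na_ops(Series([1.0, 2.0]))

# A boolean Index cannot hold NaN, so NA-related cases are skipped.
assert not allow_na_ops(Index([True, False]))
```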
env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_conversion.py
ADDED
@@ -0,0 +1,562 @@
import numpy as np
import pytest

from pandas.core.dtypes.dtypes import DatetimeTZDtype

import pandas as pd
from pandas import (
    CategoricalIndex,
    Series,
    Timedelta,
    Timestamp,
    date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
    DatetimeArray,
    IntervalArray,
    NumpyExtensionArray,
    PeriodArray,
    SparseArray,
    TimedeltaArray,
)
from pandas.core.arrays.string_arrow import ArrowStringArrayNumpySemantics


class TestToIterable:
    # test that we convert an iterable to python types

    dtypes = [
        ("int8", int),
        ("int16", int),
        ("int32", int),
        ("int64", int),
        ("uint8", int),
        ("uint16", int),
        ("uint32", int),
        ("uint64", int),
        ("float16", float),
        ("float32", float),
        ("float64", float),
        ("datetime64[ns]", Timestamp),
        ("datetime64[ns, US/Eastern]", Timestamp),
        ("timedelta64[ns]", Timedelta),
    ]

    @pytest.mark.parametrize("dtype, rdtype", dtypes)
    @pytest.mark.parametrize(
        "method",
        [
            lambda x: x.tolist(),
            lambda x: x.to_list(),
            lambda x: list(x),
            lambda x: list(x.__iter__()),
        ],
        ids=["tolist", "to_list", "list", "iter"],
    )
    def test_iterable(self, index_or_series, method, dtype, rdtype):
        # gh-10904
        # gh-13258
        # coerce iteration to underlying python / pandas types
        typ = index_or_series
        if dtype == "float16" and issubclass(typ, pd.Index):
            with pytest.raises(NotImplementedError, match="float16 indexes are not "):
                typ([1], dtype=dtype)
            return
        s = typ([1], dtype=dtype)
        result = method(s)[0]
        assert isinstance(result, rdtype)

    @pytest.mark.parametrize(
        "dtype, rdtype, obj",
        [
            ("object", object, "a"),
            ("object", int, 1),
            ("category", object, "a"),
            ("category", int, 1),
        ],
    )
    @pytest.mark.parametrize(
        "method",
        [
            lambda x: x.tolist(),
            lambda x: x.to_list(),
            lambda x: list(x),
            lambda x: list(x.__iter__()),
        ],
        ids=["tolist", "to_list", "list", "iter"],
    )
    def test_iterable_object_and_category(
        self, index_or_series, method, dtype, rdtype, obj
    ):
        # gh-10904
        # gh-13258
        # coerce iteration to underlying python / pandas types
        typ = index_or_series
        s = typ([obj], dtype=dtype)
        result = method(s)[0]
        assert isinstance(result, rdtype)

    @pytest.mark.parametrize("dtype, rdtype", dtypes)
    def test_iterable_items(self, dtype, rdtype):
        # gh-13258
        # test if items yields the correct boxed scalars
        # this only applies to series
        s = Series([1], dtype=dtype)
        _, result = next(iter(s.items()))
        assert isinstance(result, rdtype)

        _, result = next(iter(s.items()))
        assert isinstance(result, rdtype)

    @pytest.mark.parametrize(
        "dtype, rdtype", dtypes + [("object", int), ("category", int)]
    )
    def test_iterable_map(self, index_or_series, dtype, rdtype):
        # gh-13236
        # coerce iteration to underlying python / pandas types
        typ = index_or_series
        if dtype == "float16" and issubclass(typ, pd.Index):
            with pytest.raises(NotImplementedError, match="float16 indexes are not "):
                typ([1], dtype=dtype)
            return
        s = typ([1], dtype=dtype)
        result = s.map(type)[0]
        if not isinstance(rdtype, tuple):
            rdtype = (rdtype,)
        assert result in rdtype

    @pytest.mark.parametrize(
        "method",
        [
            lambda x: x.tolist(),
            lambda x: x.to_list(),
            lambda x: list(x),
            lambda x: list(x.__iter__()),
        ],
        ids=["tolist", "to_list", "list", "iter"],
    )
    def test_categorial_datetimelike(self, method):
        i = CategoricalIndex([Timestamp("1999-12-31"), Timestamp("2000-12-31")])

        result = method(i)[0]
        assert isinstance(result, Timestamp)

    def test_iter_box_dt64(self, unit):
        vals = [Timestamp("2011-01-01"), Timestamp("2011-01-02")]
        ser = Series(vals).dt.as_unit(unit)
        assert ser.dtype == f"datetime64[{unit}]"
        for res, exp in zip(ser, vals):
            assert isinstance(res, Timestamp)
            assert res.tz is None
            assert res == exp
            assert res.unit == unit

    def test_iter_box_dt64tz(self, unit):
        vals = [
            Timestamp("2011-01-01", tz="US/Eastern"),
            Timestamp("2011-01-02", tz="US/Eastern"),
        ]
        ser = Series(vals).dt.as_unit(unit)

        assert ser.dtype == f"datetime64[{unit}, US/Eastern]"
        for res, exp in zip(ser, vals):
            assert isinstance(res, Timestamp)
            assert res.tz == exp.tz
            assert res == exp
            assert res.unit == unit

    def test_iter_box_timedelta64(self, unit):
        # timedelta
        vals = [Timedelta("1 days"), Timedelta("2 days")]
        ser = Series(vals).dt.as_unit(unit)
        assert ser.dtype == f"timedelta64[{unit}]"
        for res, exp in zip(ser, vals):
            assert isinstance(res, Timedelta)
            assert res == exp
            assert res.unit == unit

    def test_iter_box_period(self):
        # period
        vals = [pd.Period("2011-01-01", freq="M"), pd.Period("2011-01-02", freq="M")]
        s = Series(vals)
        assert s.dtype == "Period[M]"
        for res, exp in zip(s, vals):
            assert isinstance(res, pd.Period)
            assert res.freq == "ME"
            assert res == exp


@pytest.mark.parametrize(
    "arr, expected_type, dtype",
    [
        (np.array([0, 1], dtype=np.int64), np.ndarray, "int64"),
        (np.array(["a", "b"]), np.ndarray, "object"),
        (pd.Categorical(["a", "b"]), pd.Categorical, "category"),
        (
            pd.DatetimeIndex(["2017", "2018"], tz="US/Central"),
            DatetimeArray,
            "datetime64[ns, US/Central]",
        ),
        (
            pd.PeriodIndex([2018, 2019], freq="Y"),
            PeriodArray,
            pd.core.dtypes.dtypes.PeriodDtype("Y-DEC"),
        ),
        (pd.IntervalIndex.from_breaks([0, 1, 2]), IntervalArray, "interval"),
        (
            pd.DatetimeIndex(["2017", "2018"]),
            DatetimeArray,
            "datetime64[ns]",
        ),
        (
            pd.TimedeltaIndex([10**10]),
            TimedeltaArray,
            "m8[ns]",
        ),
    ],
)
def test_values_consistent(arr, expected_type, dtype, using_infer_string):
    if using_infer_string and dtype == "object":
        expected_type = ArrowStringArrayNumpySemantics
    l_values = Series(arr)._values
    r_values = pd.Index(arr)._values
    assert type(l_values) is expected_type
    assert type(l_values) is type(r_values)

    tm.assert_equal(l_values, r_values)


@pytest.mark.parametrize("arr", [np.array([1, 2, 3])])
def test_numpy_array(arr):
    ser = Series(arr)
    result = ser.array
    expected = NumpyExtensionArray(arr)
    tm.assert_extension_array_equal(result, expected)


def test_numpy_array_all_dtypes(any_numpy_dtype):
    ser = Series(dtype=any_numpy_dtype)
    result = ser.array
    if np.dtype(any_numpy_dtype).kind == "M":
        assert isinstance(result, DatetimeArray)
    elif np.dtype(any_numpy_dtype).kind == "m":
        assert isinstance(result, TimedeltaArray)
    else:
        assert isinstance(result, NumpyExtensionArray)


@pytest.mark.parametrize(
    "arr, attr",
    [
        (pd.Categorical(["a", "b"]), "_codes"),
        (PeriodArray._from_sequence(["2000", "2001"], dtype="period[D]"), "_ndarray"),
        (pd.array([0, np.nan], dtype="Int64"), "_data"),
        (IntervalArray.from_breaks([0, 1]), "_left"),
        (SparseArray([0, 1]), "_sparse_values"),
        (
            DatetimeArray._from_sequence(np.array([1, 2], dtype="datetime64[ns]")),
            "_ndarray",
        ),
        # tz-aware Datetime
        (
            DatetimeArray._from_sequence(
                np.array(
                    ["2000-01-01T12:00:00", "2000-01-02T12:00:00"], dtype="M8[ns]"
                ),
                dtype=DatetimeTZDtype(tz="US/Central"),
            ),
            "_ndarray",
        ),
    ],
)
def test_array(arr, attr, index_or_series, request):
    box = index_or_series

    result = box(arr, copy=False).array

    if attr:
        arr = getattr(arr, attr)
        result = getattr(result, attr)

    assert result is arr


def test_array_multiindex_raises():
    idx = pd.MultiIndex.from_product([["A"], ["a", "b"]])
    msg = "MultiIndex has no single backing array"
    with pytest.raises(ValueError, match=msg):
        idx.array


@pytest.mark.parametrize(
    "arr, expected",
    [
        (np.array([1, 2], dtype=np.int64), np.array([1, 2], dtype=np.int64)),
        (pd.Categorical(["a", "b"]), np.array(["a", "b"], dtype=object)),
        (
            pd.core.arrays.period_array(["2000", "2001"], freq="D"),
            np.array([pd.Period("2000", freq="D"), pd.Period("2001", freq="D")]),
        ),
        (pd.array([0, np.nan], dtype="Int64"), np.array([0, np.nan])),
        (
            IntervalArray.from_breaks([0, 1, 2]),
            np.array([pd.Interval(0, 1), pd.Interval(1, 2)], dtype=object),
        ),
        (SparseArray([0, 1]), np.array([0, 1], dtype=np.int64)),
        # tz-naive datetime
        (
            DatetimeArray._from_sequence(np.array(["2000", "2001"], dtype="M8[ns]")),
            np.array(["2000", "2001"], dtype="M8[ns]"),
        ),
        # tz-aware stays tz-aware
        (
            DatetimeArray._from_sequence(
                np.array(["2000-01-01T06:00:00", "2000-01-02T06:00:00"], dtype="M8[ns]")
            )
            .tz_localize("UTC")
            .tz_convert("US/Central"),
            np.array(
                [
                    Timestamp("2000-01-01", tz="US/Central"),
                    Timestamp("2000-01-02", tz="US/Central"),
                ]
            ),
        ),
        # Timedelta
        (
            TimedeltaArray._from_sequence(
                np.array([0, 3600000000000], dtype="i8").view("m8[ns]")
            ),
            np.array([0, 3600000000000], dtype="m8[ns]"),
        ),
        # GH#26406 tz is preserved in Categorical[dt64tz]
        (
            pd.Categorical(date_range("2016-01-01", periods=2, tz="US/Pacific")),
            np.array(
                [
                    Timestamp("2016-01-01", tz="US/Pacific"),
                    Timestamp("2016-01-02", tz="US/Pacific"),
                ]
            ),
        ),
    ],
)
def test_to_numpy(arr, expected, index_or_series_or_array, request):
    box = index_or_series_or_array

    with tm.assert_produces_warning(None):
        thing = box(arr)

    result = thing.to_numpy()
    tm.assert_numpy_array_equal(result, expected)

    result = np.asarray(thing)
    tm.assert_numpy_array_equal(result, expected)


@pytest.mark.parametrize("as_series", [True, False])
@pytest.mark.parametrize(
    "arr", [np.array([1, 2, 3], dtype="int64"), np.array(["a", "b", "c"], dtype=object)]
)
def test_to_numpy_copy(arr, as_series, using_infer_string):
    obj = pd.Index(arr, copy=False)
    if as_series:
        obj = Series(obj.values, copy=False)

    # no copy by default
    result = obj.to_numpy()
    if using_infer_string and arr.dtype == object:
        assert np.shares_memory(arr, result) is False
    else:
        assert np.shares_memory(arr, result) is True

    result = obj.to_numpy(copy=False)
    if using_infer_string and arr.dtype == object:
        assert np.shares_memory(arr, result) is False
    else:
        assert np.shares_memory(arr, result) is True

    # copy=True
    result = obj.to_numpy(copy=True)
    assert np.shares_memory(arr, result) is False


@pytest.mark.parametrize("as_series", [True, False])
def test_to_numpy_dtype(as_series, unit):
    tz = "US/Eastern"
    obj = pd.DatetimeIndex(["2000", "2001"], tz=tz)
    if as_series:
        obj = Series(obj)

    # preserve tz by default
    result = obj.to_numpy()
    expected = np.array(
        [Timestamp("2000", tz=tz), Timestamp("2001", tz=tz)], dtype=object
    )
    tm.assert_numpy_array_equal(result, expected)

    result = obj.to_numpy(dtype="object")
    tm.assert_numpy_array_equal(result, expected)

    result = obj.to_numpy(dtype="M8[ns]")
    expected = np.array(["2000-01-01T05", "2001-01-01T05"], dtype="M8[ns]")
    tm.assert_numpy_array_equal(result, expected)


@pytest.mark.parametrize(
    "values, dtype, na_value, expected",
    [
        ([1, 2, None], "float64", 0, [1.0, 2.0, 0.0]),
        (
            [Timestamp("2000"), Timestamp("2000"), pd.NaT],
            None,
            Timestamp("2000"),
            [np.datetime64("2000-01-01T00:00:00.000000000")] * 3,
        ),
    ],
)
def test_to_numpy_na_value_numpy_dtype(
    index_or_series, values, dtype, na_value, expected
):
    obj = index_or_series(values)
    result = obj.to_numpy(dtype=dtype, na_value=na_value)
    expected = np.array(expected)
    tm.assert_numpy_array_equal(result, expected)


@pytest.mark.parametrize(
    "data, multiindex, dtype, na_value, expected",
    [
        (
            [1, 2, None, 4],
            [(0, "a"), (0, "b"), (1, "b"), (1, "c")],
            float,
            None,
            [1.0, 2.0, np.nan, 4.0],
        ),
        (
            [1, 2, None, 4],
            [(0, "a"), (0, "b"), (1, "b"), (1, "c")],
            float,
            np.nan,
            [1.0, 2.0, np.nan, 4.0],
        ),
        (
            [1.0, 2.0, np.nan, 4.0],
            [("a", 0), ("a", 1), ("a", 2), ("b", 0)],
            int,
            0,
            [1, 2, 0, 4],
        ),
        (
            [Timestamp("2000"), Timestamp("2000"), pd.NaT],
            [(0, Timestamp("2021")), (0, Timestamp("2022")), (1, Timestamp("2000"))],
            None,
            Timestamp("2000"),
            [np.datetime64("2000-01-01T00:00:00.000000000")] * 3,
        ),
    ],
)
def test_to_numpy_multiindex_series_na_value(
    data, multiindex, dtype, na_value, expected
):
    index = pd.MultiIndex.from_tuples(multiindex)
    series = Series(data, index=index)
    result = series.to_numpy(dtype=dtype, na_value=na_value)
    expected = np.array(expected)
    tm.assert_numpy_array_equal(result, expected)


def test_to_numpy_kwargs_raises():
    # numpy
    s = Series([1, 2, 3])
    msg = r"to_numpy\(\) got an unexpected keyword argument 'foo'"
    with pytest.raises(TypeError, match=msg):
        s.to_numpy(foo=True)

    # extension
    s = Series([1, 2, 3], dtype="Int64")
    with pytest.raises(TypeError, match=msg):
        s.to_numpy(foo=True)


@pytest.mark.parametrize(
    "data",
    [
        {"a": [1, 2, 3], "b": [1, 2, None]},
        {"a": np.array([1, 2, 3]), "b": np.array([1, 2, np.nan])},
        {"a": pd.array([1, 2, 3]), "b": pd.array([1, 2, None])},
    ],
)
@pytest.mark.parametrize("dtype, na_value", [(float, np.nan), (object, None)])
def test_to_numpy_dataframe_na_value(data, dtype, na_value):
    # https://github.com/pandas-dev/pandas/issues/33820
    df = pd.DataFrame(data)
    result = df.to_numpy(dtype=dtype, na_value=na_value)
    expected = np.array([[1, 1], [2, 2], [3, na_value]], dtype=dtype)
    tm.assert_numpy_array_equal(result, expected)


@pytest.mark.parametrize(
    "data, expected",
    [
        (
            {"a": pd.array([1, 2, None])},
            np.array([[1.0], [2.0], [np.nan]], dtype=float),
        ),
        (
            {"a": [1, 2, 3], "b": [1, 2, 3]},
            np.array([[1, 1], [2, 2], [3, 3]], dtype=float),
        ),
    ],
)
def test_to_numpy_dataframe_single_block(data, expected):
    # https://github.com/pandas-dev/pandas/issues/33820
    df = pd.DataFrame(data)
    result = df.to_numpy(dtype=float, na_value=np.nan)
    tm.assert_numpy_array_equal(result, expected)


def test_to_numpy_dataframe_single_block_no_mutate():
    # https://github.com/pandas-dev/pandas/issues/33820
    result = pd.DataFrame(np.array([1.0, 2.0, np.nan]))
    expected = pd.DataFrame(np.array([1.0, 2.0, np.nan]))
    result.to_numpy(na_value=0.0)
    tm.assert_frame_equal(result, expected)


class TestAsArray:
    @pytest.mark.parametrize("tz", [None, "US/Central"])
    def test_asarray_object_dt64(self, tz):
        ser = Series(date_range("2000", periods=2, tz=tz))

        with tm.assert_produces_warning(None):
            # Future behavior (for tzaware case) with no warning
            result = np.asarray(ser, dtype=object)

        expected = np.array(
            [Timestamp("2000-01-01", tz=tz), Timestamp("2000-01-02", tz=tz)]
        )
        tm.assert_numpy_array_equal(result, expected)

    def test_asarray_tz_naive(self):
        # This shouldn't produce a warning.
        ser = Series(date_range("2000", periods=2))
        expected = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")
        result = np.asarray(ser)

        tm.assert_numpy_array_equal(result, expected)

    def test_asarray_tz_aware(self):
        tz = "US/Central"
        ser = Series(date_range("2000", periods=2, tz=tz))
        expected = np.array(["2000-01-01T06", "2000-01-02T06"], dtype="M8[ns]")
        result = np.asarray(ser, dtype="datetime64[ns]")

        tm.assert_numpy_array_equal(result, expected)

        # Old behavior with no warning
        result = np.asarray(ser, dtype="M8[ns]")

        tm.assert_numpy_array_equal(result, expected)

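Several of the tests in this file exercise `.to_numpy(dtype=..., na_value=...)`. A small standalone sketch of the behaviour they assert (the example values are illustrative, not from the diff):

```python
import numpy as np
import pandas as pd

ser = pd.Series([1, 2, None], dtype="Int64")

# na_value replaces missing entries before conversion to the requested dtype
print(ser.to_numpy(dtype="float64", na_value=0))       # [1. 2. 0.]

# with na_value=np.nan the missing entry stays NaN in the float result
print(ser.to_numpy(dtype="float64", na_value=np.nan))  # [ 1.  2. nan]
```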
env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_fillna.py
ADDED
@@ -0,0 +1,60 @@
"""
Though Index.fillna and Series.fillna has separate impl,
test here to confirm these works as the same
"""

import numpy as np
import pytest

from pandas import MultiIndex
import pandas._testing as tm
from pandas.tests.base.common import allow_na_ops


def test_fillna(index_or_series_obj):
    # GH 11343
    obj = index_or_series_obj

    if isinstance(obj, MultiIndex):
        msg = "isna is not defined for MultiIndex"
        with pytest.raises(NotImplementedError, match=msg):
            obj.fillna(0)
        return

    # values will not be changed
    fill_value = obj.values[0] if len(obj) > 0 else 0
    result = obj.fillna(fill_value)

    tm.assert_equal(obj, result)

    # check shallow_copied
    assert obj is not result


@pytest.mark.parametrize("null_obj", [np.nan, None])
def test_fillna_null(null_obj, index_or_series_obj):
    # GH 11343
    obj = index_or_series_obj
    klass = type(obj)

    if not allow_na_ops(obj):
        pytest.skip(f"{klass} doesn't allow for NA operations")
    elif len(obj) < 1:
        pytest.skip("Test doesn't make sense on empty data")
    elif isinstance(obj, MultiIndex):
        pytest.skip(f"MultiIndex can't hold '{null_obj}'")

    values = obj._values
    fill_value = values[0]
    expected = values.copy()
    values[0:2] = null_obj
    expected[0:2] = fill_value

    expected = klass(expected)
    obj = klass(values)

    result = obj.fillna(fill_value)
    tm.assert_equal(result, expected)

    # check shallow_copied
    assert obj is not result

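These tests confirm that `Index.fillna` and `Series.fillna` behave the same and return a new object rather than mutating in place. A tiny illustration of that parity (values are made up for the example):

```python
import numpy as np
import pandas as pd

idx = pd.Index([1.0, np.nan, 3.0])
ser = pd.Series([1.0, np.nan, 3.0])

# both return a new, filled object; the originals keep their NaN
assert idx.fillna(0).tolist() == ser.fillna(0).tolist() == [1.0, 0.0, 3.0]
assert np.isnan(idx[1]) and np.isnan(ser[1])
```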
env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_misc.py
ADDED
@@ -0,0 +1,191 @@
import sys

import numpy as np
import pytest

from pandas._config import using_pyarrow_string_dtype

from pandas.compat import PYPY

from pandas.core.dtypes.common import (
    is_dtype_equal,
    is_object_dtype,
)

import pandas as pd
from pandas import (
    Index,
    Series,
)
import pandas._testing as tm


def test_isnull_notnull_docstrings():
    # GH#41855 make sure its clear these are aliases
    doc = pd.DataFrame.notnull.__doc__
    assert doc.startswith("\nDataFrame.notnull is an alias for DataFrame.notna.\n")
    doc = pd.DataFrame.isnull.__doc__
    assert doc.startswith("\nDataFrame.isnull is an alias for DataFrame.isna.\n")

    doc = Series.notnull.__doc__
    assert doc.startswith("\nSeries.notnull is an alias for Series.notna.\n")
    doc = Series.isnull.__doc__
    assert doc.startswith("\nSeries.isnull is an alias for Series.isna.\n")


@pytest.mark.parametrize(
    "op_name, op",
    [
        ("add", "+"),
        ("sub", "-"),
        ("mul", "*"),
        ("mod", "%"),
        ("pow", "**"),
        ("truediv", "/"),
        ("floordiv", "//"),
    ],
)
def test_binary_ops_docstring(frame_or_series, op_name, op):
    # not using the all_arithmetic_functions fixture with _get_opstr
    # as _get_opstr is used internally in the dynamic implementation of the docstring
    klass = frame_or_series

    operand1 = klass.__name__.lower()
    operand2 = "other"
    expected_str = " ".join([operand1, op, operand2])
    assert expected_str in getattr(klass, op_name).__doc__

    # reverse version of the binary ops
    expected_str = " ".join([operand2, op, operand1])
    assert expected_str in getattr(klass, "r" + op_name).__doc__


def test_ndarray_compat_properties(index_or_series_obj):
    obj = index_or_series_obj

    # Check that we work.
    for p in ["shape", "dtype", "T", "nbytes"]:
        assert getattr(obj, p, None) is not None

    # deprecated properties
    for p in ["strides", "itemsize", "base", "data"]:
        assert not hasattr(obj, p)

    msg = "can only convert an array of size 1 to a Python scalar"
    with pytest.raises(ValueError, match=msg):
        obj.item()  # len > 1

    assert obj.ndim == 1
    assert obj.size == len(obj)

    assert Index([1]).item() == 1
    assert Series([1]).item() == 1


@pytest.mark.skipif(
    PYPY or using_pyarrow_string_dtype(),
    reason="not relevant for PyPy doesn't work properly for arrow strings",
)
def test_memory_usage(index_or_series_memory_obj):
    obj = index_or_series_memory_obj
    # Clear index caches so that len(obj) == 0 report 0 memory usage
    if isinstance(obj, Series):
        is_ser = True
        obj.index._engine.clear_mapping()
    else:
        is_ser = False
        obj._engine.clear_mapping()

    res = obj.memory_usage()
    res_deep = obj.memory_usage(deep=True)

    is_object = is_object_dtype(obj) or (is_ser and is_object_dtype(obj.index))
    is_categorical = isinstance(obj.dtype, pd.CategoricalDtype) or (
        is_ser and isinstance(obj.index.dtype, pd.CategoricalDtype)
    )
    is_object_string = is_dtype_equal(obj, "string[python]") or (
        is_ser and is_dtype_equal(obj.index.dtype, "string[python]")
    )

    if len(obj) == 0:
        expected = 0
        assert res_deep == res == expected
    elif is_object or is_categorical or is_object_string:
        # only deep will pick them up
        assert res_deep > res
    else:
        assert res == res_deep

    # sys.getsizeof will call the .memory_usage with
    # deep=True, and add on some GC overhead
    diff = res_deep - sys.getsizeof(obj)
    assert abs(diff) < 100


def test_memory_usage_components_series(series_with_simple_index):
    series = series_with_simple_index
    total_usage = series.memory_usage(index=True)
    non_index_usage = series.memory_usage(index=False)
    index_usage = series.index.memory_usage()
    assert total_usage == non_index_usage + index_usage


@pytest.mark.parametrize("dtype", tm.NARROW_NP_DTYPES)
def test_memory_usage_components_narrow_series(dtype):
    series = Series(range(5), dtype=dtype, index=[f"i-{i}" for i in range(5)], name="a")
    total_usage = series.memory_usage(index=True)
    non_index_usage = series.memory_usage(index=False)
    index_usage = series.index.memory_usage()
    assert total_usage == non_index_usage + index_usage


def test_searchsorted(request, index_or_series_obj):
    # numpy.searchsorted calls obj.searchsorted under the hood.
    # See gh-12238
    obj = index_or_series_obj

    if isinstance(obj, pd.MultiIndex):
        # See gh-14833
        request.applymarker(
            pytest.mark.xfail(
                reason="np.searchsorted doesn't work on pd.MultiIndex: GH 14833"
            )
        )
    elif obj.dtype.kind == "c" and isinstance(obj, Index):
        # TODO: Should Series cases also raise? Looks like they use numpy
        # comparison semantics https://github.com/numpy/numpy/issues/15981
        mark = pytest.mark.xfail(reason="complex objects are not comparable")
        request.applymarker(mark)

    max_obj = max(obj, default=0)
    index = np.searchsorted(obj, max_obj)
    assert 0 <= index <= len(obj)

    index = np.searchsorted(obj, max_obj, sorter=range(len(obj)))
    assert 0 <= index <= len(obj)


def test_access_by_position(index_flat):
    index = index_flat

    if len(index) == 0:
        pytest.skip("Test doesn't make sense on empty data")

    series = Series(index)
    assert index[0] == series.iloc[0]
    assert index[5] == series.iloc[5]
    assert index[-1] == series.iloc[-1]

    size = len(index)
    assert index[-1] == index[size - 1]

    msg = f"index {size} is out of bounds for axis 0 with size {size}"
    if is_dtype_equal(index.dtype, "string[pyarrow]") or is_dtype_equal(
        index.dtype, "string[pyarrow_numpy]"
    ):
        msg = "index out of bounds"
    with pytest.raises(IndexError, match=msg):
        index[size]
    msg = "single positional indexer is out-of-bounds"
    with pytest.raises(IndexError, match=msg):
        series.iloc[size]

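`test_memory_usage` above relies on `memory_usage(deep=True)` also counting the Python objects referenced by object-dtype data. A quick illustration of that difference (the exact byte counts vary by platform):

```python
import pandas as pd

ser = pd.Series(["a", "bb", "ccc"])      # object dtype
shallow = ser.memory_usage()             # pointer buffer + index only
deep = ser.memory_usage(deep=True)       # additionally counts the str objects
assert deep > shallow
```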
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__init__.py
ADDED
File without changes

env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (187 Bytes)

env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_astype.cpython-310.pyc
ADDED
Binary file (6.76 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_chained_assignment_deprecation.cpython-310.pyc
ADDED
Binary file (4.04 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_constructors.cpython-310.pyc
ADDED
Binary file (9.82 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_core_functionalities.cpython-310.pyc
ADDED
Binary file (3.12 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_indexing.cpython-310.pyc
ADDED
Binary file (27.1 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_internals.cpython-310.pyc
ADDED
Binary file (3.73 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_interp_fillna.cpython-310.pyc
ADDED
Binary file (11.8 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_methods.cpython-310.pyc
ADDED
Binary file (52 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/test_setitem.cpython-310.pyc
ADDED
Binary file (3.76 kB)

env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/__pycache__/util.cpython-310.pyc
ADDED
Binary file (991 Bytes)

env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_array.py
ADDED
@@ -0,0 +1,190 @@
import numpy as np
import pytest

from pandas import (
    DataFrame,
    Series,
    date_range,
)
import pandas._testing as tm
from pandas.tests.copy_view.util import get_array

# -----------------------------------------------------------------------------
# Copy/view behaviour for accessing underlying array of Series/DataFrame


@pytest.mark.parametrize(
    "method",
    [lambda ser: ser.values, lambda ser: np.asarray(ser)],
    ids=["values", "asarray"],
)
def test_series_values(using_copy_on_write, method):
    ser = Series([1, 2, 3], name="name")
    ser_orig = ser.copy()

    arr = method(ser)

    if using_copy_on_write:
        # .values still gives a view but is read-only
        assert np.shares_memory(arr, get_array(ser, "name"))
        assert arr.flags.writeable is False

        # mutating series through arr therefore doesn't work
        with pytest.raises(ValueError, match="read-only"):
            arr[0] = 0
        tm.assert_series_equal(ser, ser_orig)

        # mutating the series itself still works
        ser.iloc[0] = 0
        assert ser.values[0] == 0
    else:
        assert arr.flags.writeable is True
        arr[0] = 0
        assert ser.iloc[0] == 0


@pytest.mark.parametrize(
    "method",
    [lambda df: df.values, lambda df: np.asarray(df)],
    ids=["values", "asarray"],
)
def test_dataframe_values(using_copy_on_write, using_array_manager, method):
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
    df_orig = df.copy()

    arr = method(df)

    if using_copy_on_write:
        # .values still gives a view but is read-only
        assert np.shares_memory(arr, get_array(df, "a"))
        assert arr.flags.writeable is False

        # mutating series through arr therefore doesn't work
        with pytest.raises(ValueError, match="read-only"):
            arr[0, 0] = 0
        tm.assert_frame_equal(df, df_orig)

        # mutating the series itself still works
        df.iloc[0, 0] = 0
        assert df.values[0, 0] == 0
    else:
        assert arr.flags.writeable is True
        arr[0, 0] = 0
        if not using_array_manager:
            assert df.iloc[0, 0] == 0
        else:
            tm.assert_frame_equal(df, df_orig)


def test_series_to_numpy(using_copy_on_write):
    ser = Series([1, 2, 3], name="name")
    ser_orig = ser.copy()

    # default: copy=False, no dtype or NAs
    arr = ser.to_numpy()
    if using_copy_on_write:
        # to_numpy still gives a view but is read-only
        assert np.shares_memory(arr, get_array(ser, "name"))
        assert arr.flags.writeable is False

        # mutating series through arr therefore doesn't work
        with pytest.raises(ValueError, match="read-only"):
            arr[0] = 0
        tm.assert_series_equal(ser, ser_orig)

        # mutating the series itself still works
        ser.iloc[0] = 0
        assert ser.values[0] == 0
    else:
        assert arr.flags.writeable is True
        arr[0] = 0
        assert ser.iloc[0] == 0

    # specify copy=False gives a writeable array
    ser = Series([1, 2, 3], name="name")
    arr = ser.to_numpy(copy=True)
    assert not np.shares_memory(arr, get_array(ser, "name"))
    assert arr.flags.writeable is True

    # specifying a dtype that already causes a copy also gives a writeable array
    ser = Series([1, 2, 3], name="name")
    arr = ser.to_numpy(dtype="float64")
    assert not np.shares_memory(arr, get_array(ser, "name"))
    assert arr.flags.writeable is True


@pytest.mark.parametrize("order", ["F", "C"])
def test_ravel_read_only(using_copy_on_write, order):
    ser = Series([1, 2, 3])
    with tm.assert_produces_warning(FutureWarning, match="is deprecated"):
        arr = ser.ravel(order=order)
    if using_copy_on_write:
        assert arr.flags.writeable is False
    assert np.shares_memory(get_array(ser), arr)


def test_series_array_ea_dtypes(using_copy_on_write):
    ser = Series([1, 2, 3], dtype="Int64")
    arr = np.asarray(ser, dtype="int64")
    assert np.shares_memory(arr, get_array(ser))
    if using_copy_on_write:
        assert arr.flags.writeable is False
    else:
        assert arr.flags.writeable is True

    arr = np.asarray(ser)
    assert np.shares_memory(arr, get_array(ser))
    if using_copy_on_write:
        assert arr.flags.writeable is False
    else:
        assert arr.flags.writeable is True


def test_dataframe_array_ea_dtypes(using_copy_on_write):
    df = DataFrame({"a": [1, 2, 3]}, dtype="Int64")
    arr = np.asarray(df, dtype="int64")
    assert np.shares_memory(arr, get_array(df, "a"))
    if using_copy_on_write:
        assert arr.flags.writeable is False
    else:
        assert arr.flags.writeable is True

    arr = np.asarray(df)
    assert np.shares_memory(arr, get_array(df, "a"))
    if using_copy_on_write:
        assert arr.flags.writeable is False
    else:
        assert arr.flags.writeable is True


def test_dataframe_array_string_dtype(using_copy_on_write, using_array_manager):
    df = DataFrame({"a": ["a", "b"]}, dtype="string")
    arr = np.asarray(df)
    if not using_array_manager:
        assert np.shares_memory(arr, get_array(df, "a"))
    if using_copy_on_write:
        assert arr.flags.writeable is False
    else:
        assert arr.flags.writeable is True


def test_dataframe_multiple_numpy_dtypes():
    df = DataFrame({"a": [1, 2, 3], "b": 1.5})
    arr = np.asarray(df)
    assert not np.shares_memory(arr, get_array(df, "a"))
    assert arr.flags.writeable is True


def test_values_is_ea(using_copy_on_write):
    df = DataFrame({"a": date_range("2012-01-01", periods=3)})
    arr = np.asarray(df)
    if using_copy_on_write:
        assert arr.flags.writeable is False
    else:
        assert arr.flags.writeable is True


def test_empty_dataframe():
    df = DataFrame()
    arr = np.asarray(df)
    assert arr.flags.writeable is True

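The copy_view tests above assert that, with Copy-on-Write enabled, the arrays handed out by `.values`, `np.asarray`, and `.to_numpy()` are still views but are marked read-only, so they cannot mutate the Series behind its back. A minimal sketch of that behaviour outside the test suite (assumes pandas 2.x where the Copy-on-Write option is available):

```python
import numpy as np
import pandas as pd

pd.set_option("mode.copy_on_write", True)

ser = pd.Series([1, 2, 3])
arr = np.asarray(ser)                 # still a view on the Series' data ...
assert arr.flags.writeable is False   # ... but read-only under Copy-on-Write

try:
    arr[0] = 0                        # mutating through the view is rejected
except ValueError as err:
    print(err)                        # "assignment destination is read-only"
```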
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_astype.py
ADDED
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pickle
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
import pytest
|
5 |
+
|
6 |
+
from pandas.compat.pyarrow import pa_version_under12p0
|
7 |
+
import pandas.util._test_decorators as td
|
8 |
+
|
9 |
+
import pandas as pd
|
10 |
+
from pandas import (
|
11 |
+
DataFrame,
|
12 |
+
Series,
|
13 |
+
Timestamp,
|
14 |
+
date_range,
|
15 |
+
)
|
16 |
+
import pandas._testing as tm
|
17 |
+
from pandas.tests.copy_view.util import get_array
|
18 |
+
|
19 |
+
|
20 |
+
def test_astype_single_dtype(using_copy_on_write):
|
21 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": 1.5})
|
22 |
+
df_orig = df.copy()
|
23 |
+
df2 = df.astype("float64")
|
24 |
+
|
25 |
+
if using_copy_on_write:
|
26 |
+
assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
|
27 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
28 |
+
else:
|
29 |
+
assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
|
30 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
31 |
+
|
32 |
+
# mutating df2 triggers a copy-on-write for that column/block
|
33 |
+
df2.iloc[0, 2] = 5.5
|
34 |
+
if using_copy_on_write:
|
35 |
+
assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
|
36 |
+
tm.assert_frame_equal(df, df_orig)
|
37 |
+
|
38 |
+
# mutating parent also doesn't update result
|
39 |
+
df2 = df.astype("float64")
|
40 |
+
df.iloc[0, 2] = 5.5
|
41 |
+
tm.assert_frame_equal(df2, df_orig.astype("float64"))
|
42 |
+
|
43 |
+
|
44 |
+
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
|
45 |
+
@pytest.mark.parametrize("new_dtype", ["int64", "Int64", "int64[pyarrow]"])
|
46 |
+
def test_astype_avoids_copy(using_copy_on_write, dtype, new_dtype):
|
47 |
+
if new_dtype == "int64[pyarrow]":
|
48 |
+
pytest.importorskip("pyarrow")
|
49 |
+
df = DataFrame({"a": [1, 2, 3]}, dtype=dtype)
|
50 |
+
df_orig = df.copy()
|
51 |
+
df2 = df.astype(new_dtype)
|
52 |
+
|
53 |
+
if using_copy_on_write:
|
54 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
55 |
+
else:
|
56 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
57 |
+
|
58 |
+
# mutating df2 triggers a copy-on-write for that column/block
|
59 |
+
df2.iloc[0, 0] = 10
|
60 |
+
if using_copy_on_write:
|
61 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
62 |
+
tm.assert_frame_equal(df, df_orig)
|
63 |
+
|
64 |
+
# mutating parent also doesn't update result
|
65 |
+
df2 = df.astype(new_dtype)
|
66 |
+
df.iloc[0, 0] = 100
|
67 |
+
tm.assert_frame_equal(df2, df_orig.astype(new_dtype))
|
68 |
+
|
69 |
+
|
70 |
+
@pytest.mark.parametrize("dtype", ["float64", "int32", "Int32", "int32[pyarrow]"])
|
71 |
+
def test_astype_different_target_dtype(using_copy_on_write, dtype):
|
72 |
+
if dtype == "int32[pyarrow]":
|
73 |
+
pytest.importorskip("pyarrow")
|
74 |
+
df = DataFrame({"a": [1, 2, 3]})
|
75 |
+
df_orig = df.copy()
|
76 |
+
df2 = df.astype(dtype)
|
77 |
+
|
78 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
79 |
+
if using_copy_on_write:
|
80 |
+
assert df2._mgr._has_no_reference(0)
|
81 |
+
|
82 |
+
df2.iloc[0, 0] = 5
|
83 |
+
tm.assert_frame_equal(df, df_orig)
|
84 |
+
|
85 |
+
# mutating parent also doesn't update result
|
86 |
+
df2 = df.astype(dtype)
|
87 |
+
df.iloc[0, 0] = 100
|
88 |
+
tm.assert_frame_equal(df2, df_orig.astype(dtype))
|
89 |
+
|
90 |
+
|
91 |
+
@td.skip_array_manager_invalid_test
|
92 |
+
def test_astype_numpy_to_ea():
|
93 |
+
ser = Series([1, 2, 3])
|
94 |
+
with pd.option_context("mode.copy_on_write", True):
|
95 |
+
result = ser.astype("Int64")
|
96 |
+
+    assert np.shares_memory(get_array(ser), get_array(result))
+
+
+@pytest.mark.parametrize(
+    "dtype, new_dtype", [("object", "string"), ("string", "object")]
+)
+def test_astype_string_and_object(using_copy_on_write, dtype, new_dtype):
+    df = DataFrame({"a": ["a", "b", "c"]}, dtype=dtype)
+    df_orig = df.copy()
+    df2 = df.astype(new_dtype)
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+    else:
+        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+
+    df2.iloc[0, 0] = "x"
+    tm.assert_frame_equal(df, df_orig)
+
+
+@pytest.mark.parametrize(
+    "dtype, new_dtype", [("object", "string"), ("string", "object")]
+)
+def test_astype_string_and_object_update_original(
+    using_copy_on_write, dtype, new_dtype
+):
+    df = DataFrame({"a": ["a", "b", "c"]}, dtype=dtype)
+    df2 = df.astype(new_dtype)
+    df_orig = df2.copy()
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+    else:
+        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+
+    df.iloc[0, 0] = "x"
+    tm.assert_frame_equal(df2, df_orig)
+
+
+def test_astype_string_copy_on_pickle_roundrip():
+    # https://github.com/pandas-dev/pandas/issues/54654
+    # ensure_string_array may alter array inplace
+    base = Series(np.array([(1, 2), None, 1], dtype="object"))
+    base_copy = pickle.loads(pickle.dumps(base))
+    base_copy.astype(str)
+    tm.assert_series_equal(base, base_copy)
+
+
+def test_astype_dict_dtypes(using_copy_on_write):
+    df = DataFrame(
+        {"a": [1, 2, 3], "b": [4, 5, 6], "c": Series([1.5, 1.5, 1.5], dtype="float64")}
+    )
+    df_orig = df.copy()
+    df2 = df.astype({"a": "float64", "c": "float64"})
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+        assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+    else:
+        assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+        assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+
+    # mutating df2 triggers a copy-on-write for that column/block
+    df2.iloc[0, 2] = 5.5
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+
+    df2.iloc[0, 1] = 10
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+    tm.assert_frame_equal(df, df_orig)
+
+
+def test_astype_different_datetime_resos(using_copy_on_write):
+    df = DataFrame({"a": date_range("2019-12-31", periods=2, freq="D")})
+    result = df.astype("datetime64[ms]")
+
+    assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))
+    if using_copy_on_write:
+        assert result._mgr._has_no_reference(0)
+
+
+def test_astype_different_timezones(using_copy_on_write):
+    df = DataFrame(
+        {"a": date_range("2019-12-31", periods=5, freq="D", tz="US/Pacific")}
+    )
+    result = df.astype("datetime64[ns, Europe/Berlin]")
+    if using_copy_on_write:
+        assert not result._mgr._has_no_reference(0)
+        assert np.shares_memory(get_array(df, "a"), get_array(result, "a"))
+
+
+def test_astype_different_timezones_different_reso(using_copy_on_write):
+    df = DataFrame(
+        {"a": date_range("2019-12-31", periods=5, freq="D", tz="US/Pacific")}
+    )
+    result = df.astype("datetime64[ms, Europe/Berlin]")
+    if using_copy_on_write:
+        assert result._mgr._has_no_reference(0)
+        assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))
+
+
+def test_astype_arrow_timestamp(using_copy_on_write):
+    pytest.importorskip("pyarrow")
+    df = DataFrame(
+        {
+            "a": [
+                Timestamp("2020-01-01 01:01:01.000001"),
+                Timestamp("2020-01-01 01:01:01.000001"),
+            ]
+        },
+        dtype="M8[ns]",
+    )
+    result = df.astype("timestamp[ns][pyarrow]")
+    if using_copy_on_write:
+        assert not result._mgr._has_no_reference(0)
+        if pa_version_under12p0:
+            assert not np.shares_memory(
+                get_array(df, "a"), get_array(result, "a")._pa_array
+            )
+        else:
+            assert np.shares_memory(
+                get_array(df, "a"), get_array(result, "a")._pa_array
+            )
+
+
+def test_convert_dtypes_infer_objects(using_copy_on_write):
+    ser = Series(["a", "b", "c"])
+    ser_orig = ser.copy()
+    result = ser.convert_dtypes(
+        convert_integer=False,
+        convert_boolean=False,
+        convert_floating=False,
+        convert_string=False,
+    )
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(ser), get_array(result))
+    else:
+        assert not np.shares_memory(get_array(ser), get_array(result))
+
+    result.iloc[0] = "x"
+    tm.assert_series_equal(ser, ser_orig)
+
+
+def test_convert_dtypes(using_copy_on_write):
+    df = DataFrame({"a": ["a", "b"], "b": [1, 2], "c": [1.5, 2.5], "d": [True, False]})
+    df_orig = df.copy()
+    df2 = df.convert_dtypes()
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+        assert np.shares_memory(get_array(df2, "d"), get_array(df, "d"))
+        assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+        assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+    else:
+        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+        assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
+        assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
+        assert not np.shares_memory(get_array(df2, "d"), get_array(df, "d"))
+
+    df2.iloc[0, 0] = "x"
+    tm.assert_frame_equal(df, df_orig)
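
Aside (standalone illustration, not part of the vendored test file above): the astype/convert_dtypes tests all exercise the same copy-on-write contract -- a dtype-preserving cast returns a lazy view that shares memory with its parent and only copies the block once the result is written to. A minimal sketch of that check, assuming pandas 2.x with the copy_on_write option enabled:

import numpy as np
import pandas as pd

pd.set_option("mode.copy_on_write", True)

df = pd.DataFrame({"a": [1.5, 2.5, 3.5]})
result = df.astype("float64")  # same dtype -> no eager copy under CoW

# the cast result still shares its buffer with the original
assert np.shares_memory(df["a"].to_numpy(), result["a"].to_numpy())

result.iloc[0, 0] = 10.0  # the first write triggers the deferred copy
assert not np.shares_memory(df["a"].to_numpy(), result["a"].to_numpy())
assert df.iloc[0, 0] == 1.5  # the original frame is untouched
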
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_chained_assignment_deprecation.py
ADDED
@@ -0,0 +1,174 @@
+import numpy as np
+import pytest
+
+from pandas.compat import PY311
+from pandas.errors import (
+    ChainedAssignmentError,
+    SettingWithCopyWarning,
+)
+
+from pandas import (
+    DataFrame,
+    option_context,
+)
+import pandas._testing as tm
+
+
+def test_methods_iloc_warn(using_copy_on_write):
+    if not using_copy_on_write:
+        df = DataFrame({"a": [1, 2, 3], "b": 1})
+        with tm.assert_cow_warning(match="A value"):
+            df.iloc[:, 0].replace(1, 5, inplace=True)
+
+        with tm.assert_cow_warning(match="A value"):
+            df.iloc[:, 0].fillna(1, inplace=True)
+
+        with tm.assert_cow_warning(match="A value"):
+            df.iloc[:, 0].interpolate(inplace=True)
+
+        with tm.assert_cow_warning(match="A value"):
+            df.iloc[:, 0].ffill(inplace=True)
+
+        with tm.assert_cow_warning(match="A value"):
+            df.iloc[:, 0].bfill(inplace=True)
+
+
+@pytest.mark.parametrize(
+    "func, args",
+    [
+        ("replace", (4, 5)),
+        ("fillna", (1,)),
+        ("interpolate", ()),
+        ("bfill", ()),
+        ("ffill", ()),
+    ],
+)
+def test_methods_iloc_getitem_item_cache(
+    func, args, using_copy_on_write, warn_copy_on_write
+):
+    # ensure we don't incorrectly raise chained assignment warning because
+    # of the item cache / iloc not setting the item cache
+    df_orig = DataFrame({"a": [1, 2, 3], "b": 1})
+
+    df = df_orig.copy()
+    ser = df.iloc[:, 0]
+    getattr(ser, func)(*args, inplace=True)
+
+    # parent that holds item_cache is dead, so don't increase ref count
+    df = df_orig.copy()
+    ser = df.copy()["a"]
+    getattr(ser, func)(*args, inplace=True)
+
+    df = df_orig.copy()
+    df["a"]  # populate the item_cache
+    ser = df.iloc[:, 0]  # iloc creates a new object
+    getattr(ser, func)(*args, inplace=True)
+
+    df = df_orig.copy()
+    df["a"]  # populate the item_cache
+    ser = df["a"]
+    getattr(ser, func)(*args, inplace=True)
+
+    df = df_orig.copy()
+    df["a"]  # populate the item_cache
+    # TODO(CoW-warn) because of the usage of *args, this doesn't warn on Py3.11+
+    if using_copy_on_write:
+        with tm.raises_chained_assignment_error(not PY311):
+            getattr(df["a"], func)(*args, inplace=True)
+    else:
+        with tm.assert_cow_warning(not PY311, match="A value"):
+            getattr(df["a"], func)(*args, inplace=True)
+
+    df = df_orig.copy()
+    ser = df["a"]  # populate the item_cache and keep ref
+    if using_copy_on_write:
+        with tm.raises_chained_assignment_error(not PY311):
+            getattr(df["a"], func)(*args, inplace=True)
+    else:
+        # ideally also warns on the default mode, but the ser' _cacher
+        # messes up the refcount + even in warning mode this doesn't trigger
+        # the warning of Py3.1+ (see above)
+        with tm.assert_cow_warning(warn_copy_on_write and not PY311, match="A value"):
+            getattr(df["a"], func)(*args, inplace=True)
+
+
+def test_methods_iloc_getitem_item_cache_fillna(
+    using_copy_on_write, warn_copy_on_write
+):
+    # ensure we don't incorrectly raise chained assignment warning because
+    # of the item cache / iloc not setting the item cache
+    df_orig = DataFrame({"a": [1, 2, 3], "b": 1})
+
+    df = df_orig.copy()
+    ser = df.iloc[:, 0]
+    ser.fillna(1, inplace=True)
+
+    # parent that holds item_cache is dead, so don't increase ref count
+    df = df_orig.copy()
+    ser = df.copy()["a"]
+    ser.fillna(1, inplace=True)
+
+    df = df_orig.copy()
+    df["a"]  # populate the item_cache
+    ser = df.iloc[:, 0]  # iloc creates a new object
+    ser.fillna(1, inplace=True)
+
+    df = df_orig.copy()
+    df["a"]  # populate the item_cache
+    ser = df["a"]
+    ser.fillna(1, inplace=True)
+
+    df = df_orig.copy()
+    df["a"]  # populate the item_cache
+    if using_copy_on_write:
+        with tm.raises_chained_assignment_error():
+            df["a"].fillna(1, inplace=True)
+    else:
+        with tm.assert_cow_warning(match="A value"):
+            df["a"].fillna(1, inplace=True)
+
+    df = df_orig.copy()
+    ser = df["a"]  # populate the item_cache and keep ref
+    if using_copy_on_write:
+        with tm.raises_chained_assignment_error():
+            df["a"].fillna(1, inplace=True)
+    else:
+        # TODO(CoW-warn) ideally also warns on the default mode, but the ser' _cacher
+        # messes up the refcount
+        with tm.assert_cow_warning(warn_copy_on_write, match="A value"):
+            df["a"].fillna(1, inplace=True)
+
+
+# TODO(CoW-warn) expand the cases
+@pytest.mark.parametrize(
+    "indexer", [0, [0, 1], slice(0, 2), np.array([True, False, True])]
+)
+def test_series_setitem(indexer, using_copy_on_write, warn_copy_on_write):
+    # ensure we only get a single warning for those typical cases of chained
+    # assignment
+    df = DataFrame({"a": [1, 2, 3], "b": 1})
+
+    # using custom check instead of tm.assert_produces_warning because that doesn't
+    # fail if multiple warnings are raised
+    with pytest.warns() as record:
+        df["a"][indexer] = 0
+    assert len(record) == 1
+    if using_copy_on_write:
+        assert record[0].category == ChainedAssignmentError
+    else:
+        assert record[0].category == FutureWarning
+        assert "ChainedAssignmentError" in record[0].message.args[0]
+
+
+@pytest.mark.filterwarnings("ignore::pandas.errors.SettingWithCopyWarning")
+@pytest.mark.parametrize(
+    "indexer", ["a", ["a", "b"], slice(0, 2), np.array([True, False, True])]
+)
+def test_frame_setitem(indexer, using_copy_on_write):
+    df = DataFrame({"a": [1, 2, 3, 4, 5], "b": 1})
+
+    extra_warnings = () if using_copy_on_write else (SettingWithCopyWarning,)
+
+    with option_context("chained_assignment", "warn"):
+        with tm.raises_chained_assignment_error(extra_warnings=extra_warnings):
+            df[0:3][indexer] = 10
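
Aside (standalone illustration, not part of the vendored test file above): these tests assert that, with copy-on-write enabled, a chained assignment such as df["a"][0] = 0 can never reach the parent frame, so pandas emits a ChainedAssignmentError warning and the parent stays unchanged. A minimal sketch of that behaviour, assuming a pandas 2.x version that ships pandas.errors.ChainedAssignmentError (as the tests above import it):

import warnings

import pandas as pd
from pandas.errors import ChainedAssignmentError

pd.set_option("mode.copy_on_write", True)

df = pd.DataFrame({"a": [1, 2, 3], "b": 1})

with warnings.catch_warnings(record=True) as record:
    warnings.simplefilter("always")
    # the temporary object returned by df["a"] is thrown away, so the write is lost
    df["a"][0] = 0

assert df.loc[0, "a"] == 1  # parent frame is unchanged under CoW
assert any(issubclass(w.category, ChainedAssignmentError) for w in record)
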
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_clip.py
ADDED
@@ -0,0 +1,101 @@
+import numpy as np
+
+from pandas import (
+    DataFrame,
+    option_context,
+)
+import pandas._testing as tm
+from pandas.tests.copy_view.util import get_array
+
+
+def test_clip_inplace_reference(using_copy_on_write, warn_copy_on_write):
+    df = DataFrame({"a": [1.5, 2, 3]})
+    df_copy = df.copy()
+    arr_a = get_array(df, "a")
+    view = df[:]
+    if warn_copy_on_write:
+        with tm.assert_cow_warning():
+            df.clip(lower=2, inplace=True)
+    else:
+        df.clip(lower=2, inplace=True)
+
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(df, "a"), arr_a)
+        assert df._mgr._has_no_reference(0)
+        assert view._mgr._has_no_reference(0)
+        tm.assert_frame_equal(df_copy, view)
+    else:
+        assert np.shares_memory(get_array(df, "a"), arr_a)
+
+
+def test_clip_inplace_reference_no_op(using_copy_on_write):
+    df = DataFrame({"a": [1.5, 2, 3]})
+    df_copy = df.copy()
+    arr_a = get_array(df, "a")
+    view = df[:]
+    df.clip(lower=0, inplace=True)
+
+    assert np.shares_memory(get_array(df, "a"), arr_a)
+
+    if using_copy_on_write:
+        assert not df._mgr._has_no_reference(0)
+        assert not view._mgr._has_no_reference(0)
+        tm.assert_frame_equal(df_copy, view)
+
+
+def test_clip_inplace(using_copy_on_write):
+    df = DataFrame({"a": [1.5, 2, 3]})
+    arr_a = get_array(df, "a")
+    df.clip(lower=2, inplace=True)
+
+    assert np.shares_memory(get_array(df, "a"), arr_a)
+
+    if using_copy_on_write:
+        assert df._mgr._has_no_reference(0)
+
+
+def test_clip(using_copy_on_write):
+    df = DataFrame({"a": [1.5, 2, 3]})
+    df_orig = df.copy()
+    df2 = df.clip(lower=2)
+
+    assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+
+    if using_copy_on_write:
+        assert df._mgr._has_no_reference(0)
+    tm.assert_frame_equal(df_orig, df)
+
+
+def test_clip_no_op(using_copy_on_write):
+    df = DataFrame({"a": [1.5, 2, 3]})
+    df2 = df.clip(lower=0)
+
+    if using_copy_on_write:
+        assert not df._mgr._has_no_reference(0)
+        assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+    else:
+        assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
+
+
+def test_clip_chained_inplace(using_copy_on_write):
+    df = DataFrame({"a": [1, 4, 2], "b": 1})
+    df_orig = df.copy()
+    if using_copy_on_write:
+        with tm.raises_chained_assignment_error():
+            df["a"].clip(1, 2, inplace=True)
+        tm.assert_frame_equal(df, df_orig)
+
+        with tm.raises_chained_assignment_error():
+            df[["a"]].clip(1, 2, inplace=True)
+        tm.assert_frame_equal(df, df_orig)
+    else:
+        with tm.assert_produces_warning(FutureWarning, match="inplace method"):
+            df["a"].clip(1, 2, inplace=True)
+
+        with tm.assert_produces_warning(None):
+            with option_context("mode.chained_assignment", None):
+                df[["a"]].clip(1, 2, inplace=True)
+
+        with tm.assert_produces_warning(None):
+            with option_context("mode.chained_assignment", None):
+                df[df["a"] > 1].clip(1, 2, inplace=True)
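
Aside (standalone illustration, not part of the vendored test file above): test_clip_inplace_reference checks that an inplace method on a frame which still has a live view must detach from the shared block instead of mutating it. A compact sketch of that scenario, assuming pandas 2.x with the copy_on_write option enabled:

import numpy as np
import pandas as pd

pd.set_option("mode.copy_on_write", True)

df = pd.DataFrame({"a": [1.5, 2.0, 3.0]})
view = df[:]                    # lazily shares the same block as df
before = df["a"].to_numpy()

df.clip(lower=2, inplace=True)  # df must copy because `view` still references the block

assert not np.shares_memory(df["a"].to_numpy(), before)
assert view["a"].tolist() == [1.5, 2.0, 3.0]  # the view keeps the original values
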
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_constructors.py
ADDED
@@ -0,0 +1,382 @@
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    DatetimeIndex,
+    Index,
+    Period,
+    PeriodIndex,
+    Series,
+    Timedelta,
+    TimedeltaIndex,
+    Timestamp,
+)
+import pandas._testing as tm
+from pandas.tests.copy_view.util import get_array
+
+# -----------------------------------------------------------------------------
+# Copy/view behaviour for Series / DataFrame constructors
+
+
+@pytest.mark.parametrize("dtype", [None, "int64"])
+def test_series_from_series(dtype, using_copy_on_write, warn_copy_on_write):
+    # Case: constructing a Series from another Series object follows CoW rules:
+    # a new object is returned and thus mutations are not propagated
+    ser = Series([1, 2, 3], name="name")
+
+    # default is copy=False -> new Series is a shallow copy / view of original
+    result = Series(ser, dtype=dtype)
+
+    # the shallow copy still shares memory
+    assert np.shares_memory(get_array(ser), get_array(result))
+
+    if using_copy_on_write:
+        assert result._mgr.blocks[0].refs.has_reference()
+
+    if using_copy_on_write:
+        # mutating new series copy doesn't mutate original
+        result.iloc[0] = 0
+        assert ser.iloc[0] == 1
+        # mutating triggered a copy-on-write -> no longer shares memory
+        assert not np.shares_memory(get_array(ser), get_array(result))
+    else:
+        # mutating shallow copy does mutate original
+        with tm.assert_cow_warning(warn_copy_on_write):
+            result.iloc[0] = 0
+        assert ser.iloc[0] == 0
+        # and still shares memory
+        assert np.shares_memory(get_array(ser), get_array(result))
+
+    # the same when modifying the parent
+    result = Series(ser, dtype=dtype)
+
+    if using_copy_on_write:
+        # mutating original doesn't mutate new series
+        ser.iloc[0] = 0
+        assert result.iloc[0] == 1
+    else:
+        # mutating original does mutate shallow copy
+        with tm.assert_cow_warning(warn_copy_on_write):
+            ser.iloc[0] = 0
+        assert result.iloc[0] == 0
+
+
+def test_series_from_series_with_reindex(using_copy_on_write, warn_copy_on_write):
+    # Case: constructing a Series from another Series with specifying an index
+    # that potentially requires a reindex of the values
+    ser = Series([1, 2, 3], name="name")
+
+    # passing an index that doesn't actually require a reindex of the values
+    # -> without CoW we get an actual mutating view
+    for index in [
+        ser.index,
+        ser.index.copy(),
+        list(ser.index),
+        ser.index.rename("idx"),
+    ]:
+        result = Series(ser, index=index)
+        assert np.shares_memory(ser.values, result.values)
+        with tm.assert_cow_warning(warn_copy_on_write):
+            result.iloc[0] = 0
+        if using_copy_on_write:
+            assert ser.iloc[0] == 1
+        else:
+            assert ser.iloc[0] == 0
+
+    # ensure that if an actual reindex is needed, we don't have any refs
+    # (mutating the result wouldn't trigger CoW)
+    result = Series(ser, index=[0, 1, 2, 3])
+    assert not np.shares_memory(ser.values, result.values)
+    if using_copy_on_write:
+        assert not result._mgr.blocks[0].refs.has_reference()
+
+
+@pytest.mark.parametrize("fastpath", [False, True])
+@pytest.mark.parametrize("dtype", [None, "int64"])
+@pytest.mark.parametrize("idx", [None, pd.RangeIndex(start=0, stop=3, step=1)])
+@pytest.mark.parametrize(
+    "arr", [np.array([1, 2, 3], dtype="int64"), pd.array([1, 2, 3], dtype="Int64")]
+)
+def test_series_from_array(using_copy_on_write, idx, dtype, fastpath, arr):
+    if idx is None or dtype is not None:
+        fastpath = False
+    msg = "The 'fastpath' keyword in pd.Series is deprecated"
+    with tm.assert_produces_warning(DeprecationWarning, match=msg):
+        ser = Series(arr, dtype=dtype, index=idx, fastpath=fastpath)
+    ser_orig = ser.copy()
+    data = getattr(arr, "_data", arr)
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(ser), data)
+    else:
+        assert np.shares_memory(get_array(ser), data)
+
+    arr[0] = 100
+    if using_copy_on_write:
+        tm.assert_series_equal(ser, ser_orig)
+    else:
+        expected = Series([100, 2, 3], dtype=dtype if dtype is not None else arr.dtype)
+        tm.assert_series_equal(ser, expected)
+
+
+@pytest.mark.parametrize("copy", [True, False, None])
+def test_series_from_array_different_dtype(using_copy_on_write, copy):
+    arr = np.array([1, 2, 3], dtype="int64")
+    ser = Series(arr, dtype="int32", copy=copy)
+    assert not np.shares_memory(get_array(ser), arr)
+
+
+@pytest.mark.parametrize(
+    "idx",
+    [
+        Index([1, 2]),
+        DatetimeIndex([Timestamp("2019-12-31"), Timestamp("2020-12-31")]),
+        PeriodIndex([Period("2019-12-31"), Period("2020-12-31")]),
+        TimedeltaIndex([Timedelta("1 days"), Timedelta("2 days")]),
+    ],
+)
+def test_series_from_index(using_copy_on_write, idx):
+    ser = Series(idx)
+    expected = idx.copy(deep=True)
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(ser), get_array(idx))
+        assert not ser._mgr._has_no_reference(0)
+    else:
+        assert not np.shares_memory(get_array(ser), get_array(idx))
+    ser.iloc[0] = ser.iloc[1]
+    tm.assert_index_equal(idx, expected)
+
+
+def test_series_from_index_different_dtypes(using_copy_on_write):
+    idx = Index([1, 2, 3], dtype="int64")
+    ser = Series(idx, dtype="int32")
+    assert not np.shares_memory(get_array(ser), get_array(idx))
+    if using_copy_on_write:
+        assert ser._mgr._has_no_reference(0)
+
+
+@pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")
+@pytest.mark.parametrize("fastpath", [False, True])
+@pytest.mark.parametrize("dtype", [None, "int64"])
+@pytest.mark.parametrize("idx", [None, pd.RangeIndex(start=0, stop=3, step=1)])
+def test_series_from_block_manager(using_copy_on_write, idx, dtype, fastpath):
+    ser = Series([1, 2, 3], dtype="int64")
+    ser_orig = ser.copy()
+    msg = "The 'fastpath' keyword in pd.Series is deprecated"
+    with tm.assert_produces_warning(DeprecationWarning, match=msg):
+        ser2 = Series(ser._mgr, dtype=dtype, fastpath=fastpath, index=idx)
+    assert np.shares_memory(get_array(ser), get_array(ser2))
+    if using_copy_on_write:
+        assert not ser2._mgr._has_no_reference(0)
+
+    ser2.iloc[0] = 100
+    if using_copy_on_write:
+        tm.assert_series_equal(ser, ser_orig)
+    else:
+        expected = Series([100, 2, 3])
+        tm.assert_series_equal(ser, expected)
+
+
+def test_series_from_block_manager_different_dtype(using_copy_on_write):
+    ser = Series([1, 2, 3], dtype="int64")
+    msg = "Passing a SingleBlockManager to Series"
+    with tm.assert_produces_warning(DeprecationWarning, match=msg):
+        ser2 = Series(ser._mgr, dtype="int32")
+    assert not np.shares_memory(get_array(ser), get_array(ser2))
+    if using_copy_on_write:
+        assert ser2._mgr._has_no_reference(0)
+
+
+@pytest.mark.parametrize("use_mgr", [True, False])
+@pytest.mark.parametrize("columns", [None, ["a"]])
+def test_dataframe_constructor_mgr_or_df(
+    using_copy_on_write, warn_copy_on_write, columns, use_mgr
+):
+    df = DataFrame({"a": [1, 2, 3]})
+    df_orig = df.copy()
+
+    if use_mgr:
+        data = df._mgr
+        warn = DeprecationWarning
+    else:
+        data = df
+        warn = None
+    msg = "Passing a BlockManager to DataFrame"
+    with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
+        new_df = DataFrame(data)
+
+    assert np.shares_memory(get_array(df, "a"), get_array(new_df, "a"))
+    with tm.assert_cow_warning(warn_copy_on_write and not use_mgr):
+        new_df.iloc[0] = 100
+
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(df, "a"), get_array(new_df, "a"))
+        tm.assert_frame_equal(df, df_orig)
+    else:
+        assert np.shares_memory(get_array(df, "a"), get_array(new_df, "a"))
+        tm.assert_frame_equal(df, new_df)
+
+
+@pytest.mark.parametrize("dtype", [None, "int64", "Int64"])
+@pytest.mark.parametrize("index", [None, [0, 1, 2]])
+@pytest.mark.parametrize("columns", [None, ["a", "b"], ["a", "b", "c"]])
+def test_dataframe_from_dict_of_series(
+    request, using_copy_on_write, warn_copy_on_write, columns, index, dtype
+):
+    # Case: constructing a DataFrame from Series objects with copy=False
+    # has to do a lazy following CoW rules
+    # (the default for DataFrame(dict) is still to copy to ensure consolidation)
+    s1 = Series([1, 2, 3])
+    s2 = Series([4, 5, 6])
+    s1_orig = s1.copy()
+    expected = DataFrame(
+        {"a": [1, 2, 3], "b": [4, 5, 6]}, index=index, columns=columns, dtype=dtype
+    )
+
+    result = DataFrame(
+        {"a": s1, "b": s2}, index=index, columns=columns, dtype=dtype, copy=False
+    )
+
+    # the shallow copy still shares memory
+    assert np.shares_memory(get_array(result, "a"), get_array(s1))
+
+    # mutating the new dataframe doesn't mutate original
+    with tm.assert_cow_warning(warn_copy_on_write):
+        result.iloc[0, 0] = 10
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(s1))
+        tm.assert_series_equal(s1, s1_orig)
+    else:
+        assert s1.iloc[0] == 10
+
+    # the same when modifying the parent series
+    s1 = Series([1, 2, 3])
+    s2 = Series([4, 5, 6])
+    result = DataFrame(
+        {"a": s1, "b": s2}, index=index, columns=columns, dtype=dtype, copy=False
+    )
+    with tm.assert_cow_warning(warn_copy_on_write):
+        s1.iloc[0] = 10
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(s1))
+        tm.assert_frame_equal(result, expected)
+    else:
+        assert result.iloc[0, 0] == 10
+
+
+@pytest.mark.parametrize("dtype", [None, "int64"])
+def test_dataframe_from_dict_of_series_with_reindex(dtype):
+    # Case: constructing a DataFrame from Series objects with copy=False
+    # and passing an index that requires an actual (no-view) reindex -> need
+    # to ensure the result doesn't have refs set up to unnecessarily trigger
+    # a copy on write
+    s1 = Series([1, 2, 3])
+    s2 = Series([4, 5, 6])
+    df = DataFrame({"a": s1, "b": s2}, index=[1, 2, 3], dtype=dtype, copy=False)
+
+    # df should own its memory, so mutating shouldn't trigger a copy
+    arr_before = get_array(df, "a")
+    assert not np.shares_memory(arr_before, get_array(s1))
+    df.iloc[0, 0] = 100
+    arr_after = get_array(df, "a")
+    assert np.shares_memory(arr_before, arr_after)
+
+
+@pytest.mark.parametrize("cons", [Series, Index])
+@pytest.mark.parametrize(
+    "data, dtype", [([1, 2], None), ([1, 2], "int64"), (["a", "b"], None)]
+)
+def test_dataframe_from_series_or_index(
+    using_copy_on_write, warn_copy_on_write, data, dtype, cons
+):
+    obj = cons(data, dtype=dtype)
+    obj_orig = obj.copy()
+    df = DataFrame(obj, dtype=dtype)
+    assert np.shares_memory(get_array(obj), get_array(df, 0))
+    if using_copy_on_write:
+        assert not df._mgr._has_no_reference(0)
+
+    with tm.assert_cow_warning(warn_copy_on_write):
+        df.iloc[0, 0] = data[-1]
+    if using_copy_on_write:
+        tm.assert_equal(obj, obj_orig)
+
+
+@pytest.mark.parametrize("cons", [Series, Index])
+def test_dataframe_from_series_or_index_different_dtype(using_copy_on_write, cons):
+    obj = cons([1, 2], dtype="int64")
+    df = DataFrame(obj, dtype="int32")
+    assert not np.shares_memory(get_array(obj), get_array(df, 0))
+    if using_copy_on_write:
+        assert df._mgr._has_no_reference(0)
+
+
+def test_dataframe_from_series_infer_datetime(using_copy_on_write):
+    ser = Series([Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype=object)
+    with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
+        df = DataFrame(ser)
+    assert not np.shares_memory(get_array(ser), get_array(df, 0))
+    if using_copy_on_write:
+        assert df._mgr._has_no_reference(0)
+
+
+@pytest.mark.parametrize("index", [None, [0, 1, 2]])
+def test_dataframe_from_dict_of_series_with_dtype(index):
+    # Variant of above, but now passing a dtype that causes a copy
+    # -> need to ensure the result doesn't have refs set up to unnecessarily
+    # trigger a copy on write
+    s1 = Series([1.0, 2.0, 3.0])
+    s2 = Series([4, 5, 6])
+    df = DataFrame({"a": s1, "b": s2}, index=index, dtype="int64", copy=False)
+
+    # df should own its memory, so mutating shouldn't trigger a copy
+    arr_before = get_array(df, "a")
+    assert not np.shares_memory(arr_before, get_array(s1))
+    df.iloc[0, 0] = 100
+    arr_after = get_array(df, "a")
+    assert np.shares_memory(arr_before, arr_after)
+
+
+@pytest.mark.parametrize("copy", [False, None, True])
+def test_frame_from_numpy_array(using_copy_on_write, copy, using_array_manager):
+    arr = np.array([[1, 2], [3, 4]])
+    df = DataFrame(arr, copy=copy)
+
+    if (
+        using_copy_on_write
+        and copy is not False
+        or copy is True
+        or (using_array_manager and copy is None)
+    ):
+        assert not np.shares_memory(get_array(df, 0), arr)
+    else:
+        assert np.shares_memory(get_array(df, 0), arr)
+
+
+def test_dataframe_from_records_with_dataframe(using_copy_on_write, warn_copy_on_write):
+    df = DataFrame({"a": [1, 2, 3]})
+    df_orig = df.copy()
+    with tm.assert_produces_warning(FutureWarning):
+        df2 = DataFrame.from_records(df)
+    if using_copy_on_write:
+        assert not df._mgr._has_no_reference(0)
+    assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
+    with tm.assert_cow_warning(warn_copy_on_write):
+        df2.iloc[0, 0] = 100
+    if using_copy_on_write:
+        tm.assert_frame_equal(df, df_orig)
+    else:
+        tm.assert_frame_equal(df, df2)
+
+
+def test_frame_from_dict_of_index(using_copy_on_write):
+    idx = Index([1, 2, 3])
+    expected = idx.copy(deep=True)
+    df = DataFrame({"a": idx}, copy=False)
+    assert np.shares_memory(get_array(df, "a"), idx._values)
+    if using_copy_on_write:
+        assert not df._mgr._has_no_reference(0)
+
+    df.iloc[0, 0] = 100
+    tm.assert_index_equal(idx, expected)
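
Aside (standalone illustration, not part of the vendored test file above): the constructor tests reduce to the rule that Series(ser) and DataFrame({...}, copy=False) create shallow copies that share their buffers, with copy-on-write isolating any later mutation on either side. A minimal sketch, assuming pandas 2.x with the copy_on_write option enabled:

import numpy as np
import pandas as pd

pd.set_option("mode.copy_on_write", True)

ser = pd.Series([1, 2, 3], name="x")
result = pd.Series(ser)  # shallow: shares the underlying buffer

assert np.shares_memory(ser.to_numpy(), result.to_numpy())

result.iloc[0] = 0  # the write triggers the deferred copy
assert ser.iloc[0] == 1
assert not np.shares_memory(ser.to_numpy(), result.to_numpy())
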
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_core_functionalities.py
ADDED
@@ -0,0 +1,106 @@
+import numpy as np
+import pytest
+
+from pandas import DataFrame
+import pandas._testing as tm
+from pandas.tests.copy_view.util import get_array
+
+
+def test_assigning_to_same_variable_removes_references(using_copy_on_write):
+    df = DataFrame({"a": [1, 2, 3]})
+    df = df.reset_index()
+    if using_copy_on_write:
+        assert df._mgr._has_no_reference(1)
+    arr = get_array(df, "a")
+    df.iloc[0, 1] = 100  # Write into a
+
+    assert np.shares_memory(arr, get_array(df, "a"))
+
+
+def test_setitem_dont_track_unnecessary_references(using_copy_on_write):
+    df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})
+
+    df["b"] = 100
+    arr = get_array(df, "a")
+    # We split the block in setitem, if we are not careful the new blocks will
+    # reference each other triggering a copy
+    df.iloc[0, 0] = 100
+    assert np.shares_memory(arr, get_array(df, "a"))
+
+
+def test_setitem_with_view_copies(using_copy_on_write, warn_copy_on_write):
+    df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})
+    view = df[:]
+    expected = df.copy()
+
+    df["b"] = 100
+    arr = get_array(df, "a")
+    with tm.assert_cow_warning(warn_copy_on_write):
+        df.iloc[0, 0] = 100  # Check that we correctly track reference
+    if using_copy_on_write:
+        assert not np.shares_memory(arr, get_array(df, "a"))
+        tm.assert_frame_equal(view, expected)
+
+
+def test_setitem_with_view_invalidated_does_not_copy(
+    using_copy_on_write, warn_copy_on_write, request
+):
+    df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})
+    view = df[:]
+
+    df["b"] = 100
+    arr = get_array(df, "a")
+    view = None  # noqa: F841
+    # TODO(CoW-warn) false positive? -> block gets split because of `df["b"] = 100`
+    # which introduces additional refs, even when those of `view` go out of scopes
+    with tm.assert_cow_warning(warn_copy_on_write):
+        df.iloc[0, 0] = 100
+    if using_copy_on_write:
+        # Setitem split the block. Since the old block shared data with view
+        # all the new blocks are referencing view and each other. When view
+        # goes out of scope, they don't share data with any other block,
+        # so we should not trigger a copy
+        mark = pytest.mark.xfail(
+            reason="blk.delete does not track references correctly"
+        )
+        request.applymarker(mark)
+    assert np.shares_memory(arr, get_array(df, "a"))
+
+
+def test_out_of_scope(using_copy_on_write):
+    def func():
+        df = DataFrame({"a": [1, 2], "b": 1.5, "c": 1})
+        # create some subset
+        result = df[["a", "b"]]
+        return result
+
+    result = func()
+    if using_copy_on_write:
+        assert not result._mgr.blocks[0].refs.has_reference()
+        assert not result._mgr.blocks[1].refs.has_reference()
+
+
+def test_delete(using_copy_on_write):
+    df = DataFrame(
+        np.random.default_rng(2).standard_normal((4, 3)), columns=["a", "b", "c"]
+    )
+    del df["b"]
+    if using_copy_on_write:
+        assert not df._mgr.blocks[0].refs.has_reference()
+        assert not df._mgr.blocks[1].refs.has_reference()
+
+    df = df[["a"]]
+    if using_copy_on_write:
+        assert not df._mgr.blocks[0].refs.has_reference()
+
+
+def test_delete_reference(using_copy_on_write):
+    df = DataFrame(
+        np.random.default_rng(2).standard_normal((4, 3)), columns=["a", "b", "c"]
+    )
+    x = df[:]
+    del df["b"]
+    if using_copy_on_write:
+        assert df._mgr.blocks[0].refs.has_reference()
+        assert df._mgr.blocks[1].refs.has_reference()
+        assert x._mgr.blocks[0].refs.has_reference()
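
Aside (standalone illustration, not part of the vendored test file above): the reference-tracking tests verify that copy-on-write only copies when another pandas object still references the same block; once the only other owner is gone, writes happen in place. A small sketch of the reassignment case, assuming pandas 2.x with the copy_on_write option enabled (holding a raw NumPy array does not count as a reference):

import numpy as np
import pandas as pd

pd.set_option("mode.copy_on_write", True)

df = pd.DataFrame({"a": [1, 2, 3]})
df = df.reset_index()   # the old frame is dropped, so no other object references "a"
arr = df["a"].to_numpy()

df.iloc[0, 1] = 100     # write into "a"; nothing else references the block, so no copy

assert np.shares_memory(arr, df["a"].to_numpy())
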
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_functions.py
ADDED
@@ -0,0 +1,396 @@
+import numpy as np
+import pytest
+
+from pandas import (
+    DataFrame,
+    Index,
+    Series,
+    concat,
+    merge,
+)
+import pandas._testing as tm
+from pandas.tests.copy_view.util import get_array
+
+
+def test_concat_frames(using_copy_on_write):
+    df = DataFrame({"b": ["a"] * 3})
+    df2 = DataFrame({"a": ["a"] * 3})
+    df_orig = df.copy()
+    result = concat([df, df2], axis=1)
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "b"), get_array(df, "b"))
+        assert np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
+    else:
+        assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))
+        assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
+
+    result.iloc[0, 0] = "d"
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))
+        assert np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
+
+    result.iloc[0, 1] = "d"
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
+    tm.assert_frame_equal(df, df_orig)
+
+
+def test_concat_frames_updating_input(using_copy_on_write):
+    df = DataFrame({"b": ["a"] * 3})
+    df2 = DataFrame({"a": ["a"] * 3})
+    result = concat([df, df2], axis=1)
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "b"), get_array(df, "b"))
+        assert np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
+    else:
+        assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))
+        assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
+
+    expected = result.copy()
+    df.iloc[0, 0] = "d"
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))
+        assert np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
+
+    df2.iloc[0, 0] = "d"
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a"))
+    tm.assert_frame_equal(result, expected)
+
+
+def test_concat_series(using_copy_on_write):
+    ser = Series([1, 2], name="a")
+    ser2 = Series([3, 4], name="b")
+    ser_orig = ser.copy()
+    ser2_orig = ser2.copy()
+    result = concat([ser, ser2], axis=1)
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "a"), ser.values)
+        assert np.shares_memory(get_array(result, "b"), ser2.values)
+    else:
+        assert not np.shares_memory(get_array(result, "a"), ser.values)
+        assert not np.shares_memory(get_array(result, "b"), ser2.values)
+
+    result.iloc[0, 0] = 100
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), ser.values)
+        assert np.shares_memory(get_array(result, "b"), ser2.values)
+
+    result.iloc[0, 1] = 1000
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "b"), ser2.values)
+    tm.assert_series_equal(ser, ser_orig)
+    tm.assert_series_equal(ser2, ser2_orig)
+
+
+def test_concat_frames_chained(using_copy_on_write):
+    df1 = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
+    df2 = DataFrame({"c": [4, 5, 6]})
+    df3 = DataFrame({"d": [4, 5, 6]})
+    result = concat([concat([df1, df2], axis=1), df3], axis=1)
+    expected = result.copy()
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert np.shares_memory(get_array(result, "c"), get_array(df2, "c"))
+        assert np.shares_memory(get_array(result, "d"), get_array(df3, "d"))
+    else:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert not np.shares_memory(get_array(result, "c"), get_array(df2, "c"))
+        assert not np.shares_memory(get_array(result, "d"), get_array(df3, "d"))
+
+    df1.iloc[0, 0] = 100
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_concat_series_chained(using_copy_on_write):
+    ser1 = Series([1, 2, 3], name="a")
+    ser2 = Series([4, 5, 6], name="c")
+    ser3 = Series([4, 5, 6], name="d")
+    result = concat([concat([ser1, ser2], axis=1), ser3], axis=1)
+    expected = result.copy()
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "a"), get_array(ser1, "a"))
+        assert np.shares_memory(get_array(result, "c"), get_array(ser2, "c"))
+        assert np.shares_memory(get_array(result, "d"), get_array(ser3, "d"))
+    else:
+        assert not np.shares_memory(get_array(result, "a"), get_array(ser1, "a"))
+        assert not np.shares_memory(get_array(result, "c"), get_array(ser2, "c"))
+        assert not np.shares_memory(get_array(result, "d"), get_array(ser3, "d"))
+
+    ser1.iloc[0] = 100
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(ser1, "a"))
+
+    tm.assert_frame_equal(result, expected)
+
+
+def test_concat_series_updating_input(using_copy_on_write):
+    ser = Series([1, 2], name="a")
+    ser2 = Series([3, 4], name="b")
+    expected = DataFrame({"a": [1, 2], "b": [3, 4]})
+    result = concat([ser, ser2], axis=1)
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "a"), get_array(ser, "a"))
+        assert np.shares_memory(get_array(result, "b"), get_array(ser2, "b"))
+    else:
+        assert not np.shares_memory(get_array(result, "a"), get_array(ser, "a"))
+        assert not np.shares_memory(get_array(result, "b"), get_array(ser2, "b"))
+
+    ser.iloc[0] = 100
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(ser, "a"))
+        assert np.shares_memory(get_array(result, "b"), get_array(ser2, "b"))
+    tm.assert_frame_equal(result, expected)
+
+    ser2.iloc[0] = 1000
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "b"), get_array(ser2, "b"))
+    tm.assert_frame_equal(result, expected)
+
+
+def test_concat_mixed_series_frame(using_copy_on_write):
+    df = DataFrame({"a": [1, 2, 3], "c": 1})
+    ser = Series([4, 5, 6], name="d")
+    result = concat([df, ser], axis=1)
+    expected = result.copy()
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+        assert np.shares_memory(get_array(result, "c"), get_array(df, "c"))
+        assert np.shares_memory(get_array(result, "d"), get_array(ser, "d"))
+    else:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+        assert not np.shares_memory(get_array(result, "c"), get_array(df, "c"))
+        assert not np.shares_memory(get_array(result, "d"), get_array(ser, "d"))
+
+    ser.iloc[0] = 100
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "d"), get_array(ser, "d"))
+
+    df.iloc[0, 0] = 100
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
+    tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("copy", [True, None, False])
+def test_concat_copy_keyword(using_copy_on_write, copy):
+    df = DataFrame({"a": [1, 2]})
+    df2 = DataFrame({"b": [1.5, 2.5]})
+
+    result = concat([df, df2], axis=1, copy=copy)
+
+    if using_copy_on_write or copy is False:
+        assert np.shares_memory(get_array(df, "a"), get_array(result, "a"))
+        assert np.shares_memory(get_array(df2, "b"), get_array(result, "b"))
+    else:
+        assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))
+        assert not np.shares_memory(get_array(df2, "b"), get_array(result, "b"))
+
+
+@pytest.mark.parametrize(
+    "func",
+    [
+        lambda df1, df2, **kwargs: df1.merge(df2, **kwargs),
+        lambda df1, df2, **kwargs: merge(df1, df2, **kwargs),
+    ],
+)
+def test_merge_on_key(using_copy_on_write, func):
+    df1 = DataFrame({"key": ["a", "b", "c"], "a": [1, 2, 3]})
+    df2 = DataFrame({"key": ["a", "b", "c"], "b": [4, 5, 6]})
+    df1_orig = df1.copy()
+    df2_orig = df2.copy()
+
+    result = func(df1, df2, on="key")
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+        assert np.shares_memory(get_array(result, "key"), get_array(df1, "key"))
+        assert not np.shares_memory(get_array(result, "key"), get_array(df2, "key"))
+    else:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+
+    result.iloc[0, 1] = 0
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+
+    result.iloc[0, 2] = 0
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+    tm.assert_frame_equal(df1, df1_orig)
+    tm.assert_frame_equal(df2, df2_orig)
+
+
+def test_merge_on_index(using_copy_on_write):
+    df1 = DataFrame({"a": [1, 2, 3]})
+    df2 = DataFrame({"b": [4, 5, 6]})
+    df1_orig = df1.copy()
+    df2_orig = df2.copy()
+
+    result = merge(df1, df2, left_index=True, right_index=True)
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+    else:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+
+    result.iloc[0, 0] = 0
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+
+    result.iloc[0, 1] = 0
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+    tm.assert_frame_equal(df1, df1_orig)
+    tm.assert_frame_equal(df2, df2_orig)
+
+
+@pytest.mark.parametrize(
+    "func, how",
+    [
+        (lambda df1, df2, **kwargs: merge(df2, df1, on="key", **kwargs), "right"),
+        (lambda df1, df2, **kwargs: merge(df1, df2, on="key", **kwargs), "left"),
+    ],
+)
+def test_merge_on_key_enlarging_one(using_copy_on_write, func, how):
+    df1 = DataFrame({"key": ["a", "b", "c"], "a": [1, 2, 3]})
+    df2 = DataFrame({"key": ["a", "b"], "b": [4, 5]})
+    df1_orig = df1.copy()
+    df2_orig = df2.copy()
+
+    result = func(df1, df2, how=how)
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+        assert df2._mgr._has_no_reference(1)
+        assert df2._mgr._has_no_reference(0)
+        assert np.shares_memory(get_array(result, "key"), get_array(df1, "key")) is (
+            how == "left"
+        )
+        assert not np.shares_memory(get_array(result, "key"), get_array(df2, "key"))
+    else:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+
+    if how == "left":
+        result.iloc[0, 1] = 0
+    else:
+        result.iloc[0, 2] = 0
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+    tm.assert_frame_equal(df1, df1_orig)
+    tm.assert_frame_equal(df2, df2_orig)
+
+
+@pytest.mark.parametrize("copy", [True, None, False])
+def test_merge_copy_keyword(using_copy_on_write, copy):
+    df = DataFrame({"a": [1, 2]})
+    df2 = DataFrame({"b": [3, 4.5]})
+
+    result = df.merge(df2, copy=copy, left_index=True, right_index=True)
+
+    if using_copy_on_write or copy is False:
+        assert np.shares_memory(get_array(df, "a"), get_array(result, "a"))
+        assert np.shares_memory(get_array(df2, "b"), get_array(result, "b"))
+    else:
+        assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))
+        assert not np.shares_memory(get_array(df2, "b"), get_array(result, "b"))
+
+
+def test_join_on_key(using_copy_on_write):
+    df_index = Index(["a", "b", "c"], name="key")
+
+    df1 = DataFrame({"a": [1, 2, 3]}, index=df_index.copy(deep=True))
+    df2 = DataFrame({"b": [4, 5, 6]}, index=df_index.copy(deep=True))
+
+    df1_orig = df1.copy()
+    df2_orig = df2.copy()
+
+    result = df1.join(df2, on="key")
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+        assert np.shares_memory(get_array(result.index), get_array(df1.index))
+        assert not np.shares_memory(get_array(result.index), get_array(df2.index))
+    else:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+
+    result.iloc[0, 0] = 0
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+
+    result.iloc[0, 1] = 0
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))
+
+    tm.assert_frame_equal(df1, df1_orig)
+    tm.assert_frame_equal(df2, df2_orig)
+
+
+def test_join_multiple_dataframes_on_key(using_copy_on_write):
+    df_index = Index(["a", "b", "c"], name="key")
+
+    df1 = DataFrame({"a": [1, 2, 3]}, index=df_index.copy(deep=True))
+    dfs_list = [
+        DataFrame({"b": [4, 5, 6]}, index=df_index.copy(deep=True)),
+        DataFrame({"c": [7, 8, 9]}, index=df_index.copy(deep=True)),
+    ]
+
+    df1_orig = df1.copy()
+    dfs_list_orig = [df.copy() for df in dfs_list]
+
+    result = df1.join(dfs_list)
+
+    if using_copy_on_write:
+        assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b"))
+        assert np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c"))
+        assert np.shares_memory(get_array(result.index), get_array(df1.index))
+        assert not np.shares_memory(
+            get_array(result.index), get_array(dfs_list[0].index)
+        )
+        assert not np.shares_memory(
+            get_array(result.index), get_array(dfs_list[1].index)
+        )
+    else:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert not np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b"))
+        assert not np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c"))
+
+    result.iloc[0, 0] = 0
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))
+        assert np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b"))
+        assert np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c"))
+
+    result.iloc[0, 1] = 0
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b"))
+        assert np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c"))
+
+    result.iloc[0, 2] = 0
+    if using_copy_on_write:
+        assert not np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c"))
+
+    tm.assert_frame_equal(df1, df1_orig)
+    for df, df_orig in zip(dfs_list, dfs_list_orig):
+        tm.assert_frame_equal(df, df_orig)
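
Aside (standalone illustration, not part of the vendored test file above): under copy-on-write, concat and merge no longer copy the input blocks eagerly; the result shares memory with its inputs until one side is written to. A small standalone check of the concat case, assuming pandas 2.x with the copy_on_write option enabled:

import numpy as np
import pandas as pd

pd.set_option("mode.copy_on_write", True)

df1 = pd.DataFrame({"a": [1, 2, 3]})
df2 = pd.DataFrame({"b": [4.0, 5.0, 6.0]})
result = pd.concat([df1, df2], axis=1)

# axis=1 concat keeps views of both inputs
assert np.shares_memory(result["a"].to_numpy(), df1["a"].to_numpy())
assert np.shares_memory(result["b"].to_numpy(), df2["b"].to_numpy())

result.iloc[0, 0] = 100          # only the written column's block is copied
assert not np.shares_memory(result["a"].to_numpy(), df1["a"].to_numpy())
assert np.shares_memory(result["b"].to_numpy(), df2["b"].to_numpy())
assert df1.iloc[0, 0] == 1       # the inputs stay untouched
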
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_indexing.py
ADDED
@@ -0,0 +1,1266 @@
import numpy as np
import pytest

from pandas.errors import SettingWithCopyWarning

from pandas.core.dtypes.common import is_float_dtype

import pandas as pd
from pandas import (
    DataFrame,
    Series,
)
import pandas._testing as tm
from pandas.tests.copy_view.util import get_array


@pytest.fixture(params=["numpy", "nullable"])
def backend(request):
    if request.param == "numpy":

        def make_dataframe(*args, **kwargs):
            return DataFrame(*args, **kwargs)

        def make_series(*args, **kwargs):
            return Series(*args, **kwargs)

    elif request.param == "nullable":

        def make_dataframe(*args, **kwargs):
            df = DataFrame(*args, **kwargs)
            df_nullable = df.convert_dtypes()
            # convert_dtypes will try to cast float to int if there is no loss in
            # precision -> undo that change
            for col in df.columns:
                if is_float_dtype(df[col].dtype) and not is_float_dtype(
                    df_nullable[col].dtype
                ):
                    df_nullable[col] = df_nullable[col].astype("Float64")
            # copy final result to ensure we start with a fully self-owning DataFrame
            return df_nullable.copy()

        def make_series(*args, **kwargs):
            ser = Series(*args, **kwargs)
            return ser.convert_dtypes().copy()

    return request.param, make_dataframe, make_series


# -----------------------------------------------------------------------------
# Indexing operations taking subset + modifying the subset/parent


def test_subset_column_selection(backend, using_copy_on_write):
    # Case: taking a subset of the columns of a DataFrame
    # + afterwards modifying the subset
    _, DataFrame, _ = backend
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()

    subset = df[["a", "c"]]

    if using_copy_on_write:
        # the subset shares memory ...
        assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
        # ... but uses CoW when being modified
        subset.iloc[0, 0] = 0
    else:
        assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
        # INFO this no longer raise warning since pandas 1.4
        # with pd.option_context("chained_assignment", "warn"):
        #     with tm.assert_produces_warning(SettingWithCopyWarning):
        subset.iloc[0, 0] = 0

    assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))

    expected = DataFrame({"a": [0, 2, 3], "c": [0.1, 0.2, 0.3]})
    tm.assert_frame_equal(subset, expected)
    tm.assert_frame_equal(df, df_orig)


def test_subset_column_selection_modify_parent(backend, using_copy_on_write):
    # Case: taking a subset of the columns of a DataFrame
    # + afterwards modifying the parent
    _, DataFrame, _ = backend
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})

    subset = df[["a", "c"]]

    if using_copy_on_write:
        # the subset shares memory ...
        assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
        # ... but parent uses CoW parent when it is modified
    df.iloc[0, 0] = 0

    assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))
    if using_copy_on_write:
        # different column/block still shares memory
        assert np.shares_memory(get_array(subset, "c"), get_array(df, "c"))

    expected = DataFrame({"a": [1, 2, 3], "c": [0.1, 0.2, 0.3]})
    tm.assert_frame_equal(subset, expected)


def test_subset_row_slice(backend, using_copy_on_write, warn_copy_on_write):
    # Case: taking a subset of the rows of a DataFrame using a slice
    # + afterwards modifying the subset
    _, DataFrame, _ = backend
    df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
    df_orig = df.copy()

    subset = df[1:3]
    subset._mgr._verify_integrity()

    assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))

    if using_copy_on_write:
        subset.iloc[0, 0] = 0
        assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))

    else:
        # INFO this no longer raise warning since pandas 1.4
        # with pd.option_context("chained_assignment", "warn"):
        #     with tm.assert_produces_warning(SettingWithCopyWarning):
        with tm.assert_cow_warning(warn_copy_on_write):
            subset.iloc[0, 0] = 0

    subset._mgr._verify_integrity()

    expected = DataFrame({"a": [0, 3], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3))
    tm.assert_frame_equal(subset, expected)
    if using_copy_on_write:
        # original parent dataframe is not modified (CoW)
        tm.assert_frame_equal(df, df_orig)
    else:
        # original parent dataframe is actually updated
        df_orig.iloc[1, 0] = 0
        tm.assert_frame_equal(df, df_orig)

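# ---------------------------------------------------------------------------
# Illustrative aside (not part of this test module): a minimal sketch of the
# behaviour exercised by the subset tests above, assuming pandas 2.x with
# Copy-on-Write enabled. Column selection returns a new object that initially
# shares its buffers with the parent; the first write copies only what is
# actually modified, so the parent stays untouched. The exact sharing is an
# implementation detail and may differ between pandas versions.
# ---------------------------------------------------------------------------
import numpy as np
import pandas as pd

pd.options.mode.copy_on_write = True  # assumption: pandas 2.x option
df_sketch = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
subset_sketch = df_sketch[["a"]]
print(np.shares_memory(subset_sketch["a"].values, df_sketch["a"].values))  # typically True
subset_sketch.iloc[0, 0] = 0                                               # triggers the copy
print(np.shares_memory(subset_sketch["a"].values, df_sketch["a"].values))  # False
print(df_sketch["a"].tolist())                                             # [1, 2, 3]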
140 |
+
@pytest.mark.parametrize(
|
141 |
+
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
|
142 |
+
)
|
143 |
+
def test_subset_column_slice(
|
144 |
+
backend, using_copy_on_write, warn_copy_on_write, using_array_manager, dtype
|
145 |
+
):
|
146 |
+
# Case: taking a subset of the columns of a DataFrame using a slice
|
147 |
+
# + afterwards modifying the subset
|
148 |
+
dtype_backend, DataFrame, _ = backend
|
149 |
+
single_block = (
|
150 |
+
dtype == "int64" and dtype_backend == "numpy"
|
151 |
+
) and not using_array_manager
|
152 |
+
df = DataFrame(
|
153 |
+
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
|
154 |
+
)
|
155 |
+
df_orig = df.copy()
|
156 |
+
|
157 |
+
subset = df.iloc[:, 1:]
|
158 |
+
subset._mgr._verify_integrity()
|
159 |
+
|
160 |
+
if using_copy_on_write:
|
161 |
+
assert np.shares_memory(get_array(subset, "b"), get_array(df, "b"))
|
162 |
+
|
163 |
+
subset.iloc[0, 0] = 0
|
164 |
+
assert not np.shares_memory(get_array(subset, "b"), get_array(df, "b"))
|
165 |
+
elif warn_copy_on_write:
|
166 |
+
with tm.assert_cow_warning(single_block):
|
167 |
+
subset.iloc[0, 0] = 0
|
168 |
+
else:
|
169 |
+
# we only get a warning in case of a single block
|
170 |
+
warn = SettingWithCopyWarning if single_block else None
|
171 |
+
with pd.option_context("chained_assignment", "warn"):
|
172 |
+
with tm.assert_produces_warning(warn):
|
173 |
+
subset.iloc[0, 0] = 0
|
174 |
+
|
175 |
+
expected = DataFrame({"b": [0, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)})
|
176 |
+
tm.assert_frame_equal(subset, expected)
|
177 |
+
# original parent dataframe is not modified (also not for BlockManager case,
|
178 |
+
# except for single block)
|
179 |
+
if not using_copy_on_write and (using_array_manager or single_block):
|
180 |
+
df_orig.iloc[0, 1] = 0
|
181 |
+
tm.assert_frame_equal(df, df_orig)
|
182 |
+
else:
|
183 |
+
tm.assert_frame_equal(df, df_orig)
|
184 |
+
|
185 |
+
|
186 |
+
@pytest.mark.parametrize(
|
187 |
+
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
|
188 |
+
)
|
189 |
+
@pytest.mark.parametrize(
|
190 |
+
"row_indexer",
|
191 |
+
[slice(1, 2), np.array([False, True, True]), np.array([1, 2])],
|
192 |
+
ids=["slice", "mask", "array"],
|
193 |
+
)
|
194 |
+
@pytest.mark.parametrize(
|
195 |
+
"column_indexer",
|
196 |
+
[slice("b", "c"), np.array([False, True, True]), ["b", "c"]],
|
197 |
+
ids=["slice", "mask", "array"],
|
198 |
+
)
|
199 |
+
def test_subset_loc_rows_columns(
|
200 |
+
backend,
|
201 |
+
dtype,
|
202 |
+
row_indexer,
|
203 |
+
column_indexer,
|
204 |
+
using_array_manager,
|
205 |
+
using_copy_on_write,
|
206 |
+
warn_copy_on_write,
|
207 |
+
):
|
208 |
+
# Case: taking a subset of the rows+columns of a DataFrame using .loc
|
209 |
+
# + afterwards modifying the subset
|
210 |
+
# Generic test for several combinations of row/column indexers, not all
|
211 |
+
# of those could actually return a view / need CoW (so this test is not
|
212 |
+
# checking memory sharing, only ensuring subsequent mutation doesn't
|
213 |
+
# affect the parent dataframe)
|
214 |
+
dtype_backend, DataFrame, _ = backend
|
215 |
+
df = DataFrame(
|
216 |
+
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
|
217 |
+
)
|
218 |
+
df_orig = df.copy()
|
219 |
+
|
220 |
+
subset = df.loc[row_indexer, column_indexer]
|
221 |
+
|
222 |
+
# a few corner cases _do_ actually modify the parent (with both row and column
|
223 |
+
# slice, and in case of ArrayManager or BlockManager with single block)
|
224 |
+
mutate_parent = (
|
225 |
+
isinstance(row_indexer, slice)
|
226 |
+
and isinstance(column_indexer, slice)
|
227 |
+
and (
|
228 |
+
using_array_manager
|
229 |
+
or (
|
230 |
+
dtype == "int64"
|
231 |
+
and dtype_backend == "numpy"
|
232 |
+
and not using_copy_on_write
|
233 |
+
)
|
234 |
+
)
|
235 |
+
)
|
236 |
+
|
237 |
+
# modifying the subset never modifies the parent
|
238 |
+
with tm.assert_cow_warning(warn_copy_on_write and mutate_parent):
|
239 |
+
subset.iloc[0, 0] = 0
|
240 |
+
|
241 |
+
expected = DataFrame(
|
242 |
+
{"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3)
|
243 |
+
)
|
244 |
+
tm.assert_frame_equal(subset, expected)
|
245 |
+
if mutate_parent:
|
246 |
+
df_orig.iloc[1, 1] = 0
|
247 |
+
tm.assert_frame_equal(df, df_orig)
|
248 |
+
|
249 |
+
|
250 |
+
@pytest.mark.parametrize(
|
251 |
+
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
|
252 |
+
)
|
253 |
+
@pytest.mark.parametrize(
|
254 |
+
"row_indexer",
|
255 |
+
[slice(1, 3), np.array([False, True, True]), np.array([1, 2])],
|
256 |
+
ids=["slice", "mask", "array"],
|
257 |
+
)
|
258 |
+
@pytest.mark.parametrize(
|
259 |
+
"column_indexer",
|
260 |
+
[slice(1, 3), np.array([False, True, True]), [1, 2]],
|
261 |
+
ids=["slice", "mask", "array"],
|
262 |
+
)
|
263 |
+
def test_subset_iloc_rows_columns(
|
264 |
+
backend,
|
265 |
+
dtype,
|
266 |
+
row_indexer,
|
267 |
+
column_indexer,
|
268 |
+
using_array_manager,
|
269 |
+
using_copy_on_write,
|
270 |
+
warn_copy_on_write,
|
271 |
+
):
|
272 |
+
# Case: taking a subset of the rows+columns of a DataFrame using .iloc
|
273 |
+
# + afterwards modifying the subset
|
274 |
+
# Generic test for several combinations of row/column indexers, not all
|
275 |
+
# of those could actually return a view / need CoW (so this test is not
|
276 |
+
# checking memory sharing, only ensuring subsequent mutation doesn't
|
277 |
+
# affect the parent dataframe)
|
278 |
+
dtype_backend, DataFrame, _ = backend
|
279 |
+
df = DataFrame(
|
280 |
+
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
|
281 |
+
)
|
282 |
+
df_orig = df.copy()
|
283 |
+
|
284 |
+
subset = df.iloc[row_indexer, column_indexer]
|
285 |
+
|
286 |
+
# a few corner cases _do_ actually modify the parent (with both row and column
|
287 |
+
# slice, and in case of ArrayManager or BlockManager with single block)
|
288 |
+
mutate_parent = (
|
289 |
+
isinstance(row_indexer, slice)
|
290 |
+
and isinstance(column_indexer, slice)
|
291 |
+
and (
|
292 |
+
using_array_manager
|
293 |
+
or (
|
294 |
+
dtype == "int64"
|
295 |
+
and dtype_backend == "numpy"
|
296 |
+
and not using_copy_on_write
|
297 |
+
)
|
298 |
+
)
|
299 |
+
)
|
300 |
+
|
301 |
+
# modifying the subset never modifies the parent
|
302 |
+
with tm.assert_cow_warning(warn_copy_on_write and mutate_parent):
|
303 |
+
subset.iloc[0, 0] = 0
|
304 |
+
|
305 |
+
expected = DataFrame(
|
306 |
+
{"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3)
|
307 |
+
)
|
308 |
+
tm.assert_frame_equal(subset, expected)
|
309 |
+
if mutate_parent:
|
310 |
+
df_orig.iloc[1, 1] = 0
|
311 |
+
tm.assert_frame_equal(df, df_orig)
|
312 |
+
|
313 |
+
|
314 |
+
@pytest.mark.parametrize(
|
315 |
+
"indexer",
|
316 |
+
[slice(0, 2), np.array([True, True, False]), np.array([0, 1])],
|
317 |
+
ids=["slice", "mask", "array"],
|
318 |
+
)
|
319 |
+
def test_subset_set_with_row_indexer(
|
320 |
+
backend, indexer_si, indexer, using_copy_on_write, warn_copy_on_write
|
321 |
+
):
|
322 |
+
# Case: setting values with a row indexer on a viewing subset
|
323 |
+
# subset[indexer] = value and subset.iloc[indexer] = value
|
324 |
+
_, DataFrame, _ = backend
|
325 |
+
df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]})
|
326 |
+
df_orig = df.copy()
|
327 |
+
subset = df[1:4]
|
328 |
+
|
329 |
+
if (
|
330 |
+
indexer_si is tm.setitem
|
331 |
+
and isinstance(indexer, np.ndarray)
|
332 |
+
and indexer.dtype == "int"
|
333 |
+
):
|
334 |
+
pytest.skip("setitem with labels selects on columns")
|
335 |
+
|
336 |
+
if using_copy_on_write:
|
337 |
+
indexer_si(subset)[indexer] = 0
|
338 |
+
elif warn_copy_on_write:
|
339 |
+
with tm.assert_cow_warning():
|
340 |
+
indexer_si(subset)[indexer] = 0
|
341 |
+
else:
|
342 |
+
# INFO iloc no longer raises warning since pandas 1.4
|
343 |
+
warn = SettingWithCopyWarning if indexer_si is tm.setitem else None
|
344 |
+
with pd.option_context("chained_assignment", "warn"):
|
345 |
+
with tm.assert_produces_warning(warn):
|
346 |
+
indexer_si(subset)[indexer] = 0
|
347 |
+
|
348 |
+
expected = DataFrame(
|
349 |
+
{"a": [0, 0, 4], "b": [0, 0, 7], "c": [0.0, 0.0, 0.4]}, index=range(1, 4)
|
350 |
+
)
|
351 |
+
tm.assert_frame_equal(subset, expected)
|
352 |
+
if using_copy_on_write:
|
353 |
+
# original parent dataframe is not modified (CoW)
|
354 |
+
tm.assert_frame_equal(df, df_orig)
|
355 |
+
else:
|
356 |
+
# original parent dataframe is actually updated
|
357 |
+
df_orig[1:3] = 0
|
358 |
+
tm.assert_frame_equal(df, df_orig)
|
359 |
+
|
360 |
+
|
361 |
+
def test_subset_set_with_mask(backend, using_copy_on_write, warn_copy_on_write):
|
362 |
+
# Case: setting values with a mask on a viewing subset: subset[mask] = value
|
363 |
+
_, DataFrame, _ = backend
|
364 |
+
df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]})
|
365 |
+
df_orig = df.copy()
|
366 |
+
subset = df[1:4]
|
367 |
+
|
368 |
+
mask = subset > 3
|
369 |
+
|
370 |
+
if using_copy_on_write:
|
371 |
+
subset[mask] = 0
|
372 |
+
elif warn_copy_on_write:
|
373 |
+
with tm.assert_cow_warning():
|
374 |
+
subset[mask] = 0
|
375 |
+
else:
|
376 |
+
with pd.option_context("chained_assignment", "warn"):
|
377 |
+
with tm.assert_produces_warning(SettingWithCopyWarning):
|
378 |
+
subset[mask] = 0
|
379 |
+
|
380 |
+
expected = DataFrame(
|
381 |
+
{"a": [2, 3, 0], "b": [0, 0, 0], "c": [0.20, 0.3, 0.4]}, index=range(1, 4)
|
382 |
+
)
|
383 |
+
tm.assert_frame_equal(subset, expected)
|
384 |
+
if using_copy_on_write:
|
385 |
+
# original parent dataframe is not modified (CoW)
|
386 |
+
tm.assert_frame_equal(df, df_orig)
|
387 |
+
else:
|
388 |
+
# original parent dataframe is actually updated
|
389 |
+
df_orig.loc[3, "a"] = 0
|
390 |
+
df_orig.loc[1:3, "b"] = 0
|
391 |
+
tm.assert_frame_equal(df, df_orig)
|
392 |
+
|
393 |
+
|
394 |
+
def test_subset_set_column(backend, using_copy_on_write, warn_copy_on_write):
|
395 |
+
# Case: setting a single column on a viewing subset -> subset[col] = value
|
396 |
+
dtype_backend, DataFrame, _ = backend
|
397 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
398 |
+
df_orig = df.copy()
|
399 |
+
subset = df[1:3]
|
400 |
+
|
401 |
+
if dtype_backend == "numpy":
|
402 |
+
arr = np.array([10, 11], dtype="int64")
|
403 |
+
else:
|
404 |
+
arr = pd.array([10, 11], dtype="Int64")
|
405 |
+
|
406 |
+
if using_copy_on_write or warn_copy_on_write:
|
407 |
+
subset["a"] = arr
|
408 |
+
else:
|
409 |
+
with pd.option_context("chained_assignment", "warn"):
|
410 |
+
with tm.assert_produces_warning(SettingWithCopyWarning):
|
411 |
+
subset["a"] = arr
|
412 |
+
|
413 |
+
subset._mgr._verify_integrity()
|
414 |
+
expected = DataFrame(
|
415 |
+
{"a": [10, 11], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3)
|
416 |
+
)
|
417 |
+
tm.assert_frame_equal(subset, expected)
|
418 |
+
tm.assert_frame_equal(df, df_orig)
|
419 |
+
|
420 |
+
|
421 |
+
@pytest.mark.parametrize(
|
422 |
+
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
|
423 |
+
)
|
424 |
+
def test_subset_set_column_with_loc(
|
425 |
+
backend, using_copy_on_write, warn_copy_on_write, using_array_manager, dtype
|
426 |
+
):
|
427 |
+
# Case: setting a single column with loc on a viewing subset
|
428 |
+
# -> subset.loc[:, col] = value
|
429 |
+
_, DataFrame, _ = backend
|
430 |
+
df = DataFrame(
|
431 |
+
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
|
432 |
+
)
|
433 |
+
df_orig = df.copy()
|
434 |
+
subset = df[1:3]
|
435 |
+
|
436 |
+
if using_copy_on_write:
|
437 |
+
subset.loc[:, "a"] = np.array([10, 11], dtype="int64")
|
438 |
+
elif warn_copy_on_write:
|
439 |
+
with tm.assert_cow_warning():
|
440 |
+
subset.loc[:, "a"] = np.array([10, 11], dtype="int64")
|
441 |
+
else:
|
442 |
+
with pd.option_context("chained_assignment", "warn"):
|
443 |
+
with tm.assert_produces_warning(
|
444 |
+
None,
|
445 |
+
raise_on_extra_warnings=not using_array_manager,
|
446 |
+
):
|
447 |
+
subset.loc[:, "a"] = np.array([10, 11], dtype="int64")
|
448 |
+
|
449 |
+
subset._mgr._verify_integrity()
|
450 |
+
expected = DataFrame(
|
451 |
+
{"a": [10, 11], "b": [5, 6], "c": np.array([8, 9], dtype=dtype)},
|
452 |
+
index=range(1, 3),
|
453 |
+
)
|
454 |
+
tm.assert_frame_equal(subset, expected)
|
455 |
+
if using_copy_on_write:
|
456 |
+
# original parent dataframe is not modified (CoW)
|
457 |
+
tm.assert_frame_equal(df, df_orig)
|
458 |
+
else:
|
459 |
+
# original parent dataframe is actually updated
|
460 |
+
df_orig.loc[1:3, "a"] = np.array([10, 11], dtype="int64")
|
461 |
+
tm.assert_frame_equal(df, df_orig)
|
462 |
+
|
463 |
+
|
464 |
+
def test_subset_set_column_with_loc2(
|
465 |
+
backend, using_copy_on_write, warn_copy_on_write, using_array_manager
|
466 |
+
):
|
467 |
+
# Case: setting a single column with loc on a viewing subset
|
468 |
+
# -> subset.loc[:, col] = value
|
469 |
+
# separate test for case of DataFrame of a single column -> takes a separate
|
470 |
+
# code path
|
471 |
+
_, DataFrame, _ = backend
|
472 |
+
df = DataFrame({"a": [1, 2, 3]})
|
473 |
+
df_orig = df.copy()
|
474 |
+
subset = df[1:3]
|
475 |
+
|
476 |
+
if using_copy_on_write:
|
477 |
+
subset.loc[:, "a"] = 0
|
478 |
+
elif warn_copy_on_write:
|
479 |
+
with tm.assert_cow_warning():
|
480 |
+
subset.loc[:, "a"] = 0
|
481 |
+
else:
|
482 |
+
with pd.option_context("chained_assignment", "warn"):
|
483 |
+
with tm.assert_produces_warning(
|
484 |
+
None,
|
485 |
+
raise_on_extra_warnings=not using_array_manager,
|
486 |
+
):
|
487 |
+
subset.loc[:, "a"] = 0
|
488 |
+
|
489 |
+
subset._mgr._verify_integrity()
|
490 |
+
expected = DataFrame({"a": [0, 0]}, index=range(1, 3))
|
491 |
+
tm.assert_frame_equal(subset, expected)
|
492 |
+
if using_copy_on_write:
|
493 |
+
# original parent dataframe is not modified (CoW)
|
494 |
+
tm.assert_frame_equal(df, df_orig)
|
495 |
+
else:
|
496 |
+
# original parent dataframe is actually updated
|
497 |
+
df_orig.loc[1:3, "a"] = 0
|
498 |
+
tm.assert_frame_equal(df, df_orig)
|
499 |
+
|
500 |
+
|
501 |
+
@pytest.mark.parametrize(
|
502 |
+
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
|
503 |
+
)
|
504 |
+
def test_subset_set_columns(backend, using_copy_on_write, warn_copy_on_write, dtype):
|
505 |
+
# Case: setting multiple columns on a viewing subset
|
506 |
+
# -> subset[[col1, col2]] = value
|
507 |
+
dtype_backend, DataFrame, _ = backend
|
508 |
+
df = DataFrame(
|
509 |
+
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
|
510 |
+
)
|
511 |
+
df_orig = df.copy()
|
512 |
+
subset = df[1:3]
|
513 |
+
|
514 |
+
if using_copy_on_write or warn_copy_on_write:
|
515 |
+
subset[["a", "c"]] = 0
|
516 |
+
else:
|
517 |
+
with pd.option_context("chained_assignment", "warn"):
|
518 |
+
with tm.assert_produces_warning(SettingWithCopyWarning):
|
519 |
+
subset[["a", "c"]] = 0
|
520 |
+
|
521 |
+
subset._mgr._verify_integrity()
|
522 |
+
if using_copy_on_write:
|
523 |
+
# first and third column should certainly have no references anymore
|
524 |
+
assert all(subset._mgr._has_no_reference(i) for i in [0, 2])
|
525 |
+
expected = DataFrame({"a": [0, 0], "b": [5, 6], "c": [0, 0]}, index=range(1, 3))
|
526 |
+
if dtype_backend == "nullable":
|
527 |
+
# there is not yet a global option, so overriding a column by setting a scalar
|
528 |
+
# defaults to numpy dtype even if original column was nullable
|
529 |
+
expected["a"] = expected["a"].astype("int64")
|
530 |
+
expected["c"] = expected["c"].astype("int64")
|
531 |
+
|
532 |
+
tm.assert_frame_equal(subset, expected)
|
533 |
+
tm.assert_frame_equal(df, df_orig)
|
534 |
+
|
535 |
+
|
536 |
+
@pytest.mark.parametrize(
|
537 |
+
"indexer",
|
538 |
+
[slice("a", "b"), np.array([True, True, False]), ["a", "b"]],
|
539 |
+
ids=["slice", "mask", "array"],
|
540 |
+
)
|
541 |
+
def test_subset_set_with_column_indexer(
|
542 |
+
backend, indexer, using_copy_on_write, warn_copy_on_write
|
543 |
+
):
|
544 |
+
# Case: setting multiple columns with a column indexer on a viewing subset
|
545 |
+
# -> subset.loc[:, [col1, col2]] = value
|
546 |
+
_, DataFrame, _ = backend
|
547 |
+
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})
|
548 |
+
df_orig = df.copy()
|
549 |
+
subset = df[1:3]
|
550 |
+
|
551 |
+
if using_copy_on_write:
|
552 |
+
subset.loc[:, indexer] = 0
|
553 |
+
elif warn_copy_on_write:
|
554 |
+
with tm.assert_cow_warning():
|
555 |
+
subset.loc[:, indexer] = 0
|
556 |
+
else:
|
557 |
+
with pd.option_context("chained_assignment", "warn"):
|
558 |
+
# As of 2.0, this setitem attempts (successfully) to set values
|
559 |
+
# inplace, so the assignment is not chained.
|
560 |
+
subset.loc[:, indexer] = 0
|
561 |
+
|
562 |
+
subset._mgr._verify_integrity()
|
563 |
+
expected = DataFrame({"a": [0, 0], "b": [0.0, 0.0], "c": [5, 6]}, index=range(1, 3))
|
564 |
+
tm.assert_frame_equal(subset, expected)
|
565 |
+
if using_copy_on_write:
|
566 |
+
tm.assert_frame_equal(df, df_orig)
|
567 |
+
else:
|
568 |
+
# pre-2.0, in the mixed case with BlockManager, only column "a"
|
569 |
+
# would be mutated in the parent frame. this changed with the
|
570 |
+
# enforcement of GH#45333
|
571 |
+
df_orig.loc[1:2, ["a", "b"]] = 0
|
572 |
+
tm.assert_frame_equal(df, df_orig)
|
573 |
+
|
574 |
+
|
575 |
+
@pytest.mark.parametrize(
|
576 |
+
"method",
|
577 |
+
[
|
578 |
+
lambda df: df[["a", "b"]][0:2],
|
579 |
+
lambda df: df[0:2][["a", "b"]],
|
580 |
+
lambda df: df[["a", "b"]].iloc[0:2],
|
581 |
+
lambda df: df[["a", "b"]].loc[0:1],
|
582 |
+
lambda df: df[0:2].iloc[:, 0:2],
|
583 |
+
lambda df: df[0:2].loc[:, "a":"b"], # type: ignore[misc]
|
584 |
+
],
|
585 |
+
ids=[
|
586 |
+
"row-getitem-slice",
|
587 |
+
"column-getitem",
|
588 |
+
"row-iloc-slice",
|
589 |
+
"row-loc-slice",
|
590 |
+
"column-iloc-slice",
|
591 |
+
"column-loc-slice",
|
592 |
+
],
|
593 |
+
)
|
594 |
+
@pytest.mark.parametrize(
|
595 |
+
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
|
596 |
+
)
|
597 |
+
def test_subset_chained_getitem(
|
598 |
+
request,
|
599 |
+
backend,
|
600 |
+
method,
|
601 |
+
dtype,
|
602 |
+
using_copy_on_write,
|
603 |
+
using_array_manager,
|
604 |
+
warn_copy_on_write,
|
605 |
+
):
|
606 |
+
# Case: creating a subset using multiple, chained getitem calls using views
|
607 |
+
# still needs to guarantee proper CoW behaviour
|
608 |
+
_, DataFrame, _ = backend
|
609 |
+
df = DataFrame(
|
610 |
+
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
|
611 |
+
)
|
612 |
+
df_orig = df.copy()
|
613 |
+
|
614 |
+
# when not using CoW, it depends on whether we have a single block or not
|
615 |
+
# and whether we are slicing the columns -> in that case we have a view
|
616 |
+
test_callspec = request.node.callspec.id
|
617 |
+
if not using_array_manager:
|
618 |
+
subset_is_view = test_callspec in (
|
619 |
+
"numpy-single-block-column-iloc-slice",
|
620 |
+
"numpy-single-block-column-loc-slice",
|
621 |
+
)
|
622 |
+
else:
|
623 |
+
# with ArrayManager, it doesn't matter whether we have
|
624 |
+
# single vs mixed block or numpy vs nullable dtypes
|
625 |
+
subset_is_view = test_callspec.endswith(
|
626 |
+
("column-iloc-slice", "column-loc-slice")
|
627 |
+
)
|
628 |
+
|
629 |
+
# modify subset -> don't modify parent
|
630 |
+
subset = method(df)
|
631 |
+
|
632 |
+
with tm.assert_cow_warning(warn_copy_on_write and subset_is_view):
|
633 |
+
subset.iloc[0, 0] = 0
|
634 |
+
if using_copy_on_write or (not subset_is_view):
|
635 |
+
tm.assert_frame_equal(df, df_orig)
|
636 |
+
else:
|
637 |
+
assert df.iloc[0, 0] == 0
|
638 |
+
|
639 |
+
# modify parent -> don't modify subset
|
640 |
+
subset = method(df)
|
641 |
+
with tm.assert_cow_warning(warn_copy_on_write and subset_is_view):
|
642 |
+
df.iloc[0, 0] = 0
|
643 |
+
expected = DataFrame({"a": [1, 2], "b": [4, 5]})
|
644 |
+
if using_copy_on_write or not subset_is_view:
|
645 |
+
tm.assert_frame_equal(subset, expected)
|
646 |
+
else:
|
647 |
+
assert subset.iloc[0, 0] == 0
|
648 |
+
|
649 |
+
|
650 |
+
@pytest.mark.parametrize(
|
651 |
+
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
|
652 |
+
)
|
653 |
+
def test_subset_chained_getitem_column(
|
654 |
+
backend, dtype, using_copy_on_write, warn_copy_on_write
|
655 |
+
):
|
656 |
+
# Case: creating a subset using multiple, chained getitem calls using views
|
657 |
+
# still needs to guarantee proper CoW behaviour
|
658 |
+
dtype_backend, DataFrame, Series = backend
|
659 |
+
df = DataFrame(
|
660 |
+
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
|
661 |
+
)
|
662 |
+
df_orig = df.copy()
|
663 |
+
|
664 |
+
# modify subset -> don't modify parent
|
665 |
+
subset = df[:]["a"][0:2]
|
666 |
+
df._clear_item_cache()
|
667 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
668 |
+
subset.iloc[0] = 0
|
669 |
+
if using_copy_on_write:
|
670 |
+
tm.assert_frame_equal(df, df_orig)
|
671 |
+
else:
|
672 |
+
assert df.iloc[0, 0] == 0
|
673 |
+
|
674 |
+
# modify parent -> don't modify subset
|
675 |
+
subset = df[:]["a"][0:2]
|
676 |
+
df._clear_item_cache()
|
677 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
678 |
+
df.iloc[0, 0] = 0
|
679 |
+
expected = Series([1, 2], name="a")
|
680 |
+
if using_copy_on_write:
|
681 |
+
tm.assert_series_equal(subset, expected)
|
682 |
+
else:
|
683 |
+
assert subset.iloc[0] == 0
|
684 |
+
|
685 |
+
|
686 |
+
@pytest.mark.parametrize(
|
687 |
+
"method",
|
688 |
+
[
|
689 |
+
lambda s: s["a":"c"]["a":"b"], # type: ignore[misc]
|
690 |
+
lambda s: s.iloc[0:3].iloc[0:2],
|
691 |
+
lambda s: s.loc["a":"c"].loc["a":"b"], # type: ignore[misc]
|
692 |
+
lambda s: s.loc["a":"c"] # type: ignore[misc]
|
693 |
+
.iloc[0:3]
|
694 |
+
.iloc[0:2]
|
695 |
+
.loc["a":"b"] # type: ignore[misc]
|
696 |
+
.iloc[0:1],
|
697 |
+
],
|
698 |
+
ids=["getitem", "iloc", "loc", "long-chain"],
|
699 |
+
)
|
700 |
+
def test_subset_chained_getitem_series(
|
701 |
+
backend, method, using_copy_on_write, warn_copy_on_write
|
702 |
+
):
|
703 |
+
# Case: creating a subset using multiple, chained getitem calls using views
|
704 |
+
# still needs to guarantee proper CoW behaviour
|
705 |
+
_, _, Series = backend
|
706 |
+
s = Series([1, 2, 3], index=["a", "b", "c"])
|
707 |
+
s_orig = s.copy()
|
708 |
+
|
709 |
+
# modify subset -> don't modify parent
|
710 |
+
subset = method(s)
|
711 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
712 |
+
subset.iloc[0] = 0
|
713 |
+
if using_copy_on_write:
|
714 |
+
tm.assert_series_equal(s, s_orig)
|
715 |
+
else:
|
716 |
+
assert s.iloc[0] == 0
|
717 |
+
|
718 |
+
# modify parent -> don't modify subset
|
719 |
+
subset = s.iloc[0:3].iloc[0:2]
|
720 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
721 |
+
s.iloc[0] = 0
|
722 |
+
expected = Series([1, 2], index=["a", "b"])
|
723 |
+
if using_copy_on_write:
|
724 |
+
tm.assert_series_equal(subset, expected)
|
725 |
+
else:
|
726 |
+
assert subset.iloc[0] == 0
|
727 |
+
|
728 |
+
|
729 |
+
def test_subset_chained_single_block_row(
|
730 |
+
using_copy_on_write, using_array_manager, warn_copy_on_write
|
731 |
+
):
|
732 |
+
# not parametrizing this for dtype backend, since this explicitly tests single block
|
733 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
|
734 |
+
df_orig = df.copy()
|
735 |
+
|
736 |
+
# modify subset -> don't modify parent
|
737 |
+
subset = df[:].iloc[0].iloc[0:2]
|
738 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
739 |
+
subset.iloc[0] = 0
|
740 |
+
if using_copy_on_write or using_array_manager:
|
741 |
+
tm.assert_frame_equal(df, df_orig)
|
742 |
+
else:
|
743 |
+
assert df.iloc[0, 0] == 0
|
744 |
+
|
745 |
+
# modify parent -> don't modify subset
|
746 |
+
subset = df[:].iloc[0].iloc[0:2]
|
747 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
748 |
+
df.iloc[0, 0] = 0
|
749 |
+
expected = Series([1, 4], index=["a", "b"], name=0)
|
750 |
+
if using_copy_on_write or using_array_manager:
|
751 |
+
tm.assert_series_equal(subset, expected)
|
752 |
+
else:
|
753 |
+
assert subset.iloc[0] == 0
|
754 |
+
|
755 |
+
|
756 |
+
@pytest.mark.parametrize(
|
757 |
+
"method",
|
758 |
+
[
|
759 |
+
lambda df: df[:],
|
760 |
+
lambda df: df.loc[:, :],
|
761 |
+
lambda df: df.loc[:],
|
762 |
+
lambda df: df.iloc[:, :],
|
763 |
+
lambda df: df.iloc[:],
|
764 |
+
],
|
765 |
+
ids=["getitem", "loc", "loc-rows", "iloc", "iloc-rows"],
|
766 |
+
)
|
767 |
+
def test_null_slice(backend, method, using_copy_on_write, warn_copy_on_write):
|
768 |
+
# Case: also all variants of indexing with a null slice (:) should return
|
769 |
+
# new objects to ensure we correctly use CoW for the results
|
770 |
+
dtype_backend, DataFrame, _ = backend
|
771 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
|
772 |
+
df_orig = df.copy()
|
773 |
+
|
774 |
+
df2 = method(df)
|
775 |
+
|
776 |
+
# we always return new objects (shallow copy), regardless of CoW or not
|
777 |
+
assert df2 is not df
|
778 |
+
|
779 |
+
# and those trigger CoW when mutated
|
780 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
781 |
+
df2.iloc[0, 0] = 0
|
782 |
+
if using_copy_on_write:
|
783 |
+
tm.assert_frame_equal(df, df_orig)
|
784 |
+
else:
|
785 |
+
assert df.iloc[0, 0] == 0
|
786 |
+
|
787 |
+
|
788 |
+
@pytest.mark.parametrize(
|
789 |
+
"method",
|
790 |
+
[
|
791 |
+
lambda s: s[:],
|
792 |
+
lambda s: s.loc[:],
|
793 |
+
lambda s: s.iloc[:],
|
794 |
+
],
|
795 |
+
ids=["getitem", "loc", "iloc"],
|
796 |
+
)
|
797 |
+
def test_null_slice_series(backend, method, using_copy_on_write, warn_copy_on_write):
|
798 |
+
_, _, Series = backend
|
799 |
+
s = Series([1, 2, 3], index=["a", "b", "c"])
|
800 |
+
s_orig = s.copy()
|
801 |
+
|
802 |
+
s2 = method(s)
|
803 |
+
|
804 |
+
# we always return new objects, regardless of CoW or not
|
805 |
+
assert s2 is not s
|
806 |
+
|
807 |
+
# and those trigger CoW when mutated
|
808 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
809 |
+
s2.iloc[0] = 0
|
810 |
+
if using_copy_on_write:
|
811 |
+
tm.assert_series_equal(s, s_orig)
|
812 |
+
else:
|
813 |
+
assert s.iloc[0] == 0
|
814 |
+
|
815 |
+
|
816 |
+
# TODO add more tests modifying the parent
|
817 |
+
|
818 |
+
|
819 |
+
# -----------------------------------------------------------------------------
|
820 |
+
# Series -- Indexing operations taking subset + modifying the subset/parent
|
821 |
+
|
822 |
+
|
823 |
+
def test_series_getitem_slice(backend, using_copy_on_write, warn_copy_on_write):
|
824 |
+
# Case: taking a slice of a Series + afterwards modifying the subset
|
825 |
+
_, _, Series = backend
|
826 |
+
s = Series([1, 2, 3], index=["a", "b", "c"])
|
827 |
+
s_orig = s.copy()
|
828 |
+
|
829 |
+
subset = s[:]
|
830 |
+
assert np.shares_memory(get_array(subset), get_array(s))
|
831 |
+
|
832 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
833 |
+
subset.iloc[0] = 0
|
834 |
+
|
835 |
+
if using_copy_on_write:
|
836 |
+
assert not np.shares_memory(get_array(subset), get_array(s))
|
837 |
+
|
838 |
+
expected = Series([0, 2, 3], index=["a", "b", "c"])
|
839 |
+
tm.assert_series_equal(subset, expected)
|
840 |
+
|
841 |
+
if using_copy_on_write:
|
842 |
+
# original parent series is not modified (CoW)
|
843 |
+
tm.assert_series_equal(s, s_orig)
|
844 |
+
else:
|
845 |
+
# original parent series is actually updated
|
846 |
+
assert s.iloc[0] == 0
|
847 |
+
|
848 |
+
|
849 |
+
def test_series_getitem_ellipsis(using_copy_on_write, warn_copy_on_write):
|
850 |
+
# Case: taking a view of a Series using Ellipsis + afterwards modifying the subset
|
851 |
+
s = Series([1, 2, 3])
|
852 |
+
s_orig = s.copy()
|
853 |
+
|
854 |
+
subset = s[...]
|
855 |
+
assert np.shares_memory(get_array(subset), get_array(s))
|
856 |
+
|
857 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
858 |
+
subset.iloc[0] = 0
|
859 |
+
|
860 |
+
if using_copy_on_write:
|
861 |
+
assert not np.shares_memory(get_array(subset), get_array(s))
|
862 |
+
|
863 |
+
expected = Series([0, 2, 3])
|
864 |
+
tm.assert_series_equal(subset, expected)
|
865 |
+
|
866 |
+
if using_copy_on_write:
|
867 |
+
# original parent series is not modified (CoW)
|
868 |
+
tm.assert_series_equal(s, s_orig)
|
869 |
+
else:
|
870 |
+
# original parent series is actually updated
|
871 |
+
assert s.iloc[0] == 0
|
872 |
+
|
873 |
+
|
874 |
+
@pytest.mark.parametrize(
|
875 |
+
"indexer",
|
876 |
+
[slice(0, 2), np.array([True, True, False]), np.array([0, 1])],
|
877 |
+
ids=["slice", "mask", "array"],
|
878 |
+
)
|
879 |
+
def test_series_subset_set_with_indexer(
|
880 |
+
backend, indexer_si, indexer, using_copy_on_write, warn_copy_on_write
|
881 |
+
):
|
882 |
+
# Case: setting values in a viewing Series with an indexer
|
883 |
+
_, _, Series = backend
|
884 |
+
s = Series([1, 2, 3], index=["a", "b", "c"])
|
885 |
+
s_orig = s.copy()
|
886 |
+
subset = s[:]
|
887 |
+
|
888 |
+
warn = None
|
889 |
+
msg = "Series.__setitem__ treating keys as positions is deprecated"
|
890 |
+
if (
|
891 |
+
indexer_si is tm.setitem
|
892 |
+
and isinstance(indexer, np.ndarray)
|
893 |
+
and indexer.dtype.kind == "i"
|
894 |
+
):
|
895 |
+
warn = FutureWarning
|
896 |
+
if warn_copy_on_write:
|
897 |
+
with tm.assert_cow_warning(raise_on_extra_warnings=warn is not None):
|
898 |
+
indexer_si(subset)[indexer] = 0
|
899 |
+
else:
|
900 |
+
with tm.assert_produces_warning(warn, match=msg):
|
901 |
+
indexer_si(subset)[indexer] = 0
|
902 |
+
expected = Series([0, 0, 3], index=["a", "b", "c"])
|
903 |
+
tm.assert_series_equal(subset, expected)
|
904 |
+
|
905 |
+
if using_copy_on_write:
|
906 |
+
tm.assert_series_equal(s, s_orig)
|
907 |
+
else:
|
908 |
+
tm.assert_series_equal(s, expected)
|
909 |
+
|
910 |
+
|
911 |
+
# -----------------------------------------------------------------------------
|
912 |
+
# del operator
|
913 |
+
|
914 |
+
|
915 |
+
def test_del_frame(backend, using_copy_on_write, warn_copy_on_write):
|
916 |
+
# Case: deleting a column with `del` on a viewing child dataframe should
|
917 |
+
# not modify parent + update the references
|
918 |
+
dtype_backend, DataFrame, _ = backend
|
919 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
920 |
+
df_orig = df.copy()
|
921 |
+
df2 = df[:]
|
922 |
+
|
923 |
+
assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
924 |
+
|
925 |
+
del df2["b"]
|
926 |
+
|
927 |
+
assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
928 |
+
tm.assert_frame_equal(df, df_orig)
|
929 |
+
tm.assert_frame_equal(df2, df_orig[["a", "c"]])
|
930 |
+
df2._mgr._verify_integrity()
|
931 |
+
|
932 |
+
with tm.assert_cow_warning(warn_copy_on_write and dtype_backend == "numpy"):
|
933 |
+
df.loc[0, "b"] = 200
|
934 |
+
assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
935 |
+
df_orig = df.copy()
|
936 |
+
|
937 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
938 |
+
df2.loc[0, "a"] = 100
|
939 |
+
if using_copy_on_write:
|
940 |
+
# modifying child after deleting a column still doesn't update parent
|
941 |
+
tm.assert_frame_equal(df, df_orig)
|
942 |
+
else:
|
943 |
+
assert df.loc[0, "a"] == 100
|
944 |
+
|
945 |
+
|
946 |
+
def test_del_series(backend):
|
947 |
+
_, _, Series = backend
|
948 |
+
s = Series([1, 2, 3], index=["a", "b", "c"])
|
949 |
+
s_orig = s.copy()
|
950 |
+
s2 = s[:]
|
951 |
+
|
952 |
+
assert np.shares_memory(get_array(s), get_array(s2))
|
953 |
+
|
954 |
+
del s2["a"]
|
955 |
+
|
956 |
+
assert not np.shares_memory(get_array(s), get_array(s2))
|
957 |
+
tm.assert_series_equal(s, s_orig)
|
958 |
+
tm.assert_series_equal(s2, s_orig[["b", "c"]])
|
959 |
+
|
960 |
+
# modifying s2 doesn't need copy on write (due to `del`, s2 is backed by new array)
|
961 |
+
values = s2.values
|
962 |
+
s2.loc["b"] = 100
|
963 |
+
assert values[0] == 100
|
964 |
+
|
965 |
+
|
966 |
+
# -----------------------------------------------------------------------------
|
967 |
+
# Accessing column as Series
|
968 |
+
|
969 |
+
|
970 |
+
def test_column_as_series(
|
971 |
+
backend, using_copy_on_write, warn_copy_on_write, using_array_manager
|
972 |
+
):
|
973 |
+
# Case: selecting a single column now also uses Copy-on-Write
|
974 |
+
dtype_backend, DataFrame, Series = backend
|
975 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
976 |
+
df_orig = df.copy()
|
977 |
+
|
978 |
+
s = df["a"]
|
979 |
+
|
980 |
+
assert np.shares_memory(get_array(s, "a"), get_array(df, "a"))
|
981 |
+
|
982 |
+
if using_copy_on_write or using_array_manager:
|
983 |
+
s[0] = 0
|
984 |
+
else:
|
985 |
+
if warn_copy_on_write:
|
986 |
+
with tm.assert_cow_warning():
|
987 |
+
s[0] = 0
|
988 |
+
else:
|
989 |
+
warn = SettingWithCopyWarning if dtype_backend == "numpy" else None
|
990 |
+
with pd.option_context("chained_assignment", "warn"):
|
991 |
+
with tm.assert_produces_warning(warn):
|
992 |
+
s[0] = 0
|
993 |
+
|
994 |
+
expected = Series([0, 2, 3], name="a")
|
995 |
+
tm.assert_series_equal(s, expected)
|
996 |
+
if using_copy_on_write:
|
997 |
+
# assert not np.shares_memory(s.values, get_array(df, "a"))
|
998 |
+
tm.assert_frame_equal(df, df_orig)
|
999 |
+
# ensure cached series on getitem is not the changed series
|
1000 |
+
tm.assert_series_equal(df["a"], df_orig["a"])
|
1001 |
+
else:
|
1002 |
+
df_orig.iloc[0, 0] = 0
|
1003 |
+
tm.assert_frame_equal(df, df_orig)
|
1004 |
+
|
1005 |
+
|
1006 |
+
def test_column_as_series_set_with_upcast(
|
1007 |
+
backend, using_copy_on_write, using_array_manager, warn_copy_on_write
|
1008 |
+
):
|
1009 |
+
# Case: selecting a single column now also uses Copy-on-Write -> when
|
1010 |
+
# setting a value causes an upcast, we don't need to update the parent
|
1011 |
+
# DataFrame through the cache mechanism
|
1012 |
+
dtype_backend, DataFrame, Series = backend
|
1013 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
1014 |
+
df_orig = df.copy()
|
1015 |
+
|
1016 |
+
s = df["a"]
|
1017 |
+
if dtype_backend == "nullable":
|
1018 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1019 |
+
with pytest.raises(TypeError, match="Invalid value"):
|
1020 |
+
s[0] = "foo"
|
1021 |
+
expected = Series([1, 2, 3], name="a")
|
1022 |
+
elif using_copy_on_write or warn_copy_on_write or using_array_manager:
|
1023 |
+
# TODO(CoW-warn) assert the FutureWarning for CoW is also raised
|
1024 |
+
with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):
|
1025 |
+
s[0] = "foo"
|
1026 |
+
expected = Series(["foo", 2, 3], dtype=object, name="a")
|
1027 |
+
else:
|
1028 |
+
with pd.option_context("chained_assignment", "warn"):
|
1029 |
+
msg = "|".join(
|
1030 |
+
[
|
1031 |
+
"A value is trying to be set on a copy of a slice from a DataFrame",
|
1032 |
+
"Setting an item of incompatible dtype is deprecated",
|
1033 |
+
]
|
1034 |
+
)
|
1035 |
+
with tm.assert_produces_warning(
|
1036 |
+
(SettingWithCopyWarning, FutureWarning), match=msg
|
1037 |
+
):
|
1038 |
+
s[0] = "foo"
|
1039 |
+
expected = Series(["foo", 2, 3], dtype=object, name="a")
|
1040 |
+
|
1041 |
+
tm.assert_series_equal(s, expected)
|
1042 |
+
if using_copy_on_write:
|
1043 |
+
tm.assert_frame_equal(df, df_orig)
|
1044 |
+
# ensure cached series on getitem is not the changed series
|
1045 |
+
tm.assert_series_equal(df["a"], df_orig["a"])
|
1046 |
+
else:
|
1047 |
+
df_orig["a"] = expected
|
1048 |
+
tm.assert_frame_equal(df, df_orig)
|
1049 |
+
|
1050 |
+
|
1051 |
+
@pytest.mark.parametrize(
|
1052 |
+
"method",
|
1053 |
+
[
|
1054 |
+
lambda df: df["a"],
|
1055 |
+
lambda df: df.loc[:, "a"],
|
1056 |
+
lambda df: df.iloc[:, 0],
|
1057 |
+
],
|
1058 |
+
ids=["getitem", "loc", "iloc"],
|
1059 |
+
)
|
1060 |
+
def test_column_as_series_no_item_cache(
|
1061 |
+
request,
|
1062 |
+
backend,
|
1063 |
+
method,
|
1064 |
+
using_copy_on_write,
|
1065 |
+
warn_copy_on_write,
|
1066 |
+
using_array_manager,
|
1067 |
+
):
|
1068 |
+
# Case: selecting a single column (which now also uses Copy-on-Write to protect
|
1069 |
+
# the view) should always give a new object (i.e. not make use of a cache)
|
1070 |
+
dtype_backend, DataFrame, _ = backend
|
1071 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
1072 |
+
df_orig = df.copy()
|
1073 |
+
|
1074 |
+
s1 = method(df)
|
1075 |
+
s2 = method(df)
|
1076 |
+
|
1077 |
+
is_iloc = "iloc" in request.node.name
|
1078 |
+
if using_copy_on_write or warn_copy_on_write or is_iloc:
|
1079 |
+
assert s1 is not s2
|
1080 |
+
else:
|
1081 |
+
assert s1 is s2
|
1082 |
+
|
1083 |
+
if using_copy_on_write or using_array_manager:
|
1084 |
+
s1.iloc[0] = 0
|
1085 |
+
elif warn_copy_on_write:
|
1086 |
+
with tm.assert_cow_warning():
|
1087 |
+
s1.iloc[0] = 0
|
1088 |
+
else:
|
1089 |
+
warn = SettingWithCopyWarning if dtype_backend == "numpy" else None
|
1090 |
+
with pd.option_context("chained_assignment", "warn"):
|
1091 |
+
with tm.assert_produces_warning(warn):
|
1092 |
+
s1.iloc[0] = 0
|
1093 |
+
|
1094 |
+
if using_copy_on_write:
|
1095 |
+
tm.assert_series_equal(s2, df_orig["a"])
|
1096 |
+
tm.assert_frame_equal(df, df_orig)
|
1097 |
+
else:
|
1098 |
+
assert s2.iloc[0] == 0
|
1099 |
+
|
1100 |
+
|
1101 |
+
# TODO add tests for other indexing methods on the Series
|
1102 |
+
|
1103 |
+
|
1104 |
+
def test_dataframe_add_column_from_series(backend, using_copy_on_write):
|
1105 |
+
# Case: adding a new column to a DataFrame from an existing column/series
|
1106 |
+
# -> delays copy under CoW
|
1107 |
+
_, DataFrame, Series = backend
|
1108 |
+
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
|
1109 |
+
|
1110 |
+
s = Series([10, 11, 12])
|
1111 |
+
df["new"] = s
|
1112 |
+
if using_copy_on_write:
|
1113 |
+
assert np.shares_memory(get_array(df, "new"), get_array(s))
|
1114 |
+
else:
|
1115 |
+
assert not np.shares_memory(get_array(df, "new"), get_array(s))
|
1116 |
+
|
1117 |
+
# editing series -> doesn't modify column in frame
|
1118 |
+
s[0] = 0
|
1119 |
+
expected = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "new": [10, 11, 12]})
|
1120 |
+
tm.assert_frame_equal(df, expected)
|
1121 |
+
|
1122 |
+
|
1123 |
+
@pytest.mark.parametrize("val", [100, "a"])
|
1124 |
+
@pytest.mark.parametrize(
|
1125 |
+
"indexer_func, indexer",
|
1126 |
+
[
|
1127 |
+
(tm.loc, (0, "a")),
|
1128 |
+
(tm.iloc, (0, 0)),
|
1129 |
+
(tm.loc, ([0], "a")),
|
1130 |
+
(tm.iloc, ([0], 0)),
|
1131 |
+
(tm.loc, (slice(None), "a")),
|
1132 |
+
(tm.iloc, (slice(None), 0)),
|
1133 |
+
],
|
1134 |
+
)
|
1135 |
+
@pytest.mark.parametrize(
|
1136 |
+
"col", [[0.1, 0.2, 0.3], [7, 8, 9]], ids=["mixed-block", "single-block"]
|
1137 |
+
)
|
1138 |
+
def test_set_value_copy_only_necessary_column(
|
1139 |
+
using_copy_on_write, warn_copy_on_write, indexer_func, indexer, val, col
|
1140 |
+
):
|
1141 |
+
# When setting inplace, only copy column that is modified instead of the whole
|
1142 |
+
# block (by splitting the block)
|
1143 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": col})
|
1144 |
+
df_orig = df.copy()
|
1145 |
+
view = df[:]
|
1146 |
+
|
1147 |
+
if val == "a" and not warn_copy_on_write:
|
1148 |
+
with tm.assert_produces_warning(
|
1149 |
+
FutureWarning, match="Setting an item of incompatible dtype is deprecated"
|
1150 |
+
):
|
1151 |
+
indexer_func(df)[indexer] = val
|
1152 |
+
if val == "a" and warn_copy_on_write:
|
1153 |
+
with tm.assert_produces_warning(
|
1154 |
+
FutureWarning, match="incompatible dtype|Setting a value on a view"
|
1155 |
+
):
|
1156 |
+
indexer_func(df)[indexer] = val
|
1157 |
+
else:
|
1158 |
+
with tm.assert_cow_warning(warn_copy_on_write and val == 100):
|
1159 |
+
indexer_func(df)[indexer] = val
|
1160 |
+
|
1161 |
+
if using_copy_on_write:
|
1162 |
+
assert np.shares_memory(get_array(df, "b"), get_array(view, "b"))
|
1163 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(view, "a"))
|
1164 |
+
tm.assert_frame_equal(view, df_orig)
|
1165 |
+
else:
|
1166 |
+
assert np.shares_memory(get_array(df, "c"), get_array(view, "c"))
|
1167 |
+
if val == "a":
|
1168 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(view, "a"))
|
1169 |
+
else:
|
1170 |
+
assert np.shares_memory(get_array(df, "a"), get_array(view, "a"))
|
1171 |
+
|
1172 |
+
|
1173 |
+
def test_series_midx_slice(using_copy_on_write, warn_copy_on_write):
|
1174 |
+
ser = Series([1, 2, 3], index=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 5]]))
|
1175 |
+
ser_orig = ser.copy()
|
1176 |
+
result = ser[1]
|
1177 |
+
assert np.shares_memory(get_array(ser), get_array(result))
|
1178 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1179 |
+
result.iloc[0] = 100
|
1180 |
+
if using_copy_on_write:
|
1181 |
+
tm.assert_series_equal(ser, ser_orig)
|
1182 |
+
else:
|
1183 |
+
expected = Series(
|
1184 |
+
[100, 2, 3], index=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 5]])
|
1185 |
+
)
|
1186 |
+
tm.assert_series_equal(ser, expected)
|
1187 |
+
|
1188 |
+
|
1189 |
+
def test_getitem_midx_slice(
|
1190 |
+
using_copy_on_write, warn_copy_on_write, using_array_manager
|
1191 |
+
):
|
1192 |
+
df = DataFrame({("a", "x"): [1, 2], ("a", "y"): 1, ("b", "x"): 2})
|
1193 |
+
df_orig = df.copy()
|
1194 |
+
new_df = df[("a",)]
|
1195 |
+
|
1196 |
+
if using_copy_on_write:
|
1197 |
+
assert not new_df._mgr._has_no_reference(0)
|
1198 |
+
|
1199 |
+
if not using_array_manager:
|
1200 |
+
assert np.shares_memory(get_array(df, ("a", "x")), get_array(new_df, "x"))
|
1201 |
+
if using_copy_on_write:
|
1202 |
+
new_df.iloc[0, 0] = 100
|
1203 |
+
tm.assert_frame_equal(df_orig, df)
|
1204 |
+
else:
|
1205 |
+
if warn_copy_on_write:
|
1206 |
+
with tm.assert_cow_warning():
|
1207 |
+
new_df.iloc[0, 0] = 100
|
1208 |
+
else:
|
1209 |
+
with pd.option_context("chained_assignment", "warn"):
|
1210 |
+
with tm.assert_produces_warning(SettingWithCopyWarning):
|
1211 |
+
new_df.iloc[0, 0] = 100
|
1212 |
+
assert df.iloc[0, 0] == 100
|
1213 |
+
|
1214 |
+
|
1215 |
+
def test_series_midx_tuples_slice(using_copy_on_write, warn_copy_on_write):
|
1216 |
+
ser = Series(
|
1217 |
+
[1, 2, 3],
|
1218 |
+
index=pd.MultiIndex.from_tuples([((1, 2), 3), ((1, 2), 4), ((2, 3), 4)]),
|
1219 |
+
)
|
1220 |
+
result = ser[(1, 2)]
|
1221 |
+
assert np.shares_memory(get_array(ser), get_array(result))
|
1222 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1223 |
+
result.iloc[0] = 100
|
1224 |
+
if using_copy_on_write:
|
1225 |
+
expected = Series(
|
1226 |
+
[1, 2, 3],
|
1227 |
+
index=pd.MultiIndex.from_tuples([((1, 2), 3), ((1, 2), 4), ((2, 3), 4)]),
|
1228 |
+
)
|
1229 |
+
tm.assert_series_equal(ser, expected)
|
1230 |
+
|
1231 |
+
|
1232 |
+
def test_midx_read_only_bool_indexer():
|
1233 |
+
# GH#56635
|
1234 |
+
def mklbl(prefix, n):
|
1235 |
+
return [f"{prefix}{i}" for i in range(n)]
|
1236 |
+
|
1237 |
+
idx = pd.MultiIndex.from_product(
|
1238 |
+
[mklbl("A", 4), mklbl("B", 2), mklbl("C", 4), mklbl("D", 2)]
|
1239 |
+
)
|
1240 |
+
cols = pd.MultiIndex.from_tuples(
|
1241 |
+
[("a", "foo"), ("a", "bar"), ("b", "foo"), ("b", "bah")], names=["lvl0", "lvl1"]
|
1242 |
+
)
|
1243 |
+
df = DataFrame(1, index=idx, columns=cols).sort_index().sort_index(axis=1)
|
1244 |
+
|
1245 |
+
mask = df[("a", "foo")] == 1
|
1246 |
+
expected_mask = mask.copy()
|
1247 |
+
result = df.loc[pd.IndexSlice[mask, :, ["C1", "C3"]], :]
|
1248 |
+
expected = df.loc[pd.IndexSlice[:, :, ["C1", "C3"]], :]
|
1249 |
+
tm.assert_frame_equal(result, expected)
|
1250 |
+
tm.assert_series_equal(mask, expected_mask)
|
1251 |
+
|
1252 |
+
|
1253 |
+
def test_loc_enlarging_with_dataframe(using_copy_on_write):
|
1254 |
+
df = DataFrame({"a": [1, 2, 3]})
|
1255 |
+
rhs = DataFrame({"b": [1, 2, 3], "c": [4, 5, 6]})
|
1256 |
+
rhs_orig = rhs.copy()
|
1257 |
+
df.loc[:, ["b", "c"]] = rhs
|
1258 |
+
if using_copy_on_write:
|
1259 |
+
assert np.shares_memory(get_array(df, "b"), get_array(rhs, "b"))
|
1260 |
+
assert np.shares_memory(get_array(df, "c"), get_array(rhs, "c"))
|
1261 |
+
assert not df._mgr._has_no_reference(1)
|
1262 |
+
else:
|
1263 |
+
assert not np.shares_memory(get_array(df, "b"), get_array(rhs, "b"))
|
1264 |
+
|
1265 |
+
df.iloc[0, 1] = 100
|
1266 |
+
tm.assert_frame_equal(rhs, rhs_orig)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_internals.py
ADDED
@@ -0,0 +1,151 @@
import numpy as np
import pytest

import pandas.util._test_decorators as td

import pandas as pd
from pandas import DataFrame
import pandas._testing as tm
from pandas.tests.copy_view.util import get_array


@td.skip_array_manager_invalid_test
def test_consolidate(using_copy_on_write):
    # create unconsolidated DataFrame
    df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
    df["c"] = [4, 5, 6]

    # take a viewing subset
    subset = df[:]

    # each block of subset references a block of df
    assert all(blk.refs.has_reference() for blk in subset._mgr.blocks)

    # consolidate the two int64 blocks
    subset._consolidate_inplace()

    # the float64 block still references the parent one because it still a view
    assert subset._mgr.blocks[0].refs.has_reference()
    # equivalent of assert np.shares_memory(df["b"].values, subset["b"].values)
    # but avoids caching df["b"]
    assert np.shares_memory(get_array(df, "b"), get_array(subset, "b"))

    # the new consolidated int64 block does not reference another
    assert not subset._mgr.blocks[1].refs.has_reference()

    # the parent dataframe now also only is linked for the float column
    assert not df._mgr.blocks[0].refs.has_reference()
    assert df._mgr.blocks[1].refs.has_reference()
    assert not df._mgr.blocks[2].refs.has_reference()

    # and modifying subset still doesn't modify parent
    if using_copy_on_write:
        subset.iloc[0, 1] = 0.0
        assert not df._mgr.blocks[1].refs.has_reference()
        assert df.loc[0, "b"] == 0.1


@pytest.mark.single_cpu
@td.skip_array_manager_invalid_test
def test_switch_options():
    # ensure we can switch the value of the option within one session
    # (assuming data is constructed after switching)

    # using the option_context to ensure we set back to global option value
    # after running the test
    with pd.option_context("mode.copy_on_write", False):
        df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
        subset = df[:]
        subset.iloc[0, 0] = 0
        # df updated with CoW disabled
        assert df.iloc[0, 0] == 0

        pd.options.mode.copy_on_write = True
        df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
        subset = df[:]
        subset.iloc[0, 0] = 0
        # df not updated with CoW enabled
        assert df.iloc[0, 0] == 1

        pd.options.mode.copy_on_write = False
        df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
        subset = df[:]
        subset.iloc[0, 0] = 0
        # df updated with CoW disabled
        assert df.iloc[0, 0] == 0

78 |
+
@td.skip_array_manager_invalid_test
|
79 |
+
@pytest.mark.parametrize("dtype", [np.intp, np.int8])
|
80 |
+
@pytest.mark.parametrize(
|
81 |
+
"locs, arr",
|
82 |
+
[
|
83 |
+
([0], np.array([-1, -2, -3])),
|
84 |
+
([1], np.array([-1, -2, -3])),
|
85 |
+
([5], np.array([-1, -2, -3])),
|
86 |
+
([0, 1], np.array([[-1, -2, -3], [-4, -5, -6]]).T),
|
87 |
+
([0, 2], np.array([[-1, -2, -3], [-4, -5, -6]]).T),
|
88 |
+
([0, 1, 2], np.array([[-1, -2, -3], [-4, -5, -6], [-4, -5, -6]]).T),
|
89 |
+
([1, 2], np.array([[-1, -2, -3], [-4, -5, -6]]).T),
|
90 |
+
([1, 3], np.array([[-1, -2, -3], [-4, -5, -6]]).T),
|
91 |
+
([1, 3], np.array([[-1, -2, -3], [-4, -5, -6]]).T),
|
92 |
+
],
|
93 |
+
)
|
94 |
+
def test_iset_splits_blocks_inplace(using_copy_on_write, locs, arr, dtype):
|
95 |
+
# Nothing currently calls iset with
|
96 |
+
# more than 1 loc with inplace=True (only happens with inplace=False)
|
97 |
+
# but ensure that it works
|
98 |
+
df = DataFrame(
|
99 |
+
{
|
100 |
+
"a": [1, 2, 3],
|
101 |
+
"b": [4, 5, 6],
|
102 |
+
"c": [7, 8, 9],
|
103 |
+
"d": [10, 11, 12],
|
104 |
+
"e": [13, 14, 15],
|
105 |
+
"f": ["a", "b", "c"],
|
106 |
+
},
|
107 |
+
)
|
108 |
+
arr = arr.astype(dtype)
|
109 |
+
df_orig = df.copy()
|
110 |
+
df2 = df.copy(deep=None) # Trigger a CoW (if enabled, otherwise makes copy)
|
111 |
+
df2._mgr.iset(locs, arr, inplace=True)
|
112 |
+
|
113 |
+
tm.assert_frame_equal(df, df_orig)
|
114 |
+
|
115 |
+
if using_copy_on_write:
|
116 |
+
for i, col in enumerate(df.columns):
|
117 |
+
if i not in locs:
|
118 |
+
assert np.shares_memory(get_array(df, col), get_array(df2, col))
|
119 |
+
else:
|
120 |
+
for col in df.columns:
|
121 |
+
assert not np.shares_memory(get_array(df, col), get_array(df2, col))
|
122 |
+
|
123 |
+
|
124 |
+
def test_exponential_backoff():
|
125 |
+
# GH#55518
|
126 |
+
df = DataFrame({"a": [1, 2, 3]})
|
127 |
+
for i in range(490):
|
128 |
+
df.copy(deep=False)
|
129 |
+
|
130 |
+
assert len(df._mgr.blocks[0].refs.referenced_blocks) == 491
|
131 |
+
|
132 |
+
df = DataFrame({"a": [1, 2, 3]})
|
133 |
+
dfs = [df.copy(deep=False) for i in range(510)]
|
134 |
+
|
135 |
+
for i in range(20):
|
136 |
+
df.copy(deep=False)
|
137 |
+
assert len(df._mgr.blocks[0].refs.referenced_blocks) == 531
|
138 |
+
assert df._mgr.blocks[0].refs.clear_counter == 1000
|
139 |
+
|
140 |
+
for i in range(500):
|
141 |
+
df.copy(deep=False)
|
142 |
+
|
143 |
+
# Don't reduce since we still have over 500 objects alive
|
144 |
+
assert df._mgr.blocks[0].refs.clear_counter == 1000
|
145 |
+
|
146 |
+
dfs = dfs[:300]
|
147 |
+
for i in range(500):
|
148 |
+
df.copy(deep=False)
|
149 |
+
|
150 |
+
# Reduce since there are less than 500 objects alive
|
151 |
+
assert df._mgr.blocks[0].refs.clear_counter == 500
|
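Every assertion in these files reduces to the question "are these two columns still backed by one buffer?". A small hedged helper sketch (names are illustrative, not from the diff) showing how that check can be done with public API only:

# Illustrative sketch: compare the underlying numpy buffers of one column in
# two frames. Works for numpy-backed dtypes regardless of the CoW mode.
import numpy as np
import pandas as pd

def shares_column_buffer(left: pd.DataFrame, right: pd.DataFrame, col: str) -> bool:
    """Return True if `col` in both frames points at the same numpy memory."""
    return np.shares_memory(left[col].to_numpy(), right[col].to_numpy())

df = pd.DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
shallow = df.copy(deep=False)
assert shares_column_buffer(df, shallow, "a")   # shallow copy shares memory
deep = df.copy()
assert not shares_column_buffer(df, deep, "a")  # deep copy does not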
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_interp_fillna.py
ADDED
@@ -0,0 +1,432 @@
import numpy as np
import pytest

from pandas import (
    NA,
    ArrowDtype,
    DataFrame,
    Interval,
    NaT,
    Series,
    Timestamp,
    interval_range,
    option_context,
)
import pandas._testing as tm
from pandas.tests.copy_view.util import get_array


@pytest.mark.parametrize("method", ["pad", "nearest", "linear"])
def test_interpolate_no_op(using_copy_on_write, method):
    df = DataFrame({"a": [1, 2]})
    df_orig = df.copy()

    warn = None
    if method == "pad":
        warn = FutureWarning
    msg = "DataFrame.interpolate with method=pad is deprecated"
    with tm.assert_produces_warning(warn, match=msg):
        result = df.interpolate(method=method)

    if using_copy_on_write:
        assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))

    result.iloc[0, 0] = 100

    if using_copy_on_write:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)


@pytest.mark.parametrize("func", ["ffill", "bfill"])
def test_interp_fill_functions(using_copy_on_write, func):
    # Check that these take the same code paths as interpolate
    df = DataFrame({"a": [1, 2]})
    df_orig = df.copy()

    result = getattr(df, func)()

    if using_copy_on_write:
        assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))

    result.iloc[0, 0] = 100

    if using_copy_on_write:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)


@pytest.mark.parametrize("func", ["ffill", "bfill"])
@pytest.mark.parametrize(
    "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]
)
def test_interpolate_triggers_copy(using_copy_on_write, vals, func):
    df = DataFrame({"a": vals})
    result = getattr(df, func)()

    assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    if using_copy_on_write:
        # Check that we don't have references when triggering a copy
        assert result._mgr._has_no_reference(0)


@pytest.mark.parametrize(
    "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]
)
def test_interpolate_inplace_no_reference_no_copy(using_copy_on_write, vals):
    df = DataFrame({"a": vals})
    arr = get_array(df, "a")
    df.interpolate(method="linear", inplace=True)

    assert np.shares_memory(arr, get_array(df, "a"))
    if using_copy_on_write:
        # Check that we don't have references when triggering a copy
        assert df._mgr._has_no_reference(0)


@pytest.mark.parametrize(
    "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]
)
def test_interpolate_inplace_with_refs(using_copy_on_write, vals, warn_copy_on_write):
    df = DataFrame({"a": [1, np.nan, 2]})
    df_orig = df.copy()
    arr = get_array(df, "a")
    view = df[:]
    with tm.assert_cow_warning(warn_copy_on_write):
        df.interpolate(method="linear", inplace=True)

    if using_copy_on_write:
        # Check that copy was triggered in interpolate and that we don't
        # have any references left
        assert not np.shares_memory(arr, get_array(df, "a"))
        tm.assert_frame_equal(df_orig, view)
        assert df._mgr._has_no_reference(0)
        assert view._mgr._has_no_reference(0)
    else:
        assert np.shares_memory(arr, get_array(df, "a"))


@pytest.mark.parametrize("func", ["ffill", "bfill"])
@pytest.mark.parametrize("dtype", ["float64", "Float64"])
def test_interp_fill_functions_inplace(
    using_copy_on_write, func, warn_copy_on_write, dtype
):
    # Check that these take the same code paths as interpolate
    df = DataFrame({"a": [1, np.nan, 2]}, dtype=dtype)
    df_orig = df.copy()
    arr = get_array(df, "a")
    view = df[:]

    with tm.assert_cow_warning(warn_copy_on_write and dtype == "float64"):
        getattr(df, func)(inplace=True)

    if using_copy_on_write:
        # Check that copy was triggered in interpolate and that we don't
        # have any references left
        assert not np.shares_memory(arr, get_array(df, "a"))
        tm.assert_frame_equal(df_orig, view)
        assert df._mgr._has_no_reference(0)
        assert view._mgr._has_no_reference(0)
    else:
        assert np.shares_memory(arr, get_array(df, "a")) is (dtype == "float64")


def test_interpolate_cleaned_fill_method(using_copy_on_write):
    # Check that the "method is set to None" case works correctly
    df = DataFrame({"a": ["a", np.nan, "c"], "b": 1})
    df_orig = df.copy()

    msg = "DataFrame.interpolate with object dtype"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.interpolate(method="linear")

    if using_copy_on_write:
        assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    else:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))

    result.iloc[0, 0] = Timestamp("2021-12-31")

    if using_copy_on_write:
        assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
    tm.assert_frame_equal(df, df_orig)


def test_interpolate_object_convert_no_op(using_copy_on_write):
    df = DataFrame({"a": ["a", "b", "c"], "b": 1})
    arr_a = get_array(df, "a")
    msg = "DataFrame.interpolate with method=pad is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df.interpolate(method="pad", inplace=True)

    # Now CoW makes a copy, it should not!
    if using_copy_on_write:
        assert df._mgr._has_no_reference(0)
        assert np.shares_memory(arr_a, get_array(df, "a"))


def test_interpolate_object_convert_copies(using_copy_on_write):
    df = DataFrame({"a": Series([1, 2], dtype=object), "b": 1})
    arr_a = get_array(df, "a")
    msg = "DataFrame.interpolate with method=pad is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df.interpolate(method="pad", inplace=True)

    if using_copy_on_write:
        assert df._mgr._has_no_reference(0)
        assert not np.shares_memory(arr_a, get_array(df, "a"))


def test_interpolate_downcast(using_copy_on_write):
    df = DataFrame({"a": [1, np.nan, 2.5], "b": 1})
    arr_a = get_array(df, "a")
    msg = "DataFrame.interpolate with method=pad is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df.interpolate(method="pad", inplace=True, downcast="infer")

    if using_copy_on_write:
        assert df._mgr._has_no_reference(0)
    assert np.shares_memory(arr_a, get_array(df, "a"))


def test_interpolate_downcast_reference_triggers_copy(using_copy_on_write):
    df = DataFrame({"a": [1, np.nan, 2.5], "b": 1})
    df_orig = df.copy()
    arr_a = get_array(df, "a")
    view = df[:]
    msg = "DataFrame.interpolate with method=pad is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df.interpolate(method="pad", inplace=True, downcast="infer")

    if using_copy_on_write:
        assert df._mgr._has_no_reference(0)
        assert not np.shares_memory(arr_a, get_array(df, "a"))
        tm.assert_frame_equal(df_orig, view)
    else:
        tm.assert_frame_equal(df, view)


def test_fillna(using_copy_on_write):
    df = DataFrame({"a": [1.5, np.nan], "b": 1})
    df_orig = df.copy()

    df2 = df.fillna(5.5)
    if using_copy_on_write:
        assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
    else:
        assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))

    df2.iloc[0, 1] = 100
    tm.assert_frame_equal(df_orig, df)


def test_fillna_dict(using_copy_on_write):
    df = DataFrame({"a": [1.5, np.nan], "b": 1})
    df_orig = df.copy()

    df2 = df.fillna({"a": 100.5})
    if using_copy_on_write:
        assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
        assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
    else:
        assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))

    df2.iloc[0, 1] = 100
    tm.assert_frame_equal(df_orig, df)


@pytest.mark.parametrize("downcast", [None, False])
def test_fillna_inplace(using_copy_on_write, downcast):
    df = DataFrame({"a": [1.5, np.nan], "b": 1})
    arr_a = get_array(df, "a")
    arr_b = get_array(df, "b")

    msg = "The 'downcast' keyword in fillna is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df.fillna(5.5, inplace=True, downcast=downcast)
    assert np.shares_memory(get_array(df, "a"), arr_a)
    assert np.shares_memory(get_array(df, "b"), arr_b)
    if using_copy_on_write:
        assert df._mgr._has_no_reference(0)
        assert df._mgr._has_no_reference(1)


def test_fillna_inplace_reference(using_copy_on_write, warn_copy_on_write):
    df = DataFrame({"a": [1.5, np.nan], "b": 1})
    df_orig = df.copy()
    arr_a = get_array(df, "a")
    arr_b = get_array(df, "b")
    view = df[:]

    with tm.assert_cow_warning(warn_copy_on_write):
        df.fillna(5.5, inplace=True)
    if using_copy_on_write:
        assert not np.shares_memory(get_array(df, "a"), arr_a)
        assert np.shares_memory(get_array(df, "b"), arr_b)
        assert view._mgr._has_no_reference(0)
        assert df._mgr._has_no_reference(0)
        tm.assert_frame_equal(view, df_orig)
    else:
        assert np.shares_memory(get_array(df, "a"), arr_a)
        assert np.shares_memory(get_array(df, "b"), arr_b)
    expected = DataFrame({"a": [1.5, 5.5], "b": 1})
    tm.assert_frame_equal(df, expected)


def test_fillna_interval_inplace_reference(using_copy_on_write, warn_copy_on_write):
    # Set dtype explicitly to avoid implicit cast when setting nan
    ser = Series(
        interval_range(start=0, end=5), name="a", dtype="interval[float64, right]"
    )
    ser.iloc[1] = np.nan

    ser_orig = ser.copy()
    view = ser[:]
    with tm.assert_cow_warning(warn_copy_on_write):
        ser.fillna(value=Interval(left=0, right=5), inplace=True)

    if using_copy_on_write:
        assert not np.shares_memory(
            get_array(ser, "a").left.values, get_array(view, "a").left.values
        )
        tm.assert_series_equal(view, ser_orig)
    else:
        assert np.shares_memory(
            get_array(ser, "a").left.values, get_array(view, "a").left.values
        )


def test_fillna_series_empty_arg(using_copy_on_write):
    ser = Series([1, np.nan, 2])
    ser_orig = ser.copy()
    result = ser.fillna({})

    if using_copy_on_write:
        assert np.shares_memory(get_array(ser), get_array(result))
    else:
        assert not np.shares_memory(get_array(ser), get_array(result))

    ser.iloc[0] = 100.5
    tm.assert_series_equal(ser_orig, result)


def test_fillna_series_empty_arg_inplace(using_copy_on_write):
    ser = Series([1, np.nan, 2])
    arr = get_array(ser)
    ser.fillna({}, inplace=True)

    assert np.shares_memory(get_array(ser), arr)
    if using_copy_on_write:
        assert ser._mgr._has_no_reference(0)


def test_fillna_ea_noop_shares_memory(
    using_copy_on_write, any_numeric_ea_and_arrow_dtype
):
    df = DataFrame({"a": [1, NA, 3], "b": 1}, dtype=any_numeric_ea_and_arrow_dtype)
    df_orig = df.copy()
    df2 = df.fillna(100)

    assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))

    if using_copy_on_write:
        assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
        assert not df2._mgr._has_no_reference(1)
    elif isinstance(df.dtypes.iloc[0], ArrowDtype):
        # arrow is immutable, so no-ops do not need to copy underlying array
        assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
    else:
        assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))

    tm.assert_frame_equal(df_orig, df)

    df2.iloc[0, 1] = 100
    if using_copy_on_write:
        assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
        assert df2._mgr._has_no_reference(1)
        assert df._mgr._has_no_reference(1)
    tm.assert_frame_equal(df_orig, df)


def test_fillna_inplace_ea_noop_shares_memory(
    using_copy_on_write, warn_copy_on_write, any_numeric_ea_and_arrow_dtype
):
    df = DataFrame({"a": [1, NA, 3], "b": 1}, dtype=any_numeric_ea_and_arrow_dtype)
    df_orig = df.copy()
    view = df[:]
    with tm.assert_cow_warning(warn_copy_on_write):
        df.fillna(100, inplace=True)

    if isinstance(df["a"].dtype, ArrowDtype) or using_copy_on_write:
        assert not np.shares_memory(get_array(df, "a"), get_array(view, "a"))
    else:
        # MaskedArray can actually respect inplace=True
        assert np.shares_memory(get_array(df, "a"), get_array(view, "a"))

    assert np.shares_memory(get_array(df, "b"), get_array(view, "b"))
    if using_copy_on_write:
        assert not df._mgr._has_no_reference(1)
        assert not view._mgr._has_no_reference(1)

    with tm.assert_cow_warning(
        warn_copy_on_write and "pyarrow" not in any_numeric_ea_and_arrow_dtype
    ):
        df.iloc[0, 1] = 100
    if isinstance(df["a"].dtype, ArrowDtype) or using_copy_on_write:
        tm.assert_frame_equal(df_orig, view)
    else:
        # we actually have a view
        tm.assert_frame_equal(df, view)


def test_fillna_chained_assignment(using_copy_on_write):
    df = DataFrame({"a": [1, np.nan, 2], "b": 1})
    df_orig = df.copy()
    if using_copy_on_write:
        with tm.raises_chained_assignment_error():
            df["a"].fillna(100, inplace=True)
        tm.assert_frame_equal(df, df_orig)

        with tm.raises_chained_assignment_error():
            df[["a"]].fillna(100, inplace=True)
        tm.assert_frame_equal(df, df_orig)
    else:
        with tm.assert_produces_warning(None):
            with option_context("mode.chained_assignment", None):
                df[["a"]].fillna(100, inplace=True)

        with tm.assert_produces_warning(None):
            with option_context("mode.chained_assignment", None):
                df[df.a > 5].fillna(100, inplace=True)

        with tm.assert_produces_warning(FutureWarning, match="inplace method"):
            df["a"].fillna(100, inplace=True)


@pytest.mark.parametrize("func", ["interpolate", "ffill", "bfill"])
def test_interpolate_chained_assignment(using_copy_on_write, func):
    df = DataFrame({"a": [1, np.nan, 2], "b": 1})
    df_orig = df.copy()
    if using_copy_on_write:
        with tm.raises_chained_assignment_error():
            getattr(df["a"], func)(inplace=True)
        tm.assert_frame_equal(df, df_orig)

        with tm.raises_chained_assignment_error():
            getattr(df[["a"]], func)(inplace=True)
        tm.assert_frame_equal(df, df_orig)
    else:
        with tm.assert_produces_warning(FutureWarning, match="inplace method"):
            getattr(df["a"], func)(inplace=True)

        with tm.assert_produces_warning(None):
            with option_context("mode.chained_assignment", None):
                getattr(df[["a"]], func)(inplace=True)

        with tm.assert_produces_warning(None):
            with option_context("mode.chained_assignment", None):
                getattr(df[df["a"] > 1], func)(inplace=True)
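The fillna tests above hinge on untouched columns not being copied. A small hedged sketch of that behaviour using only public API (assumes pandas >= 2.0 with copy_on_write enabled; mirrors the frame shape used in test_fillna):

# Illustrative sketch: under CoW, fillna only copies the block that actually
# changes; the untouched integer column keeps sharing memory with the parent
# until the result is written to.
import numpy as np
import pandas as pd

with pd.option_context("mode.copy_on_write", True):
    df = pd.DataFrame({"a": [1.5, np.nan], "b": 1})
    filled = df.fillna(5.5)
    # column "b" had no NaNs, so no copy was needed for it
    assert np.shares_memory(df["b"].to_numpy(), filled["b"].to_numpy())
    filled.iloc[0, 1] = 100
    # writing to the result triggers the deferred copy for that column only
    assert not np.shares_memory(df["b"].to_numpy(), filled["b"].to_numpy())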
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_methods.py
ADDED
@@ -0,0 +1,2055 @@
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
from pandas.errors import SettingWithCopyWarning
|
5 |
+
|
6 |
+
import pandas as pd
|
7 |
+
from pandas import (
|
8 |
+
DataFrame,
|
9 |
+
Index,
|
10 |
+
MultiIndex,
|
11 |
+
Period,
|
12 |
+
Series,
|
13 |
+
Timestamp,
|
14 |
+
date_range,
|
15 |
+
option_context,
|
16 |
+
period_range,
|
17 |
+
)
|
18 |
+
import pandas._testing as tm
|
19 |
+
from pandas.tests.copy_view.util import get_array
|
20 |
+
|
21 |
+
|
22 |
+
def test_copy(using_copy_on_write):
|
23 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
24 |
+
df_copy = df.copy()
|
25 |
+
|
26 |
+
# the deep copy by defaults takes a shallow copy of the Index
|
27 |
+
assert df_copy.index is not df.index
|
28 |
+
assert df_copy.columns is not df.columns
|
29 |
+
assert df_copy.index.is_(df.index)
|
30 |
+
assert df_copy.columns.is_(df.columns)
|
31 |
+
|
32 |
+
# the deep copy doesn't share memory
|
33 |
+
assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
|
34 |
+
if using_copy_on_write:
|
35 |
+
assert not df_copy._mgr.blocks[0].refs.has_reference()
|
36 |
+
assert not df_copy._mgr.blocks[1].refs.has_reference()
|
37 |
+
|
38 |
+
# mutating copy doesn't mutate original
|
39 |
+
df_copy.iloc[0, 0] = 0
|
40 |
+
assert df.iloc[0, 0] == 1
|
41 |
+
|
42 |
+
|
43 |
+
def test_copy_shallow(using_copy_on_write, warn_copy_on_write):
|
44 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
45 |
+
df_copy = df.copy(deep=False)
|
46 |
+
|
47 |
+
# the shallow copy also makes a shallow copy of the index
|
48 |
+
if using_copy_on_write:
|
49 |
+
assert df_copy.index is not df.index
|
50 |
+
assert df_copy.columns is not df.columns
|
51 |
+
assert df_copy.index.is_(df.index)
|
52 |
+
assert df_copy.columns.is_(df.columns)
|
53 |
+
else:
|
54 |
+
assert df_copy.index is df.index
|
55 |
+
assert df_copy.columns is df.columns
|
56 |
+
|
57 |
+
# the shallow copy still shares memory
|
58 |
+
assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
|
59 |
+
if using_copy_on_write:
|
60 |
+
assert df_copy._mgr.blocks[0].refs.has_reference()
|
61 |
+
assert df_copy._mgr.blocks[1].refs.has_reference()
|
62 |
+
|
63 |
+
if using_copy_on_write:
|
64 |
+
# mutating shallow copy doesn't mutate original
|
65 |
+
df_copy.iloc[0, 0] = 0
|
66 |
+
assert df.iloc[0, 0] == 1
|
67 |
+
# mutating triggered a copy-on-write -> no longer shares memory
|
68 |
+
assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
|
69 |
+
# but still shares memory for the other columns/blocks
|
70 |
+
assert np.shares_memory(get_array(df_copy, "c"), get_array(df, "c"))
|
71 |
+
else:
|
72 |
+
# mutating shallow copy does mutate original
|
73 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
74 |
+
df_copy.iloc[0, 0] = 0
|
75 |
+
assert df.iloc[0, 0] == 0
|
76 |
+
# and still shares memory
|
77 |
+
assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))
|
78 |
+
|
79 |
+
|
80 |
+
@pytest.mark.parametrize("copy", [True, None, False])
|
81 |
+
@pytest.mark.parametrize(
|
82 |
+
"method",
|
83 |
+
[
|
84 |
+
lambda df, copy: df.rename(columns=str.lower, copy=copy),
|
85 |
+
lambda df, copy: df.reindex(columns=["a", "c"], copy=copy),
|
86 |
+
lambda df, copy: df.reindex_like(df, copy=copy),
|
87 |
+
lambda df, copy: df.align(df, copy=copy)[0],
|
88 |
+
lambda df, copy: df.set_axis(["a", "b", "c"], axis="index", copy=copy),
|
89 |
+
lambda df, copy: df.rename_axis(index="test", copy=copy),
|
90 |
+
lambda df, copy: df.rename_axis(columns="test", copy=copy),
|
91 |
+
lambda df, copy: df.astype({"b": "int64"}, copy=copy),
|
92 |
+
# lambda df, copy: df.swaplevel(0, 0, copy=copy),
|
93 |
+
lambda df, copy: df.swapaxes(0, 0, copy=copy),
|
94 |
+
lambda df, copy: df.truncate(0, 5, copy=copy),
|
95 |
+
lambda df, copy: df.infer_objects(copy=copy),
|
96 |
+
lambda df, copy: df.to_timestamp(copy=copy),
|
97 |
+
lambda df, copy: df.to_period(freq="D", copy=copy),
|
98 |
+
lambda df, copy: df.tz_localize("US/Central", copy=copy),
|
99 |
+
lambda df, copy: df.tz_convert("US/Central", copy=copy),
|
100 |
+
lambda df, copy: df.set_flags(allows_duplicate_labels=False, copy=copy),
|
101 |
+
],
|
102 |
+
ids=[
|
103 |
+
"rename",
|
104 |
+
"reindex",
|
105 |
+
"reindex_like",
|
106 |
+
"align",
|
107 |
+
"set_axis",
|
108 |
+
"rename_axis0",
|
109 |
+
"rename_axis1",
|
110 |
+
"astype",
|
111 |
+
# "swaplevel", # only series
|
112 |
+
"swapaxes",
|
113 |
+
"truncate",
|
114 |
+
"infer_objects",
|
115 |
+
"to_timestamp",
|
116 |
+
"to_period",
|
117 |
+
"tz_localize",
|
118 |
+
"tz_convert",
|
119 |
+
"set_flags",
|
120 |
+
],
|
121 |
+
)
|
122 |
+
def test_methods_copy_keyword(
|
123 |
+
request, method, copy, using_copy_on_write, using_array_manager
|
124 |
+
):
|
125 |
+
index = None
|
126 |
+
if "to_timestamp" in request.node.callspec.id:
|
127 |
+
index = period_range("2012-01-01", freq="D", periods=3)
|
128 |
+
elif "to_period" in request.node.callspec.id:
|
129 |
+
index = date_range("2012-01-01", freq="D", periods=3)
|
130 |
+
elif "tz_localize" in request.node.callspec.id:
|
131 |
+
index = date_range("2012-01-01", freq="D", periods=3)
|
132 |
+
elif "tz_convert" in request.node.callspec.id:
|
133 |
+
index = date_range("2012-01-01", freq="D", periods=3, tz="Europe/Brussels")
|
134 |
+
|
135 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}, index=index)
|
136 |
+
|
137 |
+
if "swapaxes" in request.node.callspec.id:
|
138 |
+
msg = "'DataFrame.swapaxes' is deprecated"
|
139 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
140 |
+
df2 = method(df, copy=copy)
|
141 |
+
else:
|
142 |
+
df2 = method(df, copy=copy)
|
143 |
+
|
144 |
+
share_memory = using_copy_on_write or copy is False
|
145 |
+
|
146 |
+
if request.node.callspec.id.startswith("reindex-"):
|
147 |
+
# TODO copy=False without CoW still returns a copy in this case
|
148 |
+
if not using_copy_on_write and not using_array_manager and copy is False:
|
149 |
+
share_memory = False
|
150 |
+
|
151 |
+
if share_memory:
|
152 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
153 |
+
else:
|
154 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
155 |
+
|
156 |
+
|
157 |
+
@pytest.mark.parametrize("copy", [True, None, False])
|
158 |
+
@pytest.mark.parametrize(
|
159 |
+
"method",
|
160 |
+
[
|
161 |
+
lambda ser, copy: ser.rename(index={0: 100}, copy=copy),
|
162 |
+
lambda ser, copy: ser.rename(None, copy=copy),
|
163 |
+
lambda ser, copy: ser.reindex(index=ser.index, copy=copy),
|
164 |
+
lambda ser, copy: ser.reindex_like(ser, copy=copy),
|
165 |
+
lambda ser, copy: ser.align(ser, copy=copy)[0],
|
166 |
+
lambda ser, copy: ser.set_axis(["a", "b", "c"], axis="index", copy=copy),
|
167 |
+
lambda ser, copy: ser.rename_axis(index="test", copy=copy),
|
168 |
+
lambda ser, copy: ser.astype("int64", copy=copy),
|
169 |
+
lambda ser, copy: ser.swaplevel(0, 1, copy=copy),
|
170 |
+
lambda ser, copy: ser.swapaxes(0, 0, copy=copy),
|
171 |
+
lambda ser, copy: ser.truncate(0, 5, copy=copy),
|
172 |
+
lambda ser, copy: ser.infer_objects(copy=copy),
|
173 |
+
lambda ser, copy: ser.to_timestamp(copy=copy),
|
174 |
+
lambda ser, copy: ser.to_period(freq="D", copy=copy),
|
175 |
+
lambda ser, copy: ser.tz_localize("US/Central", copy=copy),
|
176 |
+
lambda ser, copy: ser.tz_convert("US/Central", copy=copy),
|
177 |
+
lambda ser, copy: ser.set_flags(allows_duplicate_labels=False, copy=copy),
|
178 |
+
],
|
179 |
+
ids=[
|
180 |
+
"rename (dict)",
|
181 |
+
"rename",
|
182 |
+
"reindex",
|
183 |
+
"reindex_like",
|
184 |
+
"align",
|
185 |
+
"set_axis",
|
186 |
+
"rename_axis0",
|
187 |
+
"astype",
|
188 |
+
"swaplevel",
|
189 |
+
"swapaxes",
|
190 |
+
"truncate",
|
191 |
+
"infer_objects",
|
192 |
+
"to_timestamp",
|
193 |
+
"to_period",
|
194 |
+
"tz_localize",
|
195 |
+
"tz_convert",
|
196 |
+
"set_flags",
|
197 |
+
],
|
198 |
+
)
|
199 |
+
def test_methods_series_copy_keyword(request, method, copy, using_copy_on_write):
|
200 |
+
index = None
|
201 |
+
if "to_timestamp" in request.node.callspec.id:
|
202 |
+
index = period_range("2012-01-01", freq="D", periods=3)
|
203 |
+
elif "to_period" in request.node.callspec.id:
|
204 |
+
index = date_range("2012-01-01", freq="D", periods=3)
|
205 |
+
elif "tz_localize" in request.node.callspec.id:
|
206 |
+
index = date_range("2012-01-01", freq="D", periods=3)
|
207 |
+
elif "tz_convert" in request.node.callspec.id:
|
208 |
+
index = date_range("2012-01-01", freq="D", periods=3, tz="Europe/Brussels")
|
209 |
+
elif "swaplevel" in request.node.callspec.id:
|
210 |
+
index = MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]])
|
211 |
+
|
212 |
+
ser = Series([1, 2, 3], index=index)
|
213 |
+
|
214 |
+
if "swapaxes" in request.node.callspec.id:
|
215 |
+
msg = "'Series.swapaxes' is deprecated"
|
216 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
217 |
+
ser2 = method(ser, copy=copy)
|
218 |
+
else:
|
219 |
+
ser2 = method(ser, copy=copy)
|
220 |
+
|
221 |
+
share_memory = using_copy_on_write or copy is False
|
222 |
+
|
223 |
+
if share_memory:
|
224 |
+
assert np.shares_memory(get_array(ser2), get_array(ser))
|
225 |
+
else:
|
226 |
+
assert not np.shares_memory(get_array(ser2), get_array(ser))
|
227 |
+
|
228 |
+
|
229 |
+
@pytest.mark.parametrize("copy", [True, None, False])
|
230 |
+
def test_transpose_copy_keyword(using_copy_on_write, copy, using_array_manager):
|
231 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
232 |
+
result = df.transpose(copy=copy)
|
233 |
+
share_memory = using_copy_on_write or copy is False or copy is None
|
234 |
+
share_memory = share_memory and not using_array_manager
|
235 |
+
|
236 |
+
if share_memory:
|
237 |
+
assert np.shares_memory(get_array(df, "a"), get_array(result, 0))
|
238 |
+
else:
|
239 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(result, 0))
|
240 |
+
|
241 |
+
|
242 |
+
# -----------------------------------------------------------------------------
|
243 |
+
# DataFrame methods returning new DataFrame using shallow copy
|
244 |
+
|
245 |
+
|
246 |
+
def test_reset_index(using_copy_on_write):
|
247 |
+
# Case: resetting the index (i.e. adding a new column) + mutating the
|
248 |
+
# resulting dataframe
|
249 |
+
df = DataFrame(
|
250 |
+
{"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}, index=[10, 11, 12]
|
251 |
+
)
|
252 |
+
df_orig = df.copy()
|
253 |
+
df2 = df.reset_index()
|
254 |
+
df2._mgr._verify_integrity()
|
255 |
+
|
256 |
+
if using_copy_on_write:
|
257 |
+
# still shares memory (df2 is a shallow copy)
|
258 |
+
assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
259 |
+
assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
|
260 |
+
# mutating df2 triggers a copy-on-write for that column / block
|
261 |
+
df2.iloc[0, 2] = 0
|
262 |
+
assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
263 |
+
if using_copy_on_write:
|
264 |
+
assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
|
265 |
+
tm.assert_frame_equal(df, df_orig)
|
266 |
+
|
267 |
+
|
268 |
+
@pytest.mark.parametrize("index", [pd.RangeIndex(0, 2), Index([1, 2])])
|
269 |
+
def test_reset_index_series_drop(using_copy_on_write, index):
|
270 |
+
ser = Series([1, 2], index=index)
|
271 |
+
ser_orig = ser.copy()
|
272 |
+
ser2 = ser.reset_index(drop=True)
|
273 |
+
if using_copy_on_write:
|
274 |
+
assert np.shares_memory(get_array(ser), get_array(ser2))
|
275 |
+
assert not ser._mgr._has_no_reference(0)
|
276 |
+
else:
|
277 |
+
assert not np.shares_memory(get_array(ser), get_array(ser2))
|
278 |
+
|
279 |
+
ser2.iloc[0] = 100
|
280 |
+
tm.assert_series_equal(ser, ser_orig)
|
281 |
+
|
282 |
+
|
283 |
+
def test_groupby_column_index_in_references():
|
284 |
+
df = DataFrame(
|
285 |
+
{"A": ["a", "b", "c", "d"], "B": [1, 2, 3, 4], "C": ["a", "a", "b", "b"]}
|
286 |
+
)
|
287 |
+
df = df.set_index("A")
|
288 |
+
key = df["C"]
|
289 |
+
result = df.groupby(key, observed=True).sum()
|
290 |
+
expected = df.groupby("C", observed=True).sum()
|
291 |
+
tm.assert_frame_equal(result, expected)
|
292 |
+
|
293 |
+
|
294 |
+
def test_rename_columns(using_copy_on_write):
|
295 |
+
# Case: renaming columns returns a new dataframe
|
296 |
+
# + afterwards modifying the result
|
297 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
298 |
+
df_orig = df.copy()
|
299 |
+
df2 = df.rename(columns=str.upper)
|
300 |
+
|
301 |
+
if using_copy_on_write:
|
302 |
+
assert np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
|
303 |
+
df2.iloc[0, 0] = 0
|
304 |
+
assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
|
305 |
+
if using_copy_on_write:
|
306 |
+
assert np.shares_memory(get_array(df2, "C"), get_array(df, "c"))
|
307 |
+
expected = DataFrame({"A": [0, 2, 3], "B": [4, 5, 6], "C": [0.1, 0.2, 0.3]})
|
308 |
+
tm.assert_frame_equal(df2, expected)
|
309 |
+
tm.assert_frame_equal(df, df_orig)
|
310 |
+
|
311 |
+
|
312 |
+
def test_rename_columns_modify_parent(using_copy_on_write):
|
313 |
+
# Case: renaming columns returns a new dataframe
|
314 |
+
# + afterwards modifying the original (parent) dataframe
|
315 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
316 |
+
df2 = df.rename(columns=str.upper)
|
317 |
+
df2_orig = df2.copy()
|
318 |
+
|
319 |
+
if using_copy_on_write:
|
320 |
+
assert np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
|
321 |
+
else:
|
322 |
+
assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
|
323 |
+
df.iloc[0, 0] = 0
|
324 |
+
assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))
|
325 |
+
if using_copy_on_write:
|
326 |
+
assert np.shares_memory(get_array(df2, "C"), get_array(df, "c"))
|
327 |
+
expected = DataFrame({"a": [0, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
328 |
+
tm.assert_frame_equal(df, expected)
|
329 |
+
tm.assert_frame_equal(df2, df2_orig)
|
330 |
+
|
331 |
+
|
332 |
+
def test_pipe(using_copy_on_write):
|
333 |
+
df = DataFrame({"a": [1, 2, 3], "b": 1.5})
|
334 |
+
df_orig = df.copy()
|
335 |
+
|
336 |
+
def testfunc(df):
|
337 |
+
return df
|
338 |
+
|
339 |
+
df2 = df.pipe(testfunc)
|
340 |
+
|
341 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
342 |
+
|
343 |
+
# mutating df2 triggers a copy-on-write for that column
|
344 |
+
df2.iloc[0, 0] = 0
|
345 |
+
if using_copy_on_write:
|
346 |
+
tm.assert_frame_equal(df, df_orig)
|
347 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
348 |
+
else:
|
349 |
+
expected = DataFrame({"a": [0, 2, 3], "b": 1.5})
|
350 |
+
tm.assert_frame_equal(df, expected)
|
351 |
+
|
352 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
353 |
+
assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
354 |
+
|
355 |
+
|
356 |
+
def test_pipe_modify_df(using_copy_on_write):
|
357 |
+
df = DataFrame({"a": [1, 2, 3], "b": 1.5})
|
358 |
+
df_orig = df.copy()
|
359 |
+
|
360 |
+
def testfunc(df):
|
361 |
+
df.iloc[0, 0] = 100
|
362 |
+
return df
|
363 |
+
|
364 |
+
df2 = df.pipe(testfunc)
|
365 |
+
|
366 |
+
assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
367 |
+
|
368 |
+
if using_copy_on_write:
|
369 |
+
tm.assert_frame_equal(df, df_orig)
|
370 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
371 |
+
else:
|
372 |
+
expected = DataFrame({"a": [100, 2, 3], "b": 1.5})
|
373 |
+
tm.assert_frame_equal(df, expected)
|
374 |
+
|
375 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
376 |
+
assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
377 |
+
|
378 |
+
|
379 |
+
def test_reindex_columns(using_copy_on_write):
|
380 |
+
# Case: reindexing the column returns a new dataframe
|
381 |
+
# + afterwards modifying the result
|
382 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
383 |
+
df_orig = df.copy()
|
384 |
+
df2 = df.reindex(columns=["a", "c"])
|
385 |
+
|
386 |
+
if using_copy_on_write:
|
387 |
+
# still shares memory (df2 is a shallow copy)
|
388 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
389 |
+
else:
|
390 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
391 |
+
# mutating df2 triggers a copy-on-write for that column
|
392 |
+
df2.iloc[0, 0] = 0
|
393 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
394 |
+
if using_copy_on_write:
|
395 |
+
assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
|
396 |
+
tm.assert_frame_equal(df, df_orig)
|
397 |
+
|
398 |
+
|
399 |
+
@pytest.mark.parametrize(
|
400 |
+
"index",
|
401 |
+
[
|
402 |
+
lambda idx: idx,
|
403 |
+
lambda idx: idx.view(),
|
404 |
+
lambda idx: idx.copy(),
|
405 |
+
lambda idx: list(idx),
|
406 |
+
],
|
407 |
+
ids=["identical", "view", "copy", "values"],
|
408 |
+
)
|
409 |
+
def test_reindex_rows(index, using_copy_on_write):
|
410 |
+
# Case: reindexing the rows with an index that matches the current index
|
411 |
+
# can use a shallow copy
|
412 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
413 |
+
df_orig = df.copy()
|
414 |
+
df2 = df.reindex(index=index(df.index))
|
415 |
+
|
416 |
+
if using_copy_on_write:
|
417 |
+
# still shares memory (df2 is a shallow copy)
|
418 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
419 |
+
else:
|
420 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
421 |
+
# mutating df2 triggers a copy-on-write for that column
|
422 |
+
df2.iloc[0, 0] = 0
|
423 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
424 |
+
if using_copy_on_write:
|
425 |
+
assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
|
426 |
+
tm.assert_frame_equal(df, df_orig)
|
427 |
+
|
428 |
+
|
429 |
+
def test_drop_on_column(using_copy_on_write):
|
430 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
431 |
+
df_orig = df.copy()
|
432 |
+
df2 = df.drop(columns="a")
|
433 |
+
df2._mgr._verify_integrity()
|
434 |
+
|
435 |
+
if using_copy_on_write:
|
436 |
+
assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
437 |
+
assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
|
438 |
+
else:
|
439 |
+
assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
440 |
+
assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
|
441 |
+
df2.iloc[0, 0] = 0
|
442 |
+
assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
443 |
+
if using_copy_on_write:
|
444 |
+
assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
|
445 |
+
tm.assert_frame_equal(df, df_orig)
|
446 |
+
|
447 |
+
|
448 |
+
def test_select_dtypes(using_copy_on_write):
|
449 |
+
# Case: selecting columns using `select_dtypes()` returns a new dataframe
|
450 |
+
# + afterwards modifying the result
|
451 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
452 |
+
df_orig = df.copy()
|
453 |
+
df2 = df.select_dtypes("int64")
|
454 |
+
df2._mgr._verify_integrity()
|
455 |
+
|
456 |
+
if using_copy_on_write:
|
457 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
458 |
+
else:
|
459 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
460 |
+
|
461 |
+
# mutating df2 triggers a copy-on-write for that column/block
|
462 |
+
df2.iloc[0, 0] = 0
|
463 |
+
if using_copy_on_write:
|
464 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
465 |
+
tm.assert_frame_equal(df, df_orig)
|
466 |
+
|
467 |
+
|
468 |
+
@pytest.mark.parametrize(
|
469 |
+
"filter_kwargs", [{"items": ["a"]}, {"like": "a"}, {"regex": "a"}]
|
470 |
+
)
|
471 |
+
def test_filter(using_copy_on_write, filter_kwargs):
|
472 |
+
# Case: selecting columns using `filter()` returns a new dataframe
|
473 |
+
# + afterwards modifying the result
|
474 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
475 |
+
df_orig = df.copy()
|
476 |
+
df2 = df.filter(**filter_kwargs)
|
477 |
+
if using_copy_on_write:
|
478 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
479 |
+
else:
|
480 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
481 |
+
|
482 |
+
# mutating df2 triggers a copy-on-write for that column/block
|
483 |
+
if using_copy_on_write:
|
484 |
+
df2.iloc[0, 0] = 0
|
485 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
486 |
+
tm.assert_frame_equal(df, df_orig)
|
487 |
+
|
488 |
+
|
489 |
+
def test_shift_no_op(using_copy_on_write):
|
490 |
+
df = DataFrame(
|
491 |
+
[[1, 2], [3, 4], [5, 6]],
|
492 |
+
index=date_range("2020-01-01", "2020-01-03"),
|
493 |
+
columns=["a", "b"],
|
494 |
+
)
|
495 |
+
df_orig = df.copy()
|
496 |
+
df2 = df.shift(periods=0)
|
497 |
+
|
498 |
+
if using_copy_on_write:
|
499 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
500 |
+
else:
|
501 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
502 |
+
|
503 |
+
df.iloc[0, 0] = 0
|
504 |
+
if using_copy_on_write:
|
505 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
506 |
+
assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
|
507 |
+
tm.assert_frame_equal(df2, df_orig)
|
508 |
+
|
509 |
+
|
510 |
+
def test_shift_index(using_copy_on_write):
|
511 |
+
df = DataFrame(
|
512 |
+
[[1, 2], [3, 4], [5, 6]],
|
513 |
+
index=date_range("2020-01-01", "2020-01-03"),
|
514 |
+
columns=["a", "b"],
|
515 |
+
)
|
516 |
+
df2 = df.shift(periods=1, axis=0)
|
517 |
+
|
518 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
519 |
+
|
520 |
+
|
521 |
+
def test_shift_rows_freq(using_copy_on_write):
|
522 |
+
df = DataFrame(
|
523 |
+
[[1, 2], [3, 4], [5, 6]],
|
524 |
+
index=date_range("2020-01-01", "2020-01-03"),
|
525 |
+
columns=["a", "b"],
|
526 |
+
)
|
527 |
+
df_orig = df.copy()
|
528 |
+
df_orig.index = date_range("2020-01-02", "2020-01-04")
|
529 |
+
df2 = df.shift(periods=1, freq="1D")
|
530 |
+
|
531 |
+
if using_copy_on_write:
|
532 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
533 |
+
else:
|
534 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
535 |
+
|
536 |
+
df.iloc[0, 0] = 0
|
537 |
+
if using_copy_on_write:
|
538 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
539 |
+
tm.assert_frame_equal(df2, df_orig)
|
540 |
+
|
541 |
+
|
542 |
+
def test_shift_columns(using_copy_on_write, warn_copy_on_write):
|
543 |
+
df = DataFrame(
|
544 |
+
[[1, 2], [3, 4], [5, 6]], columns=date_range("2020-01-01", "2020-01-02")
|
545 |
+
)
|
546 |
+
df2 = df.shift(periods=1, axis=1)
|
547 |
+
|
548 |
+
assert np.shares_memory(get_array(df2, "2020-01-02"), get_array(df, "2020-01-01"))
|
549 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
550 |
+
df.iloc[0, 0] = 0
|
551 |
+
if using_copy_on_write:
|
552 |
+
assert not np.shares_memory(
|
553 |
+
get_array(df2, "2020-01-02"), get_array(df, "2020-01-01")
|
554 |
+
)
|
555 |
+
expected = DataFrame(
|
556 |
+
[[np.nan, 1], [np.nan, 3], [np.nan, 5]],
|
557 |
+
columns=date_range("2020-01-01", "2020-01-02"),
|
558 |
+
)
|
559 |
+
tm.assert_frame_equal(df2, expected)
|
560 |
+
|
561 |
+
|
562 |
+
def test_pop(using_copy_on_write, warn_copy_on_write):
|
563 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
564 |
+
df_orig = df.copy()
|
565 |
+
view_original = df[:]
|
566 |
+
result = df.pop("a")
|
567 |
+
|
568 |
+
assert np.shares_memory(result.values, get_array(view_original, "a"))
|
569 |
+
assert np.shares_memory(get_array(df, "b"), get_array(view_original, "b"))
|
570 |
+
|
571 |
+
if using_copy_on_write:
|
572 |
+
result.iloc[0] = 0
|
573 |
+
assert not np.shares_memory(result.values, get_array(view_original, "a"))
|
574 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
575 |
+
df.iloc[0, 0] = 0
|
576 |
+
if using_copy_on_write:
|
577 |
+
assert not np.shares_memory(get_array(df, "b"), get_array(view_original, "b"))
|
578 |
+
tm.assert_frame_equal(view_original, df_orig)
|
579 |
+
else:
|
580 |
+
expected = DataFrame({"a": [1, 2, 3], "b": [0, 5, 6], "c": [0.1, 0.2, 0.3]})
|
581 |
+
tm.assert_frame_equal(view_original, expected)
|
582 |
+
|
583 |
+
|
584 |
+
@pytest.mark.parametrize(
|
585 |
+
"func",
|
586 |
+
[
|
587 |
+
lambda x, y: x.align(y),
|
588 |
+
lambda x, y: x.align(y.a, axis=0),
|
589 |
+
lambda x, y: x.align(y.a.iloc[slice(0, 1)], axis=1),
|
590 |
+
],
|
591 |
+
)
|
592 |
+
def test_align_frame(using_copy_on_write, func):
|
593 |
+
df = DataFrame({"a": [1, 2, 3], "b": "a"})
|
594 |
+
df_orig = df.copy()
|
595 |
+
df_changed = df[["b", "a"]].copy()
|
596 |
+
df2, _ = func(df, df_changed)
|
597 |
+
|
598 |
+
if using_copy_on_write:
|
599 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
600 |
+
else:
|
601 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
602 |
+
|
603 |
+
df2.iloc[0, 0] = 0
|
604 |
+
if using_copy_on_write:
|
605 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
606 |
+
tm.assert_frame_equal(df, df_orig)
|
607 |
+
|
608 |
+
|
609 |
+
def test_align_series(using_copy_on_write):
|
610 |
+
ser = Series([1, 2])
|
611 |
+
ser_orig = ser.copy()
|
612 |
+
ser_other = ser.copy()
|
613 |
+
ser2, ser_other_result = ser.align(ser_other)
|
614 |
+
|
615 |
+
if using_copy_on_write:
|
616 |
+
assert np.shares_memory(ser2.values, ser.values)
|
617 |
+
assert np.shares_memory(ser_other_result.values, ser_other.values)
|
618 |
+
else:
|
619 |
+
assert not np.shares_memory(ser2.values, ser.values)
|
620 |
+
assert not np.shares_memory(ser_other_result.values, ser_other.values)
|
621 |
+
|
622 |
+
ser2.iloc[0] = 0
|
623 |
+
ser_other_result.iloc[0] = 0
|
624 |
+
if using_copy_on_write:
|
625 |
+
assert not np.shares_memory(ser2.values, ser.values)
|
626 |
+
assert not np.shares_memory(ser_other_result.values, ser_other.values)
|
627 |
+
tm.assert_series_equal(ser, ser_orig)
|
628 |
+
tm.assert_series_equal(ser_other, ser_orig)
|
629 |
+
|
630 |
+
|
631 |
+
def test_align_copy_false(using_copy_on_write):
|
632 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
633 |
+
df_orig = df.copy()
|
634 |
+
df2, df3 = df.align(df, copy=False)
|
635 |
+
|
636 |
+
assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
|
637 |
+
assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
638 |
+
|
639 |
+
if using_copy_on_write:
|
640 |
+
df2.loc[0, "a"] = 0
|
641 |
+
tm.assert_frame_equal(df, df_orig) # Original is unchanged
|
642 |
+
|
643 |
+
df3.loc[0, "a"] = 0
|
644 |
+
tm.assert_frame_equal(df, df_orig) # Original is unchanged
|
645 |
+
|
646 |
+
|
647 |
+
def test_align_with_series_copy_false(using_copy_on_write):
|
648 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
649 |
+
ser = Series([1, 2, 3], name="x")
|
650 |
+
ser_orig = ser.copy()
|
651 |
+
df_orig = df.copy()
|
652 |
+
df2, ser2 = df.align(ser, copy=False, axis=0)
|
653 |
+
|
654 |
+
assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
|
655 |
+
assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
656 |
+
assert np.shares_memory(get_array(ser, "x"), get_array(ser2, "x"))
|
657 |
+
|
658 |
+
if using_copy_on_write:
|
659 |
+
df2.loc[0, "a"] = 0
|
660 |
+
tm.assert_frame_equal(df, df_orig) # Original is unchanged
|
661 |
+
|
662 |
+
ser2.loc[0] = 0
|
663 |
+
tm.assert_series_equal(ser, ser_orig) # Original is unchanged
|
664 |
+
|
665 |
+
|
666 |
+
def test_to_frame(using_copy_on_write, warn_copy_on_write):
|
667 |
+
# Case: converting a Series to a DataFrame with to_frame
|
668 |
+
ser = Series([1, 2, 3])
|
669 |
+
ser_orig = ser.copy()
|
670 |
+
|
671 |
+
df = ser[:].to_frame()
|
672 |
+
|
673 |
+
# currently this always returns a "view"
|
674 |
+
assert np.shares_memory(ser.values, get_array(df, 0))
|
675 |
+
|
676 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
677 |
+
df.iloc[0, 0] = 0
|
678 |
+
|
679 |
+
if using_copy_on_write:
|
680 |
+
# mutating df triggers a copy-on-write for that column
|
681 |
+
assert not np.shares_memory(ser.values, get_array(df, 0))
|
682 |
+
tm.assert_series_equal(ser, ser_orig)
|
683 |
+
else:
|
684 |
+
        # but currently to_frame() actually returns a view -> mutates parent
|
685 |
+
expected = ser_orig.copy()
|
686 |
+
expected.iloc[0] = 0
|
687 |
+
tm.assert_series_equal(ser, expected)
|
688 |
+
|
689 |
+
# modify original series -> don't modify dataframe
|
690 |
+
df = ser[:].to_frame()
|
691 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
692 |
+
ser.iloc[0] = 0
|
693 |
+
|
694 |
+
if using_copy_on_write:
|
695 |
+
tm.assert_frame_equal(df, ser_orig.to_frame())
|
696 |
+
else:
|
697 |
+
expected = ser_orig.copy().to_frame()
|
698 |
+
expected.iloc[0, 0] = 0
|
699 |
+
tm.assert_frame_equal(df, expected)
|
700 |
+
|
701 |
+
|
702 |
+
@pytest.mark.parametrize("ax", ["index", "columns"])
|
703 |
+
def test_swapaxes_noop(using_copy_on_write, ax):
|
704 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
705 |
+
df_orig = df.copy()
|
706 |
+
msg = "'DataFrame.swapaxes' is deprecated"
|
707 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
708 |
+
df2 = df.swapaxes(ax, ax)
|
709 |
+
|
710 |
+
if using_copy_on_write:
|
711 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
712 |
+
else:
|
713 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
714 |
+
|
715 |
+
# mutating df2 triggers a copy-on-write for that column/block
|
716 |
+
df2.iloc[0, 0] = 0
|
717 |
+
if using_copy_on_write:
|
718 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
719 |
+
tm.assert_frame_equal(df, df_orig)
|
720 |
+
|
721 |
+
|
722 |
+
def test_swapaxes_single_block(using_copy_on_write):
|
723 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["x", "y", "z"])
|
724 |
+
df_orig = df.copy()
|
725 |
+
msg = "'DataFrame.swapaxes' is deprecated"
|
726 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
727 |
+
df2 = df.swapaxes("index", "columns")
|
728 |
+
|
729 |
+
if using_copy_on_write:
|
730 |
+
assert np.shares_memory(get_array(df2, "x"), get_array(df, "a"))
|
731 |
+
else:
|
732 |
+
assert not np.shares_memory(get_array(df2, "x"), get_array(df, "a"))
|
733 |
+
|
734 |
+
# mutating df2 triggers a copy-on-write for that column/block
|
735 |
+
df2.iloc[0, 0] = 0
|
736 |
+
if using_copy_on_write:
|
737 |
+
assert not np.shares_memory(get_array(df2, "x"), get_array(df, "a"))
|
738 |
+
tm.assert_frame_equal(df, df_orig)
|
739 |
+
|
740 |
+
|
741 |
+
def test_swapaxes_read_only_array():
|
742 |
+
df = DataFrame({"a": [1, 2], "b": 3})
|
743 |
+
msg = "'DataFrame.swapaxes' is deprecated"
|
744 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
745 |
+
df = df.swapaxes(axis1="index", axis2="columns")
|
746 |
+
df.iloc[0, 0] = 100
|
747 |
+
expected = DataFrame({0: [100, 3], 1: [2, 3]}, index=["a", "b"])
|
748 |
+
tm.assert_frame_equal(df, expected)
|
749 |
+
|
750 |
+
|
751 |
+
@pytest.mark.parametrize(
|
752 |
+
"method, idx",
|
753 |
+
[
|
754 |
+
(lambda df: df.copy(deep=False).copy(deep=False), 0),
|
755 |
+
(lambda df: df.reset_index().reset_index(), 2),
|
756 |
+
(lambda df: df.rename(columns=str.upper).rename(columns=str.lower), 0),
|
757 |
+
(lambda df: df.copy(deep=False).select_dtypes(include="number"), 0),
|
758 |
+
],
|
759 |
+
ids=["shallow-copy", "reset_index", "rename", "select_dtypes"],
|
760 |
+
)
|
761 |
+
def test_chained_methods(request, method, idx, using_copy_on_write, warn_copy_on_write):
|
762 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
763 |
+
df_orig = df.copy()
|
764 |
+
|
765 |
+
# when not using CoW, only the copy() variant actually gives a view
|
766 |
+
df2_is_view = not using_copy_on_write and request.node.callspec.id == "shallow-copy"
|
767 |
+
|
768 |
+
# modify df2 -> don't modify df
|
769 |
+
df2 = method(df)
|
770 |
+
with tm.assert_cow_warning(warn_copy_on_write and df2_is_view):
|
771 |
+
df2.iloc[0, idx] = 0
|
772 |
+
if not df2_is_view:
|
773 |
+
tm.assert_frame_equal(df, df_orig)
|
774 |
+
|
775 |
+
# modify df -> don't modify df2
|
776 |
+
df2 = method(df)
|
777 |
+
with tm.assert_cow_warning(warn_copy_on_write and df2_is_view):
|
778 |
+
df.iloc[0, 0] = 0
|
779 |
+
if not df2_is_view:
|
780 |
+
tm.assert_frame_equal(df2.iloc[:, idx:], df_orig)
|
781 |
+
|
782 |
+
|
783 |
+
@pytest.mark.parametrize("obj", [Series([1, 2], name="a"), DataFrame({"a": [1, 2]})])
|
784 |
+
def test_to_timestamp(using_copy_on_write, obj):
|
785 |
+
obj.index = Index([Period("2012-1-1", freq="D"), Period("2012-1-2", freq="D")])
|
786 |
+
|
787 |
+
obj_orig = obj.copy()
|
788 |
+
obj2 = obj.to_timestamp()
|
789 |
+
|
790 |
+
if using_copy_on_write:
|
791 |
+
assert np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))
|
792 |
+
else:
|
793 |
+
assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))
|
794 |
+
|
795 |
+
# mutating obj2 triggers a copy-on-write for that column / block
|
796 |
+
obj2.iloc[0] = 0
|
797 |
+
assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))
|
798 |
+
tm.assert_equal(obj, obj_orig)
|
799 |
+
|
800 |
+
|
801 |
+
@pytest.mark.parametrize("obj", [Series([1, 2], name="a"), DataFrame({"a": [1, 2]})])
|
802 |
+
def test_to_period(using_copy_on_write, obj):
|
803 |
+
obj.index = Index([Timestamp("2019-12-31"), Timestamp("2020-12-31")])
|
804 |
+
|
805 |
+
obj_orig = obj.copy()
|
806 |
+
obj2 = obj.to_period(freq="Y")
|
807 |
+
|
808 |
+
if using_copy_on_write:
|
809 |
+
assert np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))
|
810 |
+
else:
|
811 |
+
assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))
|
812 |
+
|
813 |
+
# mutating obj2 triggers a copy-on-write for that column / block
|
814 |
+
obj2.iloc[0] = 0
|
815 |
+
assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))
|
816 |
+
tm.assert_equal(obj, obj_orig)
|
817 |
+
|
818 |
+
|
819 |
+
def test_set_index(using_copy_on_write):
|
820 |
+
# GH 49473
|
821 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
822 |
+
df_orig = df.copy()
|
823 |
+
df2 = df.set_index("a")
|
824 |
+
|
825 |
+
if using_copy_on_write:
|
826 |
+
assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
827 |
+
else:
|
828 |
+
assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
829 |
+
|
830 |
+
# mutating df2 triggers a copy-on-write for that column / block
|
831 |
+
df2.iloc[0, 1] = 0
|
832 |
+
assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
|
833 |
+
tm.assert_frame_equal(df, df_orig)
|
834 |
+
|
835 |
+
|
836 |
+
def test_set_index_mutating_parent_does_not_mutate_index():
|
837 |
+
df = DataFrame({"a": [1, 2, 3], "b": 1})
|
838 |
+
result = df.set_index("a")
|
839 |
+
expected = result.copy()
|
840 |
+
|
841 |
+
df.iloc[0, 0] = 100
|
842 |
+
tm.assert_frame_equal(result, expected)
|
843 |
+
|
844 |
+
|
845 |
+
def test_add_prefix(using_copy_on_write):
|
846 |
+
# GH 49473
|
847 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
848 |
+
df_orig = df.copy()
|
849 |
+
df2 = df.add_prefix("CoW_")
|
850 |
+
|
851 |
+
if using_copy_on_write:
|
852 |
+
assert np.shares_memory(get_array(df2, "CoW_a"), get_array(df, "a"))
|
853 |
+
df2.iloc[0, 0] = 0
|
854 |
+
|
855 |
+
assert not np.shares_memory(get_array(df2, "CoW_a"), get_array(df, "a"))
|
856 |
+
|
857 |
+
if using_copy_on_write:
|
858 |
+
assert np.shares_memory(get_array(df2, "CoW_c"), get_array(df, "c"))
|
859 |
+
expected = DataFrame(
|
860 |
+
{"CoW_a": [0, 2, 3], "CoW_b": [4, 5, 6], "CoW_c": [0.1, 0.2, 0.3]}
|
861 |
+
)
|
862 |
+
tm.assert_frame_equal(df2, expected)
|
863 |
+
tm.assert_frame_equal(df, df_orig)
|
864 |
+
|
865 |
+
|
866 |
+
def test_add_suffix(using_copy_on_write):
|
867 |
+
# GH 49473
|
868 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
869 |
+
df_orig = df.copy()
|
870 |
+
df2 = df.add_suffix("_CoW")
|
871 |
+
if using_copy_on_write:
|
872 |
+
assert np.shares_memory(get_array(df2, "a_CoW"), get_array(df, "a"))
|
873 |
+
df2.iloc[0, 0] = 0
|
874 |
+
assert not np.shares_memory(get_array(df2, "a_CoW"), get_array(df, "a"))
|
875 |
+
if using_copy_on_write:
|
876 |
+
assert np.shares_memory(get_array(df2, "c_CoW"), get_array(df, "c"))
|
877 |
+
expected = DataFrame(
|
878 |
+
{"a_CoW": [0, 2, 3], "b_CoW": [4, 5, 6], "c_CoW": [0.1, 0.2, 0.3]}
|
879 |
+
)
|
880 |
+
tm.assert_frame_equal(df2, expected)
|
881 |
+
tm.assert_frame_equal(df, df_orig)
|
882 |
+
|
883 |
+
|
884 |
+
@pytest.mark.parametrize("axis, val", [(0, 5.5), (1, np.nan)])
|
885 |
+
def test_dropna(using_copy_on_write, axis, val):
|
886 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, val, 6], "c": "d"})
|
887 |
+
df_orig = df.copy()
|
888 |
+
df2 = df.dropna(axis=axis)
|
889 |
+
|
890 |
+
if using_copy_on_write:
|
891 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
892 |
+
else:
|
893 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
894 |
+
|
895 |
+
df2.iloc[0, 0] = 0
|
896 |
+
if using_copy_on_write:
|
897 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
898 |
+
tm.assert_frame_equal(df, df_orig)
|
899 |
+
|
900 |
+
|
901 |
+
@pytest.mark.parametrize("val", [5, 5.5])
|
902 |
+
def test_dropna_series(using_copy_on_write, val):
|
903 |
+
ser = Series([1, val, 4])
|
904 |
+
ser_orig = ser.copy()
|
905 |
+
ser2 = ser.dropna()
|
906 |
+
|
907 |
+
if using_copy_on_write:
|
908 |
+
assert np.shares_memory(ser2.values, ser.values)
|
909 |
+
else:
|
910 |
+
assert not np.shares_memory(ser2.values, ser.values)
|
911 |
+
|
912 |
+
ser2.iloc[0] = 0
|
913 |
+
if using_copy_on_write:
|
914 |
+
assert not np.shares_memory(ser2.values, ser.values)
|
915 |
+
tm.assert_series_equal(ser, ser_orig)
|
916 |
+
|
917 |
+
|
918 |
+
@pytest.mark.parametrize(
|
919 |
+
"method",
|
920 |
+
[
|
921 |
+
lambda df: df.head(),
|
922 |
+
lambda df: df.head(2),
|
923 |
+
lambda df: df.tail(),
|
924 |
+
lambda df: df.tail(3),
|
925 |
+
],
|
926 |
+
)
|
927 |
+
def test_head_tail(method, using_copy_on_write, warn_copy_on_write):
|
928 |
+
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
|
929 |
+
df_orig = df.copy()
|
930 |
+
df2 = method(df)
|
931 |
+
df2._mgr._verify_integrity()
|
932 |
+
|
933 |
+
if using_copy_on_write:
|
934 |
+
# We are explicitly deviating for CoW here to make an eager copy (avoids
|
935 |
+
# tracking references for very cheap ops)
|
936 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
937 |
+
assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
938 |
+
|
939 |
+
# modify df2 to trigger CoW for that block
|
940 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
941 |
+
df2.iloc[0, 0] = 0
|
942 |
+
if using_copy_on_write:
|
943 |
+
assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
944 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
945 |
+
else:
|
946 |
+
# without CoW enabled, head and tail return views. Mutating df2 also mutates df.
|
947 |
+
assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
948 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
949 |
+
df2.iloc[0, 0] = 1
|
950 |
+
tm.assert_frame_equal(df, df_orig)
|
951 |
+
|
952 |
+
|
953 |
+
def test_infer_objects(using_copy_on_write):
|
954 |
+
df = DataFrame({"a": [1, 2], "b": "c", "c": 1, "d": "x"})
|
955 |
+
df_orig = df.copy()
|
956 |
+
df2 = df.infer_objects()
|
957 |
+
|
958 |
+
if using_copy_on_write:
|
959 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
960 |
+
assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
961 |
+
|
962 |
+
else:
|
963 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
964 |
+
assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
965 |
+
|
966 |
+
df2.iloc[0, 0] = 0
|
967 |
+
df2.iloc[0, 1] = "d"
|
968 |
+
if using_copy_on_write:
|
969 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
970 |
+
assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
971 |
+
tm.assert_frame_equal(df, df_orig)
|
972 |
+
|
973 |
+
|
974 |
+
def test_infer_objects_no_reference(using_copy_on_write):
|
975 |
+
df = DataFrame(
|
976 |
+
{
|
977 |
+
"a": [1, 2],
|
978 |
+
"b": "c",
|
979 |
+
"c": 1,
|
980 |
+
"d": Series(
|
981 |
+
[Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype="object"
|
982 |
+
),
|
983 |
+
"e": "b",
|
984 |
+
}
|
985 |
+
)
|
986 |
+
df = df.infer_objects()
|
987 |
+
|
988 |
+
arr_a = get_array(df, "a")
|
989 |
+
arr_b = get_array(df, "b")
|
990 |
+
arr_d = get_array(df, "d")
|
991 |
+
|
992 |
+
df.iloc[0, 0] = 0
|
993 |
+
df.iloc[0, 1] = "d"
|
994 |
+
df.iloc[0, 3] = Timestamp("2018-12-31")
|
995 |
+
if using_copy_on_write:
|
996 |
+
assert np.shares_memory(arr_a, get_array(df, "a"))
|
997 |
+
# TODO(CoW): Block splitting causes references here
|
998 |
+
assert not np.shares_memory(arr_b, get_array(df, "b"))
|
999 |
+
assert np.shares_memory(arr_d, get_array(df, "d"))
|
1000 |
+
|
1001 |
+
|
1002 |
+
def test_infer_objects_reference(using_copy_on_write):
|
1003 |
+
df = DataFrame(
|
1004 |
+
{
|
1005 |
+
"a": [1, 2],
|
1006 |
+
"b": "c",
|
1007 |
+
"c": 1,
|
1008 |
+
"d": Series(
|
1009 |
+
[Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype="object"
|
1010 |
+
),
|
1011 |
+
}
|
1012 |
+
)
|
1013 |
+
view = df[:] # noqa: F841
|
1014 |
+
df = df.infer_objects()
|
1015 |
+
|
1016 |
+
arr_a = get_array(df, "a")
|
1017 |
+
arr_b = get_array(df, "b")
|
1018 |
+
arr_d = get_array(df, "d")
|
1019 |
+
|
1020 |
+
df.iloc[0, 0] = 0
|
1021 |
+
df.iloc[0, 1] = "d"
|
1022 |
+
df.iloc[0, 3] = Timestamp("2018-12-31")
|
1023 |
+
if using_copy_on_write:
|
1024 |
+
assert not np.shares_memory(arr_a, get_array(df, "a"))
|
1025 |
+
assert not np.shares_memory(arr_b, get_array(df, "b"))
|
1026 |
+
assert np.shares_memory(arr_d, get_array(df, "d"))
|
1027 |
+
|
1028 |
+
|
1029 |
+
@pytest.mark.parametrize(
|
1030 |
+
"kwargs",
|
1031 |
+
[
|
1032 |
+
{"before": "a", "after": "b", "axis": 1},
|
1033 |
+
{"before": 0, "after": 1, "axis": 0},
|
1034 |
+
],
|
1035 |
+
)
|
1036 |
+
def test_truncate(using_copy_on_write, kwargs):
|
1037 |
+
df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 2})
|
1038 |
+
df_orig = df.copy()
|
1039 |
+
df2 = df.truncate(**kwargs)
|
1040 |
+
df2._mgr._verify_integrity()
|
1041 |
+
|
1042 |
+
if using_copy_on_write:
|
1043 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1044 |
+
else:
|
1045 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1046 |
+
|
1047 |
+
df2.iloc[0, 0] = 0
|
1048 |
+
if using_copy_on_write:
|
1049 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1050 |
+
tm.assert_frame_equal(df, df_orig)
|
1051 |
+
|
1052 |
+
|
1053 |
+
@pytest.mark.parametrize("method", ["assign", "drop_duplicates"])
|
1054 |
+
def test_assign_drop_duplicates(using_copy_on_write, method):
|
1055 |
+
df = DataFrame({"a": [1, 2, 3]})
|
1056 |
+
df_orig = df.copy()
|
1057 |
+
df2 = getattr(df, method)()
|
1058 |
+
df2._mgr._verify_integrity()
|
1059 |
+
|
1060 |
+
if using_copy_on_write:
|
1061 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1062 |
+
else:
|
1063 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1064 |
+
|
1065 |
+
df2.iloc[0, 0] = 0
|
1066 |
+
if using_copy_on_write:
|
1067 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1068 |
+
tm.assert_frame_equal(df, df_orig)
|
1069 |
+
|
1070 |
+
|
1071 |
+
@pytest.mark.parametrize("obj", [Series([1, 2]), DataFrame({"a": [1, 2]})])
|
1072 |
+
def test_take(using_copy_on_write, obj):
|
1073 |
+
# Check that no copy is made when we take all rows in original order
|
1074 |
+
obj_orig = obj.copy()
|
1075 |
+
obj2 = obj.take([0, 1])
|
1076 |
+
|
1077 |
+
if using_copy_on_write:
|
1078 |
+
assert np.shares_memory(obj2.values, obj.values)
|
1079 |
+
else:
|
1080 |
+
assert not np.shares_memory(obj2.values, obj.values)
|
1081 |
+
|
1082 |
+
obj2.iloc[0] = 0
|
1083 |
+
if using_copy_on_write:
|
1084 |
+
assert not np.shares_memory(obj2.values, obj.values)
|
1085 |
+
tm.assert_equal(obj, obj_orig)
|
1086 |
+
|
1087 |
+
|
1088 |
+
@pytest.mark.parametrize("obj", [Series([1, 2]), DataFrame({"a": [1, 2]})])
|
1089 |
+
def test_between_time(using_copy_on_write, obj):
|
1090 |
+
obj.index = date_range("2018-04-09", periods=2, freq="1D20min")
|
1091 |
+
obj_orig = obj.copy()
|
1092 |
+
obj2 = obj.between_time("0:00", "1:00")
|
1093 |
+
|
1094 |
+
if using_copy_on_write:
|
1095 |
+
assert np.shares_memory(obj2.values, obj.values)
|
1096 |
+
else:
|
1097 |
+
assert not np.shares_memory(obj2.values, obj.values)
|
1098 |
+
|
1099 |
+
obj2.iloc[0] = 0
|
1100 |
+
if using_copy_on_write:
|
1101 |
+
assert not np.shares_memory(obj2.values, obj.values)
|
1102 |
+
tm.assert_equal(obj, obj_orig)
|
1103 |
+
|
1104 |
+
|
1105 |
+
def test_reindex_like(using_copy_on_write):
|
1106 |
+
df = DataFrame({"a": [1, 2], "b": "a"})
|
1107 |
+
other = DataFrame({"b": "a", "a": [1, 2]})
|
1108 |
+
|
1109 |
+
df_orig = df.copy()
|
1110 |
+
df2 = df.reindex_like(other)
|
1111 |
+
|
1112 |
+
if using_copy_on_write:
|
1113 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1114 |
+
else:
|
1115 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1116 |
+
|
1117 |
+
df2.iloc[0, 1] = 0
|
1118 |
+
if using_copy_on_write:
|
1119 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1120 |
+
tm.assert_frame_equal(df, df_orig)
|
1121 |
+
|
1122 |
+
|
1123 |
+
def test_sort_index(using_copy_on_write):
|
1124 |
+
# GH 49473
|
1125 |
+
ser = Series([1, 2, 3])
|
1126 |
+
ser_orig = ser.copy()
|
1127 |
+
ser2 = ser.sort_index()
|
1128 |
+
|
1129 |
+
if using_copy_on_write:
|
1130 |
+
assert np.shares_memory(ser.values, ser2.values)
|
1131 |
+
else:
|
1132 |
+
assert not np.shares_memory(ser.values, ser2.values)
|
1133 |
+
|
1134 |
+
    # mutating ser2 triggers a copy-on-write for the column / block
|
1135 |
+
ser2.iloc[0] = 0
|
1136 |
+
assert not np.shares_memory(ser2.values, ser.values)
|
1137 |
+
tm.assert_series_equal(ser, ser_orig)
|
1138 |
+
|
1139 |
+
|
1140 |
+
@pytest.mark.parametrize(
|
1141 |
+
"obj, kwargs",
|
1142 |
+
[(Series([1, 2, 3], name="a"), {}), (DataFrame({"a": [1, 2, 3]}), {"by": "a"})],
|
1143 |
+
)
|
1144 |
+
def test_sort_values(using_copy_on_write, obj, kwargs):
|
1145 |
+
obj_orig = obj.copy()
|
1146 |
+
obj2 = obj.sort_values(**kwargs)
|
1147 |
+
|
1148 |
+
if using_copy_on_write:
|
1149 |
+
assert np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))
|
1150 |
+
else:
|
1151 |
+
assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))
|
1152 |
+
|
1153 |
+
    # mutating obj2 triggers a copy-on-write for the column / block
|
1154 |
+
obj2.iloc[0] = 0
|
1155 |
+
assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))
|
1156 |
+
tm.assert_equal(obj, obj_orig)
|
1157 |
+
|
1158 |
+
|
1159 |
+
@pytest.mark.parametrize(
|
1160 |
+
"obj, kwargs",
|
1161 |
+
[(Series([1, 2, 3], name="a"), {}), (DataFrame({"a": [1, 2, 3]}), {"by": "a"})],
|
1162 |
+
)
|
1163 |
+
def test_sort_values_inplace(using_copy_on_write, obj, kwargs, warn_copy_on_write):
|
1164 |
+
obj_orig = obj.copy()
|
1165 |
+
view = obj[:]
|
1166 |
+
obj.sort_values(inplace=True, **kwargs)
|
1167 |
+
|
1168 |
+
assert np.shares_memory(get_array(obj, "a"), get_array(view, "a"))
|
1169 |
+
|
1170 |
+
# mutating obj triggers a copy-on-write for the column / block
|
1171 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1172 |
+
obj.iloc[0] = 0
|
1173 |
+
if using_copy_on_write:
|
1174 |
+
assert not np.shares_memory(get_array(obj, "a"), get_array(view, "a"))
|
1175 |
+
tm.assert_equal(view, obj_orig)
|
1176 |
+
else:
|
1177 |
+
assert np.shares_memory(get_array(obj, "a"), get_array(view, "a"))
|
1178 |
+
|
1179 |
+
|
1180 |
+
@pytest.mark.parametrize("decimals", [-1, 0, 1])
|
1181 |
+
def test_round(using_copy_on_write, warn_copy_on_write, decimals):
|
1182 |
+
df = DataFrame({"a": [1, 2], "b": "c"})
|
1183 |
+
df_orig = df.copy()
|
1184 |
+
df2 = df.round(decimals=decimals)
|
1185 |
+
|
1186 |
+
if using_copy_on_write:
|
1187 |
+
assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
1188 |
+
# TODO: Make inplace by using out parameter of ndarray.round?
|
1189 |
+
if decimals >= 0:
|
1190 |
+
# Ensure lazy copy if no-op
|
1191 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1192 |
+
else:
|
1193 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1194 |
+
else:
|
1195 |
+
assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
1196 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1197 |
+
|
1198 |
+
df2.iloc[0, 1] = "d"
|
1199 |
+
df2.iloc[0, 0] = 4
|
1200 |
+
if using_copy_on_write:
|
1201 |
+
assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
1202 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1203 |
+
tm.assert_frame_equal(df, df_orig)
|
1204 |
+
|
1205 |
+
|
1206 |
+
def test_reorder_levels(using_copy_on_write):
|
1207 |
+
index = MultiIndex.from_tuples(
|
1208 |
+
[(1, 1), (1, 2), (2, 1), (2, 2)], names=["one", "two"]
|
1209 |
+
)
|
1210 |
+
df = DataFrame({"a": [1, 2, 3, 4]}, index=index)
|
1211 |
+
df_orig = df.copy()
|
1212 |
+
df2 = df.reorder_levels(order=["two", "one"])
|
1213 |
+
|
1214 |
+
if using_copy_on_write:
|
1215 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1216 |
+
else:
|
1217 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1218 |
+
|
1219 |
+
df2.iloc[0, 0] = 0
|
1220 |
+
if using_copy_on_write:
|
1221 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1222 |
+
tm.assert_frame_equal(df, df_orig)
|
1223 |
+
|
1224 |
+
|
1225 |
+
def test_series_reorder_levels(using_copy_on_write):
|
1226 |
+
index = MultiIndex.from_tuples(
|
1227 |
+
[(1, 1), (1, 2), (2, 1), (2, 2)], names=["one", "two"]
|
1228 |
+
)
|
1229 |
+
ser = Series([1, 2, 3, 4], index=index)
|
1230 |
+
ser_orig = ser.copy()
|
1231 |
+
ser2 = ser.reorder_levels(order=["two", "one"])
|
1232 |
+
|
1233 |
+
if using_copy_on_write:
|
1234 |
+
assert np.shares_memory(ser2.values, ser.values)
|
1235 |
+
else:
|
1236 |
+
assert not np.shares_memory(ser2.values, ser.values)
|
1237 |
+
|
1238 |
+
ser2.iloc[0] = 0
|
1239 |
+
if using_copy_on_write:
|
1240 |
+
assert not np.shares_memory(ser2.values, ser.values)
|
1241 |
+
tm.assert_series_equal(ser, ser_orig)
|
1242 |
+
|
1243 |
+
|
1244 |
+
@pytest.mark.parametrize("obj", [Series([1, 2, 3]), DataFrame({"a": [1, 2, 3]})])
|
1245 |
+
def test_swaplevel(using_copy_on_write, obj):
|
1246 |
+
index = MultiIndex.from_tuples([(1, 1), (1, 2), (2, 1)], names=["one", "two"])
|
1247 |
+
obj.index = index
|
1248 |
+
obj_orig = obj.copy()
|
1249 |
+
obj2 = obj.swaplevel()
|
1250 |
+
|
1251 |
+
if using_copy_on_write:
|
1252 |
+
assert np.shares_memory(obj2.values, obj.values)
|
1253 |
+
else:
|
1254 |
+
assert not np.shares_memory(obj2.values, obj.values)
|
1255 |
+
|
1256 |
+
obj2.iloc[0] = 0
|
1257 |
+
if using_copy_on_write:
|
1258 |
+
assert not np.shares_memory(obj2.values, obj.values)
|
1259 |
+
tm.assert_equal(obj, obj_orig)
|
1260 |
+
|
1261 |
+
|
1262 |
+
def test_frame_set_axis(using_copy_on_write):
|
1263 |
+
# GH 49473
|
1264 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})
|
1265 |
+
df_orig = df.copy()
|
1266 |
+
df2 = df.set_axis(["a", "b", "c"], axis="index")
|
1267 |
+
|
1268 |
+
if using_copy_on_write:
|
1269 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1270 |
+
else:
|
1271 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1272 |
+
|
1273 |
+
# mutating df2 triggers a copy-on-write for that column / block
|
1274 |
+
df2.iloc[0, 0] = 0
|
1275 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1276 |
+
tm.assert_frame_equal(df, df_orig)
|
1277 |
+
|
1278 |
+
|
1279 |
+
def test_series_set_axis(using_copy_on_write):
|
1280 |
+
# GH 49473
|
1281 |
+
ser = Series([1, 2, 3])
|
1282 |
+
ser_orig = ser.copy()
|
1283 |
+
ser2 = ser.set_axis(["a", "b", "c"], axis="index")
|
1284 |
+
|
1285 |
+
if using_copy_on_write:
|
1286 |
+
assert np.shares_memory(ser, ser2)
|
1287 |
+
else:
|
1288 |
+
assert not np.shares_memory(ser, ser2)
|
1289 |
+
|
1290 |
+
    # mutating ser2 triggers a copy-on-write for the column / block
|
1291 |
+
ser2.iloc[0] = 0
|
1292 |
+
assert not np.shares_memory(ser2, ser)
|
1293 |
+
tm.assert_series_equal(ser, ser_orig)
|
1294 |
+
|
1295 |
+
|
1296 |
+
def test_set_flags(using_copy_on_write, warn_copy_on_write):
|
1297 |
+
ser = Series([1, 2, 3])
|
1298 |
+
ser_orig = ser.copy()
|
1299 |
+
ser2 = ser.set_flags(allows_duplicate_labels=False)
|
1300 |
+
|
1301 |
+
assert np.shares_memory(ser, ser2)
|
1302 |
+
|
1303 |
+
    # mutating ser2 triggers a copy-on-write for the column / block
|
1304 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1305 |
+
ser2.iloc[0] = 0
|
1306 |
+
if using_copy_on_write:
|
1307 |
+
assert not np.shares_memory(ser2, ser)
|
1308 |
+
tm.assert_series_equal(ser, ser_orig)
|
1309 |
+
else:
|
1310 |
+
assert np.shares_memory(ser2, ser)
|
1311 |
+
expected = Series([0, 2, 3])
|
1312 |
+
tm.assert_series_equal(ser, expected)
|
1313 |
+
|
1314 |
+
|
1315 |
+
@pytest.mark.parametrize("kwargs", [{"mapper": "test"}, {"index": "test"}])
|
1316 |
+
def test_rename_axis(using_copy_on_write, kwargs):
|
1317 |
+
df = DataFrame({"a": [1, 2, 3, 4]}, index=Index([1, 2, 3, 4], name="a"))
|
1318 |
+
df_orig = df.copy()
|
1319 |
+
df2 = df.rename_axis(**kwargs)
|
1320 |
+
|
1321 |
+
if using_copy_on_write:
|
1322 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1323 |
+
else:
|
1324 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1325 |
+
|
1326 |
+
df2.iloc[0, 0] = 0
|
1327 |
+
if using_copy_on_write:
|
1328 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1329 |
+
tm.assert_frame_equal(df, df_orig)
|
1330 |
+
|
1331 |
+
|
1332 |
+
@pytest.mark.parametrize(
|
1333 |
+
"func, tz", [("tz_convert", "Europe/Berlin"), ("tz_localize", None)]
|
1334 |
+
)
|
1335 |
+
def test_tz_convert_localize(using_copy_on_write, func, tz):
|
1336 |
+
# GH 49473
|
1337 |
+
ser = Series(
|
1338 |
+
[1, 2], index=date_range(start="2014-08-01 09:00", freq="h", periods=2, tz=tz)
|
1339 |
+
)
|
1340 |
+
ser_orig = ser.copy()
|
1341 |
+
ser2 = getattr(ser, func)("US/Central")
|
1342 |
+
|
1343 |
+
if using_copy_on_write:
|
1344 |
+
assert np.shares_memory(ser.values, ser2.values)
|
1345 |
+
else:
|
1346 |
+
assert not np.shares_memory(ser.values, ser2.values)
|
1347 |
+
|
1348 |
+
    # mutating ser2 triggers a copy-on-write for the column / block
|
1349 |
+
ser2.iloc[0] = 0
|
1350 |
+
assert not np.shares_memory(ser2.values, ser.values)
|
1351 |
+
tm.assert_series_equal(ser, ser_orig)
|
1352 |
+
|
1353 |
+
|
1354 |
+
def test_droplevel(using_copy_on_write):
|
1355 |
+
# GH 49473
|
1356 |
+
index = MultiIndex.from_tuples([(1, 1), (1, 2), (2, 1)], names=["one", "two"])
|
1357 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}, index=index)
|
1358 |
+
df_orig = df.copy()
|
1359 |
+
df2 = df.droplevel(0)
|
1360 |
+
|
1361 |
+
if using_copy_on_write:
|
1362 |
+
assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
|
1363 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1364 |
+
else:
|
1365 |
+
assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))
|
1366 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1367 |
+
|
1368 |
+
# mutating df2 triggers a copy-on-write for that column / block
|
1369 |
+
df2.iloc[0, 0] = 0
|
1370 |
+
|
1371 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1372 |
+
if using_copy_on_write:
|
1373 |
+
assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))
|
1374 |
+
|
1375 |
+
tm.assert_frame_equal(df, df_orig)
|
1376 |
+
|
1377 |
+
|
1378 |
+
def test_squeeze(using_copy_on_write, warn_copy_on_write):
|
1379 |
+
df = DataFrame({"a": [1, 2, 3]})
|
1380 |
+
df_orig = df.copy()
|
1381 |
+
series = df.squeeze()
|
1382 |
+
|
1383 |
+
# Should share memory regardless of CoW since squeeze is just an iloc
|
1384 |
+
assert np.shares_memory(series.values, get_array(df, "a"))
|
1385 |
+
|
1386 |
+
    # mutating the squeezed series triggers a copy-on-write for that column/block
|
1387 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1388 |
+
series.iloc[0] = 0
|
1389 |
+
if using_copy_on_write:
|
1390 |
+
assert not np.shares_memory(series.values, get_array(df, "a"))
|
1391 |
+
tm.assert_frame_equal(df, df_orig)
|
1392 |
+
else:
|
1393 |
+
# Without CoW the original will be modified
|
1394 |
+
assert np.shares_memory(series.values, get_array(df, "a"))
|
1395 |
+
assert df.loc[0, "a"] == 0
|
1396 |
+
|
1397 |
+
|
1398 |
+
def test_items(using_copy_on_write, warn_copy_on_write):
|
1399 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
|
1400 |
+
df_orig = df.copy()
|
1401 |
+
|
1402 |
+
# Test this twice, since the second time, the item cache will be
|
1403 |
+
# triggered, and we want to make sure it still works then.
|
1404 |
+
for i in range(2):
|
1405 |
+
for name, ser in df.items():
|
1406 |
+
assert np.shares_memory(get_array(ser, name), get_array(df, name))
|
1407 |
+
|
1408 |
+
            # mutating the column's Series triggers a copy-on-write for that column / block
|
1409 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1410 |
+
ser.iloc[0] = 0
|
1411 |
+
|
1412 |
+
if using_copy_on_write:
|
1413 |
+
assert not np.shares_memory(get_array(ser, name), get_array(df, name))
|
1414 |
+
tm.assert_frame_equal(df, df_orig)
|
1415 |
+
else:
|
1416 |
+
# Original frame will be modified
|
1417 |
+
assert df.loc[0, name] == 0
|
1418 |
+
|
1419 |
+
|
1420 |
+
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
|
1421 |
+
def test_putmask(using_copy_on_write, dtype, warn_copy_on_write):
|
1422 |
+
df = DataFrame({"a": [1, 2], "b": 1, "c": 2}, dtype=dtype)
|
1423 |
+
view = df[:]
|
1424 |
+
df_orig = df.copy()
|
1425 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1426 |
+
df[df == df] = 5
|
1427 |
+
|
1428 |
+
if using_copy_on_write:
|
1429 |
+
assert not np.shares_memory(get_array(view, "a"), get_array(df, "a"))
|
1430 |
+
tm.assert_frame_equal(view, df_orig)
|
1431 |
+
else:
|
1432 |
+
# Without CoW the original will be modified
|
1433 |
+
assert np.shares_memory(get_array(view, "a"), get_array(df, "a"))
|
1434 |
+
assert view.iloc[0, 0] == 5
|
1435 |
+
|
1436 |
+
|
1437 |
+
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
|
1438 |
+
def test_putmask_no_reference(using_copy_on_write, dtype):
|
1439 |
+
df = DataFrame({"a": [1, 2], "b": 1, "c": 2}, dtype=dtype)
|
1440 |
+
arr_a = get_array(df, "a")
|
1441 |
+
df[df == df] = 5
|
1442 |
+
|
1443 |
+
if using_copy_on_write:
|
1444 |
+
assert np.shares_memory(arr_a, get_array(df, "a"))
|
1445 |
+
|
1446 |
+
|
1447 |
+
@pytest.mark.parametrize("dtype", ["float64", "Float64"])
|
1448 |
+
def test_putmask_aligns_rhs_no_reference(using_copy_on_write, dtype):
|
1449 |
+
df = DataFrame({"a": [1.5, 2], "b": 1.5}, dtype=dtype)
|
1450 |
+
arr_a = get_array(df, "a")
|
1451 |
+
df[df == df] = DataFrame({"a": [5.5, 5]})
|
1452 |
+
|
1453 |
+
if using_copy_on_write:
|
1454 |
+
assert np.shares_memory(arr_a, get_array(df, "a"))
|
1455 |
+
|
1456 |
+
|
1457 |
+
@pytest.mark.parametrize(
|
1458 |
+
"val, exp, warn", [(5.5, True, FutureWarning), (5, False, None)]
|
1459 |
+
)
|
1460 |
+
def test_putmask_dont_copy_some_blocks(
|
1461 |
+
using_copy_on_write, val, exp, warn, warn_copy_on_write
|
1462 |
+
):
|
1463 |
+
df = DataFrame({"a": [1, 2], "b": 1, "c": 1.5})
|
1464 |
+
view = df[:]
|
1465 |
+
df_orig = df.copy()
|
1466 |
+
indexer = DataFrame(
|
1467 |
+
[[True, False, False], [True, False, False]], columns=list("abc")
|
1468 |
+
)
|
1469 |
+
if warn_copy_on_write:
|
1470 |
+
with tm.assert_cow_warning():
|
1471 |
+
df[indexer] = val
|
1472 |
+
else:
|
1473 |
+
with tm.assert_produces_warning(warn, match="incompatible dtype"):
|
1474 |
+
df[indexer] = val
|
1475 |
+
|
1476 |
+
if using_copy_on_write:
|
1477 |
+
assert not np.shares_memory(get_array(view, "a"), get_array(df, "a"))
|
1478 |
+
# TODO(CoW): Could split blocks to avoid copying the whole block
|
1479 |
+
assert np.shares_memory(get_array(view, "b"), get_array(df, "b")) is exp
|
1480 |
+
assert np.shares_memory(get_array(view, "c"), get_array(df, "c"))
|
1481 |
+
assert df._mgr._has_no_reference(1) is not exp
|
1482 |
+
assert not df._mgr._has_no_reference(2)
|
1483 |
+
tm.assert_frame_equal(view, df_orig)
|
1484 |
+
elif val == 5:
|
1485 |
+
# Without CoW the original will be modified, the other case upcasts, e.g. copy
|
1486 |
+
assert np.shares_memory(get_array(view, "a"), get_array(df, "a"))
|
1487 |
+
assert np.shares_memory(get_array(view, "c"), get_array(df, "c"))
|
1488 |
+
assert view.iloc[0, 0] == 5
|
1489 |
+
|
1490 |
+
|
1491 |
+
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
|
1492 |
+
@pytest.mark.parametrize(
|
1493 |
+
"func",
|
1494 |
+
[
|
1495 |
+
lambda ser: ser.where(ser > 0, 10),
|
1496 |
+
lambda ser: ser.mask(ser <= 0, 10),
|
1497 |
+
],
|
1498 |
+
)
|
1499 |
+
def test_where_mask_noop(using_copy_on_write, dtype, func):
|
1500 |
+
ser = Series([1, 2, 3], dtype=dtype)
|
1501 |
+
ser_orig = ser.copy()
|
1502 |
+
|
1503 |
+
result = func(ser)
|
1504 |
+
|
1505 |
+
if using_copy_on_write:
|
1506 |
+
assert np.shares_memory(get_array(ser), get_array(result))
|
1507 |
+
else:
|
1508 |
+
assert not np.shares_memory(get_array(ser), get_array(result))
|
1509 |
+
|
1510 |
+
result.iloc[0] = 10
|
1511 |
+
if using_copy_on_write:
|
1512 |
+
assert not np.shares_memory(get_array(ser), get_array(result))
|
1513 |
+
tm.assert_series_equal(ser, ser_orig)
|
1514 |
+
|
1515 |
+
|
1516 |
+
@pytest.mark.parametrize("dtype", ["int64", "Int64"])
|
1517 |
+
@pytest.mark.parametrize(
|
1518 |
+
"func",
|
1519 |
+
[
|
1520 |
+
lambda ser: ser.where(ser < 0, 10),
|
1521 |
+
lambda ser: ser.mask(ser >= 0, 10),
|
1522 |
+
],
|
1523 |
+
)
|
1524 |
+
def test_where_mask(using_copy_on_write, dtype, func):
|
1525 |
+
ser = Series([1, 2, 3], dtype=dtype)
|
1526 |
+
ser_orig = ser.copy()
|
1527 |
+
|
1528 |
+
result = func(ser)
|
1529 |
+
|
1530 |
+
assert not np.shares_memory(get_array(ser), get_array(result))
|
1531 |
+
tm.assert_series_equal(ser, ser_orig)
|
1532 |
+
|
1533 |
+
|
1534 |
+
@pytest.mark.parametrize("dtype, val", [("int64", 10.5), ("Int64", 10)])
|
1535 |
+
@pytest.mark.parametrize(
|
1536 |
+
"func",
|
1537 |
+
[
|
1538 |
+
lambda df, val: df.where(df < 0, val),
|
1539 |
+
lambda df, val: df.mask(df >= 0, val),
|
1540 |
+
],
|
1541 |
+
)
|
1542 |
+
def test_where_mask_noop_on_single_column(using_copy_on_write, dtype, val, func):
|
1543 |
+
df = DataFrame({"a": [1, 2, 3], "b": [-4, -5, -6]}, dtype=dtype)
|
1544 |
+
df_orig = df.copy()
|
1545 |
+
|
1546 |
+
result = func(df, val)
|
1547 |
+
|
1548 |
+
if using_copy_on_write:
|
1549 |
+
assert np.shares_memory(get_array(df, "b"), get_array(result, "b"))
|
1550 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))
|
1551 |
+
else:
|
1552 |
+
assert not np.shares_memory(get_array(df, "b"), get_array(result, "b"))
|
1553 |
+
|
1554 |
+
result.iloc[0, 1] = 10
|
1555 |
+
if using_copy_on_write:
|
1556 |
+
assert not np.shares_memory(get_array(df, "b"), get_array(result, "b"))
|
1557 |
+
tm.assert_frame_equal(df, df_orig)
|
1558 |
+
|
1559 |
+
|
1560 |
+
@pytest.mark.parametrize("func", ["mask", "where"])
|
1561 |
+
def test_chained_where_mask(using_copy_on_write, func):
|
1562 |
+
df = DataFrame({"a": [1, 4, 2], "b": 1})
|
1563 |
+
df_orig = df.copy()
|
1564 |
+
if using_copy_on_write:
|
1565 |
+
with tm.raises_chained_assignment_error():
|
1566 |
+
getattr(df["a"], func)(df["a"] > 2, 5, inplace=True)
|
1567 |
+
tm.assert_frame_equal(df, df_orig)
|
1568 |
+
|
1569 |
+
with tm.raises_chained_assignment_error():
|
1570 |
+
getattr(df[["a"]], func)(df["a"] > 2, 5, inplace=True)
|
1571 |
+
tm.assert_frame_equal(df, df_orig)
|
1572 |
+
else:
|
1573 |
+
with tm.assert_produces_warning(FutureWarning, match="inplace method"):
|
1574 |
+
getattr(df["a"], func)(df["a"] > 2, 5, inplace=True)
|
1575 |
+
|
1576 |
+
with tm.assert_produces_warning(None):
|
1577 |
+
with option_context("mode.chained_assignment", None):
|
1578 |
+
getattr(df[["a"]], func)(df["a"] > 2, 5, inplace=True)
|
1579 |
+
|
1580 |
+
with tm.assert_produces_warning(None):
|
1581 |
+
with option_context("mode.chained_assignment", None):
|
1582 |
+
getattr(df[df["a"] > 1], func)(df["a"] > 2, 5, inplace=True)
|
1583 |
+
|
1584 |
+
|
1585 |
+
def test_asfreq_noop(using_copy_on_write):
|
1586 |
+
df = DataFrame(
|
1587 |
+
{"a": [0.0, None, 2.0, 3.0]},
|
1588 |
+
index=date_range("1/1/2000", periods=4, freq="min"),
|
1589 |
+
)
|
1590 |
+
df_orig = df.copy()
|
1591 |
+
df2 = df.asfreq(freq="min")
|
1592 |
+
|
1593 |
+
if using_copy_on_write:
|
1594 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1595 |
+
else:
|
1596 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1597 |
+
|
1598 |
+
# mutating df2 triggers a copy-on-write for that column / block
|
1599 |
+
df2.iloc[0, 0] = 0
|
1600 |
+
|
1601 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
1602 |
+
tm.assert_frame_equal(df, df_orig)
|
1603 |
+
|
1604 |
+
|
1605 |
+
def test_iterrows(using_copy_on_write):
|
1606 |
+
df = DataFrame({"a": 0, "b": 1}, index=[1, 2, 3])
|
1607 |
+
df_orig = df.copy()
|
1608 |
+
|
1609 |
+
for _, sub in df.iterrows():
|
1610 |
+
sub.iloc[0] = 100
|
1611 |
+
if using_copy_on_write:
|
1612 |
+
tm.assert_frame_equal(df, df_orig)
|
1613 |
+
|
1614 |
+
|
1615 |
+
def test_interpolate_creates_copy(using_copy_on_write, warn_copy_on_write):
|
1616 |
+
# GH#51126
|
1617 |
+
df = DataFrame({"a": [1.5, np.nan, 3]})
|
1618 |
+
view = df[:]
|
1619 |
+
expected = df.copy()
|
1620 |
+
|
1621 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1622 |
+
df.ffill(inplace=True)
|
1623 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1624 |
+
df.iloc[0, 0] = 100.5
|
1625 |
+
|
1626 |
+
if using_copy_on_write:
|
1627 |
+
tm.assert_frame_equal(view, expected)
|
1628 |
+
else:
|
1629 |
+
expected = DataFrame({"a": [100.5, 1.5, 3]})
|
1630 |
+
tm.assert_frame_equal(view, expected)
|
1631 |
+
|
1632 |
+
|
1633 |
+
def test_isetitem(using_copy_on_write):
|
1634 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})
|
1635 |
+
df_orig = df.copy()
|
1636 |
+
df2 = df.copy(deep=None) # Trigger a CoW
|
1637 |
+
df2.isetitem(1, np.array([-1, -2, -3])) # This is inplace
|
1638 |
+
|
1639 |
+
if using_copy_on_write:
|
1640 |
+
assert np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
|
1641 |
+
assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
1642 |
+
else:
|
1643 |
+
assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
|
1644 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
1645 |
+
|
1646 |
+
df2.loc[0, "a"] = 0
|
1647 |
+
tm.assert_frame_equal(df, df_orig) # Original is unchanged
|
1648 |
+
|
1649 |
+
if using_copy_on_write:
|
1650 |
+
assert np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
|
1651 |
+
else:
|
1652 |
+
assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
|
1653 |
+
|
1654 |
+
|
1655 |
+
@pytest.mark.parametrize(
|
1656 |
+
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
|
1657 |
+
)
|
1658 |
+
def test_isetitem_series(using_copy_on_write, dtype):
|
1659 |
+
df = DataFrame({"a": [1, 2, 3], "b": np.array([4, 5, 6], dtype=dtype)})
|
1660 |
+
ser = Series([7, 8, 9])
|
1661 |
+
ser_orig = ser.copy()
|
1662 |
+
df.isetitem(0, ser)
|
1663 |
+
|
1664 |
+
if using_copy_on_write:
|
1665 |
+
assert np.shares_memory(get_array(df, "a"), get_array(ser))
|
1666 |
+
assert not df._mgr._has_no_reference(0)
|
1667 |
+
|
1668 |
+
# mutating dataframe doesn't update series
|
1669 |
+
df.loc[0, "a"] = 0
|
1670 |
+
tm.assert_series_equal(ser, ser_orig)
|
1671 |
+
|
1672 |
+
# mutating series doesn't update dataframe
|
1673 |
+
df = DataFrame({"a": [1, 2, 3], "b": np.array([4, 5, 6], dtype=dtype)})
|
1674 |
+
ser = Series([7, 8, 9])
|
1675 |
+
df.isetitem(0, ser)
|
1676 |
+
|
1677 |
+
ser.loc[0] = 0
|
1678 |
+
expected = DataFrame({"a": [7, 8, 9], "b": np.array([4, 5, 6], dtype=dtype)})
|
1679 |
+
tm.assert_frame_equal(df, expected)
|
1680 |
+
|
1681 |
+
|
1682 |
+
def test_isetitem_frame(using_copy_on_write):
|
1683 |
+
df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 2})
|
1684 |
+
rhs = DataFrame({"a": [4, 5, 6], "b": 2})
|
1685 |
+
df.isetitem([0, 1], rhs)
|
1686 |
+
if using_copy_on_write:
|
1687 |
+
assert np.shares_memory(get_array(df, "a"), get_array(rhs, "a"))
|
1688 |
+
assert np.shares_memory(get_array(df, "b"), get_array(rhs, "b"))
|
1689 |
+
assert not df._mgr._has_no_reference(0)
|
1690 |
+
else:
|
1691 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(rhs, "a"))
|
1692 |
+
assert not np.shares_memory(get_array(df, "b"), get_array(rhs, "b"))
|
1693 |
+
expected = df.copy()
|
1694 |
+
rhs.iloc[0, 0] = 100
|
1695 |
+
rhs.iloc[0, 1] = 100
|
1696 |
+
tm.assert_frame_equal(df, expected)
|
1697 |
+
|
1698 |
+
|
1699 |
+
@pytest.mark.parametrize("key", ["a", ["a"]])
|
1700 |
+
def test_get(using_copy_on_write, warn_copy_on_write, key):
|
1701 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
1702 |
+
df_orig = df.copy()
|
1703 |
+
|
1704 |
+
result = df.get(key)
|
1705 |
+
|
1706 |
+
if using_copy_on_write:
|
1707 |
+
assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
|
1708 |
+
result.iloc[0] = 0
|
1709 |
+
assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
|
1710 |
+
tm.assert_frame_equal(df, df_orig)
|
1711 |
+
else:
|
1712 |
+
        # for non-CoW, whether we get a view or a copy (and whether a warning
        # is raised) depends on whether the result is a Series or a DataFrame
|
1714 |
+
if warn_copy_on_write:
|
1715 |
+
warn = FutureWarning if isinstance(key, str) else None
|
1716 |
+
else:
|
1717 |
+
warn = SettingWithCopyWarning if isinstance(key, list) else None
|
1718 |
+
with option_context("chained_assignment", "warn"):
|
1719 |
+
with tm.assert_produces_warning(warn):
|
1720 |
+
result.iloc[0] = 0
|
1721 |
+
|
1722 |
+
if isinstance(key, list):
|
1723 |
+
tm.assert_frame_equal(df, df_orig)
|
1724 |
+
else:
|
1725 |
+
assert df.iloc[0, 0] == 0
|
1726 |
+
|
1727 |
+
|
1728 |
+
@pytest.mark.parametrize("axis, key", [(0, 0), (1, "a")])
|
1729 |
+
@pytest.mark.parametrize(
|
1730 |
+
"dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]
|
1731 |
+
)
|
1732 |
+
def test_xs(
|
1733 |
+
using_copy_on_write, warn_copy_on_write, using_array_manager, axis, key, dtype
|
1734 |
+
):
|
1735 |
+
single_block = (dtype == "int64") and not using_array_manager
|
1736 |
+
is_view = single_block or (using_array_manager and axis == 1)
|
1737 |
+
df = DataFrame(
|
1738 |
+
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}
|
1739 |
+
)
|
1740 |
+
df_orig = df.copy()
|
1741 |
+
|
1742 |
+
result = df.xs(key, axis=axis)
|
1743 |
+
|
1744 |
+
if axis == 1 or single_block:
|
1745 |
+
assert np.shares_memory(get_array(df, "a"), get_array(result))
|
1746 |
+
elif using_copy_on_write:
|
1747 |
+
assert result._mgr._has_no_reference(0)
|
1748 |
+
|
1749 |
+
if using_copy_on_write or (is_view and not warn_copy_on_write):
|
1750 |
+
result.iloc[0] = 0
|
1751 |
+
elif warn_copy_on_write:
|
1752 |
+
with tm.assert_cow_warning(single_block or axis == 1):
|
1753 |
+
result.iloc[0] = 0
|
1754 |
+
else:
|
1755 |
+
with option_context("chained_assignment", "warn"):
|
1756 |
+
with tm.assert_produces_warning(SettingWithCopyWarning):
|
1757 |
+
result.iloc[0] = 0
|
1758 |
+
|
1759 |
+
if using_copy_on_write or (not single_block and axis == 0):
|
1760 |
+
tm.assert_frame_equal(df, df_orig)
|
1761 |
+
else:
|
1762 |
+
assert df.iloc[0, 0] == 0
|
1763 |
+
|
1764 |
+
|
1765 |
+
@pytest.mark.parametrize("axis", [0, 1])
|
1766 |
+
@pytest.mark.parametrize("key, level", [("l1", 0), (2, 1)])
|
1767 |
+
def test_xs_multiindex(
|
1768 |
+
using_copy_on_write, warn_copy_on_write, using_array_manager, key, level, axis
|
1769 |
+
):
|
1770 |
+
arr = np.arange(18).reshape(6, 3)
|
1771 |
+
index = MultiIndex.from_product([["l1", "l2"], [1, 2, 3]], names=["lev1", "lev2"])
|
1772 |
+
df = DataFrame(arr, index=index, columns=list("abc"))
|
1773 |
+
if axis == 1:
|
1774 |
+
df = df.transpose().copy()
|
1775 |
+
df_orig = df.copy()
|
1776 |
+
|
1777 |
+
result = df.xs(key, level=level, axis=axis)
|
1778 |
+
|
1779 |
+
if level == 0:
|
1780 |
+
assert np.shares_memory(
|
1781 |
+
get_array(df, df.columns[0]), get_array(result, result.columns[0])
|
1782 |
+
)
|
1783 |
+
|
1784 |
+
if warn_copy_on_write:
|
1785 |
+
warn = FutureWarning if level == 0 else None
|
1786 |
+
elif not using_copy_on_write and not using_array_manager:
|
1787 |
+
warn = SettingWithCopyWarning
|
1788 |
+
else:
|
1789 |
+
warn = None
|
1790 |
+
with option_context("chained_assignment", "warn"):
|
1791 |
+
with tm.assert_produces_warning(warn):
|
1792 |
+
result.iloc[0, 0] = 0
|
1793 |
+
|
1794 |
+
tm.assert_frame_equal(df, df_orig)
|
1795 |
+
|
1796 |
+
|
1797 |
+
def test_update_frame(using_copy_on_write, warn_copy_on_write):
|
1798 |
+
df1 = DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})
|
1799 |
+
df2 = DataFrame({"b": [100.0]}, index=[1])
|
1800 |
+
df1_orig = df1.copy()
|
1801 |
+
view = df1[:]
|
1802 |
+
|
1803 |
+
# TODO(CoW) better warning message?
|
1804 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1805 |
+
df1.update(df2)
|
1806 |
+
|
1807 |
+
expected = DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 100.0, 6.0]})
|
1808 |
+
tm.assert_frame_equal(df1, expected)
|
1809 |
+
if using_copy_on_write:
|
1810 |
+
        # df1 is updated, but its view is not
|
1811 |
+
tm.assert_frame_equal(view, df1_orig)
|
1812 |
+
assert np.shares_memory(get_array(df1, "a"), get_array(view, "a"))
|
1813 |
+
assert not np.shares_memory(get_array(df1, "b"), get_array(view, "b"))
|
1814 |
+
else:
|
1815 |
+
tm.assert_frame_equal(view, expected)
|
1816 |
+
|
1817 |
+
|
1818 |
+
def test_update_series(using_copy_on_write, warn_copy_on_write):
|
1819 |
+
ser1 = Series([1.0, 2.0, 3.0])
|
1820 |
+
ser2 = Series([100.0], index=[1])
|
1821 |
+
ser1_orig = ser1.copy()
|
1822 |
+
view = ser1[:]
|
1823 |
+
|
1824 |
+
if warn_copy_on_write:
|
1825 |
+
with tm.assert_cow_warning():
|
1826 |
+
ser1.update(ser2)
|
1827 |
+
else:
|
1828 |
+
ser1.update(ser2)
|
1829 |
+
|
1830 |
+
expected = Series([1.0, 100.0, 3.0])
|
1831 |
+
tm.assert_series_equal(ser1, expected)
|
1832 |
+
if using_copy_on_write:
|
1833 |
+
        # ser1 is updated, but its view is not
|
1834 |
+
tm.assert_series_equal(view, ser1_orig)
|
1835 |
+
else:
|
1836 |
+
tm.assert_series_equal(view, expected)
|
1837 |
+
|
1838 |
+
|
1839 |
+
def test_update_chained_assignment(using_copy_on_write):
|
1840 |
+
df = DataFrame({"a": [1, 2, 3]})
|
1841 |
+
ser2 = Series([100.0], index=[1])
|
1842 |
+
df_orig = df.copy()
|
1843 |
+
if using_copy_on_write:
|
1844 |
+
with tm.raises_chained_assignment_error():
|
1845 |
+
df["a"].update(ser2)
|
1846 |
+
tm.assert_frame_equal(df, df_orig)
|
1847 |
+
|
1848 |
+
with tm.raises_chained_assignment_error():
|
1849 |
+
df[["a"]].update(ser2.to_frame())
|
1850 |
+
tm.assert_frame_equal(df, df_orig)
|
1851 |
+
else:
|
1852 |
+
with tm.assert_produces_warning(FutureWarning, match="inplace method"):
|
1853 |
+
df["a"].update(ser2)
|
1854 |
+
|
1855 |
+
with tm.assert_produces_warning(None):
|
1856 |
+
with option_context("mode.chained_assignment", None):
|
1857 |
+
df[["a"]].update(ser2.to_frame())
|
1858 |
+
|
1859 |
+
with tm.assert_produces_warning(None):
|
1860 |
+
with option_context("mode.chained_assignment", None):
|
1861 |
+
df[df["a"] > 1].update(ser2.to_frame())
|
1862 |
+
|
1863 |
+
|
1864 |
+
def test_inplace_arithmetic_series(using_copy_on_write):
|
1865 |
+
ser = Series([1, 2, 3])
|
1866 |
+
ser_orig = ser.copy()
|
1867 |
+
data = get_array(ser)
|
1868 |
+
ser *= 2
|
1869 |
+
if using_copy_on_write:
|
1870 |
+
# https://github.com/pandas-dev/pandas/pull/55745
|
1871 |
+
# changed to NOT update inplace because there is no benefit (actual
|
1872 |
+
# operation already done non-inplace). This was only for the optics
|
1873 |
+
# of updating the backing array inplace, but we no longer want to make
|
1874 |
+
# that guarantee
|
1875 |
+
assert not np.shares_memory(get_array(ser), data)
|
1876 |
+
tm.assert_numpy_array_equal(data, get_array(ser_orig))
|
1877 |
+
else:
|
1878 |
+
assert np.shares_memory(get_array(ser), data)
|
1879 |
+
tm.assert_numpy_array_equal(data, get_array(ser))
|
1880 |
+
|
1881 |
+
|
1882 |
+
def test_inplace_arithmetic_series_with_reference(
|
1883 |
+
using_copy_on_write, warn_copy_on_write
|
1884 |
+
):
|
1885 |
+
ser = Series([1, 2, 3])
|
1886 |
+
ser_orig = ser.copy()
|
1887 |
+
view = ser[:]
|
1888 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1889 |
+
ser *= 2
|
1890 |
+
if using_copy_on_write:
|
1891 |
+
assert not np.shares_memory(get_array(ser), get_array(view))
|
1892 |
+
tm.assert_series_equal(ser_orig, view)
|
1893 |
+
else:
|
1894 |
+
assert np.shares_memory(get_array(ser), get_array(view))
|
1895 |
+
|
1896 |
+
|
1897 |
+
@pytest.mark.parametrize("copy", [True, False])
|
1898 |
+
def test_transpose(using_copy_on_write, copy, using_array_manager):
|
1899 |
+
df = DataFrame({"a": [1, 2, 3], "b": 1})
|
1900 |
+
df_orig = df.copy()
|
1901 |
+
result = df.transpose(copy=copy)
|
1902 |
+
|
1903 |
+
if not copy and not using_array_manager or using_copy_on_write:
|
1904 |
+
assert np.shares_memory(get_array(df, "a"), get_array(result, 0))
|
1905 |
+
else:
|
1906 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(result, 0))
|
1907 |
+
|
1908 |
+
result.iloc[0, 0] = 100
|
1909 |
+
if using_copy_on_write:
|
1910 |
+
tm.assert_frame_equal(df, df_orig)
|
1911 |
+
|
1912 |
+
|
1913 |
+
def test_transpose_different_dtypes(using_copy_on_write):
|
1914 |
+
df = DataFrame({"a": [1, 2, 3], "b": 1.5})
|
1915 |
+
df_orig = df.copy()
|
1916 |
+
result = df.T
|
1917 |
+
|
1918 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(result, 0))
|
1919 |
+
result.iloc[0, 0] = 100
|
1920 |
+
if using_copy_on_write:
|
1921 |
+
tm.assert_frame_equal(df, df_orig)
|
1922 |
+
|
1923 |
+
|
1924 |
+
def test_transpose_ea_single_column(using_copy_on_write):
|
1925 |
+
df = DataFrame({"a": [1, 2, 3]}, dtype="Int64")
|
1926 |
+
result = df.T
|
1927 |
+
|
1928 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(result, 0))
|
1929 |
+
|
1930 |
+
|
1931 |
+
def test_transform_frame(using_copy_on_write, warn_copy_on_write):
|
1932 |
+
df = DataFrame({"a": [1, 2, 3], "b": 1})
|
1933 |
+
df_orig = df.copy()
|
1934 |
+
|
1935 |
+
def func(ser):
|
1936 |
+
ser.iloc[0] = 100
|
1937 |
+
return ser
|
1938 |
+
|
1939 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1940 |
+
df.transform(func)
|
1941 |
+
if using_copy_on_write:
|
1942 |
+
tm.assert_frame_equal(df, df_orig)
|
1943 |
+
|
1944 |
+
|
1945 |
+
def test_transform_series(using_copy_on_write, warn_copy_on_write):
|
1946 |
+
ser = Series([1, 2, 3])
|
1947 |
+
ser_orig = ser.copy()
|
1948 |
+
|
1949 |
+
def func(ser):
|
1950 |
+
ser.iloc[0] = 100
|
1951 |
+
return ser
|
1952 |
+
|
1953 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1954 |
+
ser.transform(func)
|
1955 |
+
if using_copy_on_write:
|
1956 |
+
tm.assert_series_equal(ser, ser_orig)
|
1957 |
+
|
1958 |
+
|
1959 |
+
def test_count_read_only_array():
|
1960 |
+
df = DataFrame({"a": [1, 2], "b": 3})
|
1961 |
+
result = df.count()
|
1962 |
+
result.iloc[0] = 100
|
1963 |
+
expected = Series([100, 2], index=["a", "b"])
|
1964 |
+
tm.assert_series_equal(result, expected)
|
1965 |
+
|
1966 |
+
|
1967 |
+
def test_series_view(using_copy_on_write, warn_copy_on_write):
|
1968 |
+
ser = Series([1, 2, 3])
|
1969 |
+
ser_orig = ser.copy()
|
1970 |
+
|
1971 |
+
with tm.assert_produces_warning(FutureWarning, match="is deprecated"):
|
1972 |
+
ser2 = ser.view()
|
1973 |
+
assert np.shares_memory(get_array(ser), get_array(ser2))
|
1974 |
+
if using_copy_on_write:
|
1975 |
+
assert not ser2._mgr._has_no_reference(0)
|
1976 |
+
|
1977 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
1978 |
+
ser2.iloc[0] = 100
|
1979 |
+
|
1980 |
+
if using_copy_on_write:
|
1981 |
+
tm.assert_series_equal(ser_orig, ser)
|
1982 |
+
else:
|
1983 |
+
expected = Series([100, 2, 3])
|
1984 |
+
tm.assert_series_equal(ser, expected)
|
1985 |
+
|
1986 |
+
|
1987 |
+
def test_insert_series(using_copy_on_write):
|
1988 |
+
df = DataFrame({"a": [1, 2, 3]})
|
1989 |
+
ser = Series([1, 2, 3])
|
1990 |
+
ser_orig = ser.copy()
|
1991 |
+
df.insert(loc=1, value=ser, column="b")
|
1992 |
+
if using_copy_on_write:
|
1993 |
+
assert np.shares_memory(get_array(ser), get_array(df, "b"))
|
1994 |
+
assert not df._mgr._has_no_reference(1)
|
1995 |
+
else:
|
1996 |
+
assert not np.shares_memory(get_array(ser), get_array(df, "b"))
|
1997 |
+
|
1998 |
+
df.iloc[0, 1] = 100
|
1999 |
+
tm.assert_series_equal(ser, ser_orig)
|
2000 |
+
|
2001 |
+
|
2002 |
+
def test_eval(using_copy_on_write):
|
2003 |
+
df = DataFrame({"a": [1, 2, 3], "b": 1})
|
2004 |
+
df_orig = df.copy()
|
2005 |
+
|
2006 |
+
result = df.eval("c = a+b")
|
2007 |
+
if using_copy_on_write:
|
2008 |
+
assert np.shares_memory(get_array(df, "a"), get_array(result, "a"))
|
2009 |
+
else:
|
2010 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))
|
2011 |
+
|
2012 |
+
result.iloc[0, 0] = 100
|
2013 |
+
tm.assert_frame_equal(df, df_orig)
|
2014 |
+
|
2015 |
+
|
2016 |
+
def test_eval_inplace(using_copy_on_write, warn_copy_on_write):
|
2017 |
+
df = DataFrame({"a": [1, 2, 3], "b": 1})
|
2018 |
+
df_orig = df.copy()
|
2019 |
+
df_view = df[:]
|
2020 |
+
|
2021 |
+
df.eval("c = a+b", inplace=True)
|
2022 |
+
assert np.shares_memory(get_array(df, "a"), get_array(df_view, "a"))
|
2023 |
+
|
2024 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
2025 |
+
df.iloc[0, 0] = 100
|
2026 |
+
if using_copy_on_write:
|
2027 |
+
tm.assert_frame_equal(df_view, df_orig)
|
2028 |
+
|
2029 |
+
|
2030 |
+
def test_apply_modify_row(using_copy_on_write, warn_copy_on_write):
|
2031 |
+
# Case: applying a function on each row as a Series object, where the
|
2032 |
+
# function mutates the row object (which needs to trigger CoW if row is a view)
|
2033 |
+
df = DataFrame({"A": [1, 2], "B": [3, 4]})
|
2034 |
+
df_orig = df.copy()
|
2035 |
+
|
2036 |
+
def transform(row):
|
2037 |
+
row["B"] = 100
|
2038 |
+
return row
|
2039 |
+
|
2040 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
2041 |
+
df.apply(transform, axis=1)
|
2042 |
+
|
2043 |
+
if using_copy_on_write:
|
2044 |
+
tm.assert_frame_equal(df, df_orig)
|
2045 |
+
else:
|
2046 |
+
assert df.loc[0, "B"] == 100
|
2047 |
+
|
2048 |
+
# row Series is a copy
|
2049 |
+
df = DataFrame({"A": [1, 2], "B": ["b", "c"]})
|
2050 |
+
df_orig = df.copy()
|
2051 |
+
|
2052 |
+
with tm.assert_produces_warning(None):
|
2053 |
+
df.apply(transform, axis=1)
|
2054 |
+
|
2055 |
+
tm.assert_frame_equal(df, df_orig)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_replace.py
ADDED
@@ -0,0 +1,481 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
import pytest
|
3 |
+
|
4 |
+
from pandas import (
|
5 |
+
Categorical,
|
6 |
+
DataFrame,
|
7 |
+
option_context,
|
8 |
+
)
|
9 |
+
import pandas._testing as tm
|
10 |
+
from pandas.tests.copy_view.util import get_array
|
11 |
+
|
12 |
+
|
13 |
+
@pytest.mark.parametrize(
|
14 |
+
"replace_kwargs",
|
15 |
+
[
|
16 |
+
{"to_replace": {"a": 1, "b": 4}, "value": -1},
|
17 |
+
# Test CoW splits blocks to avoid copying unchanged columns
|
18 |
+
{"to_replace": {"a": 1}, "value": -1},
|
19 |
+
{"to_replace": {"b": 4}, "value": -1},
|
20 |
+
{"to_replace": {"b": {4: 1}}},
|
21 |
+
# TODO: Add these in a further optimization
|
22 |
+
# We would need to see which columns got replaced in the mask
|
23 |
+
# which could be expensive
|
24 |
+
# {"to_replace": {"b": 1}},
|
25 |
+
# 1
|
26 |
+
],
|
27 |
+
)
|
28 |
+
def test_replace(using_copy_on_write, replace_kwargs):
|
29 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": ["foo", "bar", "baz"]})
|
30 |
+
df_orig = df.copy()
|
31 |
+
|
32 |
+
df_replaced = df.replace(**replace_kwargs)
|
33 |
+
|
34 |
+
if using_copy_on_write:
|
35 |
+
if (df_replaced["b"] == df["b"]).all():
|
36 |
+
assert np.shares_memory(get_array(df_replaced, "b"), get_array(df, "b"))
|
37 |
+
assert np.shares_memory(get_array(df_replaced, "c"), get_array(df, "c"))
|
38 |
+
|
39 |
+
# mutating squeezed df triggers a copy-on-write for that column/block
|
40 |
+
df_replaced.loc[0, "c"] = -1
|
41 |
+
if using_copy_on_write:
|
42 |
+
assert not np.shares_memory(get_array(df_replaced, "c"), get_array(df, "c"))
|
43 |
+
|
44 |
+
if "a" in replace_kwargs["to_replace"]:
|
45 |
+
arr = get_array(df_replaced, "a")
|
46 |
+
df_replaced.loc[0, "a"] = 100
|
47 |
+
assert np.shares_memory(get_array(df_replaced, "a"), arr)
|
48 |
+
tm.assert_frame_equal(df, df_orig)
|
49 |
+
|
50 |
+
|
51 |
+
def test_replace_regex_inplace_refs(using_copy_on_write, warn_copy_on_write):
|
52 |
+
df = DataFrame({"a": ["aaa", "bbb"]})
|
53 |
+
df_orig = df.copy()
|
54 |
+
view = df[:]
|
55 |
+
arr = get_array(df, "a")
|
56 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
57 |
+
df.replace(to_replace=r"^a.*$", value="new", inplace=True, regex=True)
|
58 |
+
if using_copy_on_write:
|
59 |
+
assert not np.shares_memory(arr, get_array(df, "a"))
|
60 |
+
assert df._mgr._has_no_reference(0)
|
61 |
+
tm.assert_frame_equal(view, df_orig)
|
62 |
+
else:
|
63 |
+
assert np.shares_memory(arr, get_array(df, "a"))
|
64 |
+
|
65 |
+
|
66 |
+
def test_replace_regex_inplace(using_copy_on_write):
|
67 |
+
df = DataFrame({"a": ["aaa", "bbb"]})
|
68 |
+
arr = get_array(df, "a")
|
69 |
+
df.replace(to_replace=r"^a.*$", value="new", inplace=True, regex=True)
|
70 |
+
if using_copy_on_write:
|
71 |
+
assert df._mgr._has_no_reference(0)
|
72 |
+
assert np.shares_memory(arr, get_array(df, "a"))
|
73 |
+
|
74 |
+
df_orig = df.copy()
|
75 |
+
df2 = df.replace(to_replace=r"^b.*$", value="new", regex=True)
|
76 |
+
tm.assert_frame_equal(df_orig, df)
|
77 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
78 |
+
|
79 |
+
|
80 |
+
def test_replace_regex_inplace_no_op(using_copy_on_write):
|
81 |
+
df = DataFrame({"a": [1, 2]})
|
82 |
+
arr = get_array(df, "a")
|
83 |
+
df.replace(to_replace=r"^a.$", value="new", inplace=True, regex=True)
|
84 |
+
if using_copy_on_write:
|
85 |
+
assert df._mgr._has_no_reference(0)
|
86 |
+
assert np.shares_memory(arr, get_array(df, "a"))
|
87 |
+
|
88 |
+
df_orig = df.copy()
|
89 |
+
df2 = df.replace(to_replace=r"^x.$", value="new", regex=True)
|
90 |
+
tm.assert_frame_equal(df_orig, df)
|
91 |
+
if using_copy_on_write:
|
92 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
93 |
+
else:
|
94 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
95 |
+
|
96 |
+
|
97 |
+
def test_replace_mask_all_false_second_block(using_copy_on_write):
|
98 |
+
df = DataFrame({"a": [1.5, 2, 3], "b": 100.5, "c": 1, "d": 2})
|
99 |
+
df_orig = df.copy()
|
100 |
+
|
101 |
+
df2 = df.replace(to_replace=1.5, value=55.5)
|
102 |
+
|
103 |
+
if using_copy_on_write:
|
104 |
+
# TODO: Block splitting would allow us to avoid copying b
|
105 |
+
assert np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
|
106 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
107 |
+
|
108 |
+
else:
|
109 |
+
assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
|
110 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
111 |
+
|
112 |
+
df2.loc[0, "c"] = 1
|
113 |
+
tm.assert_frame_equal(df, df_orig) # Original is unchanged
|
114 |
+
|
115 |
+
if using_copy_on_write:
|
116 |
+
assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
|
117 |
+
# TODO: This should split and not copy the whole block
|
118 |
+
# assert np.shares_memory(get_array(df, "d"), get_array(df2, "d"))
|
119 |
+
|
120 |
+
|
121 |
+
def test_replace_coerce_single_column(using_copy_on_write, using_array_manager):
|
122 |
+
df = DataFrame({"a": [1.5, 2, 3], "b": 100.5})
|
123 |
+
df_orig = df.copy()
|
124 |
+
|
125 |
+
df2 = df.replace(to_replace=1.5, value="a")
|
126 |
+
|
127 |
+
if using_copy_on_write:
|
128 |
+
assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
|
129 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
130 |
+
|
131 |
+
elif not using_array_manager:
|
132 |
+
assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
|
133 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
134 |
+
|
135 |
+
if using_copy_on_write:
|
136 |
+
df2.loc[0, "b"] = 0.5
|
137 |
+
tm.assert_frame_equal(df, df_orig) # Original is unchanged
|
138 |
+
assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
|
139 |
+
|
140 |
+
|
141 |
+
def test_replace_to_replace_wrong_dtype(using_copy_on_write):
|
142 |
+
df = DataFrame({"a": [1.5, 2, 3], "b": 100.5})
|
143 |
+
df_orig = df.copy()
|
144 |
+
|
145 |
+
df2 = df.replace(to_replace="xxx", value=1.5)
|
146 |
+
|
147 |
+
if using_copy_on_write:
|
148 |
+
assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
|
149 |
+
assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
150 |
+
|
151 |
+
else:
|
152 |
+
assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
|
153 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
154 |
+
|
155 |
+
df2.loc[0, "b"] = 0.5
|
156 |
+
tm.assert_frame_equal(df, df_orig) # Original is unchanged
|
157 |
+
|
158 |
+
if using_copy_on_write:
|
159 |
+
assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))
|
160 |
+
|
161 |
+
|
162 |
+
def test_replace_list_categorical(using_copy_on_write):
|
163 |
+
df = DataFrame({"a": ["a", "b", "c"]}, dtype="category")
|
164 |
+
arr = get_array(df, "a")
|
165 |
+
msg = (
|
166 |
+
r"The behavior of Series\.replace \(and DataFrame.replace\) "
|
167 |
+
"with CategoricalDtype"
|
168 |
+
)
|
169 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
170 |
+
df.replace(["c"], value="a", inplace=True)
|
171 |
+
assert np.shares_memory(arr.codes, get_array(df, "a").codes)
|
172 |
+
if using_copy_on_write:
|
173 |
+
assert df._mgr._has_no_reference(0)
|
174 |
+
|
175 |
+
df_orig = df.copy()
|
176 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
177 |
+
df2 = df.replace(["b"], value="a")
|
178 |
+
assert not np.shares_memory(arr.codes, get_array(df2, "a").codes)
|
179 |
+
|
180 |
+
tm.assert_frame_equal(df, df_orig)
|
181 |
+
|
182 |
+
|
183 |
+
def test_replace_list_inplace_refs_categorical(using_copy_on_write):
|
184 |
+
df = DataFrame({"a": ["a", "b", "c"]}, dtype="category")
|
185 |
+
view = df[:]
|
186 |
+
df_orig = df.copy()
|
187 |
+
msg = (
|
188 |
+
r"The behavior of Series\.replace \(and DataFrame.replace\) "
|
189 |
+
"with CategoricalDtype"
|
190 |
+
)
|
191 |
+
with tm.assert_produces_warning(FutureWarning, match=msg):
|
192 |
+
df.replace(["c"], value="a", inplace=True)
|
193 |
+
if using_copy_on_write:
|
194 |
+
assert not np.shares_memory(
|
195 |
+
get_array(view, "a").codes, get_array(df, "a").codes
|
196 |
+
)
|
197 |
+
tm.assert_frame_equal(df_orig, view)
|
198 |
+
else:
|
199 |
+
# This could be inplace
|
200 |
+
assert not np.shares_memory(
|
201 |
+
get_array(view, "a").codes, get_array(df, "a").codes
|
202 |
+
)
|
203 |
+
|
204 |
+
|
205 |
+
@pytest.mark.parametrize("to_replace", [1.5, [1.5], []])
|
206 |
+
def test_replace_inplace(using_copy_on_write, to_replace):
|
207 |
+
df = DataFrame({"a": [1.5, 2, 3]})
|
208 |
+
arr_a = get_array(df, "a")
|
209 |
+
df.replace(to_replace=1.5, value=15.5, inplace=True)
|
210 |
+
|
211 |
+
assert np.shares_memory(get_array(df, "a"), arr_a)
|
212 |
+
if using_copy_on_write:
|
213 |
+
assert df._mgr._has_no_reference(0)
|
214 |
+
|
215 |
+
|
216 |
+
@pytest.mark.parametrize("to_replace", [1.5, [1.5]])
|
217 |
+
def test_replace_inplace_reference(using_copy_on_write, to_replace, warn_copy_on_write):
|
218 |
+
df = DataFrame({"a": [1.5, 2, 3]})
|
219 |
+
arr_a = get_array(df, "a")
|
220 |
+
view = df[:]
|
221 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
222 |
+
df.replace(to_replace=to_replace, value=15.5, inplace=True)
|
223 |
+
|
224 |
+
if using_copy_on_write:
|
225 |
+
assert not np.shares_memory(get_array(df, "a"), arr_a)
|
226 |
+
assert df._mgr._has_no_reference(0)
|
227 |
+
assert view._mgr._has_no_reference(0)
|
228 |
+
else:
|
229 |
+
assert np.shares_memory(get_array(df, "a"), arr_a)
|
230 |
+
|
231 |
+
|
232 |
+
@pytest.mark.parametrize("to_replace", ["a", 100.5])
|
233 |
+
def test_replace_inplace_reference_no_op(using_copy_on_write, to_replace):
|
234 |
+
df = DataFrame({"a": [1.5, 2, 3]})
|
235 |
+
arr_a = get_array(df, "a")
|
236 |
+
view = df[:]
|
237 |
+
df.replace(to_replace=to_replace, value=15.5, inplace=True)
|
238 |
+
|
239 |
+
assert np.shares_memory(get_array(df, "a"), arr_a)
|
240 |
+
if using_copy_on_write:
|
241 |
+
assert not df._mgr._has_no_reference(0)
|
242 |
+
assert not view._mgr._has_no_reference(0)
|
243 |
+
|
244 |
+
|
245 |
+
@pytest.mark.parametrize("to_replace", [1, [1]])
|
246 |
+
@pytest.mark.parametrize("val", [1, 1.5])
|
247 |
+
def test_replace_categorical_inplace_reference(using_copy_on_write, val, to_replace):
|
248 |
+
df = DataFrame({"a": Categorical([1, 2, 3])})
|
249 |
+
df_orig = df.copy()
|
250 |
+
arr_a = get_array(df, "a")
|
251 |
+
view = df[:]
|
252 |
+
msg = (
|
253 |
+
r"The behavior of Series\.replace \(and DataFrame.replace\) "
|
254 |
+
"with CategoricalDtype"
|
255 |
+
)
|
256 |
+
warn = FutureWarning if val == 1.5 else None
|
257 |
+
with tm.assert_produces_warning(warn, match=msg):
|
258 |
+
df.replace(to_replace=to_replace, value=val, inplace=True)
|
259 |
+
|
260 |
+
if using_copy_on_write:
|
261 |
+
assert not np.shares_memory(get_array(df, "a").codes, arr_a.codes)
|
262 |
+
assert df._mgr._has_no_reference(0)
|
263 |
+
assert view._mgr._has_no_reference(0)
|
264 |
+
tm.assert_frame_equal(view, df_orig)
|
265 |
+
else:
|
266 |
+
assert np.shares_memory(get_array(df, "a").codes, arr_a.codes)
|
267 |
+
|
268 |
+
|
269 |
+
@pytest.mark.parametrize("val", [1, 1.5])
|
270 |
+
def test_replace_categorical_inplace(using_copy_on_write, val):
|
271 |
+
df = DataFrame({"a": Categorical([1, 2, 3])})
|
272 |
+
arr_a = get_array(df, "a")
|
273 |
+
msg = (
|
274 |
+
r"The behavior of Series\.replace \(and DataFrame.replace\) "
|
275 |
+
"with CategoricalDtype"
|
276 |
+
)
|
277 |
+
warn = FutureWarning if val == 1.5 else None
|
278 |
+
with tm.assert_produces_warning(warn, match=msg):
|
279 |
+
df.replace(to_replace=1, value=val, inplace=True)
|
280 |
+
|
281 |
+
assert np.shares_memory(get_array(df, "a").codes, arr_a.codes)
|
282 |
+
if using_copy_on_write:
|
283 |
+
assert df._mgr._has_no_reference(0)
|
284 |
+
|
285 |
+
expected = DataFrame({"a": Categorical([val, 2, 3])})
|
286 |
+
tm.assert_frame_equal(df, expected)
|
287 |
+
|
288 |
+
|
289 |
+
@pytest.mark.parametrize("val", [1, 1.5])
|
290 |
+
def test_replace_categorical(using_copy_on_write, val):
|
291 |
+
df = DataFrame({"a": Categorical([1, 2, 3])})
|
292 |
+
df_orig = df.copy()
|
293 |
+
msg = (
|
294 |
+
r"The behavior of Series\.replace \(and DataFrame.replace\) "
|
295 |
+
"with CategoricalDtype"
|
296 |
+
)
|
297 |
+
warn = FutureWarning if val == 1.5 else None
|
298 |
+
with tm.assert_produces_warning(warn, match=msg):
|
299 |
+
df2 = df.replace(to_replace=1, value=val)
|
300 |
+
|
301 |
+
if using_copy_on_write:
|
302 |
+
assert df._mgr._has_no_reference(0)
|
303 |
+
assert df2._mgr._has_no_reference(0)
|
304 |
+
assert not np.shares_memory(get_array(df, "a").codes, get_array(df2, "a").codes)
|
305 |
+
tm.assert_frame_equal(df, df_orig)
|
306 |
+
|
307 |
+
arr_a = get_array(df2, "a").codes
|
308 |
+
df2.iloc[0, 0] = 2.0
|
309 |
+
assert np.shares_memory(get_array(df2, "a").codes, arr_a)
|
310 |
+
|
311 |
+
|
312 |
+
@pytest.mark.parametrize("method", ["where", "mask"])
|
313 |
+
def test_masking_inplace(using_copy_on_write, method, warn_copy_on_write):
|
314 |
+
df = DataFrame({"a": [1.5, 2, 3]})
|
315 |
+
df_orig = df.copy()
|
316 |
+
arr_a = get_array(df, "a")
|
317 |
+
view = df[:]
|
318 |
+
|
319 |
+
method = getattr(df, method)
|
320 |
+
if warn_copy_on_write:
|
321 |
+
with tm.assert_cow_warning():
|
322 |
+
method(df["a"] > 1.6, -1, inplace=True)
|
323 |
+
else:
|
324 |
+
method(df["a"] > 1.6, -1, inplace=True)
|
325 |
+
|
326 |
+
if using_copy_on_write:
|
327 |
+
assert not np.shares_memory(get_array(df, "a"), arr_a)
|
328 |
+
assert df._mgr._has_no_reference(0)
|
329 |
+
assert view._mgr._has_no_reference(0)
|
330 |
+
tm.assert_frame_equal(view, df_orig)
|
331 |
+
else:
|
332 |
+
assert np.shares_memory(get_array(df, "a"), arr_a)
|
333 |
+
|
334 |
+
|
335 |
+
def test_replace_empty_list(using_copy_on_write):
|
336 |
+
df = DataFrame({"a": [1, 2]})
|
337 |
+
|
338 |
+
df2 = df.replace([], [])
|
339 |
+
if using_copy_on_write:
|
340 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
341 |
+
assert not df._mgr._has_no_reference(0)
|
342 |
+
else:
|
343 |
+
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
344 |
+
|
345 |
+
arr_a = get_array(df, "a")
|
346 |
+
df.replace([], [])
|
347 |
+
if using_copy_on_write:
|
348 |
+
assert np.shares_memory(get_array(df, "a"), arr_a)
|
349 |
+
assert not df._mgr._has_no_reference(0)
|
350 |
+
assert not df2._mgr._has_no_reference(0)
|
351 |
+
|
352 |
+
|
353 |
+
@pytest.mark.parametrize("value", ["d", None])
|
354 |
+
def test_replace_object_list_inplace(using_copy_on_write, value):
|
355 |
+
df = DataFrame({"a": ["a", "b", "c"]})
|
356 |
+
arr = get_array(df, "a")
|
357 |
+
df.replace(["c"], value, inplace=True)
|
358 |
+
if using_copy_on_write or value is None:
|
359 |
+
assert np.shares_memory(arr, get_array(df, "a"))
|
360 |
+
else:
|
361 |
+
# This could be inplace
|
362 |
+
assert not np.shares_memory(arr, get_array(df, "a"))
|
363 |
+
if using_copy_on_write:
|
364 |
+
assert df._mgr._has_no_reference(0)
|
365 |
+
|
366 |
+
|
367 |
+
def test_replace_list_multiple_elements_inplace(using_copy_on_write):
|
368 |
+
df = DataFrame({"a": [1, 2, 3]})
|
369 |
+
arr = get_array(df, "a")
|
370 |
+
df.replace([1, 2], 4, inplace=True)
|
371 |
+
if using_copy_on_write:
|
372 |
+
assert np.shares_memory(arr, get_array(df, "a"))
|
373 |
+
assert df._mgr._has_no_reference(0)
|
374 |
+
else:
|
375 |
+
assert np.shares_memory(arr, get_array(df, "a"))
|
376 |
+
|
377 |
+
|
378 |
+
def test_replace_list_none(using_copy_on_write):
|
379 |
+
df = DataFrame({"a": ["a", "b", "c"]})
|
380 |
+
|
381 |
+
df_orig = df.copy()
|
382 |
+
df2 = df.replace(["b"], value=None)
|
383 |
+
tm.assert_frame_equal(df, df_orig)
|
384 |
+
|
385 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))
|
386 |
+
|
387 |
+
|
388 |
+
def test_replace_list_none_inplace_refs(using_copy_on_write, warn_copy_on_write):
|
389 |
+
df = DataFrame({"a": ["a", "b", "c"]})
|
390 |
+
arr = get_array(df, "a")
|
391 |
+
df_orig = df.copy()
|
392 |
+
view = df[:]
|
393 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
394 |
+
df.replace(["a"], value=None, inplace=True)
|
395 |
+
if using_copy_on_write:
|
396 |
+
assert df._mgr._has_no_reference(0)
|
397 |
+
assert not np.shares_memory(arr, get_array(df, "a"))
|
398 |
+
tm.assert_frame_equal(df_orig, view)
|
399 |
+
else:
|
400 |
+
assert np.shares_memory(arr, get_array(df, "a"))
|
401 |
+
|
402 |
+
|
403 |
+
def test_replace_columnwise_no_op_inplace(using_copy_on_write):
|
404 |
+
df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})
|
405 |
+
view = df[:]
|
406 |
+
df_orig = df.copy()
|
407 |
+
df.replace({"a": 10}, 100, inplace=True)
|
408 |
+
if using_copy_on_write:
|
409 |
+
assert np.shares_memory(get_array(view, "a"), get_array(df, "a"))
|
410 |
+
df.iloc[0, 0] = 100
|
411 |
+
tm.assert_frame_equal(view, df_orig)
|
412 |
+
|
413 |
+
|
414 |
+
def test_replace_columnwise_no_op(using_copy_on_write):
|
415 |
+
df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})
|
416 |
+
df_orig = df.copy()
|
417 |
+
df2 = df.replace({"a": 10}, 100)
|
418 |
+
if using_copy_on_write:
|
419 |
+
assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))
|
420 |
+
df2.iloc[0, 0] = 100
|
421 |
+
tm.assert_frame_equal(df, df_orig)
|
422 |
+
|
423 |
+
|
424 |
+
def test_replace_chained_assignment(using_copy_on_write):
|
425 |
+
df = DataFrame({"a": [1, np.nan, 2], "b": 1})
|
426 |
+
df_orig = df.copy()
|
427 |
+
if using_copy_on_write:
|
428 |
+
with tm.raises_chained_assignment_error():
|
429 |
+
df["a"].replace(1, 100, inplace=True)
|
430 |
+
tm.assert_frame_equal(df, df_orig)
|
431 |
+
|
432 |
+
with tm.raises_chained_assignment_error():
|
433 |
+
df[["a"]].replace(1, 100, inplace=True)
|
434 |
+
tm.assert_frame_equal(df, df_orig)
|
435 |
+
else:
|
436 |
+
with tm.assert_produces_warning(None):
|
437 |
+
with option_context("mode.chained_assignment", None):
|
438 |
+
df[["a"]].replace(1, 100, inplace=True)
|
439 |
+
|
440 |
+
with tm.assert_produces_warning(None):
|
441 |
+
with option_context("mode.chained_assignment", None):
|
442 |
+
df[df.a > 5].replace(1, 100, inplace=True)
|
443 |
+
|
444 |
+
with tm.assert_produces_warning(FutureWarning, match="inplace method"):
|
445 |
+
df["a"].replace(1, 100, inplace=True)
|
446 |
+
|
447 |
+
|
448 |
+
def test_replace_listlike(using_copy_on_write):
|
449 |
+
df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})
|
450 |
+
df_orig = df.copy()
|
451 |
+
|
452 |
+
result = df.replace([200, 201], [11, 11])
|
453 |
+
if using_copy_on_write:
|
454 |
+
assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))
|
455 |
+
else:
|
456 |
+
assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))
|
457 |
+
|
458 |
+
result.iloc[0, 0] = 100
|
459 |
+
tm.assert_frame_equal(df, df)
|
460 |
+
|
461 |
+
result = df.replace([200, 2], [10, 10])
|
462 |
+
assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))
|
463 |
+
tm.assert_frame_equal(df, df_orig)
|
464 |
+
|
465 |
+
|
466 |
+
def test_replace_listlike_inplace(using_copy_on_write, warn_copy_on_write):
|
467 |
+
df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})
|
468 |
+
arr = get_array(df, "a")
|
469 |
+
df.replace([200, 2], [10, 11], inplace=True)
|
470 |
+
assert np.shares_memory(get_array(df, "a"), arr)
|
471 |
+
|
472 |
+
view = df[:]
|
473 |
+
df_orig = df.copy()
|
474 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
475 |
+
df.replace([200, 3], [10, 11], inplace=True)
|
476 |
+
if using_copy_on_write:
|
477 |
+
assert not np.shares_memory(get_array(df, "a"), arr)
|
478 |
+
tm.assert_frame_equal(view, df_orig)
|
479 |
+
else:
|
480 |
+
assert np.shares_memory(get_array(df, "a"), arr)
|
481 |
+
tm.assert_frame_equal(df, view)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_setitem.py
ADDED
@@ -0,0 +1,156 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
|
3 |
+
from pandas import (
|
4 |
+
DataFrame,
|
5 |
+
Index,
|
6 |
+
MultiIndex,
|
7 |
+
RangeIndex,
|
8 |
+
Series,
|
9 |
+
)
|
10 |
+
import pandas._testing as tm
|
11 |
+
from pandas.tests.copy_view.util import get_array
|
12 |
+
|
13 |
+
# -----------------------------------------------------------------------------
|
14 |
+
# Copy/view behaviour for the values that are set in a DataFrame
|
15 |
+
|
16 |
+
|
17 |
+
def test_set_column_with_array():
|
18 |
+
# Case: setting an array as a new column (df[col] = arr) copies that data
|
19 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
20 |
+
arr = np.array([1, 2, 3], dtype="int64")
|
21 |
+
|
22 |
+
df["c"] = arr
|
23 |
+
|
24 |
+
# the array data is copied
|
25 |
+
assert not np.shares_memory(get_array(df, "c"), arr)
|
26 |
+
# and thus modifying the array does not modify the DataFrame
|
27 |
+
arr[0] = 0
|
28 |
+
tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c"))
|
29 |
+
|
30 |
+
|
31 |
+
def test_set_column_with_series(using_copy_on_write):
|
32 |
+
# Case: setting a series as a new column (df[col] = s) copies that data
|
33 |
+
# (with delayed copy with CoW)
|
34 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
35 |
+
ser = Series([1, 2, 3])
|
36 |
+
|
37 |
+
df["c"] = ser
|
38 |
+
|
39 |
+
if using_copy_on_write:
|
40 |
+
assert np.shares_memory(get_array(df, "c"), get_array(ser))
|
41 |
+
else:
|
42 |
+
# the series data is copied
|
43 |
+
assert not np.shares_memory(get_array(df, "c"), get_array(ser))
|
44 |
+
|
45 |
+
# and modifying the series does not modify the DataFrame
|
46 |
+
ser.iloc[0] = 0
|
47 |
+
assert ser.iloc[0] == 0
|
48 |
+
tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c"))
|
49 |
+
|
50 |
+
|
51 |
+
def test_set_column_with_index(using_copy_on_write):
|
52 |
+
# Case: setting an index as a new column (df[col] = idx) copies that data
|
53 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
54 |
+
idx = Index([1, 2, 3])
|
55 |
+
|
56 |
+
df["c"] = idx
|
57 |
+
|
58 |
+
# the index data is copied
|
59 |
+
assert not np.shares_memory(get_array(df, "c"), idx.values)
|
60 |
+
|
61 |
+
idx = RangeIndex(1, 4)
|
62 |
+
arr = idx.values
|
63 |
+
|
64 |
+
df["d"] = idx
|
65 |
+
|
66 |
+
assert not np.shares_memory(get_array(df, "d"), arr)
|
67 |
+
|
68 |
+
|
69 |
+
def test_set_columns_with_dataframe(using_copy_on_write):
|
70 |
+
# Case: setting a DataFrame as new columns copies that data
|
71 |
+
# (with delayed copy with CoW)
|
72 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
73 |
+
df2 = DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]})
|
74 |
+
|
75 |
+
df[["c", "d"]] = df2
|
76 |
+
|
77 |
+
if using_copy_on_write:
|
78 |
+
assert np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
|
79 |
+
else:
|
80 |
+
# the data is copied
|
81 |
+
assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))
|
82 |
+
|
83 |
+
# and modifying the set DataFrame does not modify the original DataFrame
|
84 |
+
df2.iloc[0, 0] = 0
|
85 |
+
tm.assert_series_equal(df["c"], Series([7, 8, 9], name="c"))
|
86 |
+
|
87 |
+
|
88 |
+
def test_setitem_series_no_copy(using_copy_on_write):
|
89 |
+
# Case: setting a Series as column into a DataFrame can delay copying that data
|
90 |
+
df = DataFrame({"a": [1, 2, 3]})
|
91 |
+
rhs = Series([4, 5, 6])
|
92 |
+
rhs_orig = rhs.copy()
|
93 |
+
|
94 |
+
# adding a new column
|
95 |
+
df["b"] = rhs
|
96 |
+
if using_copy_on_write:
|
97 |
+
assert np.shares_memory(get_array(rhs), get_array(df, "b"))
|
98 |
+
|
99 |
+
df.iloc[0, 1] = 100
|
100 |
+
tm.assert_series_equal(rhs, rhs_orig)
|
101 |
+
|
102 |
+
|
103 |
+
def test_setitem_series_no_copy_single_block(using_copy_on_write):
|
104 |
+
# Overwriting an existing column that is a single block
|
105 |
+
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
|
106 |
+
rhs = Series([4, 5, 6])
|
107 |
+
rhs_orig = rhs.copy()
|
108 |
+
|
109 |
+
df["a"] = rhs
|
110 |
+
if using_copy_on_write:
|
111 |
+
assert np.shares_memory(get_array(rhs), get_array(df, "a"))
|
112 |
+
|
113 |
+
df.iloc[0, 0] = 100
|
114 |
+
tm.assert_series_equal(rhs, rhs_orig)
|
115 |
+
|
116 |
+
|
117 |
+
def test_setitem_series_no_copy_split_block(using_copy_on_write):
|
118 |
+
# Overwriting an existing column that is part of a larger block
|
119 |
+
df = DataFrame({"a": [1, 2, 3], "b": 1})
|
120 |
+
rhs = Series([4, 5, 6])
|
121 |
+
rhs_orig = rhs.copy()
|
122 |
+
|
123 |
+
df["b"] = rhs
|
124 |
+
if using_copy_on_write:
|
125 |
+
assert np.shares_memory(get_array(rhs), get_array(df, "b"))
|
126 |
+
|
127 |
+
df.iloc[0, 1] = 100
|
128 |
+
tm.assert_series_equal(rhs, rhs_orig)
|
129 |
+
|
130 |
+
|
131 |
+
def test_setitem_series_column_midx_broadcasting(using_copy_on_write):
|
132 |
+
# Setting a Series to multiple columns will repeat the data
|
133 |
+
# (currently copying the data eagerly)
|
134 |
+
df = DataFrame(
|
135 |
+
[[1, 2, 3], [3, 4, 5]],
|
136 |
+
columns=MultiIndex.from_arrays([["a", "a", "b"], [1, 2, 3]]),
|
137 |
+
)
|
138 |
+
rhs = Series([10, 11])
|
139 |
+
df["a"] = rhs
|
140 |
+
assert not np.shares_memory(get_array(rhs), df._get_column_array(0))
|
141 |
+
if using_copy_on_write:
|
142 |
+
assert df._mgr._has_no_reference(0)
|
143 |
+
|
144 |
+
|
145 |
+
def test_set_column_with_inplace_operator(using_copy_on_write, warn_copy_on_write):
|
146 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
147 |
+
|
148 |
+
# this should not raise any warning
|
149 |
+
with tm.assert_produces_warning(None):
|
150 |
+
df["a"] += 1
|
151 |
+
|
152 |
+
# when it is not in a chain, then it should produce a warning
|
153 |
+
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
|
154 |
+
ser = df["a"]
|
155 |
+
with tm.assert_cow_warning(warn_copy_on_write):
|
156 |
+
ser += 1
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/test_util.py
ADDED
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy as np
|
2 |
+
|
3 |
+
from pandas import DataFrame
|
4 |
+
from pandas.tests.copy_view.util import get_array
|
5 |
+
|
6 |
+
|
7 |
+
def test_get_array_numpy():
|
8 |
+
df = DataFrame({"a": [1, 2, 3]})
|
9 |
+
assert np.shares_memory(get_array(df, "a"), get_array(df, "a"))
|
10 |
+
|
11 |
+
|
12 |
+
def test_get_array_masked():
|
13 |
+
df = DataFrame({"a": [1, 2, 3]}, dtype="Int64")
|
14 |
+
assert np.shares_memory(get_array(df, "a"), get_array(df, "a"))
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/copy_view/util.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from pandas import (
|
2 |
+
Categorical,
|
3 |
+
Index,
|
4 |
+
Series,
|
5 |
+
)
|
6 |
+
from pandas.core.arrays import BaseMaskedArray
|
7 |
+
|
8 |
+
|
9 |
+
def get_array(obj, col=None):
|
10 |
+
"""
|
11 |
+
Helper method to get array for a DataFrame column or a Series.
|
12 |
+
|
13 |
+
Equivalent of df[col].values, but without going through normal getitem,
|
14 |
+
which triggers tracking references / CoW (and we might be testing that
|
15 |
+
this is done by some other operation).
|
16 |
+
"""
|
17 |
+
if isinstance(obj, Index):
|
18 |
+
arr = obj._values
|
19 |
+
elif isinstance(obj, Series) and (col is None or obj.name == col):
|
20 |
+
arr = obj._values
|
21 |
+
else:
|
22 |
+
assert col is not None
|
23 |
+
icol = obj.columns.get_loc(col)
|
24 |
+
assert isinstance(icol, int)
|
25 |
+
arr = obj._get_column_array(icol)
|
26 |
+
if isinstance(arr, BaseMaskedArray):
|
27 |
+
return arr._data
|
28 |
+
elif isinstance(arr, Categorical):
|
29 |
+
return arr
|
30 |
+
return getattr(arr, "_ndarray", arr)
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__init__.py
ADDED
File without changes
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_append.cpython-310.pyc
ADDED
Binary file (24.2 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_store.cpython-310.pyc
ADDED
Binary file (31.4 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/__pycache__/test_subclass.cpython-310.pyc
ADDED
Binary file (1.71 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/common.py
ADDED
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from collections.abc import Generator
|
2 |
+
from contextlib import contextmanager
|
3 |
+
import pathlib
|
4 |
+
import tempfile
|
5 |
+
|
6 |
+
import pytest
|
7 |
+
|
8 |
+
from pandas.io.pytables import HDFStore
|
9 |
+
|
10 |
+
tables = pytest.importorskip("tables")
|
11 |
+
# set these parameters so we don't have file sharing
|
12 |
+
tables.parameters.MAX_NUMEXPR_THREADS = 1
|
13 |
+
tables.parameters.MAX_BLOSC_THREADS = 1
|
14 |
+
tables.parameters.MAX_THREADS = 1
|
15 |
+
|
16 |
+
|
17 |
+
def safe_close(store):
|
18 |
+
try:
|
19 |
+
if store is not None:
|
20 |
+
store.close()
|
21 |
+
except OSError:
|
22 |
+
pass
|
23 |
+
|
24 |
+
|
25 |
+
# contextmanager to ensure the file cleanup
|
26 |
+
@contextmanager
|
27 |
+
def ensure_clean_store(
|
28 |
+
path, mode="a", complevel=None, complib=None, fletcher32=False
|
29 |
+
) -> Generator[HDFStore, None, None]:
|
30 |
+
with tempfile.TemporaryDirectory() as tmpdirname:
|
31 |
+
tmp_path = pathlib.Path(tmpdirname, path)
|
32 |
+
with HDFStore(
|
33 |
+
tmp_path,
|
34 |
+
mode=mode,
|
35 |
+
complevel=complevel,
|
36 |
+
complib=complib,
|
37 |
+
fletcher32=fletcher32,
|
38 |
+
) as store:
|
39 |
+
yield store
|
40 |
+
|
41 |
+
|
42 |
+
def _maybe_remove(store, key):
|
43 |
+
"""
|
44 |
+
For tests using tables, try removing the table to be sure there is
|
45 |
+
no content from previous tests using the same table name.
|
46 |
+
"""
|
47 |
+
try:
|
48 |
+
store.remove(key)
|
49 |
+
except (ValueError, KeyError):
|
50 |
+
pass
|
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_append.py
ADDED
@@ -0,0 +1,986 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import datetime
|
2 |
+
from datetime import timedelta
|
3 |
+
import re
|
4 |
+
|
5 |
+
import numpy as np
|
6 |
+
import pytest
|
7 |
+
|
8 |
+
from pandas._libs.tslibs import Timestamp
|
9 |
+
import pandas.util._test_decorators as td
|
10 |
+
|
11 |
+
import pandas as pd
|
12 |
+
from pandas import (
|
13 |
+
DataFrame,
|
14 |
+
Index,
|
15 |
+
Series,
|
16 |
+
_testing as tm,
|
17 |
+
concat,
|
18 |
+
date_range,
|
19 |
+
read_hdf,
|
20 |
+
)
|
21 |
+
from pandas.tests.io.pytables.common import (
|
22 |
+
_maybe_remove,
|
23 |
+
ensure_clean_store,
|
24 |
+
)
|
25 |
+
|
26 |
+
pytestmark = pytest.mark.single_cpu
|
27 |
+
|
28 |
+
tables = pytest.importorskip("tables")
|
29 |
+
|
30 |
+
|
31 |
+
@pytest.mark.filterwarnings("ignore::tables.NaturalNameWarning")
|
32 |
+
def test_append(setup_path):
|
33 |
+
with ensure_clean_store(setup_path) as store:
|
34 |
+
# this is allowed by almost always don't want to do it
|
35 |
+
# tables.NaturalNameWarning):
|
36 |
+
df = DataFrame(
|
37 |
+
np.random.default_rng(2).standard_normal((20, 4)),
|
38 |
+
columns=Index(list("ABCD"), dtype=object),
|
39 |
+
index=date_range("2000-01-01", periods=20, freq="B"),
|
40 |
+
)
|
41 |
+
_maybe_remove(store, "df1")
|
42 |
+
store.append("df1", df[:10])
|
43 |
+
store.append("df1", df[10:])
|
44 |
+
tm.assert_frame_equal(store["df1"], df)
|
45 |
+
|
46 |
+
_maybe_remove(store, "df2")
|
47 |
+
store.put("df2", df[:10], format="table")
|
48 |
+
store.append("df2", df[10:])
|
49 |
+
tm.assert_frame_equal(store["df2"], df)
|
50 |
+
|
51 |
+
_maybe_remove(store, "df3")
|
52 |
+
store.append("/df3", df[:10])
|
53 |
+
store.append("/df3", df[10:])
|
54 |
+
tm.assert_frame_equal(store["df3"], df)
|
55 |
+
|
56 |
+
# this is allowed by almost always don't want to do it
|
57 |
+
# tables.NaturalNameWarning
|
58 |
+
_maybe_remove(store, "/df3 foo")
|
59 |
+
store.append("/df3 foo", df[:10])
|
60 |
+
store.append("/df3 foo", df[10:])
|
61 |
+
tm.assert_frame_equal(store["df3 foo"], df)
|
62 |
+
|
63 |
+
# dtype issues - mizxed type in a single object column
|
64 |
+
df = DataFrame(data=[[1, 2], [0, 1], [1, 2], [0, 0]])
|
65 |
+
df["mixed_column"] = "testing"
|
66 |
+
df.loc[2, "mixed_column"] = np.nan
|
67 |
+
_maybe_remove(store, "df")
|
68 |
+
store.append("df", df)
|
69 |
+
tm.assert_frame_equal(store["df"], df)
|
70 |
+
|
71 |
+
# uints - test storage of uints
|
72 |
+
uint_data = DataFrame(
|
73 |
+
{
|
74 |
+
"u08": Series(
|
75 |
+
np.random.default_rng(2).integers(0, high=255, size=5),
|
76 |
+
dtype=np.uint8,
|
77 |
+
),
|
78 |
+
"u16": Series(
|
79 |
+
np.random.default_rng(2).integers(0, high=65535, size=5),
|
80 |
+
dtype=np.uint16,
|
81 |
+
),
|
82 |
+
"u32": Series(
|
83 |
+
np.random.default_rng(2).integers(0, high=2**30, size=5),
|
84 |
+
dtype=np.uint32,
|
85 |
+
),
|
86 |
+
"u64": Series(
|
87 |
+
[2**58, 2**59, 2**60, 2**61, 2**62],
|
88 |
+
dtype=np.uint64,
|
89 |
+
),
|
90 |
+
},
|
91 |
+
index=np.arange(5),
|
92 |
+
)
|
93 |
+
_maybe_remove(store, "uints")
|
94 |
+
store.append("uints", uint_data)
|
95 |
+
tm.assert_frame_equal(store["uints"], uint_data, check_index_type=True)
|
96 |
+
|
97 |
+
# uints - test storage of uints in indexable columns
|
98 |
+
_maybe_remove(store, "uints")
|
99 |
+
# 64-bit indices not yet supported
|
100 |
+
store.append("uints", uint_data, data_columns=["u08", "u16", "u32"])
|
101 |
+
tm.assert_frame_equal(store["uints"], uint_data, check_index_type=True)
|
102 |
+
|
103 |
+
|
104 |
+
def test_append_series(setup_path):
|
105 |
+
with ensure_clean_store(setup_path) as store:
|
106 |
+
# basic
|
107 |
+
ss = Series(range(20), dtype=np.float64, index=[f"i_{i}" for i in range(20)])
|
108 |
+
ts = Series(
|
109 |
+
np.arange(10, dtype=np.float64), index=date_range("2020-01-01", periods=10)
|
110 |
+
)
|
111 |
+
ns = Series(np.arange(100))
|
112 |
+
|
113 |
+
store.append("ss", ss)
|
114 |
+
result = store["ss"]
|
115 |
+
tm.assert_series_equal(result, ss)
|
116 |
+
assert result.name is None
|
117 |
+
|
118 |
+
store.append("ts", ts)
|
119 |
+
result = store["ts"]
|
120 |
+
tm.assert_series_equal(result, ts)
|
121 |
+
assert result.name is None
|
122 |
+
|
123 |
+
ns.name = "foo"
|
124 |
+
store.append("ns", ns)
|
125 |
+
result = store["ns"]
|
126 |
+
tm.assert_series_equal(result, ns)
|
127 |
+
assert result.name == ns.name
|
128 |
+
|
129 |
+
# select on the values
|
130 |
+
expected = ns[ns > 60]
|
131 |
+
result = store.select("ns", "foo>60")
|
132 |
+
tm.assert_series_equal(result, expected)
|
133 |
+
|
134 |
+
# select on the index and values
|
135 |
+
expected = ns[(ns > 70) & (ns.index < 90)]
|
136 |
+
result = store.select("ns", "foo>70 and index<90")
|
137 |
+
tm.assert_series_equal(result, expected, check_index_type=True)
|
138 |
+
|
139 |
+
# multi-index
|
140 |
+
mi = DataFrame(np.random.default_rng(2).standard_normal((5, 1)), columns=["A"])
|
141 |
+
mi["B"] = np.arange(len(mi))
|
142 |
+
mi["C"] = "foo"
|
143 |
+
mi.loc[3:5, "C"] = "bar"
|
144 |
+
mi.set_index(["C", "B"], inplace=True)
|
145 |
+
s = mi.stack(future_stack=True)
|
146 |
+
s.index = s.index.droplevel(2)
|
147 |
+
store.append("mi", s)
|
148 |
+
tm.assert_series_equal(store["mi"], s, check_index_type=True)
|
149 |
+
|
150 |
+
|
151 |
+
def test_append_some_nans(setup_path):
|
152 |
+
with ensure_clean_store(setup_path) as store:
|
153 |
+
df = DataFrame(
|
154 |
+
{
|
155 |
+
"A": Series(np.random.default_rng(2).standard_normal(20)).astype(
|
156 |
+
"int32"
|
157 |
+
),
|
158 |
+
"A1": np.random.default_rng(2).standard_normal(20),
|
159 |
+
"A2": np.random.default_rng(2).standard_normal(20),
|
160 |
+
"B": "foo",
|
161 |
+
"C": "bar",
|
162 |
+
"D": Timestamp("2001-01-01").as_unit("ns"),
|
163 |
+
"E": Timestamp("2001-01-02").as_unit("ns"),
|
164 |
+
},
|
165 |
+
index=np.arange(20),
|
166 |
+
)
|
167 |
+
# some nans
|
168 |
+
_maybe_remove(store, "df1")
|
169 |
+
df.loc[0:15, ["A1", "B", "D", "E"]] = np.nan
|
170 |
+
store.append("df1", df[:10])
|
171 |
+
store.append("df1", df[10:])
|
172 |
+
tm.assert_frame_equal(store["df1"], df, check_index_type=True)
|
173 |
+
|
174 |
+
# first column
|
175 |
+
df1 = df.copy()
|
176 |
+
df1["A1"] = np.nan
|
177 |
+
_maybe_remove(store, "df1")
|
178 |
+
store.append("df1", df1[:10])
|
179 |
+
store.append("df1", df1[10:])
|
180 |
+
tm.assert_frame_equal(store["df1"], df1, check_index_type=True)
|
181 |
+
|
182 |
+
# 2nd column
|
183 |
+
df2 = df.copy()
|
184 |
+
df2["A2"] = np.nan
|
185 |
+
_maybe_remove(store, "df2")
|
186 |
+
store.append("df2", df2[:10])
|
187 |
+
store.append("df2", df2[10:])
|
188 |
+
tm.assert_frame_equal(store["df2"], df2, check_index_type=True)
|
189 |
+
|
190 |
+
# datetimes
|
191 |
+
df3 = df.copy()
|
192 |
+
df3["E"] = np.nan
|
193 |
+
_maybe_remove(store, "df3")
|
194 |
+
store.append("df3", df3[:10])
|
195 |
+
store.append("df3", df3[10:])
|
196 |
+
tm.assert_frame_equal(store["df3"], df3, check_index_type=True)
|
197 |
+
|
198 |
+
|
199 |
+
def test_append_all_nans(setup_path):
|
200 |
+
with ensure_clean_store(setup_path) as store:
|
201 |
+
df = DataFrame(
|
202 |
+
{
|
203 |
+
"A1": np.random.default_rng(2).standard_normal(20),
|
204 |
+
"A2": np.random.default_rng(2).standard_normal(20),
|
205 |
+
},
|
206 |
+
index=np.arange(20),
|
207 |
+
)
|
208 |
+
df.loc[0:15, :] = np.nan
|
209 |
+
|
210 |
+
# nan some entire rows (dropna=True)
|
211 |
+
_maybe_remove(store, "df")
|
212 |
+
store.append("df", df[:10], dropna=True)
|
213 |
+
store.append("df", df[10:], dropna=True)
|
214 |
+
tm.assert_frame_equal(store["df"], df[-4:], check_index_type=True)
|
215 |
+
|
216 |
+
# nan some entire rows (dropna=False)
|
217 |
+
_maybe_remove(store, "df2")
|
218 |
+
store.append("df2", df[:10], dropna=False)
|
219 |
+
store.append("df2", df[10:], dropna=False)
|
220 |
+
tm.assert_frame_equal(store["df2"], df, check_index_type=True)
|
221 |
+
|
222 |
+
# tests the option io.hdf.dropna_table
|
223 |
+
with pd.option_context("io.hdf.dropna_table", False):
|
224 |
+
_maybe_remove(store, "df3")
|
225 |
+
store.append("df3", df[:10])
|
226 |
+
store.append("df3", df[10:])
|
227 |
+
tm.assert_frame_equal(store["df3"], df)
|
228 |
+
|
229 |
+
with pd.option_context("io.hdf.dropna_table", True):
|
230 |
+
_maybe_remove(store, "df4")
|
231 |
+
store.append("df4", df[:10])
|
232 |
+
store.append("df4", df[10:])
|
233 |
+
tm.assert_frame_equal(store["df4"], df[-4:])
|
234 |
+
|
235 |
+
# nan some entire rows (string are still written!)
|
236 |
+
df = DataFrame(
|
237 |
+
{
|
238 |
+
"A1": np.random.default_rng(2).standard_normal(20),
|
239 |
+
"A2": np.random.default_rng(2).standard_normal(20),
|
240 |
+
"B": "foo",
|
241 |
+
"C": "bar",
|
242 |
+
},
|
243 |
+
index=np.arange(20),
|
244 |
+
)
|
245 |
+
|
246 |
+
df.loc[0:15, :] = np.nan
|
247 |
+
|
248 |
+
_maybe_remove(store, "df")
|
249 |
+
store.append("df", df[:10], dropna=True)
|
250 |
+
store.append("df", df[10:], dropna=True)
|
251 |
+
tm.assert_frame_equal(store["df"], df, check_index_type=True)
|
252 |
+
|
253 |
+
_maybe_remove(store, "df2")
|
254 |
+
store.append("df2", df[:10], dropna=False)
|
255 |
+
store.append("df2", df[10:], dropna=False)
|
256 |
+
tm.assert_frame_equal(store["df2"], df, check_index_type=True)
|
257 |
+
|
258 |
+
# nan some entire rows (but since we have dates they are still
|
259 |
+
# written!)
|
260 |
+
df = DataFrame(
|
261 |
+
{
|
262 |
+
"A1": np.random.default_rng(2).standard_normal(20),
|
263 |
+
"A2": np.random.default_rng(2).standard_normal(20),
|
264 |
+
"B": "foo",
|
265 |
+
"C": "bar",
|
266 |
+
"D": Timestamp("2001-01-01").as_unit("ns"),
|
267 |
+
"E": Timestamp("2001-01-02").as_unit("ns"),
|
268 |
+
},
|
269 |
+
index=np.arange(20),
|
270 |
+
)
|
271 |
+
|
272 |
+
df.loc[0:15, :] = np.nan
|
273 |
+
|
274 |
+
_maybe_remove(store, "df")
|
275 |
+
store.append("df", df[:10], dropna=True)
|
276 |
+
store.append("df", df[10:], dropna=True)
|
277 |
+
tm.assert_frame_equal(store["df"], df, check_index_type=True)
|
278 |
+
|
279 |
+
_maybe_remove(store, "df2")
|
280 |
+
store.append("df2", df[:10], dropna=False)
|
281 |
+
store.append("df2", df[10:], dropna=False)
|
282 |
+
tm.assert_frame_equal(store["df2"], df, check_index_type=True)
|
283 |
+
|
284 |
+
|
285 |
+
def test_append_frame_column_oriented(setup_path):
|
286 |
+
with ensure_clean_store(setup_path) as store:
|
287 |
+
# column oriented
|
288 |
+
df = DataFrame(
|
289 |
+
np.random.default_rng(2).standard_normal((10, 4)),
|
290 |
+
columns=Index(list("ABCD"), dtype=object),
|
291 |
+
index=date_range("2000-01-01", periods=10, freq="B"),
|
292 |
+
)
|
293 |
+
df.index = df.index._with_freq(None) # freq doesn't round-trip
|
294 |
+
|
295 |
+
_maybe_remove(store, "df1")
|
296 |
+
store.append("df1", df.iloc[:, :2], axes=["columns"])
|
297 |
+
store.append("df1", df.iloc[:, 2:])
|
298 |
+
tm.assert_frame_equal(store["df1"], df)
|
299 |
+
|
300 |
+
result = store.select("df1", "columns=A")
|
301 |
+
expected = df.reindex(columns=["A"])
|
302 |
+
tm.assert_frame_equal(expected, result)
|
303 |
+
|
304 |
+
# selection on the non-indexable
|
305 |
+
result = store.select("df1", ("columns=A", "index=df.index[0:4]"))
|
306 |
+
expected = df.reindex(columns=["A"], index=df.index[0:4])
|
307 |
+
tm.assert_frame_equal(expected, result)
|
308 |
+
|
309 |
+
# this isn't supported
|
310 |
+
msg = re.escape(
|
311 |
+
"passing a filterable condition to a non-table indexer "
|
312 |
+
"[Filter: Not Initialized]"
|
313 |
+
)
|
314 |
+
with pytest.raises(TypeError, match=msg):
|
315 |
+
store.select("df1", "columns=A and index>df.index[4]")
|
316 |
+
|
317 |
+
|
318 |
+
def test_append_with_different_block_ordering(setup_path):
|
319 |
+
# GH 4096; using same frames, but different block orderings
|
320 |
+
with ensure_clean_store(setup_path) as store:
|
321 |
+
for i in range(10):
|
322 |
+
df = DataFrame(
|
323 |
+
np.random.default_rng(2).standard_normal((10, 2)), columns=list("AB")
|
324 |
+
)
|
325 |
+
df["index"] = range(10)
|
326 |
+
df["index"] += i * 10
|
327 |
+
df["int64"] = Series([1] * len(df), dtype="int64")
|
328 |
+
df["int16"] = Series([1] * len(df), dtype="int16")
|
329 |
+
|
330 |
+
if i % 2 == 0:
|
331 |
+
del df["int64"]
|
332 |
+
df["int64"] = Series([1] * len(df), dtype="int64")
|
333 |
+
if i % 3 == 0:
|
334 |
+
a = df.pop("A")
|
335 |
+
df["A"] = a
|
336 |
+
|
337 |
+
df.set_index("index", inplace=True)
|
338 |
+
|
339 |
+
store.append("df", df)
|
340 |
+
|
341 |
+
# test a different ordering but with more fields (like invalid
|
342 |
+
# combinations)
|
343 |
+
with ensure_clean_store(setup_path) as store:
|
344 |
+
df = DataFrame(
|
345 |
+
np.random.default_rng(2).standard_normal((10, 2)),
|
346 |
+
columns=list("AB"),
|
347 |
+
dtype="float64",
|
348 |
+
)
|
349 |
+
df["int64"] = Series([1] * len(df), dtype="int64")
|
350 |
+
df["int16"] = Series([1] * len(df), dtype="int16")
|
351 |
+
store.append("df", df)
|
352 |
+
|
353 |
+
# store additional fields in different blocks
|
354 |
+
df["int16_2"] = Series([1] * len(df), dtype="int16")
|
355 |
+
msg = re.escape(
|
356 |
+
"cannot match existing table structure for [int16] on appending data"
|
357 |
+
)
|
358 |
+
with pytest.raises(ValueError, match=msg):
|
359 |
+
store.append("df", df)
|
360 |
+
|
361 |
+
# store multiple additional fields in different blocks
|
362 |
+
df["float_3"] = Series([1.0] * len(df), dtype="float64")
|
363 |
+
msg = re.escape(
|
364 |
+
"cannot match existing table structure for [A,B] on appending data"
|
365 |
+
)
|
366 |
+
with pytest.raises(ValueError, match=msg):
|
367 |
+
store.append("df", df)
|
368 |
+
|
369 |
+
|
370 |
+
def test_append_with_strings(setup_path):
|
371 |
+
with ensure_clean_store(setup_path) as store:
|
372 |
+
|
373 |
+
def check_col(key, name, size):
|
374 |
+
assert (
|
375 |
+
getattr(store.get_storer(key).table.description, name).itemsize == size
|
376 |
+
)
|
377 |
+
|
378 |
+
# avoid truncation on elements
|
379 |
+
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
|
380 |
+
store.append("df_big", df)
|
381 |
+
tm.assert_frame_equal(store.select("df_big"), df)
|
382 |
+
check_col("df_big", "values_block_1", 15)
|
383 |
+
|
384 |
+
# appending smaller string ok
|
385 |
+
df2 = DataFrame([[124, "asdqy"], [346, "dggnhefbdfb"]])
|
386 |
+
store.append("df_big", df2)
|
387 |
+
expected = concat([df, df2])
|
388 |
+
tm.assert_frame_equal(store.select("df_big"), expected)
|
389 |
+
check_col("df_big", "values_block_1", 15)
|
390 |
+
|
391 |
+
# avoid truncation on elements
|
392 |
+
df = DataFrame([[123, "asdqwerty"], [345, "dggnhebbsdfbdfb"]])
|
393 |
+
store.append("df_big2", df, min_itemsize={"values": 50})
|
394 |
+
tm.assert_frame_equal(store.select("df_big2"), df)
|
395 |
+
check_col("df_big2", "values_block_1", 50)
|
396 |
+
|
397 |
+
# bigger string on next append
|
398 |
+
store.append("df_new", df)
|
399 |
+
df_new = DataFrame([[124, "abcdefqhij"], [346, "abcdefghijklmnopqrtsuvwxyz"]])
|
400 |
+
msg = (
|
401 |
+
r"Trying to store a string with len \[26\] in "
|
402 |
+
r"\[values_block_1\] column but\n"
|
403 |
+
r"this column has a limit of \[15\]!\n"
|
404 |
+
"Consider using min_itemsize to preset the sizes on these "
|
405 |
+
"columns"
|
406 |
+
)
|
407 |
+
with pytest.raises(ValueError, match=msg):
|
408 |
+
store.append("df_new", df_new)
|
409 |
+
|
410 |
        # min_itemsize on Series index (GH 11412)
        df = DataFrame(
            {
                "A": [0.0, 1.0, 2.0, 3.0, 4.0],
                "B": [0.0, 1.0, 0.0, 1.0, 0.0],
                "C": Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object),
                "D": date_range("20130101", periods=5),
            }
        ).set_index("C")
        store.append("ss", df["B"], min_itemsize={"index": 4})
        tm.assert_series_equal(store.select("ss"), df["B"])

        # same as above, with data_columns=True
        store.append("ss2", df["B"], data_columns=True, min_itemsize={"index": 4})
        tm.assert_series_equal(store.select("ss2"), df["B"])

        # min_itemsize in index without appending (GH 10381)
        store.put("ss3", df, format="table", min_itemsize={"index": 6})
        # just make sure there is a longer string:
        df2 = df.copy().reset_index().assign(C="longer").set_index("C")
        store.append("ss3", df2)
        tm.assert_frame_equal(store.select("ss3"), concat([df, df2]))

        # same as above, with a Series
        store.put("ss4", df["B"], format="table", min_itemsize={"index": 6})
        store.append("ss4", df2["B"])
        tm.assert_series_equal(store.select("ss4"), concat([df["B"], df2["B"]]))

        # with nans
        _maybe_remove(store, "df")
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=10, freq="B"),
        )
        df["string"] = "foo"
        df.loc[df.index[1:4], "string"] = np.nan
        df["string2"] = "bar"
        df.loc[df.index[4:8], "string2"] = np.nan
        df["string3"] = "bah"
        df.loc[df.index[1:], "string3"] = np.nan
        store.append("df", df)
        result = store.select("df")
        tm.assert_frame_equal(result, df)

    with ensure_clean_store(setup_path) as store:
        df = DataFrame({"A": "foo", "B": "bar"}, index=range(10))

        # a min_itemsize that creates a data_column
        _maybe_remove(store, "df")
        store.append("df", df, min_itemsize={"A": 200})
        check_col("df", "A", 200)
        assert store.get_storer("df").data_columns == ["A"]

        # a min_itemsize that creates a data_column2
        _maybe_remove(store, "df")
        store.append("df", df, data_columns=["B"], min_itemsize={"A": 200})
        check_col("df", "A", 200)
        assert store.get_storer("df").data_columns == ["B", "A"]

        # a min_itemsize that creates a data_column2
        _maybe_remove(store, "df")
        store.append("df", df, data_columns=["B"], min_itemsize={"values": 200})
        check_col("df", "B", 200)
        check_col("df", "values_block_0", 200)
        assert store.get_storer("df").data_columns == ["B"]

        # infer the .typ on subsequent appends
        _maybe_remove(store, "df")
        store.append("df", df[:5], min_itemsize=200)
        store.append("df", df[5:], min_itemsize=200)
        tm.assert_frame_equal(store["df"], df)

        # invalid min_itemsize keys
        df = DataFrame(["foo", "foo", "foo", "barh", "barh", "barh"], columns=["A"])
        _maybe_remove(store, "df")
        msg = re.escape(
            "min_itemsize has the key [foo] which is not an axis or data_column"
        )
        with pytest.raises(ValueError, match=msg):
            store.append("df", df, min_itemsize={"foo": 20, "foobar": 20})


def test_append_with_empty_string(setup_path):
    with ensure_clean_store(setup_path) as store:
        # with all empty strings (GH 12242)
        df = DataFrame({"x": ["a", "b", "c", "d", "e", "f", ""]})
        store.append("df", df[:-1], min_itemsize={"x": 1})
        store.append("df", df[-1:], min_itemsize={"x": 1})
        tm.assert_frame_equal(store.select("df"), df)


def test_append_with_data_columns(setup_path):
    with ensure_clean_store(setup_path) as store:
        df = DataFrame(
            np.random.default_rng(2).standard_normal((10, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=date_range("2000-01-01", periods=10, freq="B"),
        )
        df.iloc[0, df.columns.get_loc("B")] = 1.0
        _maybe_remove(store, "df")
        store.append("df", df[:2], data_columns=["B"])
        store.append("df", df[2:])
        tm.assert_frame_equal(store["df"], df)

        # check that we have indices created
        assert store._handle.root.df.table.cols.index.is_indexed is True
        assert store._handle.root.df.table.cols.B.is_indexed is True

        # data column searching
        result = store.select("df", "B>0")
        expected = df[df.B > 0]
        tm.assert_frame_equal(result, expected)

        # data column searching (with an indexable and a data_columns)
        result = store.select("df", "B>0 and index>df.index[3]")
        df_new = df.reindex(index=df.index[4:])
        expected = df_new[df_new.B > 0]
        tm.assert_frame_equal(result, expected)

        # data column selection with a string data_column
        df_new = df.copy()
        df_new["string"] = "foo"
        df_new.loc[df_new.index[1:4], "string"] = np.nan
        df_new.loc[df_new.index[5:6], "string"] = "bar"
        _maybe_remove(store, "df")
        store.append("df", df_new, data_columns=["string"])
        result = store.select("df", "string='foo'")
        expected = df_new[df_new.string == "foo"]
        tm.assert_frame_equal(result, expected)

    # using min_itemsize and a data column
    def check_col(key, name, size):
        assert (
            getattr(store.get_storer(key).table.description, name).itemsize == size
        )

    with ensure_clean_store(setup_path) as store:
        _maybe_remove(store, "df")
        store.append("df", df_new, data_columns=["string"], min_itemsize={"string": 30})
        check_col("df", "string", 30)
        _maybe_remove(store, "df")
        store.append("df", df_new, data_columns=["string"], min_itemsize=30)
        check_col("df", "string", 30)
        _maybe_remove(store, "df")
        store.append("df", df_new, data_columns=["string"], min_itemsize={"values": 30})
        check_col("df", "string", 30)

    with ensure_clean_store(setup_path) as store:
        df_new["string2"] = "foobarbah"
        df_new["string_block1"] = "foobarbah1"
        df_new["string_block2"] = "foobarbah2"
        _maybe_remove(store, "df")
        store.append(
            "df",
            df_new,
            data_columns=["string", "string2"],
            min_itemsize={"string": 30, "string2": 40, "values": 50},
        )
        check_col("df", "string", 30)
        check_col("df", "string2", 40)
        check_col("df", "values_block_1", 50)

    with ensure_clean_store(setup_path) as store:
        # multiple data columns
        df_new = df.copy()
        df_new.iloc[0, df_new.columns.get_loc("A")] = 1.0
        df_new.iloc[0, df_new.columns.get_loc("B")] = -1.0
        df_new["string"] = "foo"

        sl = df_new.columns.get_loc("string")
        df_new.iloc[1:4, sl] = np.nan
        df_new.iloc[5:6, sl] = "bar"

        df_new["string2"] = "foo"
        sl = df_new.columns.get_loc("string2")
        df_new.iloc[2:5, sl] = np.nan
        df_new.iloc[7:8, sl] = "bar"
        _maybe_remove(store, "df")
        store.append("df", df_new, data_columns=["A", "B", "string", "string2"])
        result = store.select("df", "string='foo' and string2='foo' and A>0 and B<0")
        expected = df_new[
            (df_new.string == "foo")
            & (df_new.string2 == "foo")
            & (df_new.A > 0)
            & (df_new.B < 0)
        ]
        tm.assert_frame_equal(result, expected, check_freq=False)
        # FIXME: 2020-05-07 freq check randomly fails in the CI

        # yield an empty frame
        result = store.select("df", "string='foo' and string2='cool'")
        expected = df_new[(df_new.string == "foo") & (df_new.string2 == "cool")]
        tm.assert_frame_equal(result, expected)

    with ensure_clean_store(setup_path) as store:
        # doc example
        df_dc = df.copy()
        df_dc["string"] = "foo"
        df_dc.loc[df_dc.index[4:6], "string"] = np.nan
        df_dc.loc[df_dc.index[7:9], "string"] = "bar"
        df_dc["string2"] = "cool"
        df_dc["datetime"] = Timestamp("20010102").as_unit("ns")
        df_dc.loc[df_dc.index[3:5], ["A", "B", "datetime"]] = np.nan

        _maybe_remove(store, "df_dc")
        store.append(
            "df_dc", df_dc, data_columns=["B", "C", "string", "string2", "datetime"]
        )
        result = store.select("df_dc", "B>0")

        expected = df_dc[df_dc.B > 0]
        tm.assert_frame_equal(result, expected)

        result = store.select("df_dc", ["B > 0", "C > 0", "string == foo"])
        expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
        tm.assert_frame_equal(result, expected, check_freq=False)
        # FIXME: 2020-12-07 intermittent build failures here with freq of
        # None instead of BDay(4)

    with ensure_clean_store(setup_path) as store:
        # doc example part 2

        index = date_range("1/1/2000", periods=8)
        df_dc = DataFrame(
            np.random.default_rng(2).standard_normal((8, 3)),
            index=index,
            columns=["A", "B", "C"],
        )
        df_dc["string"] = "foo"
        df_dc.loc[df_dc.index[4:6], "string"] = np.nan
        df_dc.loc[df_dc.index[7:9], "string"] = "bar"
        df_dc[["B", "C"]] = df_dc[["B", "C"]].abs()
        df_dc["string2"] = "cool"

        # on-disk operations
        store.append("df_dc", df_dc, data_columns=["B", "C", "string", "string2"])

        result = store.select("df_dc", "B>0")
        expected = df_dc[df_dc.B > 0]
        tm.assert_frame_equal(result, expected)

        result = store.select("df_dc", ["B > 0", "C > 0", 'string == "foo"'])
        expected = df_dc[(df_dc.B > 0) & (df_dc.C > 0) & (df_dc.string == "foo")]
        tm.assert_frame_equal(result, expected)


def test_append_hierarchical(tmp_path, setup_path, multiindex_dataframe_random_data):
    df = multiindex_dataframe_random_data
    df.columns.name = None

    with ensure_clean_store(setup_path) as store:
        store.append("mi", df)
        result = store.select("mi")
        tm.assert_frame_equal(result, df)

        # GH 3748
        result = store.select("mi", columns=["A", "B"])
        expected = df.reindex(columns=["A", "B"])
        tm.assert_frame_equal(result, expected)

    path = tmp_path / "test.hdf"
    df.to_hdf(path, key="df", format="table")
    result = read_hdf(path, "df", columns=["A", "B"])
    expected = df.reindex(columns=["A", "B"])
    tm.assert_frame_equal(result, expected)


def test_append_misc(setup_path):
    with ensure_clean_store(setup_path) as store:
        df = DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=Index([f"i-{i}" for i in range(30)], dtype=object),
        )
        store.append("df", df, chunksize=1)
        result = store.select("df")
        tm.assert_frame_equal(result, df)

        store.append("df1", df, expectedrows=10)
        result = store.select("df1")
        tm.assert_frame_equal(result, df)


@pytest.mark.parametrize("chunksize", [10, 200, 1000])
def test_append_misc_chunksize(setup_path, chunksize):
    # more chunksize in append tests
    df = DataFrame(
        1.1 * np.arange(120).reshape((30, 4)),
        columns=Index(list("ABCD"), dtype=object),
        index=Index([f"i-{i}" for i in range(30)], dtype=object),
    )
    df["string"] = "foo"
    df["float322"] = 1.0
    df["float322"] = df["float322"].astype("float32")
    df["bool"] = df["float322"] > 0
    df["time1"] = Timestamp("20130101").as_unit("ns")
    df["time2"] = Timestamp("20130102").as_unit("ns")
    with ensure_clean_store(setup_path, mode="w") as store:
        store.append("obj", df, chunksize=chunksize)
        result = store.select("obj")
        tm.assert_frame_equal(result, df)


def test_append_misc_empty_frame(setup_path):
    # empty frame, GH4273
    with ensure_clean_store(setup_path) as store:
        # 0 len
        df_empty = DataFrame(columns=list("ABC"))
        store.append("df", df_empty)
        with pytest.raises(KeyError, match="'No object named df in the file'"):
            store.select("df")

        # repeated append of 0/non-zero frames
        df = DataFrame(np.random.default_rng(2).random((10, 3)), columns=list("ABC"))
        store.append("df", df)
        tm.assert_frame_equal(store.select("df"), df)
        store.append("df", df_empty)
        tm.assert_frame_equal(store.select("df"), df)

        # store
        df = DataFrame(columns=list("ABC"))
        store.put("df2", df)
        tm.assert_frame_equal(store.select("df2"), df)


# TODO(ArrayManager) currently we rely on falling back to BlockManager, but
# the conversion from AM->BM converts the invalid object dtype column into
# a datetime64 column no longer raising an error
@td.skip_array_manager_not_yet_implemented
def test_append_raise(setup_path):
    with ensure_clean_store(setup_path) as store:
        # test append with invalid input to get good error messages

        # list in column
        df = DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=Index([f"i-{i}" for i in range(30)], dtype=object),
        )
        df["invalid"] = [["a"]] * len(df)
        assert df.dtypes["invalid"] == np.object_
        msg = re.escape(
            """Cannot serialize the column [invalid]
because its data contents are not [string] but [mixed] object dtype"""
        )
        with pytest.raises(TypeError, match=msg):
            store.append("df", df)

        # multiple invalid columns
        df["invalid2"] = [["a"]] * len(df)
        df["invalid3"] = [["a"]] * len(df)
        with pytest.raises(TypeError, match=msg):
            store.append("df", df)

        # datetime with embedded nans as object
        df = DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=Index([f"i-{i}" for i in range(30)], dtype=object),
        )
        s = Series(datetime.datetime(2001, 1, 2), index=df.index)
        s = s.astype(object)
        s[0:5] = np.nan
        df["invalid"] = s
        assert df.dtypes["invalid"] == np.object_
        msg = "too many timezones in this block, create separate data columns"
        with pytest.raises(TypeError, match=msg):
            store.append("df", df)

        # directly ndarray
        msg = "value must be None, Series, or DataFrame"
        with pytest.raises(TypeError, match=msg):
            store.append("df", np.arange(10))

        # series directly
        msg = re.escape(
            "cannot properly create the storer for: "
            "[group->df,value-><class 'pandas.core.series.Series'>]"
        )
        with pytest.raises(TypeError, match=msg):
            store.append("df", Series(np.arange(10)))

        # appending an incompatible table
        df = DataFrame(
            1.1 * np.arange(120).reshape((30, 4)),
            columns=Index(list("ABCD"), dtype=object),
            index=Index([f"i-{i}" for i in range(30)], dtype=object),
        )
        store.append("df", df)

        df["foo"] = "foo"
        msg = re.escape(
            "invalid combination of [non_index_axes] on appending data "
            "[(1, ['A', 'B', 'C', 'D', 'foo'])] vs current table "
            "[(1, ['A', 'B', 'C', 'D'])]"
        )
        with pytest.raises(ValueError, match=msg):
            store.append("df", df)

        # incompatible type (GH 41897)
        _maybe_remove(store, "df")
        df["foo"] = Timestamp("20130101")
        store.append("df", df)
        df["foo"] = "bar"
        msg = re.escape(
            "invalid combination of [values_axes] on appending data "
            "[name->values_block_1,cname->values_block_1,"
            "dtype->bytes24,kind->string,shape->(1, 30)] "
            "vs current table "
            "[name->values_block_1,cname->values_block_1,"
            "dtype->datetime64[s],kind->datetime64[s],shape->None]"
        )
        with pytest.raises(ValueError, match=msg):
            store.append("df", df)


def test_append_with_timedelta(setup_path):
    # GH 3577
    # append timedelta

    ts = Timestamp("20130101").as_unit("ns")
    df = DataFrame(
        {
            "A": ts,
            "B": [ts + timedelta(days=i, seconds=10) for i in range(10)],
        }
    )
    df["C"] = df["A"] - df["B"]
    df.loc[3:5, "C"] = np.nan

    with ensure_clean_store(setup_path) as store:
        # table
        _maybe_remove(store, "df")
        store.append("df", df, data_columns=True)
        result = store.select("df")
        tm.assert_frame_equal(result, df)

        result = store.select("df", where="C<100000")
        tm.assert_frame_equal(result, df)

        result = store.select("df", where="C<pd.Timedelta('-3D')")
        tm.assert_frame_equal(result, df.iloc[3:])

        result = store.select("df", "C<'-3D'")
        tm.assert_frame_equal(result, df.iloc[3:])

        # a bit hacky here as we don't really deal with the NaT properly

        result = store.select("df", "C<'-500000s'")
        result = result.dropna(subset=["C"])
        tm.assert_frame_equal(result, df.iloc[6:])

        result = store.select("df", "C<'-3.5D'")
        result = result.iloc[1:]
        tm.assert_frame_equal(result, df.iloc[4:])

        # fixed
        _maybe_remove(store, "df2")
        store.put("df2", df)
        result = store.select("df2")
        tm.assert_frame_equal(result, df)


def test_append_to_multiple(setup_path):
    df1 = DataFrame(
        np.random.default_rng(2).standard_normal((10, 4)),
        columns=Index(list("ABCD"), dtype=object),
        index=date_range("2000-01-01", periods=10, freq="B"),
    )
    df2 = df1.copy().rename(columns="{}_2".format)
    df2["foo"] = "bar"
    df = concat([df1, df2], axis=1)

    with ensure_clean_store(setup_path) as store:
        # exceptions
        msg = "append_to_multiple requires a selector that is in passed dict"
        with pytest.raises(ValueError, match=msg):
            store.append_to_multiple(
                {"df1": ["A", "B"], "df2": None}, df, selector="df3"
            )

        with pytest.raises(ValueError, match=msg):
            store.append_to_multiple({"df1": None, "df2": None}, df, selector="df3")

        msg = (
            "append_to_multiple must have a dictionary specified as the way to "
            "split the value"
        )
        with pytest.raises(ValueError, match=msg):
            store.append_to_multiple("df1", df, "df1")

        # regular operation
        store.append_to_multiple({"df1": ["A", "B"], "df2": None}, df, selector="df1")
        result = store.select_as_multiple(
            ["df1", "df2"], where=["A>0", "B>0"], selector="df1"
        )
        expected = df[(df.A > 0) & (df.B > 0)]
        tm.assert_frame_equal(result, expected)


def test_append_to_multiple_dropna(setup_path):
    df1 = DataFrame(
        np.random.default_rng(2).standard_normal((10, 4)),
        columns=Index(list("ABCD"), dtype=object),
        index=date_range("2000-01-01", periods=10, freq="B"),
    )
    df2 = DataFrame(
        np.random.default_rng(2).standard_normal((10, 4)),
        columns=Index(list("ABCD"), dtype=object),
        index=date_range("2000-01-01", periods=10, freq="B"),
    ).rename(columns="{}_2".format)
    df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
    df = concat([df1, df2], axis=1)

    with ensure_clean_store(setup_path) as store:
        # dropna=True should guarantee rows are synchronized
        store.append_to_multiple(
            {"df1": ["A", "B"], "df2": None}, df, selector="df1", dropna=True
        )
        result = store.select_as_multiple(["df1", "df2"])
        expected = df.dropna()
        tm.assert_frame_equal(result, expected, check_index_type=True)
        tm.assert_index_equal(store.select("df1").index, store.select("df2").index)


def test_append_to_multiple_dropna_false(setup_path):
    df1 = DataFrame(
        np.random.default_rng(2).standard_normal((10, 4)),
        columns=Index(list("ABCD"), dtype=object),
        index=date_range("2000-01-01", periods=10, freq="B"),
    )
    df2 = df1.copy().rename(columns="{}_2".format)
    df1.iloc[1, df1.columns.get_indexer(["A", "B"])] = np.nan
    df = concat([df1, df2], axis=1)

    with ensure_clean_store(setup_path) as store, pd.option_context(
        "io.hdf.dropna_table", True
    ):
        # dropna=False shouldn't synchronize row indexes
        store.append_to_multiple(
            {"df1a": ["A", "B"], "df2a": None}, df, selector="df1a", dropna=False
        )

        msg = "all tables must have exactly the same nrows!"
        with pytest.raises(ValueError, match=msg):
            store.select_as_multiple(["df1a", "df2a"])

        assert not store.select("df1a").index.equals(store.select("df2a").index)


def test_append_to_multiple_min_itemsize(setup_path):
    # GH 11238
    df = DataFrame(
        {
            "IX": np.arange(1, 21),
            "Num": np.arange(1, 21),
            "BigNum": np.arange(1, 21) * 88,
            "Str": ["a" for _ in range(20)],
            "LongStr": ["abcde" for _ in range(20)],
        }
    )
    expected = df.iloc[[0]]

    with ensure_clean_store(setup_path) as store:
        store.append_to_multiple(
            {
                "index": ["IX"],
                "nums": ["Num", "BigNum"],
                "strs": ["Str", "LongStr"],
            },
            df.iloc[[0]],
            "index",
            min_itemsize={"Str": 10, "LongStr": 100, "Num": 2},
        )
        result = store.select_as_multiple(["index", "nums", "strs"])
        tm.assert_frame_equal(result, expected, check_index_type=True)
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_complex.py
ADDED
@@ -0,0 +1,195 @@
import numpy as np
import pytest

import pandas as pd
from pandas import (
    DataFrame,
    Series,
)
import pandas._testing as tm
from pandas.tests.io.pytables.common import ensure_clean_store

from pandas.io.pytables import read_hdf


def test_complex_fixed(tmp_path, setup_path):
    df = DataFrame(
        np.random.default_rng(2).random((4, 5)).astype(np.complex64),
        index=list("abcd"),
        columns=list("ABCDE"),
    )

    path = tmp_path / setup_path
    df.to_hdf(path, key="df")
    reread = read_hdf(path, "df")
    tm.assert_frame_equal(df, reread)

    df = DataFrame(
        np.random.default_rng(2).random((4, 5)).astype(np.complex128),
        index=list("abcd"),
        columns=list("ABCDE"),
    )
    path = tmp_path / setup_path
    df.to_hdf(path, key="df")
    reread = read_hdf(path, "df")
    tm.assert_frame_equal(df, reread)


def test_complex_table(tmp_path, setup_path):
    df = DataFrame(
        np.random.default_rng(2).random((4, 5)).astype(np.complex64),
        index=list("abcd"),
        columns=list("ABCDE"),
    )

    path = tmp_path / setup_path
    df.to_hdf(path, key="df", format="table")
    reread = read_hdf(path, key="df")
    tm.assert_frame_equal(df, reread)

    df = DataFrame(
        np.random.default_rng(2).random((4, 5)).astype(np.complex128),
        index=list("abcd"),
        columns=list("ABCDE"),
    )

    path = tmp_path / setup_path
    df.to_hdf(path, key="df", format="table", mode="w")
    reread = read_hdf(path, "df")
    tm.assert_frame_equal(df, reread)


def test_complex_mixed_fixed(tmp_path, setup_path):
    complex64 = np.array(
        [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64
    )
    complex128 = np.array(
        [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
    )
    df = DataFrame(
        {
            "A": [1, 2, 3, 4],
            "B": ["a", "b", "c", "d"],
            "C": complex64,
            "D": complex128,
            "E": [1.0, 2.0, 3.0, 4.0],
        },
        index=list("abcd"),
    )
    path = tmp_path / setup_path
    df.to_hdf(path, key="df")
    reread = read_hdf(path, "df")
    tm.assert_frame_equal(df, reread)


def test_complex_mixed_table(tmp_path, setup_path):
    complex64 = np.array(
        [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex64
    )
    complex128 = np.array(
        [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
    )
    df = DataFrame(
        {
            "A": [1, 2, 3, 4],
            "B": ["a", "b", "c", "d"],
            "C": complex64,
            "D": complex128,
            "E": [1.0, 2.0, 3.0, 4.0],
        },
        index=list("abcd"),
    )

    with ensure_clean_store(setup_path) as store:
        store.append("df", df, data_columns=["A", "B"])
        result = store.select("df", where="A>2")
        tm.assert_frame_equal(df.loc[df.A > 2], result)

    path = tmp_path / setup_path
    df.to_hdf(path, key="df", format="table")
    reread = read_hdf(path, "df")
    tm.assert_frame_equal(df, reread)


def test_complex_across_dimensions_fixed(tmp_path, setup_path):
    complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
    s = Series(complex128, index=list("abcd"))
    df = DataFrame({"A": s, "B": s})

    objs = [s, df]
    comps = [tm.assert_series_equal, tm.assert_frame_equal]
    for obj, comp in zip(objs, comps):
        path = tmp_path / setup_path
        obj.to_hdf(path, key="obj", format="fixed")
        reread = read_hdf(path, "obj")
        comp(obj, reread)


def test_complex_across_dimensions(tmp_path, setup_path):
    complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
    s = Series(complex128, index=list("abcd"))
    df = DataFrame({"A": s, "B": s})

    path = tmp_path / setup_path
    df.to_hdf(path, key="obj", format="table")
    reread = read_hdf(path, "obj")
    tm.assert_frame_equal(df, reread)


def test_complex_indexing_error(setup_path):
    complex128 = np.array(
        [1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j], dtype=np.complex128
    )
    df = DataFrame(
        {"A": [1, 2, 3, 4], "B": ["a", "b", "c", "d"], "C": complex128},
        index=list("abcd"),
    )

    msg = (
        "Columns containing complex values can be stored "
        "but cannot be indexed when using table format. "
        "Either use fixed format, set index=False, "
        "or do not include the columns containing complex "
        "values to data_columns when initializing the table."
    )

    with ensure_clean_store(setup_path) as store:
        with pytest.raises(TypeError, match=msg):
            store.append("df", df, data_columns=["C"])


def test_complex_series_error(tmp_path, setup_path):
    complex128 = np.array([1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j, 1.0 + 1.0j])
    s = Series(complex128, index=list("abcd"))

    msg = (
        "Columns containing complex values can be stored "
        "but cannot be indexed when using table format. "
        "Either use fixed format, set index=False, "
        "or do not include the columns containing complex "
        "values to data_columns when initializing the table."
    )

    path = tmp_path / setup_path
    with pytest.raises(TypeError, match=msg):
        s.to_hdf(path, key="obj", format="t")

    path = tmp_path / setup_path
    s.to_hdf(path, key="obj", format="t", index=False)
    reread = read_hdf(path, "obj")
    tm.assert_series_equal(s, reread)


def test_complex_append(setup_path):
    df = DataFrame(
        {
            "a": np.random.default_rng(2).standard_normal(100).astype(np.complex128),
            "b": np.random.default_rng(2).standard_normal(100),
        }
    )

    with ensure_clean_store(setup_path) as store:
        store.append("df", df, data_columns=["b"])
        store.append("df", df)
        result = store.select("df")
        tm.assert_frame_equal(pd.concat([df, df], axis=0), result)
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_read.py
ADDED
@@ -0,0 +1,412 @@
from contextlib import closing
from pathlib import Path
import re

import numpy as np
import pytest

from pandas._libs.tslibs import Timestamp
from pandas.compat import is_platform_windows

import pandas as pd
from pandas import (
    DataFrame,
    HDFStore,
    Index,
    Series,
    _testing as tm,
    date_range,
    read_hdf,
)
from pandas.tests.io.pytables.common import (
    _maybe_remove,
    ensure_clean_store,
)
from pandas.util import _test_decorators as td

from pandas.io.pytables import TableIterator

pytestmark = pytest.mark.single_cpu


def test_read_missing_key_close_store(tmp_path, setup_path):
    # GH 25766
    path = tmp_path / setup_path
    df = DataFrame({"a": range(2), "b": range(2)})
    df.to_hdf(path, key="k1")

    with pytest.raises(KeyError, match="'No object named k2 in the file'"):
        read_hdf(path, "k2")

    # smoke test to test that file is properly closed after
    # read with KeyError before another write
    df.to_hdf(path, key="k2")


def test_read_index_error_close_store(tmp_path, setup_path):
    # GH 25766
    path = tmp_path / setup_path
    df = DataFrame({"A": [], "B": []}, index=[])
    df.to_hdf(path, key="k1")

    with pytest.raises(IndexError, match=r"list index out of range"):
        read_hdf(path, "k1", stop=0)

    # smoke test to test that file is properly closed after
    # read with IndexError before another write
    df.to_hdf(path, key="k1")


def test_read_missing_key_opened_store(tmp_path, setup_path):
    # GH 28699
    path = tmp_path / setup_path
    df = DataFrame({"a": range(2), "b": range(2)})
    df.to_hdf(path, key="k1")

    with HDFStore(path, "r") as store:
        with pytest.raises(KeyError, match="'No object named k2 in the file'"):
            read_hdf(store, "k2")

        # Test that the file is still open after a KeyError and that we can
        # still read from it.
        read_hdf(store, "k1")


def test_read_column(setup_path):
    df = DataFrame(
        np.random.default_rng(2).standard_normal((10, 4)),
        columns=Index(list("ABCD"), dtype=object),
        index=date_range("2000-01-01", periods=10, freq="B"),
    )

    with ensure_clean_store(setup_path) as store:
        _maybe_remove(store, "df")

        # GH 17912
        # HDFStore.select_column should raise a KeyError
        # exception if the key is not a valid store
        with pytest.raises(KeyError, match="No object named df in the file"):
            store.select_column("df", "index")

        store.append("df", df)
        # error
        with pytest.raises(
            KeyError, match=re.escape("'column [foo] not found in the table'")
        ):
            store.select_column("df", "foo")

        msg = re.escape("select_column() got an unexpected keyword argument 'where'")
        with pytest.raises(TypeError, match=msg):
            store.select_column("df", "index", where=["index>5"])

        # valid
        result = store.select_column("df", "index")
        tm.assert_almost_equal(result.values, Series(df.index).values)
        assert isinstance(result, Series)

        # not a data indexable column
        msg = re.escape(
            "column [values_block_0] can not be extracted individually; "
            "it is not data indexable"
        )
        with pytest.raises(ValueError, match=msg):
            store.select_column("df", "values_block_0")

        # a data column
        df2 = df.copy()
        df2["string"] = "foo"
        store.append("df2", df2, data_columns=["string"])
        result = store.select_column("df2", "string")
        tm.assert_almost_equal(result.values, df2["string"].values)

        # a data column with NaNs, result excludes the NaNs
        df3 = df.copy()
        df3["string"] = "foo"
        df3.loc[df3.index[4:6], "string"] = np.nan
        store.append("df3", df3, data_columns=["string"])
        result = store.select_column("df3", "string")
        tm.assert_almost_equal(result.values, df3["string"].values)

        # start/stop
        result = store.select_column("df3", "string", start=2)
        tm.assert_almost_equal(result.values, df3["string"].values[2:])

        result = store.select_column("df3", "string", start=-2)
        tm.assert_almost_equal(result.values, df3["string"].values[-2:])

        result = store.select_column("df3", "string", stop=2)
        tm.assert_almost_equal(result.values, df3["string"].values[:2])

        result = store.select_column("df3", "string", stop=-2)
        tm.assert_almost_equal(result.values, df3["string"].values[:-2])

        result = store.select_column("df3", "string", start=2, stop=-2)
        tm.assert_almost_equal(result.values, df3["string"].values[2:-2])

        result = store.select_column("df3", "string", start=-2, stop=2)
        tm.assert_almost_equal(result.values, df3["string"].values[-2:2])

        # GH 10392 - make sure column name is preserved
        df4 = DataFrame({"A": np.random.default_rng(2).standard_normal(10), "B": "foo"})
        store.append("df4", df4, data_columns=True)
        expected = df4["B"]
        result = store.select_column("df4", "B")
        tm.assert_series_equal(result, expected)


def test_pytables_native_read(datapath):
    with ensure_clean_store(
        datapath("io", "data", "legacy_hdf/pytables_native.h5"), mode="r"
    ) as store:
        d2 = store["detector/readout"]
        assert isinstance(d2, DataFrame)


@pytest.mark.skipif(is_platform_windows(), reason="native2 read fails oddly on windows")
def test_pytables_native2_read(datapath):
    with ensure_clean_store(
        datapath("io", "data", "legacy_hdf", "pytables_native2.h5"), mode="r"
    ) as store:
        str(store)
        d1 = store["detector"]
        assert isinstance(d1, DataFrame)


def test_legacy_table_fixed_format_read_py2(datapath):
    # GH 24510
    # legacy table with fixed format written in Python 2
    with ensure_clean_store(
        datapath("io", "data", "legacy_hdf", "legacy_table_fixed_py2.h5"), mode="r"
    ) as store:
        result = store.select("df")
        expected = DataFrame(
            [[1, 2, 3, "D"]],
            columns=["A", "B", "C", "D"],
            index=Index(["ABC"], name="INDEX_NAME"),
        )
        tm.assert_frame_equal(expected, result)


def test_legacy_table_fixed_format_read_datetime_py2(datapath):
    # GH 31750
    # legacy table with fixed format and datetime64 column written in Python 2
    expected = DataFrame(
        [[Timestamp("2020-02-06T18:00")]],
        columns=["A"],
        index=Index(["date"]),
        dtype="M8[ns]",
    )
    with ensure_clean_store(
        datapath("io", "data", "legacy_hdf", "legacy_table_fixed_datetime_py2.h5"),
        mode="r",
    ) as store:
        result = store.select("df")
        tm.assert_frame_equal(expected, result)


def test_legacy_table_read_py2(datapath):
    # issue: 24925
    # legacy table written in Python 2
    with ensure_clean_store(
        datapath("io", "data", "legacy_hdf", "legacy_table_py2.h5"), mode="r"
    ) as store:
        result = store.select("table")

    expected = DataFrame({"a": ["a", "b"], "b": [2, 3]})
    tm.assert_frame_equal(expected, result)


def test_read_hdf_open_store(tmp_path, setup_path):
    # GH10330
    # No check for non-string path_or-buf, and no test of open store
    df = DataFrame(
        np.random.default_rng(2).random((4, 5)),
        index=list("abcd"),
        columns=list("ABCDE"),
    )
    df.index.name = "letters"
    df = df.set_index(keys="E", append=True)

    path = tmp_path / setup_path
    df.to_hdf(path, key="df", mode="w")
    direct = read_hdf(path, "df")
    with HDFStore(path, mode="r") as store:
        indirect = read_hdf(store, "df")
        tm.assert_frame_equal(direct, indirect)
        assert store.is_open


def test_read_hdf_index_not_view(tmp_path, setup_path):
    # GH 37441
    # Ensure that the index of the DataFrame is not a view
    # into the original recarray that pytables reads in
    df = DataFrame(
        np.random.default_rng(2).random((4, 5)),
        index=[0, 1, 2, 3],
        columns=list("ABCDE"),
    )

    path = tmp_path / setup_path
    df.to_hdf(path, key="df", mode="w", format="table")

    df2 = read_hdf(path, "df")
    assert df2.index._data.base is None
    tm.assert_frame_equal(df, df2)


def test_read_hdf_iterator(tmp_path, setup_path):
    df = DataFrame(
        np.random.default_rng(2).random((4, 5)),
        index=list("abcd"),
        columns=list("ABCDE"),
    )
    df.index.name = "letters"
    df = df.set_index(keys="E", append=True)

    path = tmp_path / setup_path
    df.to_hdf(path, key="df", mode="w", format="t")
    direct = read_hdf(path, "df")
    iterator = read_hdf(path, "df", iterator=True)
    with closing(iterator.store):
        assert isinstance(iterator, TableIterator)
        indirect = next(iterator.__iter__())
    tm.assert_frame_equal(direct, indirect)


def test_read_nokey(tmp_path, setup_path):
    # GH10443
    df = DataFrame(
        np.random.default_rng(2).random((4, 5)),
        index=list("abcd"),
        columns=list("ABCDE"),
    )

    # Categorical dtype not supported for "fixed" format. So no need
    # to test with that dtype in the dataframe here.
    path = tmp_path / setup_path
    df.to_hdf(path, key="df", mode="a")
    reread = read_hdf(path)
    tm.assert_frame_equal(df, reread)
    df.to_hdf(path, key="df2", mode="a")

    msg = "key must be provided when HDF5 file contains multiple datasets."
    with pytest.raises(ValueError, match=msg):
        read_hdf(path)


def test_read_nokey_table(tmp_path, setup_path):
    # GH13231
    df = DataFrame({"i": range(5), "c": Series(list("abacd"), dtype="category")})

    path = tmp_path / setup_path
    df.to_hdf(path, key="df", mode="a", format="table")
    reread = read_hdf(path)
    tm.assert_frame_equal(df, reread)
    df.to_hdf(path, key="df2", mode="a", format="table")

    msg = "key must be provided when HDF5 file contains multiple datasets."
    with pytest.raises(ValueError, match=msg):
        read_hdf(path)


def test_read_nokey_empty(tmp_path, setup_path):
    path = tmp_path / setup_path
    store = HDFStore(path)
    store.close()
    msg = re.escape(
        "Dataset(s) incompatible with Pandas data types, not table, or no "
        "datasets found in HDF5 file."
    )
    with pytest.raises(ValueError, match=msg):
        read_hdf(path)


def test_read_from_pathlib_path(tmp_path, setup_path):
    # GH11773
    expected = DataFrame(
        np.random.default_rng(2).random((4, 5)),
        index=list("abcd"),
        columns=list("ABCDE"),
    )
    filename = tmp_path / setup_path
    path_obj = Path(filename)

    expected.to_hdf(path_obj, key="df", mode="a")
    actual = read_hdf(path_obj, key="df")

    tm.assert_frame_equal(expected, actual)


@td.skip_if_no("py.path")
def test_read_from_py_localpath(tmp_path, setup_path):
    # GH11773
    from py.path import local as LocalPath

    expected = DataFrame(
        np.random.default_rng(2).random((4, 5)),
        index=list("abcd"),
        columns=list("ABCDE"),
    )
    filename = tmp_path / setup_path
    path_obj = LocalPath(filename)

    expected.to_hdf(path_obj, key="df", mode="a")
    actual = read_hdf(path_obj, key="df")

    tm.assert_frame_equal(expected, actual)


@pytest.mark.parametrize("format", ["fixed", "table"])
def test_read_hdf_series_mode_r(tmp_path, format, setup_path):
    # GH 16583
    # Tests that reading a Series saved to an HDF file
    # still works if a mode='r' argument is supplied
    series = Series(range(10), dtype=np.float64)
    path = tmp_path / setup_path
    series.to_hdf(path, key="data", format=format)
    result = read_hdf(path, key="data", mode="r")
    tm.assert_series_equal(result, series)


@pytest.mark.filterwarnings(r"ignore:Period with BDay freq is deprecated:FutureWarning")
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_read_py2_hdf_file_in_py3(datapath):
    # GH 16781

    # tests reading a PeriodIndex DataFrame written in Python2 in Python3

    # the file was generated in Python 2.7 like so:
    #
    # df = DataFrame([1.,2,3], index=pd.PeriodIndex(
    #     ['2015-01-01', '2015-01-02', '2015-01-05'], freq='B'))
    # df.to_hdf('periodindex_0.20.1_x86_64_darwin_2.7.13.h5', 'p')

    expected = DataFrame(
        [1.0, 2, 3],
        index=pd.PeriodIndex(["2015-01-01", "2015-01-02", "2015-01-05"], freq="B"),
    )

    with ensure_clean_store(
        datapath(
            "io", "data", "legacy_hdf", "periodindex_0.20.1_x86_64_darwin_2.7.13.h5"
        ),
        mode="r",
    ) as store:
        result = store["p"]
        tm.assert_frame_equal(result, expected)


def test_read_infer_string(tmp_path, setup_path):
    # GH#54431
    pytest.importorskip("pyarrow")
    df = DataFrame({"a": ["a", "b", None]})
    path = tmp_path / setup_path
    df.to_hdf(path, key="data", format="table")
    with pd.option_context("future.infer_string", True):
        result = read_hdf(path, key="data", mode="r")
    expected = DataFrame(
        {"a": ["a", "b", None]},
        dtype="string[pyarrow_numpy]",
        columns=Index(["a"], dtype="string[pyarrow_numpy]"),
    )
    tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_retain_attributes.py
ADDED
@@ -0,0 +1,92 @@
import pytest

from pandas import (
    DataFrame,
    DatetimeIndex,
    Series,
    _testing as tm,
    date_range,
    errors,
    read_hdf,
)
from pandas.tests.io.pytables.common import (
    _maybe_remove,
    ensure_clean_store,
)

pytestmark = pytest.mark.single_cpu


def test_retain_index_attributes(setup_path, unit):
    # GH 3499, losing frequency info on index recreation
    dti = date_range("2000-1-1", periods=3, freq="h", unit=unit)
    df = DataFrame({"A": Series(range(3), index=dti)})

    with ensure_clean_store(setup_path) as store:
        _maybe_remove(store, "data")
        store.put("data", df, format="table")

        result = store.get("data")
        tm.assert_frame_equal(df, result)

        for attr in ["freq", "tz", "name"]:
            for idx in ["index", "columns"]:
                assert getattr(getattr(df, idx), attr, None) == getattr(
                    getattr(result, idx), attr, None
                )

        dti2 = date_range("2002-1-1", periods=3, freq="D", unit=unit)
        # try to append a table with a different frequency
        with tm.assert_produces_warning(errors.AttributeConflictWarning):
            df2 = DataFrame({"A": Series(range(3), index=dti2)})
            store.append("data", df2)

        assert store.get_storer("data").info["index"]["freq"] is None

        # this is ok
        _maybe_remove(store, "df2")
        dti3 = DatetimeIndex(
            ["2001-01-01", "2001-01-02", "2002-01-01"], dtype=f"M8[{unit}]"
        )
        df2 = DataFrame(
            {
                "A": Series(
                    range(3),
                    index=dti3,
                )
            }
        )
        store.append("df2", df2)
        dti4 = date_range("2002-1-1", periods=3, freq="D", unit=unit)
        df3 = DataFrame({"A": Series(range(3), index=dti4)})
        store.append("df2", df3)


def test_retain_index_attributes2(tmp_path, setup_path):
    path = tmp_path / setup_path

    with tm.assert_produces_warning(errors.AttributeConflictWarning):
        df = DataFrame(
            {"A": Series(range(3), index=date_range("2000-1-1", periods=3, freq="h"))}
        )
        df.to_hdf(path, key="data", mode="w", append=True)
        df2 = DataFrame(
            {"A": Series(range(3), index=date_range("2002-1-1", periods=3, freq="D"))}
        )

        df2.to_hdf(path, key="data", append=True)

    idx = date_range("2000-1-1", periods=3, freq="h")
    idx.name = "foo"
    df = DataFrame({"A": Series(range(3), index=idx)})
    df.to_hdf(path, key="data", mode="w", append=True)

    assert read_hdf(path, key="data").index.name == "foo"

    with tm.assert_produces_warning(errors.AttributeConflictWarning):
        idx2 = date_range("2001-1-1", periods=3, freq="h")
        idx2.name = "bar"
        df2 = DataFrame({"A": Series(range(3), index=idx2)})
        df2.to_hdf(path, key="data", append=True)

    assert read_hdf(path, "data").index.name is None
env-llmeval/lib/python3.10/site-packages/pandas/tests/io/pytables/test_subclass.py
ADDED
@@ -0,0 +1,52 @@
import numpy as np
import pytest

from pandas import (
    DataFrame,
    Series,
)
import pandas._testing as tm

from pandas.io.pytables import (
    HDFStore,
    read_hdf,
)

pytest.importorskip("tables")


class TestHDFStoreSubclass:
    # GH 33748
    def test_supported_for_subclass_dataframe(self, tmp_path):
        data = {"a": [1, 2], "b": [3, 4]}
        sdf = tm.SubclassedDataFrame(data, dtype=np.intp)

        expected = DataFrame(data, dtype=np.intp)

        path = tmp_path / "temp.h5"
        sdf.to_hdf(path, key="df")
        result = read_hdf(path, "df")
        tm.assert_frame_equal(result, expected)

        path = tmp_path / "temp.h5"
        with HDFStore(path) as store:
            store.put("df", sdf)
        result = read_hdf(path, "df")
        tm.assert_frame_equal(result, expected)

    def test_supported_for_subclass_series(self, tmp_path):
        data = [1, 2, 3]
        sser = tm.SubclassedSeries(data, dtype=np.intp)

        expected = Series(data, dtype=np.intp)

        path = tmp_path / "temp.h5"
        sser.to_hdf(path, key="ser")
        result = read_hdf(path, "ser")
        tm.assert_series_equal(result, expected)

        path = tmp_path / "temp.h5"
        with HDFStore(path) as store:
            store.put("ser", sser)
        result = read_hdf(path, "ser")
        tm.assert_series_equal(result, expected)