applied-ai-018 committed
Commit de38c38 · verified · 1 parent: 2e44652

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full set of changes.
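The files below were pushed with the upload-large-folder tool named in the commit message. As a rough, illustrative sketch only (the repo id, repo type, and local path are placeholders, not taken from this commit), such a push can be done with the huggingface_hub Python client:

from huggingface_hub import HfApi

api = HfApi()
# Resumable, multi-worker upload of a large local folder to an existing repo.
# repo_id, repo_type, and folder_path below are illustrative placeholders.
api.upload_large_folder(
    repo_id="applied-ai-018/example-repo",
    repo_type="model",
    folder_path="./env-llmeval",
)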
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/interval/__init__.py +0 -0
  2. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/interval/__pycache__/__init__.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/interval/__pycache__/test_interval.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/interval/__pycache__/test_interval_new.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/interval/test_interval.py +227 -0
  6. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/interval/test_interval_new.py +232 -0
  7. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/__init__.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_chaining_and_caching.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_getitem.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_iloc.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_indexing_slow.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_slice.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_sorted.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_chaining_and_caching.py +87 -0
  15. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_datetime.py +50 -0
  16. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_getitem.py +410 -0
  17. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_iloc.py +171 -0
  18. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_indexing_slow.py +118 -0
  19. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_loc.py +992 -0
  20. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_multiindex.py +235 -0
  21. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_partial.py +269 -0
  22. env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_setitem.py +589 -0
  23. env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/__init__.py +0 -0
  24. env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/__pycache__/__init__.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/__pycache__/test_hashtable.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/__pycache__/test_join.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/__pycache__/test_lib.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/__pycache__/test_libalgos.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/test_hashtable.py +748 -0
  30. env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/test_join.py +390 -0
  31. env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/test_lib.py +285 -0
  32. env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/test_libalgos.py +162 -0
  33. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/__init__.py +0 -0
  34. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/__pycache__/__init__.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/__init__.py +0 -0
  36. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/__pycache__/__init__.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/__pycache__/test_freq_code.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/__pycache__/test_frequencies.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/test_freq_code.py +69 -0
  40. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/test_frequencies.py +29 -0
  41. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/test_inference.py +558 -0
  42. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__init__.py +0 -0
  43. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/__init__.cpython-310.pyc +0 -0
  44. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_business_day.cpython-310.pyc +0 -0
  45. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_business_hour.cpython-310.pyc +0 -0
  46. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_business_year.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_custom_business_hour.cpython-310.pyc +0 -0
  48. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_dst.cpython-310.pyc +0 -0
  49. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_index.cpython-310.pyc +0 -0
  50. env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_month.cpython-310.pyc +0 -0
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/interval/__init__.py ADDED
Empty file.
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/interval/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/interval/__pycache__/test_interval.cpython-310.pyc ADDED
Binary file (6.88 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/interval/__pycache__/test_interval_new.cpython-310.pyc ADDED
Binary file (6.74 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/interval/test_interval.py ADDED
@@ -0,0 +1,227 @@
import numpy as np
import pytest

from pandas._libs import index as libindex
from pandas.compat import IS64

import pandas as pd
from pandas import (
    DataFrame,
    IntervalIndex,
    Series,
)
import pandas._testing as tm


class TestIntervalIndex:
    @pytest.fixture
    def series_with_interval_index(self):
        return Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6)))

    def test_getitem_with_scalar(self, series_with_interval_index, indexer_sl):
        ser = series_with_interval_index.copy()

        expected = ser.iloc[:3]
        tm.assert_series_equal(expected, indexer_sl(ser)[:3])
        tm.assert_series_equal(expected, indexer_sl(ser)[:2.5])
        tm.assert_series_equal(expected, indexer_sl(ser)[0.1:2.5])
        if indexer_sl is tm.loc:
            tm.assert_series_equal(expected, ser.loc[-1:3])

        expected = ser.iloc[1:4]
        tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 2.5, 3.5]])
        tm.assert_series_equal(expected, indexer_sl(ser)[[2, 3, 4]])
        tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 3, 4]])

        expected = ser.iloc[2:5]
        tm.assert_series_equal(expected, indexer_sl(ser)[ser >= 2])

    @pytest.mark.parametrize("direction", ["increasing", "decreasing"])
    def test_getitem_nonoverlapping_monotonic(self, direction, closed, indexer_sl):
        tpls = [(0, 1), (2, 3), (4, 5)]
        if direction == "decreasing":
            tpls = tpls[::-1]

        idx = IntervalIndex.from_tuples(tpls, closed=closed)
        ser = Series(list("abc"), idx)

        for key, expected in zip(idx.left, ser):
            if idx.closed_left:
                assert indexer_sl(ser)[key] == expected
            else:
                with pytest.raises(KeyError, match=str(key)):
                    indexer_sl(ser)[key]

        for key, expected in zip(idx.right, ser):
            if idx.closed_right:
                assert indexer_sl(ser)[key] == expected
            else:
                with pytest.raises(KeyError, match=str(key)):
                    indexer_sl(ser)[key]

        for key, expected in zip(idx.mid, ser):
            assert indexer_sl(ser)[key] == expected

    def test_getitem_non_matching(self, series_with_interval_index, indexer_sl):
        ser = series_with_interval_index.copy()

        # this is a departure from our current
        # indexing scheme, but simpler
        with pytest.raises(KeyError, match=r"\[-1\] not in index"):
            indexer_sl(ser)[[-1, 3, 4, 5]]

        with pytest.raises(KeyError, match=r"\[-1\] not in index"):
            indexer_sl(ser)[[-1, 3]]

    def test_loc_getitem_large_series(self, monkeypatch):
        size_cutoff = 20
        with monkeypatch.context():
            monkeypatch.setattr(libindex, "_SIZE_CUTOFF", size_cutoff)
            ser = Series(
                np.arange(size_cutoff),
                index=IntervalIndex.from_breaks(np.arange(size_cutoff + 1)),
            )

            result1 = ser.loc[:8]
            result2 = ser.loc[0:8]
            result3 = ser.loc[0:8:1]
        tm.assert_series_equal(result1, result2)
        tm.assert_series_equal(result1, result3)

    def test_loc_getitem_frame(self):
        # CategoricalIndex with IntervalIndex categories
        df = DataFrame({"A": range(10)})
        ser = pd.cut(df.A, 5)
        df["B"] = ser
        df = df.set_index("B")

        result = df.loc[4]
        expected = df.iloc[4:6]
        tm.assert_frame_equal(result, expected)

        with pytest.raises(KeyError, match="10"):
            df.loc[10]

        # single list-like
        result = df.loc[[4]]
        expected = df.iloc[4:6]
        tm.assert_frame_equal(result, expected)

        # non-unique
        result = df.loc[[4, 5]]
        expected = df.take([4, 5, 4, 5])
        tm.assert_frame_equal(result, expected)

        msg = (
            r"None of \[Index\(\[10\], dtype='object', name='B'\)\] "
            r"are in the \[index\]"
        )
        with pytest.raises(KeyError, match=msg):
            df.loc[[10]]

        # partial missing
        with pytest.raises(KeyError, match=r"\[10\] not in index"):
            df.loc[[10, 4]]

    def test_getitem_interval_with_nans(self, frame_or_series, indexer_sl):
        # GH#41831

        index = IntervalIndex([np.nan, np.nan])
        key = index[:-1]

        obj = frame_or_series(range(2), index=index)
        if frame_or_series is DataFrame and indexer_sl is tm.setitem:
            obj = obj.T

        result = indexer_sl(obj)[key]
        expected = obj

        tm.assert_equal(result, expected)

    def test_setitem_interval_with_slice(self):
        # GH#54722
        ii = IntervalIndex.from_breaks(range(4, 15))
        ser = Series(range(10), index=ii)

        orig = ser.copy()

        # This should be a no-op (used to raise)
        ser.loc[1:3] = 20
        tm.assert_series_equal(ser, orig)

        ser.loc[6:8] = 19
        orig.iloc[1:4] = 19
        tm.assert_series_equal(ser, orig)

        ser2 = Series(range(5), index=ii[::2])
        orig2 = ser2.copy()

        # this used to raise
        ser2.loc[6:8] = 22  # <- raises on main, sets on branch
        orig2.iloc[1] = 22
        tm.assert_series_equal(ser2, orig2)

        ser2.loc[5:7] = 21
        orig2.iloc[:2] = 21
        tm.assert_series_equal(ser2, orig2)


class TestIntervalIndexInsideMultiIndex:
    def test_mi_intervalindex_slicing_with_scalar(self):
        # GH#27456
        ii = IntervalIndex.from_arrays(
            [0, 1, 10, 11, 0, 1, 10, 11], [1, 2, 11, 12, 1, 2, 11, 12], name="MP"
        )
        idx = pd.MultiIndex.from_arrays(
            [
                pd.Index(["FC", "FC", "FC", "FC", "OWNER", "OWNER", "OWNER", "OWNER"]),
                pd.Index(
                    ["RID1", "RID1", "RID2", "RID2", "RID1", "RID1", "RID2", "RID2"]
                ),
                ii,
            ]
        )

        idx.names = ["Item", "RID", "MP"]
        df = DataFrame({"value": [1, 2, 3, 4, 5, 6, 7, 8]})
        df.index = idx

        query_df = DataFrame(
            {
                "Item": ["FC", "OWNER", "FC", "OWNER", "OWNER"],
                "RID": ["RID1", "RID1", "RID1", "RID2", "RID2"],
                "MP": [0.2, 1.5, 1.6, 11.1, 10.9],
            }
        )

        query_df = query_df.sort_index()

        idx = pd.MultiIndex.from_arrays([query_df.Item, query_df.RID, query_df.MP])
        query_df.index = idx
        result = df.value.loc[query_df.index]

        # the IntervalIndex level is indexed with floats, which map to
        # the intervals containing them. Matching the behavior we would get
        # with _only_ an IntervalIndex, we get an IntervalIndex level back.
        sliced_level = ii.take([0, 1, 1, 3, 2])
        expected_index = pd.MultiIndex.from_arrays(
            [idx.get_level_values(0), idx.get_level_values(1), sliced_level]
        )
        expected = Series([1, 6, 2, 8, 7], index=expected_index, name="value")
        tm.assert_series_equal(result, expected)

    @pytest.mark.xfail(not IS64, reason="GH 23440")
    @pytest.mark.parametrize(
        "base",
        [101, 1010],
    )
    def test_reindex_behavior_with_interval_index(self, base):
        # GH 51826

        ser = Series(
            range(base),
            index=IntervalIndex.from_arrays(range(base), range(1, base + 1)),
        )
        expected_result = Series([np.nan, 0], index=[np.nan, 1.0], dtype=float)
        result = ser.reindex(index=[np.nan, 1.0])
        tm.assert_series_equal(result, expected_result)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/interval/test_interval_new.py ADDED
@@ -0,0 +1,232 @@
import re

import numpy as np
import pytest

from pandas.compat import IS64

from pandas import (
    Index,
    Interval,
    IntervalIndex,
    Series,
)
import pandas._testing as tm


class TestIntervalIndex:
    @pytest.fixture
    def series_with_interval_index(self):
        return Series(np.arange(5), IntervalIndex.from_breaks(np.arange(6)))

    def test_loc_with_interval(self, series_with_interval_index, indexer_sl):
        # loc with single label / list of labels:
        #   - Intervals: only exact matches
        #   - scalars: those that contain it

        ser = series_with_interval_index.copy()

        expected = 0
        result = indexer_sl(ser)[Interval(0, 1)]
        assert result == expected

        expected = ser.iloc[3:5]
        result = indexer_sl(ser)[[Interval(3, 4), Interval(4, 5)]]
        tm.assert_series_equal(expected, result)

        # missing or not exact
        with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='left')")):
            indexer_sl(ser)[Interval(3, 5, closed="left")]

        with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")):
            indexer_sl(ser)[Interval(3, 5)]

        with pytest.raises(
            KeyError, match=re.escape("Interval(-2, 0, closed='right')")
        ):
            indexer_sl(ser)[Interval(-2, 0)]

        with pytest.raises(KeyError, match=re.escape("Interval(5, 6, closed='right')")):
            indexer_sl(ser)[Interval(5, 6)]

    def test_loc_with_scalar(self, series_with_interval_index, indexer_sl):
        # loc with single label / list of labels:
        #   - Intervals: only exact matches
        #   - scalars: those that contain it

        ser = series_with_interval_index.copy()

        assert indexer_sl(ser)[1] == 0
        assert indexer_sl(ser)[1.5] == 1
        assert indexer_sl(ser)[2] == 1

        expected = ser.iloc[1:4]
        tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 2.5, 3.5]])
        tm.assert_series_equal(expected, indexer_sl(ser)[[2, 3, 4]])
        tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 3, 4]])

        expected = ser.iloc[[1, 1, 2, 1]]
        tm.assert_series_equal(expected, indexer_sl(ser)[[1.5, 2, 2.5, 1.5]])

        expected = ser.iloc[2:5]
        tm.assert_series_equal(expected, indexer_sl(ser)[ser >= 2])

    def test_loc_with_slices(self, series_with_interval_index, indexer_sl):
        # loc with slices:
        #   - Interval objects: only works with exact matches
        #   - scalars: only works for non-overlapping, monotonic intervals,
        #     and start/stop select location based on the interval that
        #     contains them:
        #     (slice_loc(start, stop) == (idx.get_loc(start), idx.get_loc(stop))

        ser = series_with_interval_index.copy()

        # slice of interval

        expected = ser.iloc[:3]
        result = indexer_sl(ser)[Interval(0, 1) : Interval(2, 3)]
        tm.assert_series_equal(expected, result)

        expected = ser.iloc[3:]
        result = indexer_sl(ser)[Interval(3, 4) :]
        tm.assert_series_equal(expected, result)

        msg = "Interval objects are not currently supported"
        with pytest.raises(NotImplementedError, match=msg):
            indexer_sl(ser)[Interval(3, 6) :]

        with pytest.raises(NotImplementedError, match=msg):
            indexer_sl(ser)[Interval(3, 4, closed="left") :]

    def test_slice_step_ne1(self, series_with_interval_index):
        # GH#31658 slice of scalar with step != 1
        ser = series_with_interval_index.copy()
        expected = ser.iloc[0:4:2]

        result = ser[0:4:2]
        tm.assert_series_equal(result, expected)

        result2 = ser[0:4][::2]
        tm.assert_series_equal(result2, expected)

    def test_slice_float_start_stop(self, series_with_interval_index):
        # GH#31658 slicing with integers is positional, with floats is not
        # supported
        ser = series_with_interval_index.copy()

        msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
        with pytest.raises(ValueError, match=msg):
            ser[1.5:9.5:2]

    def test_slice_interval_step(self, series_with_interval_index):
        # GH#31658 allows for integer step!=1, not Interval step
        ser = series_with_interval_index.copy()
        msg = "label-based slicing with step!=1 is not supported for IntervalIndex"
        with pytest.raises(ValueError, match=msg):
            ser[0 : 4 : Interval(0, 1)]

    def test_loc_with_overlap(self, indexer_sl):
        idx = IntervalIndex.from_tuples([(1, 5), (3, 7)])
        ser = Series(range(len(idx)), index=idx)

        # scalar
        expected = ser
        result = indexer_sl(ser)[4]
        tm.assert_series_equal(expected, result)

        result = indexer_sl(ser)[[4]]
        tm.assert_series_equal(expected, result)

        # interval
        expected = 0
        result = indexer_sl(ser)[Interval(1, 5)]
        assert expected == result

        expected = ser
        result = indexer_sl(ser)[[Interval(1, 5), Interval(3, 7)]]
        tm.assert_series_equal(expected, result)

        with pytest.raises(KeyError, match=re.escape("Interval(3, 5, closed='right')")):
            indexer_sl(ser)[Interval(3, 5)]

        msg = (
            r"None of \[IntervalIndex\(\[\(3, 5\]\], "
            r"dtype='interval\[int64, right\]'\)\] are in the \[index\]"
        )
        with pytest.raises(KeyError, match=msg):
            indexer_sl(ser)[[Interval(3, 5)]]

        # slices with interval (only exact matches)
        expected = ser
        result = indexer_sl(ser)[Interval(1, 5) : Interval(3, 7)]
        tm.assert_series_equal(expected, result)

        msg = (
            "'can only get slices from an IntervalIndex if bounds are "
            "non-overlapping and all monotonic increasing or decreasing'"
        )
        with pytest.raises(KeyError, match=msg):
            indexer_sl(ser)[Interval(1, 6) : Interval(3, 8)]

        if indexer_sl is tm.loc:
            # slices with scalar raise for overlapping intervals
            # TODO KeyError is the appropriate error?
            with pytest.raises(KeyError, match=msg):
                ser.loc[1:4]

    def test_non_unique(self, indexer_sl):
        idx = IntervalIndex.from_tuples([(1, 3), (3, 7)])
        ser = Series(range(len(idx)), index=idx)

        result = indexer_sl(ser)[Interval(1, 3)]
        assert result == 0

        result = indexer_sl(ser)[[Interval(1, 3)]]
        expected = ser.iloc[0:1]
        tm.assert_series_equal(expected, result)

    def test_non_unique_moar(self, indexer_sl):
        idx = IntervalIndex.from_tuples([(1, 3), (1, 3), (3, 7)])
        ser = Series(range(len(idx)), index=idx)

        expected = ser.iloc[[0, 1]]
        result = indexer_sl(ser)[Interval(1, 3)]
        tm.assert_series_equal(expected, result)

        expected = ser
        result = indexer_sl(ser)[Interval(1, 3) :]
        tm.assert_series_equal(expected, result)

        expected = ser.iloc[[0, 1]]
        result = indexer_sl(ser)[[Interval(1, 3)]]
        tm.assert_series_equal(expected, result)

    def test_loc_getitem_missing_key_error_message(
        self, frame_or_series, series_with_interval_index
    ):
        # GH#27365
        ser = series_with_interval_index.copy()
        obj = frame_or_series(ser)
        with pytest.raises(KeyError, match=r"\[6\]"):
            obj.loc[[4, 5, 6]]


@pytest.mark.xfail(not IS64, reason="GH 23440")
@pytest.mark.parametrize(
    "intervals",
    [
        ([Interval(-np.inf, 0.0), Interval(0.0, 1.0)]),
        ([Interval(-np.inf, -2.0), Interval(-2.0, -1.0)]),
        ([Interval(-1.0, 0.0), Interval(0.0, np.inf)]),
        ([Interval(1.0, 2.0), Interval(2.0, np.inf)]),
    ],
)
def test_repeating_interval_index_with_infs(intervals):
    # GH 46658

    interval_index = Index(intervals * 51)

    expected = np.arange(1, 102, 2, dtype=np.intp)
    result = interval_index.get_indexer_for([intervals[1]])

    tm.assert_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (197 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_chaining_and_caching.cpython-310.pyc ADDED
Binary file (2.79 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_getitem.cpython-310.pyc ADDED
Binary file (11.2 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_iloc.cpython-310.pyc ADDED
Binary file (5.83 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_indexing_slow.cpython-310.pyc ADDED
Binary file (3.32 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_slice.cpython-310.pyc ADDED
Binary file (17.2 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/__pycache__/test_sorted.cpython-310.pyc ADDED
Binary file (5.5 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_chaining_and_caching.py ADDED
@@ -0,0 +1,87 @@
import numpy as np
import pytest

from pandas._libs import index as libindex
from pandas.errors import SettingWithCopyError
import pandas.util._test_decorators as td

from pandas import (
    DataFrame,
    MultiIndex,
    Series,
)
import pandas._testing as tm


def test_detect_chained_assignment(using_copy_on_write, warn_copy_on_write):
    # Inplace ops, originally from:
    # https://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug
    a = [12, 23]
    b = [123, None]
    c = [1234, 2345]
    d = [12345, 23456]
    tuples = [("eyes", "left"), ("eyes", "right"), ("ears", "left"), ("ears", "right")]
    events = {
        ("eyes", "left"): a,
        ("eyes", "right"): b,
        ("ears", "left"): c,
        ("ears", "right"): d,
    }
    multiind = MultiIndex.from_tuples(tuples, names=["part", "side"])
    zed = DataFrame(events, index=["a", "b"], columns=multiind)

    if using_copy_on_write:
        with tm.raises_chained_assignment_error():
            zed["eyes"]["right"].fillna(value=555, inplace=True)
    elif warn_copy_on_write:
        with tm.assert_produces_warning(None):
            zed["eyes"]["right"].fillna(value=555, inplace=True)
    else:
        msg = "A value is trying to be set on a copy of a slice from a DataFrame"
        with pytest.raises(SettingWithCopyError, match=msg):
            with tm.assert_produces_warning(None):
                zed["eyes"]["right"].fillna(value=555, inplace=True)


@td.skip_array_manager_invalid_test  # with ArrayManager df.loc[0] is not a view
def test_cache_updating(using_copy_on_write, warn_copy_on_write):
    # 5216
    # make sure that we don't try to set a dead cache
    a = np.random.default_rng(2).random((10, 3))
    df = DataFrame(a, columns=["x", "y", "z"])
    df_original = df.copy()
    tuples = [(i, j) for i in range(5) for j in range(2)]
    index = MultiIndex.from_tuples(tuples)
    df.index = index

    # setting via chained assignment
    # but actually works, since everything is a view

    with tm.raises_chained_assignment_error():
        df.loc[0]["z"].iloc[0] = 1.0

    if using_copy_on_write:
        assert df.loc[(0, 0), "z"] == df_original.loc[0, "z"]
    else:
        result = df.loc[(0, 0), "z"]
        assert result == 1

    # correct setting
    df.loc[(0, 0), "z"] = 2
    result = df.loc[(0, 0), "z"]
    assert result == 2


def test_indexer_caching(monkeypatch):
    # GH5727
    # make sure that indexers are in the _internal_names_set
    size_cutoff = 20
    with monkeypatch.context():
        monkeypatch.setattr(libindex, "_SIZE_CUTOFF", size_cutoff)
        index = MultiIndex.from_arrays([np.arange(size_cutoff), np.arange(size_cutoff)])
        s = Series(np.zeros(size_cutoff), index=index)

        # setitem
        s[s == 0] = 1
    expected = Series(np.ones(size_cutoff), index=index)
    tm.assert_series_equal(s, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_datetime.py ADDED
@@ -0,0 +1,50 @@
from datetime import datetime

import numpy as np

from pandas import (
    DataFrame,
    Index,
    MultiIndex,
    Period,
    Series,
    period_range,
    to_datetime,
)
import pandas._testing as tm


def test_multiindex_period_datetime():
    # GH4861, using datetime in period of multiindex raises exception

    idx1 = Index(["a", "a", "a", "b", "b"])
    idx2 = period_range("2012-01", periods=len(idx1), freq="M")
    s = Series(np.random.default_rng(2).standard_normal(len(idx1)), [idx1, idx2])

    # try Period as index
    expected = s.iloc[0]
    result = s.loc["a", Period("2012-01")]
    assert result == expected

    # try datetime as index
    result = s.loc["a", datetime(2012, 1, 1)]
    assert result == expected


def test_multiindex_datetime_columns():
    # GH35015, using datetime as column indices raises exception

    mi = MultiIndex.from_tuples(
        [(to_datetime("02/29/2020"), to_datetime("03/01/2020"))], names=["a", "b"]
    )

    df = DataFrame([], columns=mi)

    expected_df = DataFrame(
        [],
        columns=MultiIndex.from_arrays(
            [[to_datetime("02/29/2020")], [to_datetime("03/01/2020")]], names=["a", "b"]
        ),
    )

    tm.assert_frame_equal(df, expected_df)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_getitem.py ADDED
@@ -0,0 +1,410 @@
import numpy as np
import pytest

from pandas import (
    DataFrame,
    Index,
    MultiIndex,
    Series,
)
import pandas._testing as tm
from pandas.core.indexing import IndexingError

# ----------------------------------------------------------------------------
# test indexing of Series with multi-level Index
# ----------------------------------------------------------------------------


@pytest.mark.parametrize(
    "access_method",
    [lambda s, x: s[:, x], lambda s, x: s.loc[:, x], lambda s, x: s.xs(x, level=1)],
)
@pytest.mark.parametrize(
    "level1_value, expected",
    [(0, Series([1], index=[0])), (1, Series([2, 3], index=[1, 2]))],
)
def test_series_getitem_multiindex(access_method, level1_value, expected):
    # GH 6018
    # series regression getitem with a multi-index

    mi = MultiIndex.from_tuples([(0, 0), (1, 1), (2, 1)], names=["A", "B"])
    ser = Series([1, 2, 3], index=mi)
    expected.index.name = "A"

    result = access_method(ser, level1_value)
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("level0_value", ["D", "A"])
def test_series_getitem_duplicates_multiindex(level0_value):
    # GH 5725 the 'A' happens to be a valid Timestamp so the doesn't raise
    # the appropriate error, only in PY3 of course!

    index = MultiIndex(
        levels=[[level0_value, "B", "C"], [0, 26, 27, 37, 57, 67, 75, 82]],
        codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
        names=["tag", "day"],
    )
    arr = np.random.default_rng(2).standard_normal((len(index), 1))
    df = DataFrame(arr, index=index, columns=["val"])

    # confirm indexing on missing value raises KeyError
    if level0_value != "A":
        with pytest.raises(KeyError, match=r"^'A'$"):
            df.val["A"]

    with pytest.raises(KeyError, match=r"^'X'$"):
        df.val["X"]

    result = df.val[level0_value]
    expected = Series(
        arr.ravel()[0:3], name="val", index=Index([26, 37, 57], name="day")
    )
    tm.assert_series_equal(result, expected)


def test_series_getitem(multiindex_year_month_day_dataframe_random_data, indexer_sl):
    s = multiindex_year_month_day_dataframe_random_data["A"]
    expected = s.reindex(s.index[42:65])
    expected.index = expected.index.droplevel(0).droplevel(0)

    result = indexer_sl(s)[2000, 3]
    tm.assert_series_equal(result, expected)


def test_series_getitem_returns_scalar(
    multiindex_year_month_day_dataframe_random_data, indexer_sl
):
    s = multiindex_year_month_day_dataframe_random_data["A"]
    expected = s.iloc[49]

    result = indexer_sl(s)[2000, 3, 10]
    assert result == expected


@pytest.mark.parametrize(
    "indexer,expected_error,expected_error_msg",
    [
        (lambda s: s.__getitem__((2000, 3, 4)), KeyError, r"^\(2000, 3, 4\)$"),
        (lambda s: s[(2000, 3, 4)], KeyError, r"^\(2000, 3, 4\)$"),
        (lambda s: s.loc[(2000, 3, 4)], KeyError, r"^\(2000, 3, 4\)$"),
        (lambda s: s.loc[(2000, 3, 4, 5)], IndexingError, "Too many indexers"),
        (lambda s: s.__getitem__(len(s)), KeyError, ""),  # match should include len(s)
        (lambda s: s[len(s)], KeyError, ""),  # match should include len(s)
        (
            lambda s: s.iloc[len(s)],
            IndexError,
            "single positional indexer is out-of-bounds",
        ),
    ],
)
def test_series_getitem_indexing_errors(
    multiindex_year_month_day_dataframe_random_data,
    indexer,
    expected_error,
    expected_error_msg,
):
    s = multiindex_year_month_day_dataframe_random_data["A"]
    with pytest.raises(expected_error, match=expected_error_msg):
        indexer(s)


def test_series_getitem_corner_generator(
    multiindex_year_month_day_dataframe_random_data,
):
    s = multiindex_year_month_day_dataframe_random_data["A"]
    result = s[(x > 0 for x in s)]
    expected = s[s > 0]
    tm.assert_series_equal(result, expected)


# ----------------------------------------------------------------------------
# test indexing of DataFrame with multi-level Index
# ----------------------------------------------------------------------------


def test_getitem_simple(multiindex_dataframe_random_data):
    df = multiindex_dataframe_random_data.T
    expected = df.values[:, 0]
    result = df["foo", "one"].values
    tm.assert_almost_equal(result, expected)


@pytest.mark.parametrize(
    "indexer,expected_error_msg",
    [
        (lambda df: df[("foo", "four")], r"^\('foo', 'four'\)$"),
        (lambda df: df["foobar"], r"^'foobar'$"),
    ],
)
def test_frame_getitem_simple_key_error(
    multiindex_dataframe_random_data, indexer, expected_error_msg
):
    df = multiindex_dataframe_random_data.T
    with pytest.raises(KeyError, match=expected_error_msg):
        indexer(df)


def test_tuple_string_column_names():
    # GH#50372
    mi = MultiIndex.from_tuples([("a", "aa"), ("a", "ab"), ("b", "ba"), ("b", "bb")])
    df = DataFrame([range(4), range(1, 5), range(2, 6)], columns=mi)
    df["single_index"] = 0

    df_flat = df.copy()
    df_flat.columns = df_flat.columns.to_flat_index()
    df_flat["new_single_index"] = 0

    result = df_flat[[("a", "aa"), "new_single_index"]]
    expected = DataFrame(
        [[0, 0], [1, 0], [2, 0]], columns=Index([("a", "aa"), "new_single_index"])
    )
    tm.assert_frame_equal(result, expected)


def test_frame_getitem_multicolumn_empty_level():
    df = DataFrame({"a": ["1", "2", "3"], "b": ["2", "3", "4"]})
    df.columns = [
        ["level1 item1", "level1 item2"],
        ["", "level2 item2"],
        ["level3 item1", "level3 item2"],
    ]

    result = df["level1 item1"]
    expected = DataFrame(
        [["1"], ["2"], ["3"]], index=df.index, columns=["level3 item1"]
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "indexer,expected_slice",
    [
        (lambda df: df["foo"], slice(3)),
        (lambda df: df["bar"], slice(3, 5)),
        (lambda df: df.loc[:, "bar"], slice(3, 5)),
    ],
)
def test_frame_getitem_toplevel(
    multiindex_dataframe_random_data, indexer, expected_slice
):
    df = multiindex_dataframe_random_data.T
    expected = df.reindex(columns=df.columns[expected_slice])
    expected.columns = expected.columns.droplevel(0)
    result = indexer(df)
    tm.assert_frame_equal(result, expected)


def test_frame_mixed_depth_get():
    arrays = [
        ["a", "top", "top", "routine1", "routine1", "routine2"],
        ["", "OD", "OD", "result1", "result2", "result1"],
        ["", "wx", "wy", "", "", ""],
    ]

    tuples = sorted(zip(*arrays))
    index = MultiIndex.from_tuples(tuples)
    df = DataFrame(np.random.default_rng(2).standard_normal((4, 6)), columns=index)

    result = df["a"]
    expected = df["a", "", ""].rename("a")
    tm.assert_series_equal(result, expected)

    result = df["routine1", "result1"]
    expected = df["routine1", "result1", ""]
    expected = expected.rename(("routine1", "result1"))
    tm.assert_series_equal(result, expected)


def test_frame_getitem_nan_multiindex(nulls_fixture):
    # GH#29751
    # loc on a multiindex containing nan values
    n = nulls_fixture  # for code readability
    cols = ["a", "b", "c"]
    df = DataFrame(
        [[11, n, 13], [21, n, 23], [31, n, 33], [41, n, 43]],
        columns=cols,
    ).set_index(["a", "b"])
    df["c"] = df["c"].astype("int64")

    idx = (21, n)
    result = df.loc[:idx]
    expected = DataFrame([[11, n, 13], [21, n, 23]], columns=cols).set_index(["a", "b"])
    expected["c"] = expected["c"].astype("int64")
    tm.assert_frame_equal(result, expected)

    result = df.loc[idx:]
    expected = DataFrame(
        [[21, n, 23], [31, n, 33], [41, n, 43]], columns=cols
    ).set_index(["a", "b"])
    expected["c"] = expected["c"].astype("int64")
    tm.assert_frame_equal(result, expected)

    idx1, idx2 = (21, n), (31, n)
    result = df.loc[idx1:idx2]
    expected = DataFrame([[21, n, 23], [31, n, 33]], columns=cols).set_index(["a", "b"])
    expected["c"] = expected["c"].astype("int64")
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "indexer,expected",
    [
        (
            (["b"], ["bar", np.nan]),
            (
                DataFrame(
                    [[2, 3], [5, 6]],
                    columns=MultiIndex.from_tuples([("b", "bar"), ("b", np.nan)]),
                    dtype="int64",
                )
            ),
        ),
        (
            (["a", "b"]),
            (
                DataFrame(
                    [[1, 2, 3], [4, 5, 6]],
                    columns=MultiIndex.from_tuples(
                        [("a", "foo"), ("b", "bar"), ("b", np.nan)]
                    ),
                    dtype="int64",
                )
            ),
        ),
        (
            (["b"]),
            (
                DataFrame(
                    [[2, 3], [5, 6]],
                    columns=MultiIndex.from_tuples([("b", "bar"), ("b", np.nan)]),
                    dtype="int64",
                )
            ),
        ),
        (
            (["b"], ["bar"]),
            (
                DataFrame(
                    [[2], [5]],
                    columns=MultiIndex.from_tuples([("b", "bar")]),
                    dtype="int64",
                )
            ),
        ),
        (
            (["b"], [np.nan]),
            (
                DataFrame(
                    [[3], [6]],
                    columns=MultiIndex(
                        codes=[[1], [-1]], levels=[["a", "b"], ["bar", "foo"]]
                    ),
                    dtype="int64",
                )
            ),
        ),
        (("b", np.nan), Series([3, 6], dtype="int64", name=("b", np.nan))),
    ],
)
def test_frame_getitem_nan_cols_multiindex(
    indexer,
    expected,
    nulls_fixture,
):
    # Slicing MultiIndex including levels with nan values, for more information
    # see GH#25154
    df = DataFrame(
        [[1, 2, 3], [4, 5, 6]],
        columns=MultiIndex.from_tuples(
            [("a", "foo"), ("b", "bar"), ("b", nulls_fixture)]
        ),
        dtype="int64",
    )

    result = df.loc[:, indexer]
    tm.assert_equal(result, expected)


# ----------------------------------------------------------------------------
# test indexing of DataFrame with multi-level Index with duplicates
# ----------------------------------------------------------------------------


@pytest.fixture
def dataframe_with_duplicate_index():
    """Fixture for DataFrame used in tests for gh-4145 and gh-4146"""
    data = [["a", "d", "e", "c", "f", "b"], [1, 4, 5, 3, 6, 2], [1, 4, 5, 3, 6, 2]]
    index = ["h1", "h3", "h5"]
    columns = MultiIndex(
        levels=[["A", "B"], ["A1", "A2", "B1", "B2"]],
        codes=[[0, 0, 0, 1, 1, 1], [0, 3, 3, 0, 1, 2]],
        names=["main", "sub"],
    )
    return DataFrame(data, index=index, columns=columns)


@pytest.mark.parametrize(
    "indexer", [lambda df: df[("A", "A1")], lambda df: df.loc[:, ("A", "A1")]]
)
def test_frame_mi_access(dataframe_with_duplicate_index, indexer):
    # GH 4145
    df = dataframe_with_duplicate_index
    index = Index(["h1", "h3", "h5"])
    columns = MultiIndex.from_tuples([("A", "A1")], names=["main", "sub"])
    expected = DataFrame([["a", 1, 1]], index=columns, columns=index).T

    result = indexer(df)
    tm.assert_frame_equal(result, expected)


def test_frame_mi_access_returns_series(dataframe_with_duplicate_index):
    # GH 4146, not returning a block manager when selecting a unique index
    # from a duplicate index
    # as of 4879, this returns a Series (which is similar to what happens
    # with a non-unique)
    df = dataframe_with_duplicate_index
    expected = Series(["a", 1, 1], index=["h1", "h3", "h5"], name="A1")
    result = df["A"]["A1"]
    tm.assert_series_equal(result, expected)


def test_frame_mi_access_returns_frame(dataframe_with_duplicate_index):
    # selecting a non_unique from the 2nd level
    df = dataframe_with_duplicate_index
    expected = DataFrame(
        [["d", 4, 4], ["e", 5, 5]],
        index=Index(["B2", "B2"], name="sub"),
        columns=["h1", "h3", "h5"],
    ).T
    result = df["A"]["B2"]
    tm.assert_frame_equal(result, expected)


def test_frame_mi_empty_slice():
    # GH 15454
    df = DataFrame(0, index=range(2), columns=MultiIndex.from_product([[1], [2]]))
    result = df[[]]
    expected = DataFrame(
        index=[0, 1], columns=MultiIndex(levels=[[1], [2]], codes=[[], []])
    )
    tm.assert_frame_equal(result, expected)


def test_loc_empty_multiindex():
    # GH#36936
    arrays = [["a", "a", "b", "a"], ["a", "a", "b", "b"]]
    index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2"))
    df = DataFrame([1, 2, 3, 4], index=index, columns=["value"])

    # loc on empty multiindex == loc with False mask
    empty_multiindex = df.loc[df.loc[:, "value"] == 0, :].index
    result = df.loc[empty_multiindex, :]
    expected = df.loc[[False] * len(df.index), :]
    tm.assert_frame_equal(result, expected)

    # replacing value with loc on empty multiindex
    df.loc[df.loc[df.loc[:, "value"] == 0].index, "value"] = 5
    result = df
    expected = DataFrame([1, 2, 3, 4], index=index, columns=["value"])
    tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_iloc.py ADDED
@@ -0,0 +1,171 @@
import numpy as np
import pytest

from pandas import (
    DataFrame,
    MultiIndex,
    Series,
)
import pandas._testing as tm


@pytest.fixture
def simple_multiindex_dataframe():
    """
    Factory function to create simple 3 x 3 dataframe with
    both columns and row MultiIndex using supplied data or
    random data by default.
    """

    data = np.random.default_rng(2).standard_normal((3, 3))
    return DataFrame(
        data, columns=[[2, 2, 4], [6, 8, 10]], index=[[4, 4, 8], [8, 10, 12]]
    )


@pytest.mark.parametrize(
    "indexer, expected",
    [
        (
            lambda df: df.iloc[0],
            lambda arr: Series(arr[0], index=[[2, 2, 4], [6, 8, 10]], name=(4, 8)),
        ),
        (
            lambda df: df.iloc[2],
            lambda arr: Series(arr[2], index=[[2, 2, 4], [6, 8, 10]], name=(8, 12)),
        ),
        (
            lambda df: df.iloc[:, 2],
            lambda arr: Series(arr[:, 2], index=[[4, 4, 8], [8, 10, 12]], name=(4, 10)),
        ),
    ],
)
def test_iloc_returns_series(indexer, expected, simple_multiindex_dataframe):
    df = simple_multiindex_dataframe
    arr = df.values
    result = indexer(df)
    expected = expected(arr)
    tm.assert_series_equal(result, expected)


def test_iloc_returns_dataframe(simple_multiindex_dataframe):
    df = simple_multiindex_dataframe
    result = df.iloc[[0, 1]]
    expected = df.xs(4, drop_level=False)
    tm.assert_frame_equal(result, expected)


def test_iloc_returns_scalar(simple_multiindex_dataframe):
    df = simple_multiindex_dataframe
    arr = df.values
    result = df.iloc[2, 2]
    expected = arr[2, 2]
    assert result == expected


def test_iloc_getitem_multiple_items():
    # GH 5528
    tup = zip(*[["a", "a", "b", "b"], ["x", "y", "x", "y"]])
    index = MultiIndex.from_tuples(tup)
    df = DataFrame(np.random.default_rng(2).standard_normal((4, 4)), index=index)
    result = df.iloc[[2, 3]]
    expected = df.xs("b", drop_level=False)
    tm.assert_frame_equal(result, expected)


def test_iloc_getitem_labels():
    # this is basically regular indexing
    arr = np.random.default_rng(2).standard_normal((4, 3))
    df = DataFrame(
        arr,
        columns=[["i", "i", "j"], ["A", "A", "B"]],
        index=[["i", "i", "j", "k"], ["X", "X", "Y", "Y"]],
    )
    result = df.iloc[2, 2]
    expected = arr[2, 2]
    assert result == expected


def test_frame_getitem_slice(multiindex_dataframe_random_data):
    df = multiindex_dataframe_random_data
    result = df.iloc[:4]
    expected = df[:4]
    tm.assert_frame_equal(result, expected)


def test_frame_setitem_slice(multiindex_dataframe_random_data):
    df = multiindex_dataframe_random_data
    df.iloc[:4] = 0

    assert (df.values[:4] == 0).all()
    assert (df.values[4:] != 0).all()


def test_indexing_ambiguity_bug_1678():
    # GH 1678
    columns = MultiIndex.from_tuples(
        [("Ohio", "Green"), ("Ohio", "Red"), ("Colorado", "Green")]
    )
    index = MultiIndex.from_tuples([("a", 1), ("a", 2), ("b", 1), ("b", 2)])

    df = DataFrame(np.arange(12).reshape((4, 3)), index=index, columns=columns)

    result = df.iloc[:, 1]
    expected = df.loc[:, ("Ohio", "Red")]
    tm.assert_series_equal(result, expected)


def test_iloc_integer_locations():
    # GH 13797
    data = [
        ["str00", "str01"],
        ["str10", "str11"],
        ["str20", "srt21"],
        ["str30", "str31"],
        ["str40", "str41"],
    ]

    index = MultiIndex.from_tuples(
        [("CC", "A"), ("CC", "B"), ("CC", "B"), ("BB", "a"), ("BB", "b")]
    )

    expected = DataFrame(data)
    df = DataFrame(data, index=index)

    result = DataFrame([[df.iloc[r, c] for c in range(2)] for r in range(5)])

    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "data, indexes, values, expected_k",
    [
        # test without indexer value in first level of MultiIndex
        ([[2, 22, 5], [2, 33, 6]], [0, -1, 1], [2, 3, 1], [7, 10]),
        # test like code sample 1 in the issue
        ([[1, 22, 555], [1, 33, 666]], [0, -1, 1], [200, 300, 100], [755, 1066]),
        # test like code sample 2 in the issue
        ([[1, 3, 7], [2, 4, 8]], [0, -1, 1], [10, 10, 1000], [17, 1018]),
        # test like code sample 3 in the issue
        ([[1, 11, 4], [2, 22, 5], [3, 33, 6]], [0, -1, 1], [4, 7, 10], [8, 15, 13]),
    ],
)
def test_iloc_setitem_int_multiindex_series(data, indexes, values, expected_k):
    # GH17148
    df = DataFrame(data=data, columns=["i", "j", "k"])
    df = df.set_index(["i", "j"])

    series = df.k.copy()
    for i, v in zip(indexes, values):
        series.iloc[i] += v

    df["k"] = expected_k
    expected = df.k
    tm.assert_series_equal(series, expected)


def test_getitem_iloc(multiindex_dataframe_random_data):
    df = multiindex_dataframe_random_data
    result = df.iloc[2]
    expected = df.xs(df.index[2])
    tm.assert_series_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_indexing_slow.py ADDED
@@ -0,0 +1,118 @@
import numpy as np
import pytest

import pandas as pd
from pandas import (
    DataFrame,
    Series,
)
import pandas._testing as tm


@pytest.fixture
def m():
    return 5


@pytest.fixture
def n():
    return 100


@pytest.fixture
def cols():
    return ["jim", "joe", "jolie", "joline", "jolia"]


@pytest.fixture
def vals(n):
    vals = [
        np.random.default_rng(2).integers(0, 10, n),
        np.random.default_rng(2).choice(list("abcdefghij"), n),
        np.random.default_rng(2).choice(
            pd.date_range("20141009", periods=10).tolist(), n
        ),
        np.random.default_rng(2).choice(list("ZYXWVUTSRQ"), n),
        np.random.default_rng(2).standard_normal(n),
    ]
    vals = list(map(tuple, zip(*vals)))
    return vals


@pytest.fixture
def keys(n, m, vals):
    # bunch of keys for testing
    keys = [
        np.random.default_rng(2).integers(0, 11, m),
        np.random.default_rng(2).choice(list("abcdefghijk"), m),
        np.random.default_rng(2).choice(
            pd.date_range("20141009", periods=11).tolist(), m
        ),
        np.random.default_rng(2).choice(list("ZYXWVUTSRQP"), m),
    ]
    keys = list(map(tuple, zip(*keys)))
    keys += [t[:-1] for t in vals[:: n // m]]
    return keys


# covers both unique index and non-unique index
@pytest.fixture
def df(vals, cols):
    return DataFrame(vals, columns=cols)


@pytest.fixture
def a(df):
    return pd.concat([df, df])


@pytest.fixture
def b(df, cols):
    return df.drop_duplicates(subset=cols[:-1])


@pytest.mark.filterwarnings("ignore::pandas.errors.PerformanceWarning")
@pytest.mark.parametrize("lexsort_depth", list(range(5)))
@pytest.mark.parametrize("frame_fixture", ["a", "b"])
def test_multiindex_get_loc(request, lexsort_depth, keys, frame_fixture, cols):
    # GH7724, GH2646

    frame = request.getfixturevalue(frame_fixture)
    if lexsort_depth == 0:
        df = frame.copy(deep=False)
    else:
        df = frame.sort_values(by=cols[:lexsort_depth])

    mi = df.set_index(cols[:-1])
    assert not mi.index._lexsort_depth < lexsort_depth
    for key in keys:
        mask = np.ones(len(df), dtype=bool)

        # test for all partials of this key
        for i, k in enumerate(key):
            mask &= df.iloc[:, i] == k

            if not mask.any():
                assert key[: i + 1] not in mi.index
                continue

            assert key[: i + 1] in mi.index
            right = df[mask].copy(deep=False)

            if i + 1 != len(key):  # partial key
                return_value = right.drop(cols[: i + 1], axis=1, inplace=True)
                assert return_value is None
                return_value = right.set_index(cols[i + 1 : -1], inplace=True)
                assert return_value is None
                tm.assert_frame_equal(mi.loc[key[: i + 1]], right)

            else:  # full key
                return_value = right.set_index(cols[:-1], inplace=True)
                assert return_value is None
                if len(right) == 1:  # single hit
                    right = Series(
                        right["jolia"].values, name=right.index[0], index=["jolia"]
                    )
                    tm.assert_series_equal(mi.loc[key[: i + 1]], right)
                else:  # multi hit
                    tm.assert_frame_equal(mi.loc[key[: i + 1]], right)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_loc.py ADDED
@@ -0,0 +1,992 @@
import numpy as np
import pytest

from pandas.errors import (
    IndexingError,
    PerformanceWarning,
)

import pandas as pd
from pandas import (
    DataFrame,
    Index,
    MultiIndex,
    Series,
)
import pandas._testing as tm


@pytest.fixture
def single_level_multiindex():
    """single level MultiIndex"""
    return MultiIndex(
        levels=[["foo", "bar", "baz", "qux"]], codes=[[0, 1, 2, 3]], names=["first"]
    )


@pytest.fixture
def frame_random_data_integer_multi_index():
    levels = [[0, 1], [0, 1, 2]]
    codes = [[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]]
    index = MultiIndex(levels=levels, codes=codes)
    return DataFrame(np.random.default_rng(2).standard_normal((6, 2)), index=index)


class TestMultiIndexLoc:
    def test_loc_setitem_frame_with_multiindex(self, multiindex_dataframe_random_data):
        frame = multiindex_dataframe_random_data
        frame.loc[("bar", "two"), "B"] = 5
        assert frame.loc[("bar", "two"), "B"] == 5

        # with integer labels
        df = frame.copy()
        df.columns = list(range(3))
        df.loc[("bar", "two"), 1] = 7
        assert df.loc[("bar", "two"), 1] == 7

    def test_loc_getitem_general(self, any_real_numpy_dtype):
        # GH#2817
        dtype = any_real_numpy_dtype
        data = {
            "amount": {0: 700, 1: 600, 2: 222, 3: 333, 4: 444},
            "col": {0: 3.5, 1: 3.5, 2: 4.0, 3: 4.0, 4: 4.0},
            "num": {0: 12, 1: 11, 2: 12, 3: 12, 4: 12},
        }
        df = DataFrame(data)
        df = df.astype({"col": dtype, "num": dtype})
        df = df.set_index(keys=["col", "num"])
        key = 4.0, 12

        # emits a PerformanceWarning, ok
        with tm.assert_produces_warning(PerformanceWarning):
            tm.assert_frame_equal(df.loc[key], df.iloc[2:])

        # this is ok
        return_value = df.sort_index(inplace=True)
        assert return_value is None
        res = df.loc[key]

        # col has float dtype, result should be float64 Index
        col_arr = np.array([4.0] * 3, dtype=dtype)
        year_arr = np.array([12] * 3, dtype=dtype)
        index = MultiIndex.from_arrays([col_arr, year_arr], names=["col", "num"])
        expected = DataFrame({"amount": [222, 333, 444]}, index=index)
        tm.assert_frame_equal(res, expected)

    def test_loc_getitem_multiindex_missing_label_raises(self):
        # GH#21593
        df = DataFrame(
            np.random.default_rng(2).standard_normal((3, 3)),
            columns=[[2, 2, 4], [6, 8, 10]],
            index=[[4, 4, 8], [8, 10, 12]],
        )

        with pytest.raises(KeyError, match=r"^2$"):
            df.loc[2]

    def test_loc_getitem_list_of_tuples_with_multiindex(
        self, multiindex_year_month_day_dataframe_random_data
    ):
        ser = multiindex_year_month_day_dataframe_random_data["A"]
        expected = ser.reindex(ser.index[49:51])
        result = ser.loc[[(2000, 3, 10), (2000, 3, 13)]]
        tm.assert_series_equal(result, expected)

    def test_loc_getitem_series(self):
        # GH14730
        # passing a series as a key with a MultiIndex
        index = MultiIndex.from_product([[1, 2, 3], ["A", "B", "C"]])
        x = Series(index=index, data=range(9), dtype=np.float64)
        y = Series([1, 3])
        expected = Series(
            data=[0, 1, 2, 6, 7, 8],
            index=MultiIndex.from_product([[1, 3], ["A", "B", "C"]]),
            dtype=np.float64,
        )
        result = x.loc[y]
        tm.assert_series_equal(result, expected)

        result = x.loc[[1, 3]]
        tm.assert_series_equal(result, expected)

        # GH15424
        y1 = Series([1, 3], index=[1, 2])
        result = x.loc[y1]
        tm.assert_series_equal(result, expected)

        empty = Series(data=[], dtype=np.float64)
        expected = Series(
            [],
            index=MultiIndex(levels=index.levels, codes=[[], []], dtype=np.float64),
            dtype=np.float64,
        )
        result = x.loc[empty]
        tm.assert_series_equal(result, expected)

    def test_loc_getitem_array(self):
        # GH15434
        # passing an array as a key with a MultiIndex
        index = MultiIndex.from_product([[1, 2, 3], ["A", "B", "C"]])
        x = Series(index=index, data=range(9), dtype=np.float64)
        y = np.array([1, 3])
        expected = Series(
            data=[0, 1, 2, 6, 7, 8],
            index=MultiIndex.from_product([[1, 3], ["A", "B", "C"]]),
            dtype=np.float64,
        )
        result = x.loc[y]
        tm.assert_series_equal(result, expected)

        # empty array:
        empty = np.array([])
        expected = Series(
            [],
            index=MultiIndex(levels=index.levels, codes=[[], []], dtype=np.float64),
            dtype="float64",
        )
        result = x.loc[empty]
        tm.assert_series_equal(result, expected)

        # 0-dim array (scalar):
        scalar = np.int64(1)
        expected = Series(data=[0, 1, 2], index=["A", "B", "C"], dtype=np.float64)
        result = x.loc[scalar]
        tm.assert_series_equal(result, expected)

    def test_loc_multiindex_labels(self):
        df = DataFrame(
            np.random.default_rng(2).standard_normal((3, 3)),
            columns=[["i", "i", "j"], ["A", "A", "B"]],
            index=[["i", "i", "j"], ["X", "X", "Y"]],
        )

        # the first 2 rows
        expected = df.iloc[[0, 1]].droplevel(0)
        result = df.loc["i"]
        tm.assert_frame_equal(result, expected)

        # 2nd (last) column
        expected = df.iloc[:, [2]].droplevel(0, axis=1)
        result = df.loc[:, "j"]
        tm.assert_frame_equal(result, expected)

        # bottom right corner
        expected = df.iloc[[2], [2]].droplevel(0).droplevel(0, axis=1)
        result = df.loc["j"].loc[:, "j"]
        tm.assert_frame_equal(result, expected)

        # with a tuple
        expected = df.iloc[[0, 1]]
        result = df.loc[("i", "X")]
        tm.assert_frame_equal(result, expected)

    def test_loc_multiindex_ints(self):
        df = DataFrame(
            np.random.default_rng(2).standard_normal((3, 3)),
+ columns=[[2, 2, 4], [6, 8, 10]],
187
+ index=[[4, 4, 8], [8, 10, 12]],
188
+ )
189
+ expected = df.iloc[[0, 1]].droplevel(0)
190
+ result = df.loc[4]
191
+ tm.assert_frame_equal(result, expected)
192
+
193
+ def test_loc_multiindex_missing_label_raises(self):
194
+ df = DataFrame(
195
+ np.random.default_rng(2).standard_normal((3, 3)),
196
+ columns=[[2, 2, 4], [6, 8, 10]],
197
+ index=[[4, 4, 8], [8, 10, 12]],
198
+ )
199
+
200
+ with pytest.raises(KeyError, match=r"^2$"):
201
+ df.loc[2]
202
+
203
+ @pytest.mark.parametrize("key, pos", [([2, 4], [0, 1]), ([2], []), ([2, 3], [])])
204
+ def test_loc_multiindex_list_missing_label(self, key, pos):
205
+ # GH 27148 - lists with missing labels _do_ raise
206
+ df = DataFrame(
207
+ np.random.default_rng(2).standard_normal((3, 3)),
208
+ columns=[[2, 2, 4], [6, 8, 10]],
209
+ index=[[4, 4, 8], [8, 10, 12]],
210
+ )
211
+
212
+ with pytest.raises(KeyError, match="not in index"):
213
+ df.loc[key]
214
+
215
+ def test_loc_multiindex_too_many_dims_raises(self):
216
+ # GH 14885
217
+ s = Series(
218
+ range(8),
219
+ index=MultiIndex.from_product([["a", "b"], ["c", "d"], ["e", "f"]]),
220
+ )
221
+
222
+ with pytest.raises(KeyError, match=r"^\('a', 'b'\)$"):
223
+ s.loc["a", "b"]
224
+ with pytest.raises(KeyError, match=r"^\('a', 'd', 'g'\)$"):
225
+ s.loc["a", "d", "g"]
226
+ with pytest.raises(IndexingError, match="Too many indexers"):
227
+ s.loc["a", "d", "g", "j"]
228
+
229
+ def test_loc_multiindex_indexer_none(self):
230
+ # GH6788
231
+ # multi-index indexer is None (meaning take all)
232
+ attributes = ["Attribute" + str(i) for i in range(1)]
233
+ attribute_values = ["Value" + str(i) for i in range(5)]
234
+
235
+ index = MultiIndex.from_product([attributes, attribute_values])
236
+ df = 0.1 * np.random.default_rng(2).standard_normal((10, 1 * 5)) + 0.5
237
+ df = DataFrame(df, columns=index)
238
+ result = df[attributes]
239
+ tm.assert_frame_equal(result, df)
240
+
241
+ # GH 7349
242
+ # loc with a multi-index seems to be doing fallback
243
+ df = DataFrame(
244
+ np.arange(12).reshape(-1, 1),
245
+ index=MultiIndex.from_product([[1, 2, 3, 4], [1, 2, 3]]),
246
+ )
247
+
248
+ expected = df.loc[([1, 2],), :]
249
+ result = df.loc[[1, 2]]
250
+ tm.assert_frame_equal(result, expected)
251
+
252
+ def test_loc_multiindex_incomplete(self):
253
+ # GH 7399
254
+ # incomplete indexers
255
+ s = Series(
256
+ np.arange(15, dtype="int64"),
257
+ MultiIndex.from_product([range(5), ["a", "b", "c"]]),
258
+ )
259
+ expected = s.loc[:, "a":"c"]
260
+
261
+ result = s.loc[0:4, "a":"c"]
262
+ tm.assert_series_equal(result, expected)
263
+
264
+ result = s.loc[:4, "a":"c"]
265
+ tm.assert_series_equal(result, expected)
266
+
267
+ result = s.loc[0:, "a":"c"]
268
+ tm.assert_series_equal(result, expected)
269
+
270
+ # GH 7400
271
+ # multiindexer getitem with list of indexers skips wrong element
272
+ s = Series(
273
+ np.arange(15, dtype="int64"),
274
+ MultiIndex.from_product([range(5), ["a", "b", "c"]]),
275
+ )
276
+ expected = s.iloc[[6, 7, 8, 12, 13, 14]]
277
+ result = s.loc[2:4:2, "a":"c"]
278
+ tm.assert_series_equal(result, expected)
279
+
280
+ def test_get_loc_single_level(self, single_level_multiindex):
281
+ single_level = single_level_multiindex
282
+ s = Series(
283
+ np.random.default_rng(2).standard_normal(len(single_level)),
284
+ index=single_level,
285
+ )
286
+ for k in single_level.values:
287
+ s[k]
288
+
289
+ def test_loc_getitem_int_slice(self):
290
+ # GH 3053
291
+ # loc should treat integer slices like label slices
292
+
293
+ index = MultiIndex.from_product([[6, 7, 8], ["a", "b"]])
294
+ df = DataFrame(np.random.default_rng(2).standard_normal((6, 6)), index, index)
295
+ result = df.loc[6:8, :]
296
+ expected = df
297
+ tm.assert_frame_equal(result, expected)
298
+
299
+ index = MultiIndex.from_product([[10, 20, 30], ["a", "b"]])
300
+ df = DataFrame(np.random.default_rng(2).standard_normal((6, 6)), index, index)
301
+ result = df.loc[20:30, :]
302
+ expected = df.iloc[2:]
303
+ tm.assert_frame_equal(result, expected)
304
+
305
+ # doc examples
306
+ result = df.loc[10, :]
307
+ expected = df.iloc[0:2]
308
+ expected.index = ["a", "b"]
309
+ tm.assert_frame_equal(result, expected)
310
+
311
+ result = df.loc[:, 10]
312
+ expected = df[10]
313
+ tm.assert_frame_equal(result, expected)
314
+
315
+ @pytest.mark.parametrize(
316
+ "indexer_type_1", (list, tuple, set, slice, np.ndarray, Series, Index)
317
+ )
318
+ @pytest.mark.parametrize(
319
+ "indexer_type_2", (list, tuple, set, slice, np.ndarray, Series, Index)
320
+ )
321
+ def test_loc_getitem_nested_indexer(self, indexer_type_1, indexer_type_2):
322
+ # GH #19686
323
+ # .loc should work with nested indexers which can be
324
+ # any list-like objects (see `is_list_like` (`pandas.api.types`)) or slices
325
+
326
+ def convert_nested_indexer(indexer_type, keys):
327
+ if indexer_type == np.ndarray:
328
+ return np.array(keys)
329
+ if indexer_type == slice:
330
+ return slice(*keys)
331
+ return indexer_type(keys)
332
+
333
+ a = [10, 20, 30]
334
+ b = [1, 2, 3]
335
+ index = MultiIndex.from_product([a, b])
336
+ df = DataFrame(
337
+ np.arange(len(index), dtype="int64"), index=index, columns=["Data"]
338
+ )
339
+
340
+ keys = ([10, 20], [2, 3])
341
+ types = (indexer_type_1, indexer_type_2)
342
+
343
+ # check indexers with all the combinations of nested objects
344
+ # of all the valid types
345
+ indexer = tuple(
346
+ convert_nested_indexer(indexer_type, k)
347
+ for indexer_type, k in zip(types, keys)
348
+ )
349
+ if indexer_type_1 is set or indexer_type_2 is set:
350
+ with pytest.raises(TypeError, match="as an indexer is not supported"):
351
+ df.loc[indexer, "Data"]
352
+
353
+ return
354
+ else:
355
+ result = df.loc[indexer, "Data"]
356
+ expected = Series(
357
+ [1, 2, 4, 5], name="Data", index=MultiIndex.from_product(keys)
358
+ )
359
+
360
+ tm.assert_series_equal(result, expected)
361
+
362
+ def test_multiindex_loc_one_dimensional_tuple(self, frame_or_series):
363
+ # GH#37711
364
+ mi = MultiIndex.from_tuples([("a", "A"), ("b", "A")])
365
+ obj = frame_or_series([1, 2], index=mi)
366
+ obj.loc[("a",)] = 0
367
+ expected = frame_or_series([0, 2], index=mi)
368
+ tm.assert_equal(obj, expected)
369
+
370
+ @pytest.mark.parametrize("indexer", [("a",), ("a")])
371
+ def test_multiindex_one_dimensional_tuple_columns(self, indexer):
372
+ # GH#37711
373
+ mi = MultiIndex.from_tuples([("a", "A"), ("b", "A")])
374
+ obj = DataFrame([1, 2], index=mi)
375
+ obj.loc[indexer, :] = 0
376
+ expected = DataFrame([0, 2], index=mi)
377
+ tm.assert_frame_equal(obj, expected)
378
+
379
+ @pytest.mark.parametrize(
380
+ "indexer, exp_value", [(slice(None), 1.0), ((1, 2), np.nan)]
381
+ )
382
+ def test_multiindex_setitem_columns_enlarging(self, indexer, exp_value):
383
+ # GH#39147
384
+ mi = MultiIndex.from_tuples([(1, 2), (3, 4)])
385
+ df = DataFrame([[1, 2], [3, 4]], index=mi, columns=["a", "b"])
386
+ df.loc[indexer, ["c", "d"]] = 1.0
387
+ expected = DataFrame(
388
+ [[1, 2, 1.0, 1.0], [3, 4, exp_value, exp_value]],
389
+ index=mi,
390
+ columns=["a", "b", "c", "d"],
391
+ )
392
+ tm.assert_frame_equal(df, expected)
393
+
394
+ def test_sorted_multiindex_after_union(self):
395
+ # GH#44752
396
+ midx = MultiIndex.from_product(
397
+ [pd.date_range("20110101", periods=2), Index(["a", "b"])]
398
+ )
399
+ ser1 = Series(1, index=midx)
400
+ ser2 = Series(1, index=midx[:2])
401
+ df = pd.concat([ser1, ser2], axis=1)
402
+ expected = df.copy()
403
+ result = df.loc["2011-01-01":"2011-01-02"]
404
+ tm.assert_frame_equal(result, expected)
405
+
406
+ df = DataFrame({0: ser1, 1: ser2})
407
+ result = df.loc["2011-01-01":"2011-01-02"]
408
+ tm.assert_frame_equal(result, expected)
409
+
410
+ df = pd.concat([ser1, ser2.reindex(ser1.index)], axis=1)
411
+ result = df.loc["2011-01-01":"2011-01-02"]
412
+ tm.assert_frame_equal(result, expected)
413
+
414
+ def test_loc_no_second_level_index(self):
415
+ # GH#43599
416
+ df = DataFrame(
417
+ index=MultiIndex.from_product([list("ab"), list("cd"), list("e")]),
418
+ columns=["Val"],
419
+ )
420
+ res = df.loc[np.s_[:, "c", :]]
421
+ expected = DataFrame(
422
+ index=MultiIndex.from_product([list("ab"), list("e")]), columns=["Val"]
423
+ )
424
+ tm.assert_frame_equal(res, expected)
425
+
426
+ def test_loc_multi_index_key_error(self):
427
+ # GH 51892
428
+ df = DataFrame(
429
+ {
430
+ (1, 2): ["a", "b", "c"],
431
+ (1, 3): ["d", "e", "f"],
432
+ (2, 2): ["g", "h", "i"],
433
+ (2, 4): ["j", "k", "l"],
434
+ }
435
+ )
436
+ with pytest.raises(KeyError, match=r"(1, 4)"):
437
+ df.loc[0, (1, 4)]
438
+
439
+
440
+ @pytest.mark.parametrize(
441
+ "indexer, pos",
442
+ [
443
+ ([], []), # empty ok
444
+ (["A"], slice(3)),
445
+ (["A", "D"], []), # "D" isn't present -> raise
446
+ (["D", "E"], []), # no values found -> raise
447
+ (["D"], []), # same, with single item list: GH 27148
448
+ (pd.IndexSlice[:, ["foo"]], slice(2, None, 3)),
449
+ (pd.IndexSlice[:, ["foo", "bah"]], slice(2, None, 3)),
450
+ ],
451
+ )
452
+ def test_loc_getitem_duplicates_multiindex_missing_indexers(indexer, pos):
453
+ # GH 7866
454
+ # multi-index slicing with missing indexers
455
+ idx = MultiIndex.from_product(
456
+ [["A", "B", "C"], ["foo", "bar", "baz"]], names=["one", "two"]
457
+ )
458
+ ser = Series(np.arange(9, dtype="int64"), index=idx).sort_index()
459
+ expected = ser.iloc[pos]
460
+
461
+ if expected.size == 0 and indexer != []:
462
+ with pytest.raises(KeyError, match=str(indexer)):
463
+ ser.loc[indexer]
464
+ elif indexer == (slice(None), ["foo", "bah"]):
465
+ # "bah" is not in idx.levels[1], raising KeyError enforced in 2.0
466
+ with pytest.raises(KeyError, match="'bah'"):
467
+ ser.loc[indexer]
468
+ else:
469
+ result = ser.loc[indexer]
470
+ tm.assert_series_equal(result, expected)
471
+
472
+
473
+ @pytest.mark.parametrize("columns_indexer", [([], slice(None)), (["foo"], [])])
474
+ def test_loc_getitem_duplicates_multiindex_empty_indexer(columns_indexer):
475
+ # GH 8737
476
+ # empty indexer
477
+ multi_index = MultiIndex.from_product((["foo", "bar", "baz"], ["alpha", "beta"]))
478
+ df = DataFrame(
479
+ np.random.default_rng(2).standard_normal((5, 6)),
480
+ index=range(5),
481
+ columns=multi_index,
482
+ )
483
+ df = df.sort_index(level=0, axis=1)
484
+
485
+ expected = DataFrame(index=range(5), columns=multi_index.reindex([])[0])
486
+ result = df.loc[:, columns_indexer]
487
+ tm.assert_frame_equal(result, expected)
488
+
489
+
490
+ def test_loc_getitem_duplicates_multiindex_non_scalar_type_object():
491
+ # regression from < 0.14.0
492
+ # GH 7914
493
+ df = DataFrame(
494
+ [[np.mean, np.median], ["mean", "median"]],
495
+ columns=MultiIndex.from_tuples([("functs", "mean"), ("functs", "median")]),
496
+ index=["function", "name"],
497
+ )
498
+ result = df.loc["function", ("functs", "mean")]
499
+ expected = np.mean
500
+ assert result == expected
501
+
502
+
503
+ def test_loc_getitem_tuple_plus_slice():
504
+ # GH 671
505
+ df = DataFrame(
506
+ {
507
+ "a": np.arange(10),
508
+ "b": np.arange(10),
509
+ "c": np.random.default_rng(2).standard_normal(10),
510
+ "d": np.random.default_rng(2).standard_normal(10),
511
+ }
512
+ ).set_index(["a", "b"])
513
+ expected = df.loc[0, 0]
514
+ result = df.loc[(0, 0), :]
515
+ tm.assert_series_equal(result, expected)
516
+
517
+
518
+ def test_loc_getitem_int(frame_random_data_integer_multi_index):
519
+ df = frame_random_data_integer_multi_index
520
+ result = df.loc[1]
521
+ expected = df[-3:]
522
+ expected.index = expected.index.droplevel(0)
523
+ tm.assert_frame_equal(result, expected)
524
+
525
+
526
+ def test_loc_getitem_int_raises_exception(frame_random_data_integer_multi_index):
527
+ df = frame_random_data_integer_multi_index
528
+ with pytest.raises(KeyError, match=r"^3$"):
529
+ df.loc[3]
530
+
531
+
532
+ def test_loc_getitem_lowerdim_corner(multiindex_dataframe_random_data):
533
+ df = multiindex_dataframe_random_data
534
+
535
+ # test setup - check key not in dataframe
536
+ with pytest.raises(KeyError, match=r"^\('bar', 'three'\)$"):
537
+ df.loc[("bar", "three"), "B"]
538
+
539
+ # in theory should be inserting in a sorted space????
540
+ df.loc[("bar", "three"), "B"] = 0
541
+ expected = 0
542
+ result = df.sort_index().loc[("bar", "three"), "B"]
543
+ assert result == expected
544
+
545
+
546
+ def test_loc_setitem_single_column_slice():
547
+ # case from https://github.com/pandas-dev/pandas/issues/27841
548
+ df = DataFrame(
549
+ "string",
550
+ index=list("abcd"),
551
+ columns=MultiIndex.from_product([["Main"], ("another", "one")]),
552
+ )
553
+ df["labels"] = "a"
554
+ df.loc[:, "labels"] = df.index
555
+ tm.assert_numpy_array_equal(np.asarray(df["labels"]), np.asarray(df.index))
556
+
557
+ # test with non-object block
558
+ df = DataFrame(
559
+ np.nan,
560
+ index=range(4),
561
+ columns=MultiIndex.from_tuples([("A", "1"), ("A", "2"), ("B", "1")]),
562
+ )
563
+ expected = df.copy()
564
+ df.loc[:, "B"] = np.arange(4)
565
+ expected.iloc[:, 2] = np.arange(4)
566
+ tm.assert_frame_equal(df, expected)
567
+
568
+
569
+ def test_loc_nan_multiindex(using_infer_string):
570
+ # GH 5286
571
+ tups = [
572
+ ("Good Things", "C", np.nan),
573
+ ("Good Things", "R", np.nan),
574
+ ("Bad Things", "C", np.nan),
575
+ ("Bad Things", "T", np.nan),
576
+ ("Okay Things", "N", "B"),
577
+ ("Okay Things", "N", "D"),
578
+ ("Okay Things", "B", np.nan),
579
+ ("Okay Things", "D", np.nan),
580
+ ]
581
+ df = DataFrame(
582
+ np.ones((8, 4)),
583
+ columns=Index(["d1", "d2", "d3", "d4"]),
584
+ index=MultiIndex.from_tuples(tups, names=["u1", "u2", "u3"]),
585
+ )
586
+ result = df.loc["Good Things"].loc["C"]
587
+ expected = DataFrame(
588
+ np.ones((1, 4)),
589
+ index=Index(
590
+ [np.nan],
591
+ dtype="object" if not using_infer_string else "string[pyarrow_numpy]",
592
+ name="u3",
593
+ ),
594
+ columns=Index(["d1", "d2", "d3", "d4"]),
595
+ )
596
+ tm.assert_frame_equal(result, expected)
597
+
598
+
599
+ def test_loc_period_string_indexing():
600
+ # GH 9892
601
+ a = pd.period_range("2013Q1", "2013Q4", freq="Q")
602
+ i = (1111, 2222, 3333)
603
+ idx = MultiIndex.from_product((a, i), names=("Period", "CVR"))
604
+ df = DataFrame(
605
+ index=idx,
606
+ columns=(
607
+ "OMS",
608
+ "OMK",
609
+ "RES",
610
+ "DRIFT_IND",
611
+ "OEVRIG_IND",
612
+ "FIN_IND",
613
+ "VARE_UD",
614
+ "LOEN_UD",
615
+ "FIN_UD",
616
+ ),
617
+ )
618
+ result = df.loc[("2013Q1", 1111), "OMS"]
619
+
620
+ alt = df.loc[(a[0], 1111), "OMS"]
621
+ assert np.isnan(alt)
622
+
623
+ # Because the resolution of the string matches, it is an exact lookup,
624
+ # not a slice
625
+ assert np.isnan(result)
626
+
627
+ alt = df.loc[("2013Q1", 1111), "OMS"]
628
+ assert np.isnan(alt)
629
+
630
+
631
+ def test_loc_datetime_mask_slicing():
632
+ # GH 16699
633
+ dt_idx = pd.to_datetime(["2017-05-04", "2017-05-05"])
634
+ m_idx = MultiIndex.from_product([dt_idx, dt_idx], names=["Idx1", "Idx2"])
635
+ df = DataFrame(
636
+ data=[[1, 2], [3, 4], [5, 6], [7, 6]], index=m_idx, columns=["C1", "C2"]
637
+ )
638
+ result = df.loc[(dt_idx[0], (df.index.get_level_values(1) > "2017-05-04")), "C1"]
639
+ expected = Series(
640
+ [3],
641
+ name="C1",
642
+ index=MultiIndex.from_tuples(
643
+ [(pd.Timestamp("2017-05-04"), pd.Timestamp("2017-05-05"))],
644
+ names=["Idx1", "Idx2"],
645
+ ),
646
+ )
647
+ tm.assert_series_equal(result, expected)
648
+
649
+
650
+ def test_loc_datetime_series_tuple_slicing():
651
+ # https://github.com/pandas-dev/pandas/issues/35858
652
+ date = pd.Timestamp("2000")
653
+ ser = Series(
654
+ 1,
655
+ index=MultiIndex.from_tuples([("a", date)], names=["a", "b"]),
656
+ name="c",
657
+ )
658
+ result = ser.loc[:, [date]]
659
+ tm.assert_series_equal(result, ser)
660
+
661
+
662
+ def test_loc_with_mi_indexer():
663
+ # https://github.com/pandas-dev/pandas/issues/35351
664
+ df = DataFrame(
665
+ data=[["a", 1], ["a", 0], ["b", 1], ["c", 2]],
666
+ index=MultiIndex.from_tuples(
667
+ [(0, 1), (1, 0), (1, 1), (1, 1)], names=["index", "date"]
668
+ ),
669
+ columns=["author", "price"],
670
+ )
671
+ idx = MultiIndex.from_tuples([(0, 1), (1, 1)], names=["index", "date"])
672
+ result = df.loc[idx, :]
673
+ expected = DataFrame(
674
+ [["a", 1], ["b", 1], ["c", 2]],
675
+ index=MultiIndex.from_tuples([(0, 1), (1, 1), (1, 1)], names=["index", "date"]),
676
+ columns=["author", "price"],
677
+ )
678
+ tm.assert_frame_equal(result, expected)
679
+
680
+
681
+ def test_loc_mi_with_level1_named_0():
682
+ # GH#37194
683
+ dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific")
684
+
685
+ ser = Series(range(3), index=dti)
686
+ df = ser.to_frame()
687
+ df[1] = dti
688
+
689
+ df2 = df.set_index(0, append=True)
690
+ assert df2.index.names == (None, 0)
691
+ df2.index.get_loc(dti[0]) # smoke test
692
+
693
+ result = df2.loc[dti[0]]
694
+ expected = df2.iloc[[0]].droplevel(None)
695
+ tm.assert_frame_equal(result, expected)
696
+
697
+ ser2 = df2[1]
698
+ assert ser2.index.names == (None, 0)
699
+
700
+ result = ser2.loc[dti[0]]
701
+ expected = ser2.iloc[[0]].droplevel(None)
702
+ tm.assert_series_equal(result, expected)
703
+
704
+
705
+ def test_getitem_str_slice():
706
+ # GH#15928
707
+ df = DataFrame(
708
+ [
709
+ ["20160525 13:30:00.023", "MSFT", "51.95", "51.95"],
710
+ ["20160525 13:30:00.048", "GOOG", "720.50", "720.93"],
711
+ ["20160525 13:30:00.076", "AAPL", "98.55", "98.56"],
712
+ ["20160525 13:30:00.131", "AAPL", "98.61", "98.62"],
713
+ ["20160525 13:30:00.135", "MSFT", "51.92", "51.95"],
714
+ ["20160525 13:30:00.135", "AAPL", "98.61", "98.62"],
715
+ ],
716
+ columns="time,ticker,bid,ask".split(","),
717
+ )
718
+ df2 = df.set_index(["ticker", "time"]).sort_index()
719
+
720
+ res = df2.loc[("AAPL", slice("2016-05-25 13:30:00")), :].droplevel(0)
721
+ expected = df2.loc["AAPL"].loc[slice("2016-05-25 13:30:00"), :]
722
+ tm.assert_frame_equal(res, expected)
723
+
724
+
725
+ def test_3levels_leading_period_index():
726
+ # GH#24091
727
+ pi = pd.PeriodIndex(
728
+ ["20181101 1100", "20181101 1200", "20181102 1300", "20181102 1400"],
729
+ name="datetime",
730
+ freq="D",
731
+ )
732
+ lev2 = ["A", "A", "Z", "W"]
733
+ lev3 = ["B", "C", "Q", "F"]
734
+ mi = MultiIndex.from_arrays([pi, lev2, lev3])
735
+
736
+ ser = Series(range(4), index=mi, dtype=np.float64)
737
+ result = ser.loc[(pi[0], "A", "B")]
738
+ assert result == 0.0
739
+
740
+
741
+ class TestKeyErrorsWithMultiIndex:
742
+ def test_missing_keys_raises_keyerror(self):
743
+ # GH#27420 KeyError, not TypeError
744
+ df = DataFrame(np.arange(12).reshape(4, 3), columns=["A", "B", "C"])
745
+ df2 = df.set_index(["A", "B"])
746
+
747
+ with pytest.raises(KeyError, match="1"):
748
+ df2.loc[(1, 6)]
749
+
750
+ def test_missing_key_raises_keyerror2(self):
751
+ # GH#21168 KeyError, not "IndexingError: Too many indexers"
752
+ ser = Series(-1, index=MultiIndex.from_product([[0, 1]] * 2))
753
+
754
+ with pytest.raises(KeyError, match=r"\(0, 3\)"):
755
+ ser.loc[0, 3]
756
+
757
+ def test_missing_key_combination(self):
758
+ # GH: 19556
759
+ mi = MultiIndex.from_arrays(
760
+ [
761
+ np.array(["a", "a", "b", "b"]),
762
+ np.array(["1", "2", "2", "3"]),
763
+ np.array(["c", "d", "c", "d"]),
764
+ ],
765
+ names=["one", "two", "three"],
766
+ )
767
+ df = DataFrame(np.random.default_rng(2).random((4, 3)), index=mi)
768
+ msg = r"\('b', '1', slice\(None, None, None\)\)"
769
+ with pytest.raises(KeyError, match=msg):
770
+ df.loc[("b", "1", slice(None)), :]
771
+ with pytest.raises(KeyError, match=msg):
772
+ df.index.get_locs(("b", "1", slice(None)))
773
+ with pytest.raises(KeyError, match=r"\('b', '1'\)"):
774
+ df.loc[("b", "1"), :]
775
+
776
+
777
+ def test_getitem_loc_commutability(multiindex_year_month_day_dataframe_random_data):
778
+ df = multiindex_year_month_day_dataframe_random_data
779
+ ser = df["A"]
780
+ result = ser[2000, 5]
781
+ expected = df.loc[2000, 5]["A"]
782
+ tm.assert_series_equal(result, expected)
783
+
784
+
785
+ def test_loc_with_nan():
786
+ # GH: 27104
787
+ df = DataFrame(
788
+ {"col": [1, 2, 5], "ind1": ["a", "d", np.nan], "ind2": [1, 4, 5]}
789
+ ).set_index(["ind1", "ind2"])
790
+ result = df.loc[["a"]]
791
+ expected = DataFrame(
792
+ {"col": [1]}, index=MultiIndex.from_tuples([("a", 1)], names=["ind1", "ind2"])
793
+ )
794
+ tm.assert_frame_equal(result, expected)
795
+
796
+ result = df.loc["a"]
797
+ expected = DataFrame({"col": [1]}, index=Index([1], name="ind2"))
798
+ tm.assert_frame_equal(result, expected)
799
+
800
+
801
+ def test_getitem_non_found_tuple():
802
+ # GH: 25236
803
+ df = DataFrame([[1, 2, 3, 4]], columns=["a", "b", "c", "d"]).set_index(
804
+ ["a", "b", "c"]
805
+ )
806
+ with pytest.raises(KeyError, match=r"\(2\.0, 2\.0, 3\.0\)"):
807
+ df.loc[(2.0, 2.0, 3.0)]
808
+
809
+
810
+ def test_get_loc_datetime_index():
811
+ # GH#24263
812
+ index = pd.date_range("2001-01-01", periods=100)
813
+ mi = MultiIndex.from_arrays([index])
814
+ # Check if get_loc matches for Index and MultiIndex
815
+ assert mi.get_loc("2001-01") == slice(0, 31, None)
816
+ assert index.get_loc("2001-01") == slice(0, 31, None)
817
+
818
+ loc = mi[::2].get_loc("2001-01")
819
+ expected = index[::2].get_loc("2001-01")
820
+ assert loc == expected
821
+
822
+ loc = mi.repeat(2).get_loc("2001-01")
823
+ expected = index.repeat(2).get_loc("2001-01")
824
+ assert loc == expected
825
+
826
+ loc = mi.append(mi).get_loc("2001-01")
827
+ expected = index.append(index).get_loc("2001-01")
828
+ # TODO: standardize return type for MultiIndex.get_loc
829
+ tm.assert_numpy_array_equal(loc.nonzero()[0], expected)
830
+
831
+
832
+ def test_loc_setitem_indexer_differently_ordered():
833
+ # GH#34603
834
+ mi = MultiIndex.from_product([["a", "b"], [0, 1]])
835
+ df = DataFrame([[1, 2], [3, 4], [5, 6], [7, 8]], index=mi)
836
+
837
+ indexer = ("a", [1, 0])
838
+ df.loc[indexer, :] = np.array([[9, 10], [11, 12]])
839
+ expected = DataFrame([[11, 12], [9, 10], [5, 6], [7, 8]], index=mi)
840
+ tm.assert_frame_equal(df, expected)
841
+
842
+
843
+ def test_loc_getitem_index_differently_ordered_slice_none():
844
+ # GH#31330
845
+ df = DataFrame(
846
+ [[1, 2], [3, 4], [5, 6], [7, 8]],
847
+ index=[["a", "a", "b", "b"], [1, 2, 1, 2]],
848
+ columns=["a", "b"],
849
+ )
850
+ result = df.loc[(slice(None), [2, 1]), :]
851
+ expected = DataFrame(
852
+ [[3, 4], [7, 8], [1, 2], [5, 6]],
853
+ index=[["a", "b", "a", "b"], [2, 2, 1, 1]],
854
+ columns=["a", "b"],
855
+ )
856
+ tm.assert_frame_equal(result, expected)
857
+
858
+
859
+ @pytest.mark.parametrize("indexer", [[1, 2, 7, 6, 2, 3, 8, 7], [1, 2, 7, 6, 3, 8]])
860
+ def test_loc_getitem_index_differently_ordered_slice_none_duplicates(indexer):
861
+ # GH#40978
862
+ df = DataFrame(
863
+ [1] * 8,
864
+ index=MultiIndex.from_tuples(
865
+ [(1, 1), (1, 2), (1, 7), (1, 6), (2, 2), (2, 3), (2, 8), (2, 7)]
866
+ ),
867
+ columns=["a"],
868
+ )
869
+ result = df.loc[(slice(None), indexer), :]
870
+ expected = DataFrame(
871
+ [1] * 8,
872
+ index=[[1, 1, 2, 1, 2, 1, 2, 2], [1, 2, 2, 7, 7, 6, 3, 8]],
873
+ columns=["a"],
874
+ )
875
+ tm.assert_frame_equal(result, expected)
876
+
877
+ result = df.loc[df.index.isin(indexer, level=1), :]
878
+ tm.assert_frame_equal(result, df)
879
+
880
+
881
+ def test_loc_getitem_drops_levels_for_one_row_dataframe():
882
+ # GH#10521 "x" and "z" are both scalar indexing, so those levels are dropped
883
+ mi = MultiIndex.from_arrays([["x"], ["y"], ["z"]], names=["a", "b", "c"])
884
+ df = DataFrame({"d": [0]}, index=mi)
885
+ expected = df.droplevel([0, 2])
886
+ result = df.loc["x", :, "z"]
887
+ tm.assert_frame_equal(result, expected)
888
+
889
+ ser = Series([0], index=mi)
890
+ result = ser.loc["x", :, "z"]
891
+ expected = Series([0], index=Index(["y"], name="b"))
892
+ tm.assert_series_equal(result, expected)
893
+
894
+
895
+ def test_mi_columns_loc_list_label_order():
896
+ # GH 10710
897
+ cols = MultiIndex.from_product([["A", "B", "C"], [1, 2]])
898
+ df = DataFrame(np.zeros((5, 6)), columns=cols)
899
+ result = df.loc[:, ["B", "A"]]
900
+ expected = DataFrame(
901
+ np.zeros((5, 4)),
902
+ columns=MultiIndex.from_tuples([("B", 1), ("B", 2), ("A", 1), ("A", 2)]),
903
+ )
904
+ tm.assert_frame_equal(result, expected)
905
+
906
+
907
+ def test_mi_partial_indexing_list_raises():
908
+ # GH 13501
909
+ frame = DataFrame(
910
+ np.arange(12).reshape((4, 3)),
911
+ index=[["a", "a", "b", "b"], [1, 2, 1, 2]],
912
+ columns=[["Ohio", "Ohio", "Colorado"], ["Green", "Red", "Green"]],
913
+ )
914
+ frame.index.names = ["key1", "key2"]
915
+ frame.columns.names = ["state", "color"]
916
+ with pytest.raises(KeyError, match="\\[2\\] not in index"):
917
+ frame.loc[["b", 2], "Colorado"]
918
+
919
+
920
+ def test_mi_indexing_list_nonexistent_raises():
921
+ # GH 15452
922
+ s = Series(range(4), index=MultiIndex.from_product([[1, 2], ["a", "b"]]))
923
+ with pytest.raises(KeyError, match="\\['not' 'found'\\] not in index"):
924
+ s.loc[["not", "found"]]
925
+
926
+
927
+ def test_mi_add_cell_missing_row_non_unique():
928
+ # GH 16018
929
+ result = DataFrame(
930
+ [[1, 2, 5, 6], [3, 4, 7, 8]],
931
+ index=["a", "a"],
932
+ columns=MultiIndex.from_product([[1, 2], ["A", "B"]]),
933
+ )
934
+ result.loc["c"] = -1
935
+ result.loc["c", (1, "A")] = 3
936
+ result.loc["d", (1, "A")] = 3
937
+ expected = DataFrame(
938
+ [
939
+ [1.0, 2.0, 5.0, 6.0],
940
+ [3.0, 4.0, 7.0, 8.0],
941
+ [3.0, -1.0, -1, -1],
942
+ [3.0, np.nan, np.nan, np.nan],
943
+ ],
944
+ index=["a", "a", "c", "d"],
945
+ columns=MultiIndex.from_product([[1, 2], ["A", "B"]]),
946
+ )
947
+ tm.assert_frame_equal(result, expected)
948
+
949
+
950
+ def test_loc_get_scalar_casting_to_float():
951
+ # GH#41369
952
+ df = DataFrame(
953
+ {"a": 1.0, "b": 2}, index=MultiIndex.from_arrays([[3], [4]], names=["c", "d"])
954
+ )
955
+ result = df.loc[(3, 4), "b"]
956
+ assert result == 2
957
+ assert isinstance(result, np.int64)
958
+ result = df.loc[[(3, 4)], "b"].iloc[0]
959
+ assert result == 2
960
+ assert isinstance(result, np.int64)
961
+
962
+
963
+ def test_loc_empty_single_selector_with_names():
964
+ # GH 19517
965
+ idx = MultiIndex.from_product([["a", "b"], ["A", "B"]], names=[1, 0])
966
+ s2 = Series(index=idx, dtype=np.float64)
967
+ result = s2.loc["a"]
968
+ expected = Series([np.nan, np.nan], index=Index(["A", "B"], name=0))
969
+ tm.assert_series_equal(result, expected)
970
+
971
+
972
+ def test_loc_keyerror_rightmost_key_missing():
973
+ # GH 20951
974
+
975
+ df = DataFrame(
976
+ {
977
+ "A": [100, 100, 200, 200, 300, 300],
978
+ "B": [10, 10, 20, 21, 31, 33],
979
+ "C": range(6),
980
+ }
981
+ )
982
+ df = df.set_index(["A", "B"])
983
+ with pytest.raises(KeyError, match="^1$"):
984
+ df.loc[(100, 1)]
985
+
986
+
987
+ def test_multindex_series_loc_with_tuple_label():
988
+ # GH#43908
989
+ mi = MultiIndex.from_tuples([(1, 2), (3, (4, 5))])
990
+ ser = Series([1, 2], index=mi)
991
+ result = ser.loc[(3, (4, 5))]
992
+ assert result == 2
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_multiindex.py ADDED
@@ -0,0 +1,235 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas._libs.index as libindex
5
+ from pandas.errors import PerformanceWarning
6
+
7
+ import pandas as pd
8
+ from pandas import (
9
+ CategoricalDtype,
10
+ DataFrame,
11
+ Index,
12
+ MultiIndex,
13
+ Series,
14
+ )
15
+ import pandas._testing as tm
16
+ from pandas.core.arrays.boolean import BooleanDtype
17
+
18
+
19
+ class TestMultiIndexBasic:
20
+ def test_multiindex_perf_warn(self):
21
+ df = DataFrame(
22
+ {
23
+ "jim": [0, 0, 1, 1],
24
+ "joe": ["x", "x", "z", "y"],
25
+ "jolie": np.random.default_rng(2).random(4),
26
+ }
27
+ ).set_index(["jim", "joe"])
28
+
29
+ with tm.assert_produces_warning(PerformanceWarning):
30
+ df.loc[(1, "z")]
31
+
32
+ df = df.iloc[[2, 1, 3, 0]]
33
+ with tm.assert_produces_warning(PerformanceWarning):
34
+ df.loc[(0,)]
35
+
36
+ @pytest.mark.parametrize("offset", [-5, 5])
37
+ def test_indexing_over_hashtable_size_cutoff(self, monkeypatch, offset):
38
+ size_cutoff = 20
39
+ n = size_cutoff + offset
40
+
41
+ with monkeypatch.context():
42
+ monkeypatch.setattr(libindex, "_SIZE_CUTOFF", size_cutoff)
43
+ s = Series(np.arange(n), MultiIndex.from_arrays((["a"] * n, np.arange(n))))
44
+
45
+ # hai it works!
46
+ assert s[("a", 5)] == 5
47
+ assert s[("a", 6)] == 6
48
+ assert s[("a", 7)] == 7
49
+
50
+ def test_multi_nan_indexing(self):
51
+ # GH 3588
52
+ df = DataFrame(
53
+ {
54
+ "a": ["R1", "R2", np.nan, "R4"],
55
+ "b": ["C1", "C2", "C3", "C4"],
56
+ "c": [10, 15, np.nan, 20],
57
+ }
58
+ )
59
+ result = df.set_index(["a", "b"], drop=False)
60
+ expected = DataFrame(
61
+ {
62
+ "a": ["R1", "R2", np.nan, "R4"],
63
+ "b": ["C1", "C2", "C3", "C4"],
64
+ "c": [10, 15, np.nan, 20],
65
+ },
66
+ index=[
67
+ Index(["R1", "R2", np.nan, "R4"], name="a"),
68
+ Index(["C1", "C2", "C3", "C4"], name="b"),
69
+ ],
70
+ )
71
+ tm.assert_frame_equal(result, expected)
72
+
73
+ def test_exclusive_nat_column_indexing(self):
74
+ # GH 38025
75
+ # test multi indexing when one column exclusively contains NaT values
76
+ df = DataFrame(
77
+ {
78
+ "a": [pd.NaT, pd.NaT, pd.NaT, pd.NaT],
79
+ "b": ["C1", "C2", "C3", "C4"],
80
+ "c": [10, 15, np.nan, 20],
81
+ }
82
+ )
83
+ df = df.set_index(["a", "b"])
84
+ expected = DataFrame(
85
+ {
86
+ "c": [10, 15, np.nan, 20],
87
+ },
88
+ index=[
89
+ Index([pd.NaT, pd.NaT, pd.NaT, pd.NaT], name="a"),
90
+ Index(["C1", "C2", "C3", "C4"], name="b"),
91
+ ],
92
+ )
93
+ tm.assert_frame_equal(df, expected)
94
+
95
+ def test_nested_tuples_duplicates(self):
96
+ # GH#30892
97
+
98
+ dti = pd.to_datetime(["20190101", "20190101", "20190102"])
99
+ idx = Index(["a", "a", "c"])
100
+ mi = MultiIndex.from_arrays([dti, idx], names=["index1", "index2"])
101
+
102
+ df = DataFrame({"c1": [1, 2, 3], "c2": [np.nan, np.nan, np.nan]}, index=mi)
103
+
104
+ expected = DataFrame({"c1": df["c1"], "c2": [1.0, 1.0, np.nan]}, index=mi)
105
+
106
+ df2 = df.copy(deep=True)
107
+ df2.loc[(dti[0], "a"), "c2"] = 1.0
108
+ tm.assert_frame_equal(df2, expected)
109
+
110
+ df3 = df.copy(deep=True)
111
+ df3.loc[[(dti[0], "a")], "c2"] = 1.0
112
+ tm.assert_frame_equal(df3, expected)
113
+
114
+ def test_multiindex_with_datatime_level_preserves_freq(self):
115
+ # https://github.com/pandas-dev/pandas/issues/35563
116
+ idx = Index(range(2), name="A")
117
+ dti = pd.date_range("2020-01-01", periods=7, freq="D", name="B")
118
+ mi = MultiIndex.from_product([idx, dti])
119
+ df = DataFrame(np.random.default_rng(2).standard_normal((14, 2)), index=mi)
120
+ result = df.loc[0].index
121
+ tm.assert_index_equal(result, dti)
122
+ assert result.freq == dti.freq
123
+
124
+ def test_multiindex_complex(self):
125
+ # GH#42145
126
+ complex_data = [1 + 2j, 4 - 3j, 10 - 1j]
127
+ non_complex_data = [3, 4, 5]
128
+ result = DataFrame(
129
+ {
130
+ "x": complex_data,
131
+ "y": non_complex_data,
132
+ "z": non_complex_data,
133
+ }
134
+ )
135
+ result.set_index(["x", "y"], inplace=True)
136
+ expected = DataFrame(
137
+ {"z": non_complex_data},
138
+ index=MultiIndex.from_arrays(
139
+ [complex_data, non_complex_data],
140
+ names=("x", "y"),
141
+ ),
142
+ )
143
+ tm.assert_frame_equal(result, expected)
144
+
145
+ def test_rename_multiindex_with_duplicates(self):
146
+ # GH 38015
147
+ mi = MultiIndex.from_tuples([("A", "cat"), ("B", "cat"), ("B", "cat")])
148
+ df = DataFrame(index=mi)
149
+ df = df.rename(index={"A": "Apple"}, level=0)
150
+
151
+ mi2 = MultiIndex.from_tuples([("Apple", "cat"), ("B", "cat"), ("B", "cat")])
152
+ expected = DataFrame(index=mi2)
153
+ tm.assert_frame_equal(df, expected)
154
+
155
+ def test_series_align_multiindex_with_nan_overlap_only(self):
156
+ # GH 38439
157
+ mi1 = MultiIndex.from_arrays([[81.0, np.nan], [np.nan, np.nan]])
158
+ mi2 = MultiIndex.from_arrays([[np.nan, 82.0], [np.nan, np.nan]])
159
+ ser1 = Series([1, 2], index=mi1)
160
+ ser2 = Series([1, 2], index=mi2)
161
+ result1, result2 = ser1.align(ser2)
162
+
163
+ mi = MultiIndex.from_arrays([[81.0, 82.0, np.nan], [np.nan, np.nan, np.nan]])
164
+ expected1 = Series([1.0, np.nan, 2.0], index=mi)
165
+ expected2 = Series([np.nan, 2.0, 1.0], index=mi)
166
+
167
+ tm.assert_series_equal(result1, expected1)
168
+ tm.assert_series_equal(result2, expected2)
169
+
170
+ def test_series_align_multiindex_with_nan(self):
171
+ # GH 38439
172
+ mi1 = MultiIndex.from_arrays([[81.0, np.nan], [np.nan, np.nan]])
173
+ mi2 = MultiIndex.from_arrays([[np.nan, 81.0], [np.nan, np.nan]])
174
+ ser1 = Series([1, 2], index=mi1)
175
+ ser2 = Series([1, 2], index=mi2)
176
+ result1, result2 = ser1.align(ser2)
177
+
178
+ mi = MultiIndex.from_arrays([[81.0, np.nan], [np.nan, np.nan]])
179
+ expected1 = Series([1, 2], index=mi)
180
+ expected2 = Series([2, 1], index=mi)
181
+
182
+ tm.assert_series_equal(result1, expected1)
183
+ tm.assert_series_equal(result2, expected2)
184
+
185
+ def test_nunique_smoke(self):
186
+ # GH 34019
187
+ n = DataFrame([[1, 2], [1, 2]]).set_index([0, 1]).index.nunique()
188
+ assert n == 1
189
+
190
+ def test_multiindex_repeated_keys(self):
191
+ # GH19414
192
+ tm.assert_series_equal(
193
+ Series([1, 2], MultiIndex.from_arrays([["a", "b"]])).loc[
194
+ ["a", "a", "b", "b"]
195
+ ],
196
+ Series([1, 1, 2, 2], MultiIndex.from_arrays([["a", "a", "b", "b"]])),
197
+ )
198
+
199
+ def test_multiindex_with_na_missing_key(self):
200
+ # GH46173
201
+ df = DataFrame.from_dict(
202
+ {
203
+ ("foo",): [1, 2, 3],
204
+ ("bar",): [5, 6, 7],
205
+ (None,): [8, 9, 0],
206
+ }
207
+ )
208
+ with pytest.raises(KeyError, match="missing_key"):
209
+ df[[("missing_key",)]]
210
+
211
+ def test_multiindex_dtype_preservation(self):
212
+ # GH51261
213
+ columns = MultiIndex.from_tuples([("A", "B")], names=["lvl1", "lvl2"])
214
+ df = DataFrame(["value"], columns=columns).astype("category")
215
+ df_no_multiindex = df["A"]
216
+ assert isinstance(df_no_multiindex["B"].dtype, CategoricalDtype)
217
+
218
+ # geopandas 1763 analogue
219
+ df = DataFrame(
220
+ [[1, 0], [0, 1]],
221
+ columns=[
222
+ ["foo", "foo"],
223
+ ["location", "location"],
224
+ ["x", "y"],
225
+ ],
226
+ ).assign(bools=Series([True, False], dtype="boolean"))
227
+ assert isinstance(df["bools"].dtype, BooleanDtype)
228
+
229
+ def test_multiindex_from_tuples_with_nan(self):
230
+ # GH#23578
231
+ result = MultiIndex.from_tuples([("a", "b", "c"), np.nan, ("d", "", "")])
232
+ expected = MultiIndex.from_tuples(
233
+ [("a", "b", "c"), (np.nan, np.nan, np.nan), ("d", "", "")]
234
+ )
235
+ tm.assert_index_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_partial.py ADDED
@@ -0,0 +1,269 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas.util._test_decorators as td
5
+
6
+ from pandas import (
7
+ DataFrame,
8
+ DatetimeIndex,
9
+ MultiIndex,
10
+ date_range,
11
+ )
12
+ import pandas._testing as tm
13
+
14
+
15
+ class TestMultiIndexPartial:
16
+ def test_getitem_partial_int(self):
17
+ # GH 12416
18
+ # with single item
19
+ l1 = [10, 20]
20
+ l2 = ["a", "b"]
21
+ df = DataFrame(index=range(2), columns=MultiIndex.from_product([l1, l2]))
22
+ expected = DataFrame(index=range(2), columns=l2)
23
+ result = df[20]
24
+ tm.assert_frame_equal(result, expected)
25
+
26
+ # with list
27
+ expected = DataFrame(
28
+ index=range(2), columns=MultiIndex.from_product([l1[1:], l2])
29
+ )
30
+ result = df[[20]]
31
+ tm.assert_frame_equal(result, expected)
32
+
33
+ # missing item:
34
+ with pytest.raises(KeyError, match="1"):
35
+ df[1]
36
+ with pytest.raises(KeyError, match=r"'\[1\] not in index'"):
37
+ df[[1]]
38
+
39
+ def test_series_slice_partial(self):
40
+ pass
41
+
42
+ def test_xs_partial(
43
+ self,
44
+ multiindex_dataframe_random_data,
45
+ multiindex_year_month_day_dataframe_random_data,
46
+ ):
47
+ frame = multiindex_dataframe_random_data
48
+ ymd = multiindex_year_month_day_dataframe_random_data
49
+ result = frame.xs("foo")
50
+ result2 = frame.loc["foo"]
51
+ expected = frame.T["foo"].T
52
+ tm.assert_frame_equal(result, expected)
53
+ tm.assert_frame_equal(result, result2)
54
+
55
+ result = ymd.xs((2000, 4))
56
+ expected = ymd.loc[2000, 4]
57
+ tm.assert_frame_equal(result, expected)
58
+
59
+ # ex from #1796
60
+ index = MultiIndex(
61
+ levels=[["foo", "bar"], ["one", "two"], [-1, 1]],
62
+ codes=[
63
+ [0, 0, 0, 0, 1, 1, 1, 1],
64
+ [0, 0, 1, 1, 0, 0, 1, 1],
65
+ [0, 1, 0, 1, 0, 1, 0, 1],
66
+ ],
67
+ )
68
+ df = DataFrame(
69
+ np.random.default_rng(2).standard_normal((8, 4)),
70
+ index=index,
71
+ columns=list("abcd"),
72
+ )
73
+
74
+ result = df.xs(("foo", "one"))
75
+ expected = df.loc["foo", "one"]
76
+ tm.assert_frame_equal(result, expected)
77
+
78
+ def test_getitem_partial(self, multiindex_year_month_day_dataframe_random_data):
79
+ ymd = multiindex_year_month_day_dataframe_random_data
80
+ ymd = ymd.T
81
+ result = ymd[2000, 2]
82
+
83
+ expected = ymd.reindex(columns=ymd.columns[ymd.columns.codes[1] == 1])
84
+ expected.columns = expected.columns.droplevel(0).droplevel(0)
85
+ tm.assert_frame_equal(result, expected)
86
+
87
+ def test_fancy_slice_partial(
88
+ self,
89
+ multiindex_dataframe_random_data,
90
+ multiindex_year_month_day_dataframe_random_data,
91
+ ):
92
+ frame = multiindex_dataframe_random_data
93
+ result = frame.loc["bar":"baz"]
94
+ expected = frame[3:7]
95
+ tm.assert_frame_equal(result, expected)
96
+
97
+ ymd = multiindex_year_month_day_dataframe_random_data
98
+ result = ymd.loc[(2000, 2):(2000, 4)]
99
+ lev = ymd.index.codes[1]
100
+ expected = ymd[(lev >= 1) & (lev <= 3)]
101
+ tm.assert_frame_equal(result, expected)
102
+
103
+ def test_getitem_partial_column_select(self):
104
+ idx = MultiIndex(
105
+ codes=[[0, 0, 0], [0, 1, 1], [1, 0, 1]],
106
+ levels=[["a", "b"], ["x", "y"], ["p", "q"]],
107
+ )
108
+ df = DataFrame(np.random.default_rng(2).random((3, 2)), index=idx)
109
+
110
+ result = df.loc[("a", "y"), :]
111
+ expected = df.loc[("a", "y")]
112
+ tm.assert_frame_equal(result, expected)
113
+
114
+ result = df.loc[("a", "y"), [1, 0]]
115
+ expected = df.loc[("a", "y")][[1, 0]]
116
+ tm.assert_frame_equal(result, expected)
117
+
118
+ with pytest.raises(KeyError, match=r"\('a', 'foo'\)"):
119
+ df.loc[("a", "foo"), :]
120
+
121
+ # TODO(ArrayManager) rewrite test to not use .values
122
+ # exp.loc[2000, 4].values[:] select multiple columns -> .values is not a view
123
+ @td.skip_array_manager_invalid_test
124
+ def test_partial_set(
125
+ self,
126
+ multiindex_year_month_day_dataframe_random_data,
127
+ using_copy_on_write,
128
+ warn_copy_on_write,
129
+ ):
130
+ # GH #397
131
+ ymd = multiindex_year_month_day_dataframe_random_data
132
+ df = ymd.copy()
133
+ exp = ymd.copy()
134
+ df.loc[2000, 4] = 0
135
+ exp.iloc[65:85] = 0
136
+ tm.assert_frame_equal(df, exp)
137
+
138
+ if using_copy_on_write:
139
+ with tm.raises_chained_assignment_error():
140
+ df["A"].loc[2000, 4] = 1
141
+ df.loc[(2000, 4), "A"] = 1
142
+ else:
143
+ with tm.raises_chained_assignment_error():
144
+ df["A"].loc[2000, 4] = 1
145
+ exp.iloc[65:85, 0] = 1
146
+ tm.assert_frame_equal(df, exp)
147
+
148
+ df.loc[2000] = 5
149
+ exp.iloc[:100] = 5
150
+ tm.assert_frame_equal(df, exp)
151
+
152
+ # this works...for now
153
+ with tm.raises_chained_assignment_error():
154
+ df["A"].iloc[14] = 5
155
+ if using_copy_on_write:
156
+ assert df["A"].iloc[14] == exp["A"].iloc[14]
157
+ else:
158
+ assert df["A"].iloc[14] == 5
159
+
160
+ @pytest.mark.parametrize("dtype", [int, float])
161
+ def test_getitem_intkey_leading_level(
162
+ self, multiindex_year_month_day_dataframe_random_data, dtype
163
+ ):
164
+ # GH#33355 dont fall-back to positional when leading level is int
165
+ ymd = multiindex_year_month_day_dataframe_random_data
166
+ levels = ymd.index.levels
167
+ ymd.index = ymd.index.set_levels([levels[0].astype(dtype)] + levels[1:])
168
+ ser = ymd["A"]
169
+ mi = ser.index
170
+ assert isinstance(mi, MultiIndex)
171
+ if dtype is int:
172
+ assert mi.levels[0].dtype == np.dtype(int)
173
+ else:
174
+ assert mi.levels[0].dtype == np.float64
175
+
176
+ assert 14 not in mi.levels[0]
177
+ assert not mi.levels[0]._should_fallback_to_positional
178
+ assert not mi._should_fallback_to_positional
179
+
180
+ with pytest.raises(KeyError, match="14"):
181
+ ser[14]
182
+
183
+ # ---------------------------------------------------------------------
184
+
185
+ def test_setitem_multiple_partial(self, multiindex_dataframe_random_data):
186
+ frame = multiindex_dataframe_random_data
187
+ expected = frame.copy()
188
+ result = frame.copy()
189
+ result.loc[["foo", "bar"]] = 0
190
+ expected.loc["foo"] = 0
191
+ expected.loc["bar"] = 0
192
+ tm.assert_frame_equal(result, expected)
193
+
194
+ expected = frame.copy()
195
+ result = frame.copy()
196
+ result.loc["foo":"bar"] = 0
197
+ expected.loc["foo"] = 0
198
+ expected.loc["bar"] = 0
199
+ tm.assert_frame_equal(result, expected)
200
+
201
+ expected = frame["A"].copy()
202
+ result = frame["A"].copy()
203
+ result.loc[["foo", "bar"]] = 0
204
+ expected.loc["foo"] = 0
205
+ expected.loc["bar"] = 0
206
+ tm.assert_series_equal(result, expected)
207
+
208
+ expected = frame["A"].copy()
209
+ result = frame["A"].copy()
210
+ result.loc["foo":"bar"] = 0
211
+ expected.loc["foo"] = 0
212
+ expected.loc["bar"] = 0
213
+ tm.assert_series_equal(result, expected)
214
+
215
+ @pytest.mark.parametrize(
216
+ "indexer, exp_idx, exp_values",
217
+ [
218
+ (
219
+ slice("2019-2", None),
220
+ DatetimeIndex(["2019-02-01"], dtype="M8[ns]"),
221
+ [2, 3],
222
+ ),
223
+ (
224
+ slice(None, "2019-2"),
225
+ date_range("2019", periods=2, freq="MS"),
226
+ [0, 1, 2, 3],
227
+ ),
228
+ ],
229
+ )
230
+ def test_partial_getitem_loc_datetime(self, indexer, exp_idx, exp_values):
231
+ # GH: 25165
232
+ date_idx = date_range("2019", periods=2, freq="MS")
233
+ df = DataFrame(
234
+ list(range(4)),
235
+ index=MultiIndex.from_product([date_idx, [0, 1]], names=["x", "y"]),
236
+ )
237
+ expected = DataFrame(
238
+ exp_values,
239
+ index=MultiIndex.from_product([exp_idx, [0, 1]], names=["x", "y"]),
240
+ )
241
+ result = df[indexer]
242
+ tm.assert_frame_equal(result, expected)
243
+ result = df.loc[indexer]
244
+ tm.assert_frame_equal(result, expected)
245
+
246
+ result = df.loc(axis=0)[indexer]
247
+ tm.assert_frame_equal(result, expected)
248
+
249
+ result = df.loc[indexer, :]
250
+ tm.assert_frame_equal(result, expected)
251
+
252
+ df2 = df.swaplevel(0, 1).sort_index()
253
+ expected = expected.swaplevel(0, 1).sort_index()
254
+
255
+ result = df2.loc[:, indexer, :]
256
+ tm.assert_frame_equal(result, expected)
257
+
258
+
259
+ def test_loc_getitem_partial_both_axis():
260
+ # gh-12660
261
+ iterables = [["a", "b"], [2, 1]]
262
+ columns = MultiIndex.from_product(iterables, names=["col1", "col2"])
263
+ rows = MultiIndex.from_product(iterables, names=["row1", "row2"])
264
+ df = DataFrame(
265
+ np.random.default_rng(2).standard_normal((4, 4)), index=rows, columns=columns
266
+ )
267
+ expected = df.iloc[:2, 2:].droplevel("row1").droplevel("col1", axis=1)
268
+ result = df.loc["a", "b"]
269
+ tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/indexing/multiindex/test_setitem.py ADDED
@@ -0,0 +1,589 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas.errors import SettingWithCopyError
5
+ import pandas.util._test_decorators as td
6
+
7
+ import pandas as pd
8
+ from pandas import (
9
+ DataFrame,
10
+ MultiIndex,
11
+ Series,
12
+ date_range,
13
+ isna,
14
+ notna,
15
+ )
16
+ import pandas._testing as tm
17
+
18
+
19
+ def assert_equal(a, b):
20
+ assert a == b
21
+
22
+
23
+ class TestMultiIndexSetItem:
24
+ def check(self, target, indexers, value, compare_fn=assert_equal, expected=None):
25
+ target.loc[indexers] = value
26
+ result = target.loc[indexers]
27
+ if expected is None:
28
+ expected = value
29
+ compare_fn(result, expected)
30
+
31
+ def test_setitem_multiindex(self):
32
+ # GH#7190
33
+ cols = ["A", "w", "l", "a", "x", "X", "d", "profit"]
34
+ index = MultiIndex.from_product(
35
+ [np.arange(0, 100), np.arange(0, 80)], names=["time", "firm"]
36
+ )
37
+ t, n = 0, 2
38
+
39
+ df = DataFrame(
40
+ np.nan,
41
+ columns=cols,
42
+ index=index,
43
+ )
44
+ self.check(target=df, indexers=((t, n), "X"), value=0)
45
+
46
+ df = DataFrame(-999, columns=cols, index=index)
47
+ self.check(target=df, indexers=((t, n), "X"), value=1)
48
+
49
+ df = DataFrame(columns=cols, index=index)
50
+ self.check(target=df, indexers=((t, n), "X"), value=2)
51
+
52
+ # gh-7218: assigning with 0-dim arrays
53
+ df = DataFrame(-999, columns=cols, index=index)
54
+ self.check(
55
+ target=df,
56
+ indexers=((t, n), "X"),
57
+ value=np.array(3),
58
+ expected=3,
59
+ )
60
+
61
+ def test_setitem_multiindex2(self):
62
+ # GH#5206
63
+ df = DataFrame(
64
+ np.arange(25).reshape(5, 5), columns="A,B,C,D,E".split(","), dtype=float
65
+ )
66
+ df["F"] = 99
67
+ row_selection = df["A"] % 2 == 0
68
+ col_selection = ["B", "C"]
69
+ df.loc[row_selection, col_selection] = df["F"]
70
+ output = DataFrame(99.0, index=[0, 2, 4], columns=["B", "C"])
71
+ tm.assert_frame_equal(df.loc[row_selection, col_selection], output)
72
+ self.check(
73
+ target=df,
74
+ indexers=(row_selection, col_selection),
75
+ value=df["F"],
76
+ compare_fn=tm.assert_frame_equal,
77
+ expected=output,
78
+ )
79
+
80
+ def test_setitem_multiindex3(self):
81
+ # GH#11372
82
+ idx = MultiIndex.from_product(
83
+ [["A", "B", "C"], date_range("2015-01-01", "2015-04-01", freq="MS")]
84
+ )
85
+ cols = MultiIndex.from_product(
86
+ [["foo", "bar"], date_range("2016-01-01", "2016-02-01", freq="MS")]
87
+ )
88
+
89
+ df = DataFrame(
90
+ np.random.default_rng(2).random((12, 4)), index=idx, columns=cols
91
+ )
92
+
93
+ subidx = MultiIndex.from_arrays(
94
+ [["A", "A"], date_range("2015-01-01", "2015-02-01", freq="MS")]
95
+ )
96
+ subcols = MultiIndex.from_arrays(
97
+ [["foo", "foo"], date_range("2016-01-01", "2016-02-01", freq="MS")]
98
+ )
99
+
100
+ vals = DataFrame(
101
+ np.random.default_rng(2).random((2, 2)), index=subidx, columns=subcols
102
+ )
103
+ self.check(
104
+ target=df,
105
+ indexers=(subidx, subcols),
106
+ value=vals,
107
+ compare_fn=tm.assert_frame_equal,
108
+ )
109
+ # set all columns
110
+ vals = DataFrame(
111
+ np.random.default_rng(2).random((2, 4)), index=subidx, columns=cols
112
+ )
113
+ self.check(
114
+ target=df,
115
+ indexers=(subidx, slice(None, None, None)),
116
+ value=vals,
117
+ compare_fn=tm.assert_frame_equal,
118
+ )
119
+ # identity
120
+ copy = df.copy()
121
+ self.check(
122
+ target=df,
123
+ indexers=(df.index, df.columns),
124
+ value=df,
125
+ compare_fn=tm.assert_frame_equal,
126
+ expected=copy,
127
+ )
128
+
129
+ # TODO(ArrayManager) df.loc["bar"] *= 2 doesn't raise an error but results in
130
+ # all NaNs -> doesn't work in the "split" path (also for BlockManager actually)
131
+ @td.skip_array_manager_not_yet_implemented
132
+ def test_multiindex_setitem(self):
133
+ # GH 3738
134
+ # setting with a multi-index right hand side
135
+ arrays = [
136
+ np.array(["bar", "bar", "baz", "qux", "qux", "bar"]),
137
+ np.array(["one", "two", "one", "one", "two", "one"]),
138
+ np.arange(0, 6, 1),
139
+ ]
140
+
141
+ df_orig = DataFrame(
142
+ np.random.default_rng(2).standard_normal((6, 3)),
143
+ index=arrays,
144
+ columns=["A", "B", "C"],
145
+ ).sort_index()
146
+
147
+ expected = df_orig.loc[["bar"]] * 2
148
+ df = df_orig.copy()
149
+ df.loc[["bar"]] *= 2
150
+ tm.assert_frame_equal(df.loc[["bar"]], expected)
151
+
152
+ # raise because these have differing levels
153
+ msg = "cannot align on a multi-index with out specifying the join levels"
154
+ with pytest.raises(TypeError, match=msg):
155
+ df.loc["bar"] *= 2
156
+
157
+ def test_multiindex_setitem2(self):
158
+ # from SO
159
+ # https://stackoverflow.com/questions/24572040/pandas-access-the-level-of-multiindex-for-inplace-operation
160
+ df_orig = DataFrame.from_dict(
161
+ {
162
+ "price": {
163
+ ("DE", "Coal", "Stock"): 2,
164
+ ("DE", "Gas", "Stock"): 4,
165
+ ("DE", "Elec", "Demand"): 1,
166
+ ("FR", "Gas", "Stock"): 5,
167
+ ("FR", "Solar", "SupIm"): 0,
168
+ ("FR", "Wind", "SupIm"): 0,
169
+ }
170
+ }
171
+ )
172
+ df_orig.index = MultiIndex.from_tuples(
173
+ df_orig.index, names=["Sit", "Com", "Type"]
174
+ )
175
+
176
+ expected = df_orig.copy()
177
+ expected.iloc[[0, 1, 3]] *= 2
178
+
179
+ idx = pd.IndexSlice
180
+ df = df_orig.copy()
181
+ df.loc[idx[:, :, "Stock"], :] *= 2
182
+ tm.assert_frame_equal(df, expected)
183
+
184
+ df = df_orig.copy()
185
+ df.loc[idx[:, :, "Stock"], "price"] *= 2
186
+ tm.assert_frame_equal(df, expected)
187
+
188
+ def test_multiindex_assignment(self):
189
+ # GH3777 part 2
190
+
191
+ # mixed dtype
192
+ df = DataFrame(
193
+ np.random.default_rng(2).integers(5, 10, size=9).reshape(3, 3),
194
+ columns=list("abc"),
195
+ index=[[4, 4, 8], [8, 10, 12]],
196
+ )
197
+ df["d"] = np.nan
198
+ arr = np.array([0.0, 1.0])
199
+
200
+ df.loc[4, "d"] = arr
201
+ tm.assert_series_equal(df.loc[4, "d"], Series(arr, index=[8, 10], name="d"))
202
+
203
+ def test_multiindex_assignment_single_dtype(
204
+ self, using_copy_on_write, warn_copy_on_write
205
+ ):
206
+ # GH3777 part 2b
207
+ # single dtype
208
+ arr = np.array([0.0, 1.0])
209
+
210
+ df = DataFrame(
211
+ np.random.default_rng(2).integers(5, 10, size=9).reshape(3, 3),
212
+ columns=list("abc"),
213
+ index=[[4, 4, 8], [8, 10, 12]],
214
+ dtype=np.int64,
215
+ )
216
+ view = df["c"].iloc[:2].values
217
+
218
+ # arr can be losslessly cast to int, so this setitem is inplace
219
+ # INFO(CoW-warn) this does not warn because we directly took .values
220
+ # above, so no reference to a pandas object is alive for `view`
221
+ df.loc[4, "c"] = arr
222
+ exp = Series(arr, index=[8, 10], name="c", dtype="int64")
223
+ result = df.loc[4, "c"]
224
+ tm.assert_series_equal(result, exp)
225
+
226
+ # extra check for inplace-ness
227
+ if not using_copy_on_write:
228
+ tm.assert_numpy_array_equal(view, exp.values)
229
+
230
+ # arr + 0.5 cannot be cast losslessly to int, so we upcast
231
+ with tm.assert_produces_warning(
232
+ FutureWarning, match="item of incompatible dtype"
233
+ ):
234
+ df.loc[4, "c"] = arr + 0.5
235
+ result = df.loc[4, "c"]
236
+ exp = exp + 0.5
237
+ tm.assert_series_equal(result, exp)
238
+
239
+ # scalar ok
240
+ with tm.assert_cow_warning(warn_copy_on_write):
241
+ df.loc[4, "c"] = 10
242
+ exp = Series(10, index=[8, 10], name="c", dtype="float64")
243
+ tm.assert_series_equal(df.loc[4, "c"], exp)
244
+
245
+ # invalid assignments
246
+ msg = "Must have equal len keys and value when setting with an iterable"
247
+ with pytest.raises(ValueError, match=msg):
248
+ df.loc[4, "c"] = [0, 1, 2, 3]
249
+
250
+ with pytest.raises(ValueError, match=msg):
251
+ df.loc[4, "c"] = [0]
252
+
253
+ # But with a length-1 listlike column indexer this behaves like
254
+ # `df.loc[4, "c"] = 0
255
+ with tm.assert_cow_warning(warn_copy_on_write):
256
+ df.loc[4, ["c"]] = [0]
257
+ assert (df.loc[4, "c"] == 0).all()
258
+
259
+ def test_groupby_example(self):
260
+ # groupby example
261
+ NUM_ROWS = 100
262
+ NUM_COLS = 10
263
+ col_names = ["A" + num for num in map(str, np.arange(NUM_COLS).tolist())]
264
+ index_cols = col_names[:5]
265
+
266
+ df = DataFrame(
267
+ np.random.default_rng(2).integers(5, size=(NUM_ROWS, NUM_COLS)),
268
+ dtype=np.int64,
269
+ columns=col_names,
270
+ )
271
+ df = df.set_index(index_cols).sort_index()
272
+ grp = df.groupby(level=index_cols[:4])
273
+ df["new_col"] = np.nan
274
+
275
+ # we are actually operating on a copy here
276
+ # but in this case, that's ok
277
+ for name, df2 in grp:
278
+ new_vals = np.arange(df2.shape[0])
279
+ df.loc[name, "new_col"] = new_vals
280
+
281
+ def test_series_setitem(
282
+ self, multiindex_year_month_day_dataframe_random_data, warn_copy_on_write
283
+ ):
284
+ ymd = multiindex_year_month_day_dataframe_random_data
285
+ s = ymd["A"]
286
+
287
+ with tm.assert_cow_warning(warn_copy_on_write):
288
+ s[2000, 3] = np.nan
289
+ assert isna(s.values[42:65]).all()
290
+ assert notna(s.values[:42]).all()
291
+ assert notna(s.values[65:]).all()
292
+
293
+ with tm.assert_cow_warning(warn_copy_on_write):
294
+ s[2000, 3, 10] = np.nan
295
+ assert isna(s.iloc[49])
296
+
297
+ with pytest.raises(KeyError, match="49"):
298
+ # GH#33355 dont fall-back to positional when leading level is int
299
+ s[49]
300
+
301
+ def test_frame_getitem_setitem_boolean(self, multiindex_dataframe_random_data):
302
+ frame = multiindex_dataframe_random_data
303
+ df = frame.T.copy()
304
+ values = df.values.copy()
305
+
306
+ result = df[df > 0]
307
+ expected = df.where(df > 0)
308
+ tm.assert_frame_equal(result, expected)
309
+
310
+ df[df > 0] = 5
311
+ values[values > 0] = 5
312
+ tm.assert_almost_equal(df.values, values)
313
+
314
+ df[df == 5] = 0
315
+ values[values == 5] = 0
316
+ tm.assert_almost_equal(df.values, values)
317
+
318
+ # a df that needs alignment first
319
+ df[df[:-1] < 0] = 2
320
+ np.putmask(values[:-1], values[:-1] < 0, 2)
321
+ tm.assert_almost_equal(df.values, values)
322
+
323
+ with pytest.raises(TypeError, match="boolean values only"):
324
+ df[df * 0] = 2
325
+
326
+ def test_frame_getitem_setitem_multislice(self):
327
+ levels = [["t1", "t2"], ["a", "b", "c"]]
328
+ codes = [[0, 0, 0, 1, 1], [0, 1, 2, 0, 1]]
329
+ midx = MultiIndex(codes=codes, levels=levels, names=[None, "id"])
330
+ df = DataFrame({"value": [1, 2, 3, 7, 8]}, index=midx)
331
+
332
+ result = df.loc[:, "value"]
333
+ tm.assert_series_equal(df["value"], result)
334
+
335
+ result = df.loc[df.index[1:3], "value"]
336
+ tm.assert_series_equal(df["value"][1:3], result)
337
+
338
+ result = df.loc[:, :]
339
+ tm.assert_frame_equal(df, result)
340
+
341
+ result = df
342
+ df.loc[:, "value"] = 10
343
+ result["value"] = 10
344
+ tm.assert_frame_equal(df, result)
345
+
346
+ df.loc[:, :] = 10
347
+ tm.assert_frame_equal(df, result)
348
+
349
+ def test_frame_setitem_multi_column(self):
350
+ df = DataFrame(
351
+ np.random.default_rng(2).standard_normal((10, 4)),
352
+ columns=[["a", "a", "b", "b"], [0, 1, 0, 1]],
353
+ )
354
+
355
+ cp = df.copy()
356
+ cp["a"] = cp["b"]
357
+ tm.assert_frame_equal(cp["a"], cp["b"])
358
+
359
+ # set with ndarray
360
+ cp = df.copy()
361
+ cp["a"] = cp["b"].values
362
+ tm.assert_frame_equal(cp["a"], cp["b"])
363
+
364
+ def test_frame_setitem_multi_column2(self):
365
+ # ---------------------------------------
366
+ # GH#1803
367
+ columns = MultiIndex.from_tuples([("A", "1"), ("A", "2"), ("B", "1")])
368
+ df = DataFrame(index=[1, 3, 5], columns=columns)
369
+
370
+ # Works, but adds a column instead of updating the two existing ones
371
+ df["A"] = 0.0 # Doesn't work
372
+ assert (df["A"].values == 0).all()
373
+
374
+ # it broadcasts
375
+ df["B", "1"] = [1, 2, 3]
376
+ df["A"] = df["B", "1"]
377
+
378
+ sliced_a1 = df["A", "1"]
379
+ sliced_a2 = df["A", "2"]
380
+ sliced_b1 = df["B", "1"]
381
+ tm.assert_series_equal(sliced_a1, sliced_b1, check_names=False)
382
+ tm.assert_series_equal(sliced_a2, sliced_b1, check_names=False)
383
+ assert sliced_a1.name == ("A", "1")
384
+ assert sliced_a2.name == ("A", "2")
385
+ assert sliced_b1.name == ("B", "1")
386
+
387
+ def test_loc_getitem_tuple_plus_columns(
388
+ self, multiindex_year_month_day_dataframe_random_data
389
+ ):
390
+ # GH #1013
391
+ ymd = multiindex_year_month_day_dataframe_random_data
392
+ df = ymd[:5]
393
+
394
+ result = df.loc[(2000, 1, 6), ["A", "B", "C"]]
395
+ expected = df.loc[2000, 1, 6][["A", "B", "C"]]
396
+ tm.assert_series_equal(result, expected)
397
+
398
+ @pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")
399
+ def test_loc_getitem_setitem_slice_integers(self, frame_or_series):
400
+ index = MultiIndex(
401
+ levels=[[0, 1, 2], [0, 2]], codes=[[0, 0, 1, 1, 2, 2], [0, 1, 0, 1, 0, 1]]
402
+ )
403
+
404
+ obj = DataFrame(
405
+ np.random.default_rng(2).standard_normal((len(index), 4)),
406
+ index=index,
407
+ columns=["a", "b", "c", "d"],
408
+ )
409
+ obj = tm.get_obj(obj, frame_or_series)
410
+
411
+ res = obj.loc[1:2]
412
+ exp = obj.reindex(obj.index[2:])
413
+ tm.assert_equal(res, exp)
414
+
415
+ obj.loc[1:2] = 7
416
+ assert (obj.loc[1:2] == 7).values.all()
417
+
418
+ def test_setitem_change_dtype(self, multiindex_dataframe_random_data):
419
+ frame = multiindex_dataframe_random_data
420
+ dft = frame.T
421
+ s = dft["foo", "two"]
422
+ dft["foo", "two"] = s > s.median()
423
+ tm.assert_series_equal(dft["foo", "two"], s > s.median())
424
+ # assert isinstance(dft._data.blocks[1].items, MultiIndex)
425
+
426
+ reindexed = dft.reindex(columns=[("foo", "two")])
427
+ tm.assert_series_equal(reindexed["foo", "two"], s > s.median())
428
+
429
+ def test_set_column_scalar_with_loc(
430
+ self, multiindex_dataframe_random_data, using_copy_on_write, warn_copy_on_write
431
+ ):
432
+ frame = multiindex_dataframe_random_data
433
+ subset = frame.index[[1, 4, 5]]
434
+
435
+ frame.loc[subset] = 99
436
+ assert (frame.loc[subset].values == 99).all()
437
+
438
+ frame_original = frame.copy()
439
+ col = frame["B"]
440
+ with tm.assert_cow_warning(warn_copy_on_write):
441
+ col[subset] = 97
442
+ if using_copy_on_write:
443
+ # chained setitem doesn't work with CoW
444
+ tm.assert_frame_equal(frame, frame_original)
445
+ else:
446
+ assert (frame.loc[subset, "B"] == 97).all()
447
+
448
+ def test_nonunique_assignment_1750(self):
449
+ df = DataFrame(
450
+ [[1, 1, "x", "X"], [1, 1, "y", "Y"], [1, 2, "z", "Z"]], columns=list("ABCD")
451
+ )
452
+
453
+ df = df.set_index(["A", "B"])
454
+ mi = MultiIndex.from_tuples([(1, 1)])
455
+
456
+ df.loc[mi, "C"] = "_"
457
+
458
+ assert (df.xs((1, 1))["C"] == "_").all()
459
+
460
+ def test_astype_assignment_with_dups(self):
461
+ # GH 4686
462
+ # assignment with dups that has a dtype change
463
+ cols = MultiIndex.from_tuples([("A", "1"), ("B", "1"), ("A", "2")])
464
+ df = DataFrame(np.arange(3).reshape((1, 3)), columns=cols, dtype=object)
465
+ index = df.index.copy()
466
+
467
+ df["A"] = df["A"].astype(np.float64)
468
+ tm.assert_index_equal(df.index, index)
469
+
470
+ def test_setitem_nonmonotonic(self):
471
+ # https://github.com/pandas-dev/pandas/issues/31449
472
+ index = MultiIndex.from_tuples(
473
+ [("a", "c"), ("b", "x"), ("a", "d")], names=["l1", "l2"]
474
+ )
475
+ df = DataFrame(data=[0, 1, 2], index=index, columns=["e"])
476
+ df.loc["a", "e"] = np.arange(99, 101, dtype="int64")
477
+ expected = DataFrame({"e": [99, 1, 100]}, index=index)
478
+ tm.assert_frame_equal(df, expected)
479
+
480
+
481
+ class TestSetitemWithExpansionMultiIndex:
482
+ def test_setitem_new_column_mixed_depth(self):
483
+ arrays = [
484
+ ["a", "top", "top", "routine1", "routine1", "routine2"],
485
+ ["", "OD", "OD", "result1", "result2", "result1"],
486
+ ["", "wx", "wy", "", "", ""],
487
+ ]
488
+
489
+ tuples = sorted(zip(*arrays))
490
+ index = MultiIndex.from_tuples(tuples)
491
+ df = DataFrame(np.random.default_rng(2).standard_normal((4, 6)), columns=index)
492
+
493
+ result = df.copy()
494
+ expected = df.copy()
495
+ result["b"] = [1, 2, 3, 4]
496
+ expected["b", "", ""] = [1, 2, 3, 4]
497
+ tm.assert_frame_equal(result, expected)
498
+
499
+ def test_setitem_new_column_all_na(self):
500
+ # GH#1534
501
+ mix = MultiIndex.from_tuples([("1a", "2a"), ("1a", "2b"), ("1a", "2c")])
502
+ df = DataFrame([[1, 2], [3, 4], [5, 6]], index=mix)
503
+ s = Series({(1, 1): 1, (1, 2): 2})
504
+ df["new"] = s
505
+ assert df["new"].isna().all()
506
+
507
+ def test_setitem_enlargement_keep_index_names(self):
508
+ # GH#53053
509
+ mi = MultiIndex.from_tuples([(1, 2, 3)], names=["i1", "i2", "i3"])
510
+ df = DataFrame(data=[[10, 20, 30]], index=mi, columns=["A", "B", "C"])
511
+ df.loc[(0, 0, 0)] = df.loc[(1, 2, 3)]
512
+ mi_expected = MultiIndex.from_tuples(
513
+ [(1, 2, 3), (0, 0, 0)], names=["i1", "i2", "i3"]
514
+ )
515
+ expected = DataFrame(
516
+ data=[[10, 20, 30], [10, 20, 30]],
517
+ index=mi_expected,
518
+ columns=["A", "B", "C"],
519
+ )
520
+ tm.assert_frame_equal(df, expected)
521
+
522
+
523
+ @td.skip_array_manager_invalid_test # df["foo"] select multiple columns -> .values
524
+ # is not a view
525
+ def test_frame_setitem_view_direct(
526
+ multiindex_dataframe_random_data, using_copy_on_write
527
+ ):
528
+ # this works because we are modifying the underlying array
529
+ # really a no-no
530
+ df = multiindex_dataframe_random_data.T
531
+ if using_copy_on_write:
532
+ with pytest.raises(ValueError, match="read-only"):
533
+ df["foo"].values[:] = 0
534
+ assert (df["foo"].values != 0).all()
535
+ else:
536
+ df["foo"].values[:] = 0
537
+ assert (df["foo"].values == 0).all()
538
+
539
+
540
+ def test_frame_setitem_copy_raises(
541
+ multiindex_dataframe_random_data, using_copy_on_write, warn_copy_on_write
542
+ ):
543
+ # will raise/warn as its chained assignment
544
+ df = multiindex_dataframe_random_data.T
545
+ if using_copy_on_write or warn_copy_on_write:
546
+ with tm.raises_chained_assignment_error():
547
+ df["foo"]["one"] = 2
548
+ else:
549
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
550
+ with pytest.raises(SettingWithCopyError, match=msg):
551
+ with tm.raises_chained_assignment_error():
552
+ df["foo"]["one"] = 2
553
+
554
+
555
+ def test_frame_setitem_copy_no_write(
556
+ multiindex_dataframe_random_data, using_copy_on_write, warn_copy_on_write
557
+ ):
558
+ frame = multiindex_dataframe_random_data.T
559
+ expected = frame
560
+ df = frame.copy()
561
+ if using_copy_on_write or warn_copy_on_write:
562
+ with tm.raises_chained_assignment_error():
563
+ df["foo"]["one"] = 2
564
+ else:
565
+ msg = "A value is trying to be set on a copy of a slice from a DataFrame"
566
+ with pytest.raises(SettingWithCopyError, match=msg):
567
+ with tm.raises_chained_assignment_error():
568
+ df["foo"]["one"] = 2
569
+
570
+ result = df
571
+ tm.assert_frame_equal(result, expected)
572
+
573
+
574
+ def test_frame_setitem_partial_multiindex():
575
+ # GH 54875
576
+ df = DataFrame(
577
+ {
578
+ "a": [1, 2, 3],
579
+ "b": [3, 4, 5],
580
+ "c": 6,
581
+ "d": 7,
582
+ }
583
+ ).set_index(["a", "b", "c"])
584
+ ser = Series(8, index=df.index.droplevel("c"))
585
+ result = df.copy()
586
+ result["d"] = ser
587
+ expected = df.copy()
588
+ expected["d"] = 8
589
+ tm.assert_frame_equal(result, expected)
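
Note (not part of the committed file): the tests above revolve around label-based assignment on a MultiIndex via pd.IndexSlice. A minimal illustrative sketch of that pattern, with example data chosen to mirror the df_orig fixture used above:

import pandas as pd

# Hypothetical three-level MultiIndex frame, mirroring the fixture in test_multiindex_setitem2.
idx = pd.MultiIndex.from_tuples(
    [("DE", "Coal", "Stock"), ("DE", "Gas", "Stock"), ("FR", "Solar", "SupIm")],
    names=["Sit", "Com", "Type"],
)
df = pd.DataFrame({"price": [2, 4, 0]}, index=idx)

# Select every row whose third level equals "Stock" and scale it in place.
df.loc[pd.IndexSlice[:, :, "Stock"], "price"] *= 2
print(df)
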
env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (182 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/__pycache__/test_hashtable.cpython-310.pyc ADDED
Binary file (24.2 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/__pycache__/test_join.cpython-310.pyc ADDED
Binary file (7.86 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/__pycache__/test_lib.cpython-310.pyc ADDED
Binary file (9.73 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/__pycache__/test_libalgos.cpython-310.pyc ADDED
Binary file (4.87 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/test_hashtable.py ADDED
@@ -0,0 +1,748 @@
1
+ from collections.abc import Generator
2
+ from contextlib import contextmanager
3
+ import re
4
+ import struct
5
+ import tracemalloc
6
+
7
+ import numpy as np
8
+ import pytest
9
+
10
+ from pandas._libs import hashtable as ht
11
+
12
+ import pandas as pd
13
+ import pandas._testing as tm
14
+ from pandas.core.algorithms import isin
15
+
16
+
17
+ @contextmanager
18
+ def activated_tracemalloc() -> Generator[None, None, None]:
19
+ tracemalloc.start()
20
+ try:
21
+ yield
22
+ finally:
23
+ tracemalloc.stop()
24
+
25
+
26
+ def get_allocated_khash_memory():
27
+ snapshot = tracemalloc.take_snapshot()
28
+ snapshot = snapshot.filter_traces(
29
+ (tracemalloc.DomainFilter(True, ht.get_hashtable_trace_domain()),)
30
+ )
31
+ return sum(x.size for x in snapshot.traces)
32
+
33
+
34
+ @pytest.mark.parametrize(
35
+ "table_type, dtype",
36
+ [
37
+ (ht.PyObjectHashTable, np.object_),
38
+ (ht.Complex128HashTable, np.complex128),
39
+ (ht.Int64HashTable, np.int64),
40
+ (ht.UInt64HashTable, np.uint64),
41
+ (ht.Float64HashTable, np.float64),
42
+ (ht.Complex64HashTable, np.complex64),
43
+ (ht.Int32HashTable, np.int32),
44
+ (ht.UInt32HashTable, np.uint32),
45
+ (ht.Float32HashTable, np.float32),
46
+ (ht.Int16HashTable, np.int16),
47
+ (ht.UInt16HashTable, np.uint16),
48
+ (ht.Int8HashTable, np.int8),
49
+ (ht.UInt8HashTable, np.uint8),
50
+ (ht.IntpHashTable, np.intp),
51
+ ],
52
+ )
53
+ class TestHashTable:
54
+ def test_get_set_contains_len(self, table_type, dtype):
55
+ index = 5
56
+ table = table_type(55)
57
+ assert len(table) == 0
58
+ assert index not in table
59
+
60
+ table.set_item(index, 42)
61
+ assert len(table) == 1
62
+ assert index in table
63
+ assert table.get_item(index) == 42
64
+
65
+ table.set_item(index + 1, 41)
66
+ assert index in table
67
+ assert index + 1 in table
68
+ assert len(table) == 2
69
+ assert table.get_item(index) == 42
70
+ assert table.get_item(index + 1) == 41
71
+
72
+ table.set_item(index, 21)
73
+ assert index in table
74
+ assert index + 1 in table
75
+ assert len(table) == 2
76
+ assert table.get_item(index) == 21
77
+ assert table.get_item(index + 1) == 41
78
+ assert index + 2 not in table
79
+
80
+ table.set_item(index + 1, 21)
81
+ assert index in table
82
+ assert index + 1 in table
83
+ assert len(table) == 2
84
+ assert table.get_item(index) == 21
85
+ assert table.get_item(index + 1) == 21
86
+
87
+ with pytest.raises(KeyError, match=str(index + 2)):
88
+ table.get_item(index + 2)
89
+
90
+ def test_get_set_contains_len_mask(self, table_type, dtype):
91
+ if table_type == ht.PyObjectHashTable:
92
+ pytest.skip("Mask not supported for object")
93
+ index = 5
94
+ table = table_type(55, uses_mask=True)
95
+ assert len(table) == 0
96
+ assert index not in table
97
+
98
+ table.set_item(index, 42)
99
+ assert len(table) == 1
100
+ assert index in table
101
+ assert table.get_item(index) == 42
102
+ with pytest.raises(KeyError, match="NA"):
103
+ table.get_na()
104
+
105
+ table.set_item(index + 1, 41)
106
+ table.set_na(41)
107
+ assert pd.NA in table
108
+ assert index in table
109
+ assert index + 1 in table
110
+ assert len(table) == 3
111
+ assert table.get_item(index) == 42
112
+ assert table.get_item(index + 1) == 41
113
+ assert table.get_na() == 41
114
+
115
+ table.set_na(21)
116
+ assert index in table
117
+ assert index + 1 in table
118
+ assert len(table) == 3
119
+ assert table.get_item(index + 1) == 41
120
+ assert table.get_na() == 21
121
+ assert index + 2 not in table
122
+
123
+ with pytest.raises(KeyError, match=str(index + 2)):
124
+ table.get_item(index + 2)
125
+
126
+ def test_map_keys_to_values(self, table_type, dtype, writable):
127
+ # only Int64HashTable has this method
128
+ if table_type == ht.Int64HashTable:
129
+ N = 77
130
+ table = table_type()
131
+ keys = np.arange(N).astype(dtype)
132
+ vals = np.arange(N).astype(np.int64) + N
133
+ keys.flags.writeable = writable
134
+ vals.flags.writeable = writable
135
+ table.map_keys_to_values(keys, vals)
136
+ for i in range(N):
137
+ assert table.get_item(keys[i]) == i + N
138
+
139
+ def test_map_locations(self, table_type, dtype, writable):
140
+ N = 8
141
+ table = table_type()
142
+ keys = (np.arange(N) + N).astype(dtype)
143
+ keys.flags.writeable = writable
144
+ table.map_locations(keys)
145
+ for i in range(N):
146
+ assert table.get_item(keys[i]) == i
147
+
148
+ def test_map_locations_mask(self, table_type, dtype, writable):
149
+ if table_type == ht.PyObjectHashTable:
150
+ pytest.skip("Mask not supported for object")
151
+ N = 3
152
+ table = table_type(uses_mask=True)
153
+ keys = (np.arange(N) + N).astype(dtype)
154
+ keys.flags.writeable = writable
155
+ table.map_locations(keys, np.array([False, False, True]))
156
+ for i in range(N - 1):
157
+ assert table.get_item(keys[i]) == i
158
+
159
+ with pytest.raises(KeyError, match=re.escape(str(keys[N - 1]))):
160
+ table.get_item(keys[N - 1])
161
+
162
+ assert table.get_na() == 2
163
+
164
+ def test_lookup(self, table_type, dtype, writable):
165
+ N = 3
166
+ table = table_type()
167
+ keys = (np.arange(N) + N).astype(dtype)
168
+ keys.flags.writeable = writable
169
+ table.map_locations(keys)
170
+ result = table.lookup(keys)
171
+ expected = np.arange(N)
172
+ tm.assert_numpy_array_equal(result.astype(np.int64), expected.astype(np.int64))
173
+
174
+ def test_lookup_wrong(self, table_type, dtype):
175
+ if dtype in (np.int8, np.uint8):
176
+ N = 100
177
+ else:
178
+ N = 512
179
+ table = table_type()
180
+ keys = (np.arange(N) + N).astype(dtype)
181
+ table.map_locations(keys)
182
+ wrong_keys = np.arange(N).astype(dtype)
183
+ result = table.lookup(wrong_keys)
184
+ assert np.all(result == -1)
185
+
186
+ def test_lookup_mask(self, table_type, dtype, writable):
187
+ if table_type == ht.PyObjectHashTable:
188
+ pytest.skip("Mask not supported for object")
189
+ N = 3
190
+ table = table_type(uses_mask=True)
191
+ keys = (np.arange(N) + N).astype(dtype)
192
+ mask = np.array([False, True, False])
193
+ keys.flags.writeable = writable
194
+ table.map_locations(keys, mask)
195
+ result = table.lookup(keys, mask)
196
+ expected = np.arange(N)
197
+ tm.assert_numpy_array_equal(result.astype(np.int64), expected.astype(np.int64))
198
+
199
+ result = table.lookup(np.array([1 + N]).astype(dtype), np.array([False]))
200
+ tm.assert_numpy_array_equal(
201
+ result.astype(np.int64), np.array([-1], dtype=np.int64)
202
+ )
203
+
204
+ def test_unique(self, table_type, dtype, writable):
205
+ if dtype in (np.int8, np.uint8):
206
+ N = 88
207
+ else:
208
+ N = 1000
209
+ table = table_type()
210
+ expected = (np.arange(N) + N).astype(dtype)
211
+ keys = np.repeat(expected, 5)
212
+ keys.flags.writeable = writable
213
+ unique = table.unique(keys)
214
+ tm.assert_numpy_array_equal(unique, expected)
215
+
216
+ def test_tracemalloc_works(self, table_type, dtype):
217
+ if dtype in (np.int8, np.uint8):
218
+ N = 256
219
+ else:
220
+ N = 30000
221
+ keys = np.arange(N).astype(dtype)
222
+ with activated_tracemalloc():
223
+ table = table_type()
224
+ table.map_locations(keys)
225
+ used = get_allocated_khash_memory()
226
+ my_size = table.sizeof()
227
+ assert used == my_size
228
+ del table
229
+ assert get_allocated_khash_memory() == 0
230
+
231
+ def test_tracemalloc_for_empty(self, table_type, dtype):
232
+ with activated_tracemalloc():
233
+ table = table_type()
234
+ used = get_allocated_khash_memory()
235
+ my_size = table.sizeof()
236
+ assert used == my_size
237
+ del table
238
+ assert get_allocated_khash_memory() == 0
239
+
240
+ def test_get_state(self, table_type, dtype):
241
+ table = table_type(1000)
242
+ state = table.get_state()
243
+ assert state["size"] == 0
244
+ assert state["n_occupied"] == 0
245
+ assert "n_buckets" in state
246
+ assert "upper_bound" in state
247
+
248
+ @pytest.mark.parametrize("N", range(1, 110))
249
+ def test_no_reallocation(self, table_type, dtype, N):
250
+ keys = np.arange(N).astype(dtype)
251
+ preallocated_table = table_type(N)
252
+ n_buckets_start = preallocated_table.get_state()["n_buckets"]
253
+ preallocated_table.map_locations(keys)
254
+ n_buckets_end = preallocated_table.get_state()["n_buckets"]
255
+ # original number of buckets was enough:
256
+ assert n_buckets_start == n_buckets_end
257
+ # check with clean table (not too much preallocated)
258
+ clean_table = table_type()
259
+ clean_table.map_locations(keys)
260
+ assert n_buckets_start == clean_table.get_state()["n_buckets"]
261
+
262
+
263
+ class TestHashTableUnsorted:
264
+ # TODO: moved from test_algos; may be redundancies with other tests
265
+ def test_string_hashtable_set_item_signature(self):
266
+ # GH#30419 fix typing in StringHashTable.set_item to prevent segfault
267
+ tbl = ht.StringHashTable()
268
+
269
+ tbl.set_item("key", 1)
270
+ assert tbl.get_item("key") == 1
271
+
272
+ with pytest.raises(TypeError, match="'key' has incorrect type"):
273
+ # key arg typed as string, not object
274
+ tbl.set_item(4, 6)
275
+ with pytest.raises(TypeError, match="'val' has incorrect type"):
276
+ tbl.get_item(4)
277
+
278
+ def test_lookup_nan(self, writable):
279
+ # GH#21688 ensure we can deal with readonly memory views
280
+ xs = np.array([2.718, 3.14, np.nan, -7, 5, 2, 3])
281
+ xs.setflags(write=writable)
282
+ m = ht.Float64HashTable()
283
+ m.map_locations(xs)
284
+ tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.intp))
285
+
286
+ def test_add_signed_zeros(self):
287
+ # GH#21866 inconsistent hash-function for float64
288
+ # default hash-function would lead to different hash-buckets
289
+ # for 0.0 and -0.0 if there are more than 2^30 hash-buckets
290
+ # but this would mean 16GB
291
+ N = 4 # 12 * 10**8 would trigger the error, if you have enough memory
292
+ m = ht.Float64HashTable(N)
293
+ m.set_item(0.0, 0)
294
+ m.set_item(-0.0, 0)
295
+ assert len(m) == 1 # 0.0 and -0.0 are equivalent
296
+
297
+ def test_add_different_nans(self):
298
+ # GH#21866 inconsistent hash-function for float64
299
+ # create different nans from bit-patterns:
300
+ NAN1 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000000))[0]
301
+ NAN2 = struct.unpack("d", struct.pack("=Q", 0x7FF8000000000001))[0]
302
+ assert NAN1 != NAN1
303
+ assert NAN2 != NAN2
304
+ # default hash function would lead to different hash-buckets
305
+ # for NAN1 and NAN2 even if there are only 4 buckets:
306
+ m = ht.Float64HashTable()
307
+ m.set_item(NAN1, 0)
308
+ m.set_item(NAN2, 0)
309
+ assert len(m) == 1 # NAN1 and NAN2 are equivalent
310
+
311
+ def test_lookup_overflow(self, writable):
312
+ xs = np.array([1, 2, 2**63], dtype=np.uint64)
313
+ # GH 21688 ensure we can deal with readonly memory views
314
+ xs.setflags(write=writable)
315
+ m = ht.UInt64HashTable()
316
+ m.map_locations(xs)
317
+ tm.assert_numpy_array_equal(m.lookup(xs), np.arange(len(xs), dtype=np.intp))
318
+
319
+ @pytest.mark.parametrize("nvals", [0, 10]) # resizing to 0 is special case
320
+ @pytest.mark.parametrize(
321
+ "htable, uniques, dtype, safely_resizes",
322
+ [
323
+ (ht.PyObjectHashTable, ht.ObjectVector, "object", False),
324
+ (ht.StringHashTable, ht.ObjectVector, "object", True),
325
+ (ht.Float64HashTable, ht.Float64Vector, "float64", False),
326
+ (ht.Int64HashTable, ht.Int64Vector, "int64", False),
327
+ (ht.Int32HashTable, ht.Int32Vector, "int32", False),
328
+ (ht.UInt64HashTable, ht.UInt64Vector, "uint64", False),
329
+ ],
330
+ )
331
+ def test_vector_resize(
332
+ self, writable, htable, uniques, dtype, safely_resizes, nvals
333
+ ):
334
+ # Test for memory errors after internal vector
335
+ # reallocations (GH 7157)
336
+ # Changed from using np.random.default_rng(2).rand to range
337
+ # which could cause flaky CI failures when safely_resizes=False
338
+ vals = np.array(range(1000), dtype=dtype)
339
+
340
+ # GH 21688 ensures we can deal with read-only memory views
341
+ vals.setflags(write=writable)
342
+
343
+ # initialise instances; cannot initialise in parametrization,
344
+ # as otherwise external views would be held on the array (which is
345
+ # one of the things this test is checking)
346
+ htable = htable()
347
+ uniques = uniques()
348
+
349
+ # get_labels may append to uniques
350
+ htable.get_labels(vals[:nvals], uniques, 0, -1)
351
+ # to_array() sets an external_view_exists flag on uniques.
352
+ tmp = uniques.to_array()
353
+ oldshape = tmp.shape
354
+
355
+ # subsequent get_labels() calls can no longer append to it
356
+ # (except for StringHashTables + ObjectVector)
357
+ if safely_resizes:
358
+ htable.get_labels(vals, uniques, 0, -1)
359
+ else:
360
+ with pytest.raises(ValueError, match="external reference.*"):
361
+ htable.get_labels(vals, uniques, 0, -1)
362
+
363
+ uniques.to_array() # should not raise here
364
+ assert tmp.shape == oldshape
365
+
366
+ @pytest.mark.parametrize(
367
+ "hashtable",
368
+ [
369
+ ht.PyObjectHashTable,
370
+ ht.StringHashTable,
371
+ ht.Float64HashTable,
372
+ ht.Int64HashTable,
373
+ ht.Int32HashTable,
374
+ ht.UInt64HashTable,
375
+ ],
376
+ )
377
+ def test_hashtable_large_sizehint(self, hashtable):
378
+ # GH#22729 smoketest for not raising when passing a large size_hint
379
+ size_hint = np.iinfo(np.uint32).max + 1
380
+ hashtable(size_hint=size_hint)
381
+
382
+
383
+ class TestPyObjectHashTableWithNans:
384
+ def test_nan_float(self):
385
+ nan1 = float("nan")
386
+ nan2 = float("nan")
387
+ assert nan1 is not nan2
388
+ table = ht.PyObjectHashTable()
389
+ table.set_item(nan1, 42)
390
+ assert table.get_item(nan2) == 42
391
+
392
+ def test_nan_complex_both(self):
393
+ nan1 = complex(float("nan"), float("nan"))
394
+ nan2 = complex(float("nan"), float("nan"))
395
+ assert nan1 is not nan2
396
+ table = ht.PyObjectHashTable()
397
+ table.set_item(nan1, 42)
398
+ assert table.get_item(nan2) == 42
399
+
400
+ def test_nan_complex_real(self):
401
+ nan1 = complex(float("nan"), 1)
402
+ nan2 = complex(float("nan"), 1)
403
+ other = complex(float("nan"), 2)
404
+ assert nan1 is not nan2
405
+ table = ht.PyObjectHashTable()
406
+ table.set_item(nan1, 42)
407
+ assert table.get_item(nan2) == 42
408
+ with pytest.raises(KeyError, match=None) as error:
409
+ table.get_item(other)
410
+ assert str(error.value) == str(other)
411
+
412
+ def test_nan_complex_imag(self):
413
+ nan1 = complex(1, float("nan"))
414
+ nan2 = complex(1, float("nan"))
415
+ other = complex(2, float("nan"))
416
+ assert nan1 is not nan2
417
+ table = ht.PyObjectHashTable()
418
+ table.set_item(nan1, 42)
419
+ assert table.get_item(nan2) == 42
420
+ with pytest.raises(KeyError, match=None) as error:
421
+ table.get_item(other)
422
+ assert str(error.value) == str(other)
423
+
424
+ def test_nan_in_tuple(self):
425
+ nan1 = (float("nan"),)
426
+ nan2 = (float("nan"),)
427
+ assert nan1[0] is not nan2[0]
428
+ table = ht.PyObjectHashTable()
429
+ table.set_item(nan1, 42)
430
+ assert table.get_item(nan2) == 42
431
+
432
+ def test_nan_in_nested_tuple(self):
433
+ nan1 = (1, (2, (float("nan"),)))
434
+ nan2 = (1, (2, (float("nan"),)))
435
+ other = (1, 2)
436
+ table = ht.PyObjectHashTable()
437
+ table.set_item(nan1, 42)
438
+ assert table.get_item(nan2) == 42
439
+ with pytest.raises(KeyError, match=None) as error:
440
+ table.get_item(other)
441
+ assert str(error.value) == str(other)
442
+
443
+
444
+ def test_hash_equal_tuple_with_nans():
445
+ a = (float("nan"), (float("nan"), float("nan")))
446
+ b = (float("nan"), (float("nan"), float("nan")))
447
+ assert ht.object_hash(a) == ht.object_hash(b)
448
+ assert ht.objects_are_equal(a, b)
449
+
450
+
451
+ def test_get_labels_groupby_for_Int64(writable):
452
+ table = ht.Int64HashTable()
453
+ vals = np.array([1, 2, -1, 2, 1, -1], dtype=np.int64)
454
+ vals.flags.writeable = writable
455
+ arr, unique = table.get_labels_groupby(vals)
456
+ expected_arr = np.array([0, 1, -1, 1, 0, -1], dtype=np.intp)
457
+ expected_unique = np.array([1, 2], dtype=np.int64)
458
+ tm.assert_numpy_array_equal(arr, expected_arr)
459
+ tm.assert_numpy_array_equal(unique, expected_unique)
460
+
461
+
462
+ def test_tracemalloc_works_for_StringHashTable():
463
+ N = 1000
464
+ keys = np.arange(N).astype(np.str_).astype(np.object_)
465
+ with activated_tracemalloc():
466
+ table = ht.StringHashTable()
467
+ table.map_locations(keys)
468
+ used = get_allocated_khash_memory()
469
+ my_size = table.sizeof()
470
+ assert used == my_size
471
+ del table
472
+ assert get_allocated_khash_memory() == 0
473
+
474
+
475
+ def test_tracemalloc_for_empty_StringHashTable():
476
+ with activated_tracemalloc():
477
+ table = ht.StringHashTable()
478
+ used = get_allocated_khash_memory()
479
+ my_size = table.sizeof()
480
+ assert used == my_size
481
+ del table
482
+ assert get_allocated_khash_memory() == 0
483
+
484
+
485
+ @pytest.mark.parametrize("N", range(1, 110))
486
+ def test_no_reallocation_StringHashTable(N):
487
+ keys = np.arange(N).astype(np.str_).astype(np.object_)
488
+ preallocated_table = ht.StringHashTable(N)
489
+ n_buckets_start = preallocated_table.get_state()["n_buckets"]
490
+ preallocated_table.map_locations(keys)
491
+ n_buckets_end = preallocated_table.get_state()["n_buckets"]
492
+ # original number of buckets was enough:
493
+ assert n_buckets_start == n_buckets_end
494
+ # check with clean table (not too much preallocated)
495
+ clean_table = ht.StringHashTable()
496
+ clean_table.map_locations(keys)
497
+ assert n_buckets_start == clean_table.get_state()["n_buckets"]
498
+
499
+
500
+ @pytest.mark.parametrize(
501
+ "table_type, dtype",
502
+ [
503
+ (ht.Float64HashTable, np.float64),
504
+ (ht.Float32HashTable, np.float32),
505
+ (ht.Complex128HashTable, np.complex128),
506
+ (ht.Complex64HashTable, np.complex64),
507
+ ],
508
+ )
509
+ class TestHashTableWithNans:
510
+ def test_get_set_contains_len(self, table_type, dtype):
511
+ index = float("nan")
512
+ table = table_type()
513
+ assert index not in table
514
+
515
+ table.set_item(index, 42)
516
+ assert len(table) == 1
517
+ assert index in table
518
+ assert table.get_item(index) == 42
519
+
520
+ table.set_item(index, 41)
521
+ assert len(table) == 1
522
+ assert index in table
523
+ assert table.get_item(index) == 41
524
+
525
+ def test_map_locations(self, table_type, dtype):
526
+ N = 10
527
+ table = table_type()
528
+ keys = np.full(N, np.nan, dtype=dtype)
529
+ table.map_locations(keys)
530
+ assert len(table) == 1
531
+ assert table.get_item(np.nan) == N - 1
532
+
533
+ def test_unique(self, table_type, dtype):
534
+ N = 1020
535
+ table = table_type()
536
+ keys = np.full(N, np.nan, dtype=dtype)
537
+ unique = table.unique(keys)
538
+ assert np.all(np.isnan(unique)) and len(unique) == 1
539
+
540
+
541
+ def test_unique_for_nan_objects_floats():
542
+ table = ht.PyObjectHashTable()
543
+ keys = np.array([float("nan") for i in range(50)], dtype=np.object_)
544
+ unique = table.unique(keys)
545
+ assert len(unique) == 1
546
+
547
+
548
+ def test_unique_for_nan_objects_complex():
549
+ table = ht.PyObjectHashTable()
550
+ keys = np.array([complex(float("nan"), 1.0) for i in range(50)], dtype=np.object_)
551
+ unique = table.unique(keys)
552
+ assert len(unique) == 1
553
+
554
+
555
+ def test_unique_for_nan_objects_tuple():
556
+ table = ht.PyObjectHashTable()
557
+ keys = np.array(
558
+ [1] + [(1.0, (float("nan"), 1.0)) for i in range(50)], dtype=np.object_
559
+ )
560
+ unique = table.unique(keys)
561
+ assert len(unique) == 2
562
+
563
+
564
+ @pytest.mark.parametrize(
565
+ "dtype",
566
+ [
567
+ np.object_,
568
+ np.complex128,
569
+ np.int64,
570
+ np.uint64,
571
+ np.float64,
572
+ np.complex64,
573
+ np.int32,
574
+ np.uint32,
575
+ np.float32,
576
+ np.int16,
577
+ np.uint16,
578
+ np.int8,
579
+ np.uint8,
580
+ np.intp,
581
+ ],
582
+ )
583
+ class TestHelpFunctions:
584
+ def test_value_count(self, dtype, writable):
585
+ N = 43
586
+ expected = (np.arange(N) + N).astype(dtype)
587
+ values = np.repeat(expected, 5)
588
+ values.flags.writeable = writable
589
+ keys, counts, _ = ht.value_count(values, False)
590
+ tm.assert_numpy_array_equal(np.sort(keys), expected)
591
+ assert np.all(counts == 5)
592
+
593
+ def test_value_count_mask(self, dtype):
594
+ if dtype == np.object_:
595
+ pytest.skip("mask not implemented for object dtype")
596
+ values = np.array([1] * 5, dtype=dtype)
597
+ mask = np.zeros((5,), dtype=np.bool_)
598
+ mask[1] = True
599
+ mask[4] = True
600
+ keys, counts, na_counter = ht.value_count(values, False, mask=mask)
601
+ assert len(keys) == 2
602
+ assert na_counter == 2
603
+
604
+ def test_value_count_stable(self, dtype, writable):
605
+ # GH12679
606
+ values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype)
607
+ values.flags.writeable = writable
608
+ keys, counts, _ = ht.value_count(values, False)
609
+ tm.assert_numpy_array_equal(keys, values)
610
+ assert np.all(counts == 1)
611
+
612
+ def test_duplicated_first(self, dtype, writable):
613
+ N = 100
614
+ values = np.repeat(np.arange(N).astype(dtype), 5)
615
+ values.flags.writeable = writable
616
+ result = ht.duplicated(values)
617
+ expected = np.ones_like(values, dtype=np.bool_)
618
+ expected[::5] = False
619
+ tm.assert_numpy_array_equal(result, expected)
620
+
621
+ def test_ismember_yes(self, dtype, writable):
622
+ N = 127
623
+ arr = np.arange(N).astype(dtype)
624
+ values = np.arange(N).astype(dtype)
625
+ arr.flags.writeable = writable
626
+ values.flags.writeable = writable
627
+ result = ht.ismember(arr, values)
628
+ expected = np.ones_like(values, dtype=np.bool_)
629
+ tm.assert_numpy_array_equal(result, expected)
630
+
631
+ def test_ismember_no(self, dtype):
632
+ N = 17
633
+ arr = np.arange(N).astype(dtype)
634
+ values = (np.arange(N) + N).astype(dtype)
635
+ result = ht.ismember(arr, values)
636
+ expected = np.zeros_like(values, dtype=np.bool_)
637
+ tm.assert_numpy_array_equal(result, expected)
638
+
639
+ def test_mode(self, dtype, writable):
640
+ if dtype in (np.int8, np.uint8):
641
+ N = 53
642
+ else:
643
+ N = 11111
644
+ values = np.repeat(np.arange(N).astype(dtype), 5)
645
+ values[0] = 42
646
+ values.flags.writeable = writable
647
+ result = ht.mode(values, False)[0]
648
+ assert result == 42
649
+
650
+ def test_mode_stable(self, dtype, writable):
651
+ values = np.array([2, 1, 5, 22, 3, -1, 8]).astype(dtype)
652
+ values.flags.writeable = writable
653
+ keys = ht.mode(values, False)[0]
654
+ tm.assert_numpy_array_equal(keys, values)
655
+
656
+
657
+ def test_modes_with_nans():
658
+ # GH42688, nans aren't mangled
659
+ nulls = [pd.NA, np.nan, pd.NaT, None]
660
+ values = np.array([True] + nulls * 2, dtype=np.object_)
661
+ modes = ht.mode(values, False)[0]
662
+ assert modes.size == len(nulls)
663
+
664
+
665
+ def test_unique_label_indices_intp(writable):
666
+ keys = np.array([1, 2, 2, 2, 1, 3], dtype=np.intp)
667
+ keys.flags.writeable = writable
668
+ result = ht.unique_label_indices(keys)
669
+ expected = np.array([0, 1, 5], dtype=np.intp)
670
+ tm.assert_numpy_array_equal(result, expected)
671
+
672
+
673
+ def test_unique_label_indices():
674
+ a = np.random.default_rng(2).integers(1, 1 << 10, 1 << 15).astype(np.intp)
675
+
676
+ left = ht.unique_label_indices(a)
677
+ right = np.unique(a, return_index=True)[1]
678
+
679
+ tm.assert_numpy_array_equal(left, right, check_dtype=False)
680
+
681
+ a[np.random.default_rng(2).choice(len(a), 10)] = -1
682
+ left = ht.unique_label_indices(a)
683
+ right = np.unique(a, return_index=True)[1][1:]
684
+ tm.assert_numpy_array_equal(left, right, check_dtype=False)
685
+
686
+
687
+ @pytest.mark.parametrize(
688
+ "dtype",
689
+ [
690
+ np.float64,
691
+ np.float32,
692
+ np.complex128,
693
+ np.complex64,
694
+ ],
695
+ )
696
+ class TestHelpFunctionsWithNans:
697
+ def test_value_count(self, dtype):
698
+ values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
699
+ keys, counts, _ = ht.value_count(values, True)
700
+ assert len(keys) == 0
701
+ keys, counts, _ = ht.value_count(values, False)
702
+ assert len(keys) == 1 and np.all(np.isnan(keys))
703
+ assert counts[0] == 3
704
+
705
+ def test_duplicated_first(self, dtype):
706
+ values = np.array([np.nan, np.nan, np.nan], dtype=dtype)
707
+ result = ht.duplicated(values)
708
+ expected = np.array([False, True, True])
709
+ tm.assert_numpy_array_equal(result, expected)
710
+
711
+ def test_ismember_yes(self, dtype):
712
+ arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
713
+ values = np.array([np.nan, np.nan], dtype=dtype)
714
+ result = ht.ismember(arr, values)
715
+ expected = np.array([True, True, True], dtype=np.bool_)
716
+ tm.assert_numpy_array_equal(result, expected)
717
+
718
+ def test_ismember_no(self, dtype):
719
+ arr = np.array([np.nan, np.nan, np.nan], dtype=dtype)
720
+ values = np.array([1], dtype=dtype)
721
+ result = ht.ismember(arr, values)
722
+ expected = np.array([False, False, False], dtype=np.bool_)
723
+ tm.assert_numpy_array_equal(result, expected)
724
+
725
+ def test_mode(self, dtype):
726
+ values = np.array([42, np.nan, np.nan, np.nan], dtype=dtype)
727
+ assert ht.mode(values, True)[0] == 42
728
+ assert np.isnan(ht.mode(values, False)[0])
729
+
730
+
731
+ def test_ismember_tuple_with_nans():
732
+ # GH-41836
733
+ values = [("a", float("nan")), ("b", 1)]
734
+ comps = [("a", float("nan"))]
735
+
736
+ msg = "isin with argument that is not not a Series"
737
+ with tm.assert_produces_warning(FutureWarning, match=msg):
738
+ result = isin(values, comps)
739
+ expected = np.array([True, False], dtype=np.bool_)
740
+ tm.assert_numpy_array_equal(result, expected)
741
+
742
+
743
+ def test_float_complex_int_are_equal_as_objects():
744
+ values = ["a", 5, 5.0, 5.0 + 0j]
745
+ comps = list(range(129))
746
+ result = isin(np.array(values, dtype=object), np.asarray(comps))
747
+ expected = np.array([False, True, True, True], dtype=np.bool_)
748
+ tm.assert_numpy_array_equal(result, expected)
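
For orientation (not part of the committed file): a minimal sketch of how the internal hash tables are driven, using only the calls exercised by the tests above; values are illustrative.

import numpy as np
from pandas._libs import hashtable as ht

# Map each key to the position where it was seen, then look those positions up again.
table = ht.Int64HashTable()
keys = np.array([1, 2, 2, 2, 1, 3], dtype=np.int64)
table.map_locations(keys)
print(table.lookup(keys))   # position of the last occurrence of each key
print(table.unique(keys))   # [1 2 3], in order of first appearance
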
env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/test_join.py ADDED
@@ -0,0 +1,390 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas._libs import join as libjoin
5
+ from pandas._libs.join import (
6
+ inner_join,
7
+ left_outer_join,
8
+ )
9
+
10
+ import pandas._testing as tm
11
+
12
+
13
+ class TestIndexer:
14
+ @pytest.mark.parametrize(
15
+ "dtype", ["int32", "int64", "float32", "float64", "object"]
16
+ )
17
+ def test_outer_join_indexer(self, dtype):
18
+ indexer = libjoin.outer_join_indexer
19
+
20
+ left = np.arange(3, dtype=dtype)
21
+ right = np.arange(2, 5, dtype=dtype)
22
+ empty = np.array([], dtype=dtype)
23
+
24
+ result, lindexer, rindexer = indexer(left, right)
25
+ assert isinstance(result, np.ndarray)
26
+ assert isinstance(lindexer, np.ndarray)
27
+ assert isinstance(rindexer, np.ndarray)
28
+ tm.assert_numpy_array_equal(result, np.arange(5, dtype=dtype))
29
+ exp = np.array([0, 1, 2, -1, -1], dtype=np.intp)
30
+ tm.assert_numpy_array_equal(lindexer, exp)
31
+ exp = np.array([-1, -1, 0, 1, 2], dtype=np.intp)
32
+ tm.assert_numpy_array_equal(rindexer, exp)
33
+
34
+ result, lindexer, rindexer = indexer(empty, right)
35
+ tm.assert_numpy_array_equal(result, right)
36
+ exp = np.array([-1, -1, -1], dtype=np.intp)
37
+ tm.assert_numpy_array_equal(lindexer, exp)
38
+ exp = np.array([0, 1, 2], dtype=np.intp)
39
+ tm.assert_numpy_array_equal(rindexer, exp)
40
+
41
+ result, lindexer, rindexer = indexer(left, empty)
42
+ tm.assert_numpy_array_equal(result, left)
43
+ exp = np.array([0, 1, 2], dtype=np.intp)
44
+ tm.assert_numpy_array_equal(lindexer, exp)
45
+ exp = np.array([-1, -1, -1], dtype=np.intp)
46
+ tm.assert_numpy_array_equal(rindexer, exp)
47
+
48
+ def test_cython_left_outer_join(self):
49
+ left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.intp)
50
+ right = np.array([1, 1, 0, 4, 2, 2, 1], dtype=np.intp)
51
+ max_group = 5
52
+
53
+ ls, rs = left_outer_join(left, right, max_group)
54
+
55
+ exp_ls = left.argsort(kind="mergesort")
56
+ exp_rs = right.argsort(kind="mergesort")
57
+
58
+ exp_li = np.array([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 8, 8, 9, 10])
59
+ exp_ri = np.array(
60
+ [0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 4, 5, 4, 5, 4, 5, -1, -1]
61
+ )
62
+
63
+ exp_ls = exp_ls.take(exp_li)
64
+ exp_ls[exp_li == -1] = -1
65
+
66
+ exp_rs = exp_rs.take(exp_ri)
67
+ exp_rs[exp_ri == -1] = -1
68
+
69
+ tm.assert_numpy_array_equal(ls, exp_ls, check_dtype=False)
70
+ tm.assert_numpy_array_equal(rs, exp_rs, check_dtype=False)
71
+
72
+ def test_cython_right_outer_join(self):
73
+ left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.intp)
74
+ right = np.array([1, 1, 0, 4, 2, 2, 1], dtype=np.intp)
75
+ max_group = 5
76
+
77
+ rs, ls = left_outer_join(right, left, max_group)
78
+
79
+ exp_ls = left.argsort(kind="mergesort")
80
+ exp_rs = right.argsort(kind="mergesort")
81
+
82
+ # 0 1 1 1
83
+ exp_li = np.array(
84
+ [
85
+ 0,
86
+ 1,
87
+ 2,
88
+ 3,
89
+ 4,
90
+ 5,
91
+ 3,
92
+ 4,
93
+ 5,
94
+ 3,
95
+ 4,
96
+ 5,
97
+ # 2 2 4
98
+ 6,
99
+ 7,
100
+ 8,
101
+ 6,
102
+ 7,
103
+ 8,
104
+ -1,
105
+ ]
106
+ )
107
+ exp_ri = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6])
108
+
109
+ exp_ls = exp_ls.take(exp_li)
110
+ exp_ls[exp_li == -1] = -1
111
+
112
+ exp_rs = exp_rs.take(exp_ri)
113
+ exp_rs[exp_ri == -1] = -1
114
+
115
+ tm.assert_numpy_array_equal(ls, exp_ls)
116
+ tm.assert_numpy_array_equal(rs, exp_rs)
117
+
118
+ def test_cython_inner_join(self):
119
+ left = np.array([0, 1, 2, 1, 2, 0, 0, 1, 2, 3, 3], dtype=np.intp)
120
+ right = np.array([1, 1, 0, 4, 2, 2, 1, 4], dtype=np.intp)
121
+ max_group = 5
122
+
123
+ ls, rs = inner_join(left, right, max_group)
124
+
125
+ exp_ls = left.argsort(kind="mergesort")
126
+ exp_rs = right.argsort(kind="mergesort")
127
+
128
+ exp_li = np.array([0, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 7, 7, 8, 8])
129
+ exp_ri = np.array([0, 0, 0, 1, 2, 3, 1, 2, 3, 1, 2, 3, 4, 5, 4, 5, 4, 5])
130
+
131
+ exp_ls = exp_ls.take(exp_li)
132
+ exp_ls[exp_li == -1] = -1
133
+
134
+ exp_rs = exp_rs.take(exp_ri)
135
+ exp_rs[exp_ri == -1] = -1
136
+
137
+ tm.assert_numpy_array_equal(ls, exp_ls)
138
+ tm.assert_numpy_array_equal(rs, exp_rs)
139
+
140
+
141
+ @pytest.mark.parametrize("readonly", [True, False])
142
+ def test_left_join_indexer_unique(readonly):
143
+ a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
144
+ b = np.array([2, 2, 3, 4, 4], dtype=np.int64)
145
+ if readonly:
146
+ # GH#37312, GH#37264
147
+ a.setflags(write=False)
148
+ b.setflags(write=False)
149
+
150
+ result = libjoin.left_join_indexer_unique(b, a)
151
+ expected = np.array([1, 1, 2, 3, 3], dtype=np.intp)
152
+ tm.assert_numpy_array_equal(result, expected)
153
+
154
+
155
+ def test_left_outer_join_bug():
156
+ left = np.array(
157
+ [
158
+ 0,
159
+ 1,
160
+ 0,
161
+ 1,
162
+ 1,
163
+ 2,
164
+ 3,
165
+ 1,
166
+ 0,
167
+ 2,
168
+ 1,
169
+ 2,
170
+ 0,
171
+ 1,
172
+ 1,
173
+ 2,
174
+ 3,
175
+ 2,
176
+ 3,
177
+ 2,
178
+ 1,
179
+ 1,
180
+ 3,
181
+ 0,
182
+ 3,
183
+ 2,
184
+ 3,
185
+ 0,
186
+ 0,
187
+ 2,
188
+ 3,
189
+ 2,
190
+ 0,
191
+ 3,
192
+ 1,
193
+ 3,
194
+ 0,
195
+ 1,
196
+ 3,
197
+ 0,
198
+ 0,
199
+ 1,
200
+ 0,
201
+ 3,
202
+ 1,
203
+ 0,
204
+ 1,
205
+ 0,
206
+ 1,
207
+ 1,
208
+ 0,
209
+ 2,
210
+ 2,
211
+ 2,
212
+ 2,
213
+ 2,
214
+ 0,
215
+ 3,
216
+ 1,
217
+ 2,
218
+ 0,
219
+ 0,
220
+ 3,
221
+ 1,
222
+ 3,
223
+ 2,
224
+ 2,
225
+ 0,
226
+ 1,
227
+ 3,
228
+ 0,
229
+ 2,
230
+ 3,
231
+ 2,
232
+ 3,
233
+ 3,
234
+ 2,
235
+ 3,
236
+ 3,
237
+ 1,
238
+ 3,
239
+ 2,
240
+ 0,
241
+ 0,
242
+ 3,
243
+ 1,
244
+ 1,
245
+ 1,
246
+ 0,
247
+ 2,
248
+ 3,
249
+ 3,
250
+ 1,
251
+ 2,
252
+ 0,
253
+ 3,
254
+ 1,
255
+ 2,
256
+ 0,
257
+ 2,
258
+ ],
259
+ dtype=np.intp,
260
+ )
261
+
262
+ right = np.array([3, 1], dtype=np.intp)
263
+ max_groups = 4
264
+
265
+ lidx, ridx = libjoin.left_outer_join(left, right, max_groups, sort=False)
266
+
267
+ exp_lidx = np.arange(len(left), dtype=np.intp)
268
+ exp_ridx = -np.ones(len(left), dtype=np.intp)
269
+
270
+ exp_ridx[left == 1] = 1
271
+ exp_ridx[left == 3] = 0
272
+
273
+ tm.assert_numpy_array_equal(lidx, exp_lidx)
274
+ tm.assert_numpy_array_equal(ridx, exp_ridx)
275
+
276
+
277
+ def test_inner_join_indexer():
278
+ a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
279
+ b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
280
+
281
+ index, ares, bres = libjoin.inner_join_indexer(a, b)
282
+
283
+ index_exp = np.array([3, 5], dtype=np.int64)
284
+ tm.assert_almost_equal(index, index_exp)
285
+
286
+ aexp = np.array([2, 4], dtype=np.intp)
287
+ bexp = np.array([1, 2], dtype=np.intp)
288
+ tm.assert_almost_equal(ares, aexp)
289
+ tm.assert_almost_equal(bres, bexp)
290
+
291
+ a = np.array([5], dtype=np.int64)
292
+ b = np.array([5], dtype=np.int64)
293
+
294
+ index, ares, bres = libjoin.inner_join_indexer(a, b)
295
+ tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
296
+ tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.intp))
297
+ tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.intp))
298
+
299
+
300
+ def test_outer_join_indexer():
301
+ a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
302
+ b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
303
+
304
+ index, ares, bres = libjoin.outer_join_indexer(a, b)
305
+
306
+ index_exp = np.array([0, 1, 2, 3, 4, 5, 7, 9], dtype=np.int64)
307
+ tm.assert_almost_equal(index, index_exp)
308
+
309
+ aexp = np.array([-1, 0, 1, 2, 3, 4, -1, -1], dtype=np.intp)
310
+ bexp = np.array([0, -1, -1, 1, -1, 2, 3, 4], dtype=np.intp)
311
+ tm.assert_almost_equal(ares, aexp)
312
+ tm.assert_almost_equal(bres, bexp)
313
+
314
+ a = np.array([5], dtype=np.int64)
315
+ b = np.array([5], dtype=np.int64)
316
+
317
+ index, ares, bres = libjoin.outer_join_indexer(a, b)
318
+ tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
319
+ tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.intp))
320
+ tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.intp))
321
+
322
+
323
+ def test_left_join_indexer():
324
+ a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
325
+ b = np.array([0, 3, 5, 7, 9], dtype=np.int64)
326
+
327
+ index, ares, bres = libjoin.left_join_indexer(a, b)
328
+
329
+ tm.assert_almost_equal(index, a)
330
+
331
+ aexp = np.array([0, 1, 2, 3, 4], dtype=np.intp)
332
+ bexp = np.array([-1, -1, 1, -1, 2], dtype=np.intp)
333
+ tm.assert_almost_equal(ares, aexp)
334
+ tm.assert_almost_equal(bres, bexp)
335
+
336
+ a = np.array([5], dtype=np.int64)
337
+ b = np.array([5], dtype=np.int64)
338
+
339
+ index, ares, bres = libjoin.left_join_indexer(a, b)
340
+ tm.assert_numpy_array_equal(index, np.array([5], dtype=np.int64))
341
+ tm.assert_numpy_array_equal(ares, np.array([0], dtype=np.intp))
342
+ tm.assert_numpy_array_equal(bres, np.array([0], dtype=np.intp))
343
+
344
+
345
+ def test_left_join_indexer2():
346
+ idx = np.array([1, 1, 2, 5], dtype=np.int64)
347
+ idx2 = np.array([1, 2, 5, 7, 9], dtype=np.int64)
348
+
349
+ res, lidx, ridx = libjoin.left_join_indexer(idx2, idx)
350
+
351
+ exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
352
+ tm.assert_almost_equal(res, exp_res)
353
+
354
+ exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)
355
+ tm.assert_almost_equal(lidx, exp_lidx)
356
+
357
+ exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)
358
+ tm.assert_almost_equal(ridx, exp_ridx)
359
+
360
+
361
+ def test_outer_join_indexer2():
362
+ idx = np.array([1, 1, 2, 5], dtype=np.int64)
363
+ idx2 = np.array([1, 2, 5, 7, 9], dtype=np.int64)
364
+
365
+ res, lidx, ridx = libjoin.outer_join_indexer(idx2, idx)
366
+
367
+ exp_res = np.array([1, 1, 2, 5, 7, 9], dtype=np.int64)
368
+ tm.assert_almost_equal(res, exp_res)
369
+
370
+ exp_lidx = np.array([0, 0, 1, 2, 3, 4], dtype=np.intp)
371
+ tm.assert_almost_equal(lidx, exp_lidx)
372
+
373
+ exp_ridx = np.array([0, 1, 2, 3, -1, -1], dtype=np.intp)
374
+ tm.assert_almost_equal(ridx, exp_ridx)
375
+
376
+
377
+ def test_inner_join_indexer2():
378
+ idx = np.array([1, 1, 2, 5], dtype=np.int64)
379
+ idx2 = np.array([1, 2, 5, 7, 9], dtype=np.int64)
380
+
381
+ res, lidx, ridx = libjoin.inner_join_indexer(idx2, idx)
382
+
383
+ exp_res = np.array([1, 1, 2, 5], dtype=np.int64)
384
+ tm.assert_almost_equal(res, exp_res)
385
+
386
+ exp_lidx = np.array([0, 0, 1, 2], dtype=np.intp)
387
+ tm.assert_almost_equal(lidx, exp_lidx)
388
+
389
+ exp_ridx = np.array([0, 1, 2, 3], dtype=np.intp)
390
+ tm.assert_almost_equal(ridx, exp_ridx)
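
Usage sketch (not part of the committed file) for the join indexers tested above; inputs are sorted int64 arrays and the example values match test_inner_join_indexer:

import numpy as np
from pandas._libs import join as libjoin

a = np.array([1, 2, 3, 4, 5], dtype=np.int64)
b = np.array([0, 3, 5, 7, 9], dtype=np.int64)

# Inner join: the values present in both arrays plus the positions they came from.
joined, a_idx, b_idx = libjoin.inner_join_indexer(a, b)
print(joined)   # [3 5]
print(a_idx)    # [2 4]
print(b_idx)    # [1 2]
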
env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/test_lib.py ADDED
@@ -0,0 +1,285 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas._libs import (
5
+ Timedelta,
6
+ lib,
7
+ writers as libwriters,
8
+ )
9
+ from pandas.compat import IS64
10
+
11
+ from pandas import Index
12
+ import pandas._testing as tm
13
+
14
+
15
+ class TestMisc:
16
+ def test_max_len_string_array(self):
17
+ arr = a = np.array(["foo", "b", np.nan], dtype="object")
18
+ assert libwriters.max_len_string_array(arr) == 3
19
+
20
+ # unicode
21
+ arr = a.astype("U").astype(object)
22
+ assert libwriters.max_len_string_array(arr) == 3
23
+
24
+ # bytes for python3
25
+ arr = a.astype("S").astype(object)
26
+ assert libwriters.max_len_string_array(arr) == 3
27
+
28
+ # raises
29
+ msg = "No matching signature found"
30
+ with pytest.raises(TypeError, match=msg):
31
+ libwriters.max_len_string_array(arr.astype("U"))
32
+
33
+ def test_fast_unique_multiple_list_gen_sort(self):
34
+ keys = [["p", "a"], ["n", "d"], ["a", "s"]]
35
+
36
+ gen = (key for key in keys)
37
+ expected = np.array(["a", "d", "n", "p", "s"])
38
+ out = lib.fast_unique_multiple_list_gen(gen, sort=True)
39
+ tm.assert_numpy_array_equal(np.array(out), expected)
40
+
41
+ gen = (key for key in keys)
42
+ expected = np.array(["p", "a", "n", "d", "s"])
43
+ out = lib.fast_unique_multiple_list_gen(gen, sort=False)
44
+ tm.assert_numpy_array_equal(np.array(out), expected)
45
+
46
+ def test_fast_multiget_timedelta_resos(self):
47
+ # This will become relevant for test_constructor_dict_timedelta64_index
48
+ # once Timedelta constructor preserves reso when passed a
49
+ # np.timedelta64 object
50
+ td = Timedelta(days=1)
51
+
52
+ mapping1 = {td: 1}
53
+ mapping2 = {td.as_unit("s"): 1}
54
+
55
+ oindex = Index([td * n for n in range(3)])._values.astype(object)
56
+
57
+ expected = lib.fast_multiget(mapping1, oindex)
58
+ result = lib.fast_multiget(mapping2, oindex)
59
+ tm.assert_numpy_array_equal(result, expected)
60
+
61
+ # case that can't be cast to td64ns
62
+ td = Timedelta(np.timedelta64(146000, "D"))
63
+ assert hash(td) == hash(td.as_unit("ms"))
64
+ assert hash(td) == hash(td.as_unit("us"))
65
+ mapping1 = {td: 1}
66
+ mapping2 = {td.as_unit("ms"): 1}
67
+
68
+ oindex = Index([td * n for n in range(3)])._values.astype(object)
69
+
70
+ expected = lib.fast_multiget(mapping1, oindex)
71
+ result = lib.fast_multiget(mapping2, oindex)
72
+ tm.assert_numpy_array_equal(result, expected)
73
+
74
+
75
+ class TestIndexing:
76
+ def test_maybe_indices_to_slice_left_edge(self):
77
+ target = np.arange(100)
78
+
79
+ # slice
80
+ indices = np.array([], dtype=np.intp)
81
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
82
+
83
+ assert isinstance(maybe_slice, slice)
84
+ tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
85
+
86
+ @pytest.mark.parametrize("end", [1, 2, 5, 20, 99])
87
+ @pytest.mark.parametrize("step", [1, 2, 4])
88
+ def test_maybe_indices_to_slice_left_edge_not_slice_end_steps(self, end, step):
89
+ target = np.arange(100)
90
+ indices = np.arange(0, end, step, dtype=np.intp)
91
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
92
+
93
+ assert isinstance(maybe_slice, slice)
94
+ tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
95
+
96
+ # reverse
97
+ indices = indices[::-1]
98
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
99
+
100
+ assert isinstance(maybe_slice, slice)
101
+ tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
102
+
103
+ @pytest.mark.parametrize(
104
+ "case", [[2, 1, 2, 0], [2, 2, 1, 0], [0, 1, 2, 1], [-2, 0, 2], [2, 0, -2]]
105
+ )
106
+ def test_maybe_indices_to_slice_left_edge_not_slice(self, case):
107
+ # not slice
108
+ target = np.arange(100)
109
+ indices = np.array(case, dtype=np.intp)
110
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
111
+
112
+ assert not isinstance(maybe_slice, slice)
113
+ tm.assert_numpy_array_equal(maybe_slice, indices)
114
+ tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
115
+
116
+ @pytest.mark.parametrize("start", [0, 2, 5, 20, 97, 98])
117
+ @pytest.mark.parametrize("step", [1, 2, 4])
118
+ def test_maybe_indices_to_slice_right_edge(self, start, step):
119
+ target = np.arange(100)
120
+
121
+ # slice
122
+ indices = np.arange(start, 99, step, dtype=np.intp)
123
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
124
+
125
+ assert isinstance(maybe_slice, slice)
126
+ tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
127
+
128
+ # reverse
129
+ indices = indices[::-1]
130
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
131
+
132
+ assert isinstance(maybe_slice, slice)
133
+ tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
134
+
135
+ def test_maybe_indices_to_slice_right_edge_not_slice(self):
136
+ # not slice
137
+ target = np.arange(100)
138
+ indices = np.array([97, 98, 99, 100], dtype=np.intp)
139
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
140
+
141
+ assert not isinstance(maybe_slice, slice)
142
+ tm.assert_numpy_array_equal(maybe_slice, indices)
143
+
144
+ msg = "index 100 is out of bounds for axis (0|1) with size 100"
145
+
146
+ with pytest.raises(IndexError, match=msg):
147
+ target[indices]
148
+ with pytest.raises(IndexError, match=msg):
149
+ target[maybe_slice]
150
+
151
+ indices = np.array([100, 99, 98, 97], dtype=np.intp)
152
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
153
+
154
+ assert not isinstance(maybe_slice, slice)
155
+ tm.assert_numpy_array_equal(maybe_slice, indices)
156
+
157
+ with pytest.raises(IndexError, match=msg):
158
+ target[indices]
159
+ with pytest.raises(IndexError, match=msg):
160
+ target[maybe_slice]
161
+
162
+ @pytest.mark.parametrize(
163
+ "case", [[99, 97, 99, 96], [99, 99, 98, 97], [98, 98, 97, 96]]
164
+ )
165
+ def test_maybe_indices_to_slice_right_edge_cases(self, case):
166
+ target = np.arange(100)
167
+ indices = np.array(case, dtype=np.intp)
168
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
169
+
170
+ assert not isinstance(maybe_slice, slice)
171
+ tm.assert_numpy_array_equal(maybe_slice, indices)
172
+ tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
173
+
174
+ @pytest.mark.parametrize("step", [1, 2, 4, 5, 8, 9])
175
+ def test_maybe_indices_to_slice_both_edges(self, step):
176
+ target = np.arange(10)
177
+
178
+ # slice
179
+ indices = np.arange(0, 9, step, dtype=np.intp)
180
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
181
+ assert isinstance(maybe_slice, slice)
182
+ tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
183
+
184
+ # reverse
185
+ indices = indices[::-1]
186
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
187
+ assert isinstance(maybe_slice, slice)
188
+ tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
189
+
190
+ @pytest.mark.parametrize("case", [[4, 2, 0, -2], [2, 2, 1, 0], [0, 1, 2, 1]])
191
+ def test_maybe_indices_to_slice_both_edges_not_slice(self, case):
192
+ # not slice
193
+ target = np.arange(10)
194
+ indices = np.array(case, dtype=np.intp)
195
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
196
+ assert not isinstance(maybe_slice, slice)
197
+ tm.assert_numpy_array_equal(maybe_slice, indices)
198
+ tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
199
+
200
+ @pytest.mark.parametrize("start, end", [(2, 10), (5, 25), (65, 97)])
201
+ @pytest.mark.parametrize("step", [1, 2, 4, 20])
202
+ def test_maybe_indices_to_slice_middle(self, start, end, step):
203
+ target = np.arange(100)
204
+
205
+ # slice
206
+ indices = np.arange(start, end, step, dtype=np.intp)
207
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
208
+
209
+ assert isinstance(maybe_slice, slice)
210
+ tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
211
+
212
+ # reverse
213
+ indices = indices[::-1]
214
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
215
+
216
+ assert isinstance(maybe_slice, slice)
217
+ tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
218
+
219
+ @pytest.mark.parametrize(
220
+ "case", [[14, 12, 10, 12], [12, 12, 11, 10], [10, 11, 12, 11]]
221
+ )
222
+ def test_maybe_indices_to_slice_middle_not_slice(self, case):
223
+ # not slice
224
+ target = np.arange(100)
225
+ indices = np.array(case, dtype=np.intp)
226
+ maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
227
+
228
+ assert not isinstance(maybe_slice, slice)
229
+ tm.assert_numpy_array_equal(maybe_slice, indices)
230
+ tm.assert_numpy_array_equal(target[indices], target[maybe_slice])
231
+
232
+ def test_maybe_booleans_to_slice(self):
233
+ arr = np.array([0, 0, 1, 1, 1, 0, 1], dtype=np.uint8)
234
+ result = lib.maybe_booleans_to_slice(arr)
235
+ assert result.dtype == np.bool_
236
+
237
+ result = lib.maybe_booleans_to_slice(arr[:0])
238
+ assert result == slice(0, 0)
239
+
240
+ def test_get_reverse_indexer(self):
241
+ indexer = np.array([-1, -1, 1, 2, 0, -1, 3, 4], dtype=np.intp)
242
+ result = lib.get_reverse_indexer(indexer, 5)
243
+ expected = np.array([4, 2, 3, 6, 7], dtype=np.intp)
244
+ tm.assert_numpy_array_equal(result, expected)
245
+
246
+ @pytest.mark.parametrize("dtype", ["int64", "int32"])
247
+ def test_is_range_indexer(self, dtype):
248
+ # GH#50592
249
+ left = np.arange(0, 100, dtype=dtype)
250
+ assert lib.is_range_indexer(left, 100)
251
+
252
+ @pytest.mark.skipif(
253
+ not IS64,
254
+ reason="2**31 is too big for Py_ssize_t on 32-bit. "
255
+ "It doesn't matter though since you cannot create an array that long on 32-bit",
256
+ )
257
+ @pytest.mark.parametrize("dtype", ["int64", "int32"])
258
+ def test_is_range_indexer_big_n(self, dtype):
259
+ # GH53616
260
+ left = np.arange(0, 100, dtype=dtype)
261
+
262
+ assert not lib.is_range_indexer(left, 2**31)
263
+
264
+ @pytest.mark.parametrize("dtype", ["int64", "int32"])
265
+ def test_is_range_indexer_not_equal(self, dtype):
266
+ # GH#50592
267
+ left = np.array([1, 2], dtype=dtype)
268
+ assert not lib.is_range_indexer(left, 2)
269
+
270
+ @pytest.mark.parametrize("dtype", ["int64", "int32"])
271
+ def test_is_range_indexer_not_equal_shape(self, dtype):
272
+ # GH#50592
273
+ left = np.array([0, 1, 2], dtype=dtype)
274
+ assert not lib.is_range_indexer(left, 2)
275
+
276
+
277
+ def test_cache_readonly_preserve_docstrings():
278
+ # GH18197
279
+ assert Index.hasnans.__doc__ is not None
280
+
281
+
282
+ def test_no_default_pickle():
283
+ # GH#40397
284
+ obj = tm.round_trip_pickle(lib.no_default)
285
+ assert obj is lib.no_default
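TestIndexing above repeatedly asserts one contract: for a monotonic, evenly spaced integer indexer, lib.maybe_indices_to_slice returns a slice that selects the same elements as fancy indexing would, and otherwise it hands the indexer back unchanged. A minimal sketch of that contract (illustrative values, not part of the diff):

# Editor's sketch of the equivalence exercised by TestIndexing.
import numpy as np
from pandas._libs import lib

target = np.arange(100)
indices = np.arange(10, 30, 2, dtype=np.intp)
maybe_slice = lib.maybe_indices_to_slice(indices, len(target))
assert isinstance(maybe_slice, slice)
assert (target[indices] == target[maybe_slice]).all()

not_a_slice = lib.maybe_indices_to_slice(np.array([2, 1, 2, 0], dtype=np.intp), len(target))
assert not isinstance(not_a_slice, slice)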
env-llmeval/lib/python3.10/site-packages/pandas/tests/libs/test_libalgos.py ADDED
@@ -0,0 +1,162 @@
1
+ from datetime import datetime
2
+ from itertools import permutations
3
+
4
+ import numpy as np
5
+
6
+ from pandas._libs import algos as libalgos
7
+
8
+ import pandas._testing as tm
9
+
10
+
11
+ def test_ensure_platform_int():
12
+ arr = np.arange(100, dtype=np.intp)
13
+
14
+ result = libalgos.ensure_platform_int(arr)
15
+ assert result is arr
16
+
17
+
18
+ def test_is_lexsorted():
19
+ failure = [
20
+ np.array(
21
+ ([3] * 32) + ([2] * 32) + ([1] * 32) + ([0] * 32),
22
+ dtype="int64",
23
+ ),
24
+ np.array(
25
+ list(range(31))[::-1] * 4,
26
+ dtype="int64",
27
+ ),
28
+ ]
29
+
30
+ assert not libalgos.is_lexsorted(failure)
31
+
32
+
33
+ def test_groupsort_indexer():
34
+ a = np.random.default_rng(2).integers(0, 1000, 100).astype(np.intp)
35
+ b = np.random.default_rng(2).integers(0, 1000, 100).astype(np.intp)
36
+
37
+ result = libalgos.groupsort_indexer(a, 1000)[0]
38
+
39
+ # need to use a stable sort
40
+ # np.argsort returns int, groupsort_indexer
41
+ # always returns intp
42
+ expected = np.argsort(a, kind="mergesort")
43
+ expected = expected.astype(np.intp)
44
+
45
+ tm.assert_numpy_array_equal(result, expected)
46
+
47
+ # compare with lexsort
48
+ # np.lexsort returns int, groupsort_indexer
49
+ # always returns intp
50
+ key = a * 1000 + b
51
+ result = libalgos.groupsort_indexer(key, 1000000)[0]
52
+ expected = np.lexsort((b, a))
53
+ expected = expected.astype(np.intp)
54
+
55
+ tm.assert_numpy_array_equal(result, expected)
56
+
57
+
58
+ class TestPadBackfill:
59
+ def test_backfill(self):
60
+ old = np.array([1, 5, 10], dtype=np.int64)
61
+ new = np.array(list(range(12)), dtype=np.int64)
62
+
63
+ filler = libalgos.backfill["int64_t"](old, new)
64
+
65
+ expect_filler = np.array([0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, -1], dtype=np.intp)
66
+ tm.assert_numpy_array_equal(filler, expect_filler)
67
+
68
+ # corner case
69
+ old = np.array([1, 4], dtype=np.int64)
70
+ new = np.array(list(range(5, 10)), dtype=np.int64)
71
+ filler = libalgos.backfill["int64_t"](old, new)
72
+
73
+ expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.intp)
74
+ tm.assert_numpy_array_equal(filler, expect_filler)
75
+
76
+ def test_pad(self):
77
+ old = np.array([1, 5, 10], dtype=np.int64)
78
+ new = np.array(list(range(12)), dtype=np.int64)
79
+
80
+ filler = libalgos.pad["int64_t"](old, new)
81
+
82
+ expect_filler = np.array([-1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2], dtype=np.intp)
83
+ tm.assert_numpy_array_equal(filler, expect_filler)
84
+
85
+ # corner case
86
+ old = np.array([5, 10], dtype=np.int64)
87
+ new = np.arange(5, dtype=np.int64)
88
+ filler = libalgos.pad["int64_t"](old, new)
89
+ expect_filler = np.array([-1, -1, -1, -1, -1], dtype=np.intp)
90
+ tm.assert_numpy_array_equal(filler, expect_filler)
91
+
92
+ def test_pad_backfill_object_segfault(self):
93
+ old = np.array([], dtype="O")
94
+ new = np.array([datetime(2010, 12, 31)], dtype="O")
95
+
96
+ result = libalgos.pad["object"](old, new)
97
+ expected = np.array([-1], dtype=np.intp)
98
+ tm.assert_numpy_array_equal(result, expected)
99
+
100
+ result = libalgos.pad["object"](new, old)
101
+ expected = np.array([], dtype=np.intp)
102
+ tm.assert_numpy_array_equal(result, expected)
103
+
104
+ result = libalgos.backfill["object"](old, new)
105
+ expected = np.array([-1], dtype=np.intp)
106
+ tm.assert_numpy_array_equal(result, expected)
107
+
108
+ result = libalgos.backfill["object"](new, old)
109
+ expected = np.array([], dtype=np.intp)
110
+ tm.assert_numpy_array_equal(result, expected)
111
+
112
+
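TestPadBackfill above pins down the indexer semantics: pad maps each element of new to the position of the last element of old that is less than or equal to it (forward fill), backfill maps it to the first element of old that is greater than or equal to it, and -1 marks "no such element". A minimal sketch reusing the same inputs as the tests (not part of the diff):

# Editor's sketch of the pad/backfill indexer semantics.
import numpy as np
from pandas._libs import algos as libalgos

old = np.array([1, 5, 10], dtype=np.int64)
new = np.arange(12, dtype=np.int64)
print(libalgos.pad["int64_t"](old, new))       # [-1 0 0 0 0 1 1 1 1 1 2 2]
print(libalgos.backfill["int64_t"](old, new))  # [ 0 0 1 1 1 1 2 2 2 2 2 -1]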
113
+ class TestInfinity:
114
+ def test_infinity_sort(self):
115
+ # GH#13445
116
+ # numpy's argsort can be unhappy if something is less than
117
+ # itself. Instead, let's give our infinities a self-consistent
118
+ # ordering, but outside the float extended real line.
119
+
120
+ Inf = libalgos.Infinity()
121
+ NegInf = libalgos.NegInfinity()
122
+
123
+ ref_nums = [NegInf, float("-inf"), -1e100, 0, 1e100, float("inf"), Inf]
124
+
125
+ assert all(Inf >= x for x in ref_nums)
126
+ assert all(Inf > x or x is Inf for x in ref_nums)
127
+ assert Inf >= Inf and Inf == Inf
128
+ assert not Inf < Inf and not Inf > Inf
129
+ assert libalgos.Infinity() == libalgos.Infinity()
130
+ assert not libalgos.Infinity() != libalgos.Infinity()
131
+
132
+ assert all(NegInf <= x for x in ref_nums)
133
+ assert all(NegInf < x or x is NegInf for x in ref_nums)
134
+ assert NegInf <= NegInf and NegInf == NegInf
135
+ assert not NegInf < NegInf and not NegInf > NegInf
136
+ assert libalgos.NegInfinity() == libalgos.NegInfinity()
137
+ assert not libalgos.NegInfinity() != libalgos.NegInfinity()
138
+
139
+ for perm in permutations(ref_nums):
140
+ assert sorted(perm) == ref_nums
141
+
142
+ # smoke tests
143
+ np.array([libalgos.Infinity()] * 32).argsort()
144
+ np.array([libalgos.NegInfinity()] * 32).argsort()
145
+
146
+ def test_infinity_against_nan(self):
147
+ Inf = libalgos.Infinity()
148
+ NegInf = libalgos.NegInfinity()
149
+
150
+ assert not Inf > np.nan
151
+ assert not Inf >= np.nan
152
+ assert not Inf < np.nan
153
+ assert not Inf <= np.nan
154
+ assert not Inf == np.nan
155
+ assert Inf != np.nan
156
+
157
+ assert not NegInf > np.nan
158
+ assert not NegInf >= np.nan
159
+ assert not NegInf < np.nan
160
+ assert not NegInf <= np.nan
161
+ assert not NegInf == np.nan
162
+ assert NegInf != np.nan
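TestInfinity above fixes the ordering contract for the sentinel objects used in ranking: Infinity compares above, and NegInfinity below, every float (including +/-inf); objects of the same kind compare equal; and, mirroring NaN's own behaviour, every ordering comparison against NaN is False while "!=" is True. A minimal sketch (not part of the diff):

# Editor's sketch of the Infinity/NegInfinity ordering contract.
import numpy as np
from pandas._libs import algos as libalgos

Inf, NegInf = libalgos.Infinity(), libalgos.NegInfinity()
assert NegInf < float("-inf") and float("inf") < Inf
assert Inf == libalgos.Infinity() and NegInf == libalgos.NegInfinity()
assert not (Inf > np.nan) and Inf != np.nan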
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (197 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/__pycache__/test_freq_code.cpython-310.pyc ADDED
Binary file (1.92 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/__pycache__/test_frequencies.cpython-310.pyc ADDED
Binary file (815 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/test_freq_code.py ADDED
@@ -0,0 +1,69 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas._libs.tslibs import (
5
+ Period,
6
+ to_offset,
7
+ )
8
+
9
+
10
+ @pytest.mark.parametrize(
11
+ "freqstr,exp_freqstr",
12
+ [("D", "D"), ("W", "D"), ("ME", "D"), ("s", "s"), ("min", "s"), ("h", "s")],
13
+ )
14
+ def test_get_to_timestamp_base(freqstr, exp_freqstr):
15
+ off = to_offset(freqstr)
16
+ per = Period._from_ordinal(1, off)
17
+ exp_code = to_offset(exp_freqstr)._period_dtype_code
18
+
19
+ result_code = per._dtype._get_to_timestamp_base()
20
+ assert result_code == exp_code
21
+
22
+
23
+ @pytest.mark.parametrize(
24
+ "args,expected",
25
+ [
26
+ ((1.5, "min"), (90, "s")),
27
+ ((62.4, "min"), (3744, "s")),
28
+ ((1.04, "h"), (3744, "s")),
29
+ ((1, "D"), (1, "D")),
30
+ ((0.342931, "h"), (1234551600, "us")),
31
+ ((1.2345, "D"), (106660800, "ms")),
32
+ ],
33
+ )
34
+ def test_resolution_bumping(args, expected):
35
+ # see gh-14378
36
+ off = to_offset(str(args[0]) + args[1])
37
+ assert off.n == expected[0]
38
+ assert off._prefix == expected[1]
39
+
40
+
41
+ @pytest.mark.parametrize(
42
+ "args",
43
+ [
44
+ (0.5, "ns"),
45
+ # Too much precision in the input can prevent conversion to an offset.
46
+ (0.3429324798798269273987982, "h"),
47
+ ],
48
+ )
49
+ def test_cat(args):
50
+ msg = "Invalid frequency"
51
+
52
+ with pytest.raises(ValueError, match=msg):
53
+ to_offset(str(args[0]) + args[1])
54
+
55
+
56
+ @pytest.mark.parametrize(
57
+ "freqstr,expected",
58
+ [
59
+ ("1h", "2021-01-01T09:00:00"),
60
+ ("1D", "2021-01-02T08:00:00"),
61
+ ("1W", "2021-01-03T08:00:00"),
62
+ ("1ME", "2021-01-31T08:00:00"),
63
+ ("1YE", "2021-12-31T08:00:00"),
64
+ ],
65
+ )
66
+ def test_compatibility(freqstr, expected):
67
+ ts_np = np.datetime64("2021-01-01T08:00:00.00")
68
+ do = to_offset(freqstr)
69
+ assert ts_np + do == np.datetime64(expected)
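test_resolution_bumping above documents how to_offset handles fractional multiples: the offset is rewritten as an integer count of a finer unit (gh-14378), and inputs with too much precision to be represented exactly raise ValueError instead. A minimal sketch (not part of the diff):

# Editor's sketch of resolution bumping in to_offset.
from pandas._libs.tslibs import to_offset

off = to_offset("1.5min")
assert off.n == 90 and off._prefix == "s"  # 1.5 minutes -> 90 seconds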
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/test_frequencies.py ADDED
@@ -0,0 +1,29 @@
1
+ import pytest
2
+
3
+ from pandas._libs.tslibs import offsets
4
+
5
+ from pandas.tseries.frequencies import (
6
+ is_subperiod,
7
+ is_superperiod,
8
+ )
9
+
10
+
11
+ @pytest.mark.parametrize(
12
+ "p1,p2,expected",
13
+ [
14
+ # Input validation.
15
+ (offsets.MonthEnd(), None, False),
16
+ (offsets.YearEnd(), None, False),
17
+ (None, offsets.YearEnd(), False),
18
+ (None, offsets.MonthEnd(), False),
19
+ (None, None, False),
20
+ (offsets.YearEnd(), offsets.MonthEnd(), True),
21
+ (offsets.Hour(), offsets.Minute(), True),
22
+ (offsets.Second(), offsets.Milli(), True),
23
+ (offsets.Milli(), offsets.Micro(), True),
24
+ (offsets.Micro(), offsets.Nano(), True),
25
+ ],
26
+ )
27
+ def test_super_sub_symmetry(p1, p2, expected):
28
+ assert is_superperiod(p1, p2) is expected
29
+ assert is_subperiod(p2, p1) is expected
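The parametrization above encodes a symmetry: whenever is_superperiod(p1, p2) holds, is_subperiod(p2, p1) holds as well, and None inputs are simply rejected as False. A minimal sketch (not part of the diff):

# Editor's sketch of the super/subperiod symmetry.
from pandas._libs.tslibs import offsets
from pandas.tseries.frequencies import is_subperiod, is_superperiod

assert is_superperiod(offsets.Hour(), offsets.Minute())
assert is_subperiod(offsets.Minute(), offsets.Hour())
assert not is_superperiod(None, offsets.MonthEnd())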
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/frequencies/test_inference.py ADDED
@@ -0,0 +1,558 @@
1
+ from datetime import (
2
+ datetime,
3
+ timedelta,
4
+ )
5
+
6
+ import numpy as np
7
+ import pytest
8
+
9
+ from pandas._libs.tslibs.ccalendar import (
10
+ DAYS,
11
+ MONTHS,
12
+ )
13
+ from pandas._libs.tslibs.offsets import _get_offset
14
+ from pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG
15
+ from pandas.compat import is_platform_windows
16
+
17
+ from pandas import (
18
+ DatetimeIndex,
19
+ Index,
20
+ RangeIndex,
21
+ Series,
22
+ Timestamp,
23
+ date_range,
24
+ period_range,
25
+ )
26
+ import pandas._testing as tm
27
+ from pandas.core.arrays import (
28
+ DatetimeArray,
29
+ TimedeltaArray,
30
+ )
31
+ from pandas.core.tools.datetimes import to_datetime
32
+
33
+ from pandas.tseries import (
34
+ frequencies,
35
+ offsets,
36
+ )
37
+
38
+
39
+ @pytest.fixture(
40
+ params=[
41
+ (timedelta(1), "D"),
42
+ (timedelta(hours=1), "h"),
43
+ (timedelta(minutes=1), "min"),
44
+ (timedelta(seconds=1), "s"),
45
+ (np.timedelta64(1, "ns"), "ns"),
46
+ (timedelta(microseconds=1), "us"),
47
+ (timedelta(microseconds=1000), "ms"),
48
+ ]
49
+ )
50
+ def base_delta_code_pair(request):
51
+ return request.param
52
+
53
+
54
+ freqs = (
55
+ [f"QE-{month}" for month in MONTHS]
56
+ + [f"{annual}-{month}" for annual in ["YE", "BYE"] for month in MONTHS]
57
+ + ["ME", "BME", "BMS"]
58
+ + [f"WOM-{count}{day}" for count in range(1, 5) for day in DAYS]
59
+ + [f"W-{day}" for day in DAYS]
60
+ )
61
+
62
+
63
+ @pytest.mark.parametrize("freq", freqs)
64
+ @pytest.mark.parametrize("periods", [5, 7])
65
+ def test_infer_freq_range(periods, freq):
66
+ freq = freq.upper()
67
+
68
+ gen = date_range("1/1/2000", periods=periods, freq=freq)
69
+ index = DatetimeIndex(gen.values)
70
+
71
+ if not freq.startswith("QE-"):
72
+ assert frequencies.infer_freq(index) == gen.freqstr
73
+ else:
74
+ inf_freq = frequencies.infer_freq(index)
75
+ is_dec_range = inf_freq == "QE-DEC" and gen.freqstr in (
76
+ "QE",
77
+ "QE-DEC",
78
+ "QE-SEP",
79
+ "QE-JUN",
80
+ "QE-MAR",
81
+ )
82
+ is_nov_range = inf_freq == "QE-NOV" and gen.freqstr in (
83
+ "QE-NOV",
84
+ "QE-AUG",
85
+ "QE-MAY",
86
+ "QE-FEB",
87
+ )
88
+ is_oct_range = inf_freq == "QE-OCT" and gen.freqstr in (
89
+ "QE-OCT",
90
+ "QE-JUL",
91
+ "QE-APR",
92
+ "QE-JAN",
93
+ )
94
+ assert is_dec_range or is_nov_range or is_oct_range
95
+
96
+
97
+ def test_raise_if_period_index():
98
+ index = period_range(start="1/1/1990", periods=20, freq="M")
99
+ msg = "Check the `freq` attribute instead of using infer_freq"
100
+
101
+ with pytest.raises(TypeError, match=msg):
102
+ frequencies.infer_freq(index)
103
+
104
+
105
+ def test_raise_if_too_few():
106
+ index = DatetimeIndex(["12/31/1998", "1/3/1999"])
107
+ msg = "Need at least 3 dates to infer frequency"
108
+
109
+ with pytest.raises(ValueError, match=msg):
110
+ frequencies.infer_freq(index)
111
+
112
+
113
+ def test_business_daily():
114
+ index = DatetimeIndex(["01/01/1999", "1/4/1999", "1/5/1999"])
115
+ assert frequencies.infer_freq(index) == "B"
116
+
117
+
118
+ def test_business_daily_look_alike():
119
+ # see gh-16624
120
+ #
121
+ # Do not infer "B" when the "weekend" (2-day gap) is in the wrong place.
122
+ index = DatetimeIndex(["12/31/1998", "1/3/1999", "1/4/1999"])
123
+ assert frequencies.infer_freq(index) is None
124
+
125
+
126
+ def test_day_corner():
127
+ index = DatetimeIndex(["1/1/2000", "1/2/2000", "1/3/2000"])
128
+ assert frequencies.infer_freq(index) == "D"
129
+
130
+
131
+ def test_non_datetime_index():
132
+ dates = to_datetime(["1/1/2000", "1/2/2000", "1/3/2000"])
133
+ assert frequencies.infer_freq(dates) == "D"
134
+
135
+
136
+ def test_fifth_week_of_month_infer():
137
+ # see gh-9425
138
+ #
139
+ # Only attempt to infer up to WOM-4.
140
+ index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])
141
+ assert frequencies.infer_freq(index) is None
142
+
143
+
144
+ def test_week_of_month_fake():
145
+ # All of these dates are on same day
146
+ # of week and are 4 or 5 weeks apart.
147
+ index = DatetimeIndex(["2013-08-27", "2013-10-01", "2013-10-29", "2013-11-26"])
148
+ assert frequencies.infer_freq(index) != "WOM-4TUE"
149
+
150
+
151
+ def test_fifth_week_of_month():
152
+ # see gh-9425
153
+ #
154
+ # Only supports freq up to WOM-4.
155
+ msg = (
156
+ "Of the four parameters: start, end, periods, "
157
+ "and freq, exactly three must be specified"
158
+ )
159
+
160
+ with pytest.raises(ValueError, match=msg):
161
+ date_range("2014-01-01", freq="WOM-5MON")
162
+
163
+
164
+ def test_monthly_ambiguous():
165
+ rng = DatetimeIndex(["1/31/2000", "2/29/2000", "3/31/2000"])
166
+ assert rng.inferred_freq == "ME"
167
+
168
+
169
+ def test_annual_ambiguous():
170
+ rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"])
171
+ assert rng.inferred_freq == "YE-JAN"
172
+
173
+
174
+ @pytest.mark.parametrize("count", range(1, 5))
175
+ def test_infer_freq_delta(base_delta_code_pair, count):
176
+ b = Timestamp(datetime.now())
177
+ base_delta, code = base_delta_code_pair
178
+
179
+ inc = base_delta * count
180
+ index = DatetimeIndex([b + inc * j for j in range(3)])
181
+
182
+ exp_freq = f"{count:d}{code}" if count > 1 else code
183
+ assert frequencies.infer_freq(index) == exp_freq
184
+
185
+
186
+ @pytest.mark.parametrize(
187
+ "constructor",
188
+ [
189
+ lambda now, delta: DatetimeIndex(
190
+ [now + delta * 7] + [now + delta * j for j in range(3)]
191
+ ),
192
+ lambda now, delta: DatetimeIndex(
193
+ [now + delta * j for j in range(3)] + [now + delta * 7]
194
+ ),
195
+ ],
196
+ )
197
+ def test_infer_freq_custom(base_delta_code_pair, constructor):
198
+ b = Timestamp(datetime.now())
199
+ base_delta, _ = base_delta_code_pair
200
+
201
+ index = constructor(b, base_delta)
202
+ assert frequencies.infer_freq(index) is None
203
+
204
+
205
+ @pytest.mark.parametrize(
206
+ "freq,expected", [("Q", "QE-DEC"), ("Q-NOV", "QE-NOV"), ("Q-OCT", "QE-OCT")]
207
+ )
208
+ def test_infer_freq_index(freq, expected):
209
+ rng = period_range("1959Q2", "2009Q3", freq=freq)
210
+ with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
211
+ rng = Index(rng.to_timestamp("D", how="e").astype(object))
212
+
213
+ assert rng.inferred_freq == expected
214
+
215
+
216
+ @pytest.mark.parametrize(
217
+ "expected,dates",
218
+ list(
219
+ {
220
+ "YS-JAN": ["2009-01-01", "2010-01-01", "2011-01-01", "2012-01-01"],
221
+ "QE-OCT": ["2009-01-31", "2009-04-30", "2009-07-31", "2009-10-31"],
222
+ "ME": ["2010-11-30", "2010-12-31", "2011-01-31", "2011-02-28"],
223
+ "W-SAT": ["2010-12-25", "2011-01-01", "2011-01-08", "2011-01-15"],
224
+ "D": ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"],
225
+ "h": [
226
+ "2011-12-31 22:00",
227
+ "2011-12-31 23:00",
228
+ "2012-01-01 00:00",
229
+ "2012-01-01 01:00",
230
+ ],
231
+ }.items()
232
+ ),
233
+ )
234
+ @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])
235
+ def test_infer_freq_tz(tz_naive_fixture, expected, dates, unit):
236
+ # see gh-7310, GH#55609
237
+ tz = tz_naive_fixture
238
+ idx = DatetimeIndex(dates, tz=tz).as_unit(unit)
239
+ assert idx.inferred_freq == expected
240
+
241
+
242
+ def test_infer_freq_tz_series(tz_naive_fixture):
243
+ # infer_freq should work with both tz-naive and tz-aware series. See gh-52456
244
+ tz = tz_naive_fixture
245
+ idx = date_range("2021-01-01", "2021-01-04", tz=tz)
246
+ series = idx.to_series().reset_index(drop=True)
247
+ inferred_freq = frequencies.infer_freq(series)
248
+ assert inferred_freq == "D"
249
+
250
+
251
+ @pytest.mark.parametrize(
252
+ "date_pair",
253
+ [
254
+ ["2013-11-02", "2013-11-5"], # Fall DST
255
+ ["2014-03-08", "2014-03-11"], # Spring DST
256
+ ["2014-01-01", "2014-01-03"], # Regular Time
257
+ ],
258
+ )
259
+ @pytest.mark.parametrize(
260
+ "freq",
261
+ ["h", "3h", "10min", "3601s", "3600001ms", "3600000001us", "3600000000001ns"],
262
+ )
263
+ def test_infer_freq_tz_transition(tz_naive_fixture, date_pair, freq):
264
+ # see gh-8772
265
+ tz = tz_naive_fixture
266
+ idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)
267
+ assert idx.inferred_freq == freq
268
+
269
+
270
+ def test_infer_freq_tz_transition_custom():
271
+ index = date_range("2013-11-03", periods=5, freq="3h").tz_localize(
272
+ "America/Chicago"
273
+ )
274
+ assert index.inferred_freq is None
275
+
276
+
277
+ @pytest.mark.parametrize(
278
+ "data,expected",
279
+ [
280
+ # Hourly freq in a day must result in "h"
281
+ (
282
+ [
283
+ "2014-07-01 09:00",
284
+ "2014-07-01 10:00",
285
+ "2014-07-01 11:00",
286
+ "2014-07-01 12:00",
287
+ "2014-07-01 13:00",
288
+ "2014-07-01 14:00",
289
+ ],
290
+ "h",
291
+ ),
292
+ (
293
+ [
294
+ "2014-07-01 09:00",
295
+ "2014-07-01 10:00",
296
+ "2014-07-01 11:00",
297
+ "2014-07-01 12:00",
298
+ "2014-07-01 13:00",
299
+ "2014-07-01 14:00",
300
+ "2014-07-01 15:00",
301
+ "2014-07-01 16:00",
302
+ "2014-07-02 09:00",
303
+ "2014-07-02 10:00",
304
+ "2014-07-02 11:00",
305
+ ],
306
+ "bh",
307
+ ),
308
+ (
309
+ [
310
+ "2014-07-04 09:00",
311
+ "2014-07-04 10:00",
312
+ "2014-07-04 11:00",
313
+ "2014-07-04 12:00",
314
+ "2014-07-04 13:00",
315
+ "2014-07-04 14:00",
316
+ "2014-07-04 15:00",
317
+ "2014-07-04 16:00",
318
+ "2014-07-07 09:00",
319
+ "2014-07-07 10:00",
320
+ "2014-07-07 11:00",
321
+ ],
322
+ "bh",
323
+ ),
324
+ (
325
+ [
326
+ "2014-07-04 09:00",
327
+ "2014-07-04 10:00",
328
+ "2014-07-04 11:00",
329
+ "2014-07-04 12:00",
330
+ "2014-07-04 13:00",
331
+ "2014-07-04 14:00",
332
+ "2014-07-04 15:00",
333
+ "2014-07-04 16:00",
334
+ "2014-07-07 09:00",
335
+ "2014-07-07 10:00",
336
+ "2014-07-07 11:00",
337
+ "2014-07-07 12:00",
338
+ "2014-07-07 13:00",
339
+ "2014-07-07 14:00",
340
+ "2014-07-07 15:00",
341
+ "2014-07-07 16:00",
342
+ "2014-07-08 09:00",
343
+ "2014-07-08 10:00",
344
+ "2014-07-08 11:00",
345
+ "2014-07-08 12:00",
346
+ "2014-07-08 13:00",
347
+ "2014-07-08 14:00",
348
+ "2014-07-08 15:00",
349
+ "2014-07-08 16:00",
350
+ ],
351
+ "bh",
352
+ ),
353
+ ],
354
+ )
355
+ def test_infer_freq_business_hour(data, expected):
356
+ # see gh-7905
357
+ idx = DatetimeIndex(data)
358
+ assert idx.inferred_freq == expected
359
+
360
+
361
+ def test_not_monotonic():
362
+ rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"])
363
+ rng = rng[::-1]
364
+
365
+ assert rng.inferred_freq == "-1YE-JAN"
366
+
367
+
368
+ def test_non_datetime_index2():
369
+ rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"])
370
+ vals = rng.to_pydatetime()
371
+
372
+ result = frequencies.infer_freq(vals)
373
+ assert result == rng.inferred_freq
374
+
375
+
376
+ @pytest.mark.parametrize(
377
+ "idx",
378
+ [
379
+ Index(np.arange(5), dtype=np.int64),
380
+ Index(np.arange(5), dtype=np.float64),
381
+ period_range("2020-01-01", periods=5),
382
+ RangeIndex(5),
383
+ ],
384
+ )
385
+ def test_invalid_index_types(idx):
386
+ # see gh-48439
387
+ msg = "|".join(
388
+ [
389
+ "cannot infer freq from a non-convertible",
390
+ "Check the `freq` attribute instead of using infer_freq",
391
+ ]
392
+ )
393
+
394
+ with pytest.raises(TypeError, match=msg):
395
+ frequencies.infer_freq(idx)
396
+
397
+
398
+ @pytest.mark.skipif(is_platform_windows(), reason="see gh-10822: Windows issue")
399
+ def test_invalid_index_types_unicode():
400
+ # see gh-10822
401
+ #
402
+ # Odd error message on conversions to datetime for unicode.
403
+ msg = "Unknown datetime string format"
404
+
405
+ with pytest.raises(ValueError, match=msg):
406
+ frequencies.infer_freq(Index(["ZqgszYBfuL"]))
407
+
408
+
409
+ def test_string_datetime_like_compat():
410
+ # see gh-6463
411
+ data = ["2004-01", "2004-02", "2004-03", "2004-04"]
412
+
413
+ expected = frequencies.infer_freq(data)
414
+ result = frequencies.infer_freq(Index(data))
415
+
416
+ assert result == expected
417
+
418
+
419
+ def test_series():
420
+ # see gh-6407
421
+ s = Series(date_range("20130101", "20130110"))
422
+ inferred = frequencies.infer_freq(s)
423
+ assert inferred == "D"
424
+
425
+
426
+ @pytest.mark.parametrize("end", [10, 10.0])
427
+ def test_series_invalid_type(end):
428
+ # see gh-6407
429
+ msg = "cannot infer freq from a non-convertible dtype on a Series"
430
+ s = Series(np.arange(end))
431
+
432
+ with pytest.raises(TypeError, match=msg):
433
+ frequencies.infer_freq(s)
434
+
435
+
436
+ def test_series_inconvertible_string(using_infer_string):
437
+ # see gh-6407
438
+ if using_infer_string:
439
+ msg = "cannot infer freq from"
440
+
441
+ with pytest.raises(TypeError, match=msg):
442
+ frequencies.infer_freq(Series(["foo", "bar"]))
443
+ else:
444
+ msg = "Unknown datetime string format"
445
+
446
+ with pytest.raises(ValueError, match=msg):
447
+ frequencies.infer_freq(Series(["foo", "bar"]))
448
+
449
+
450
+ @pytest.mark.parametrize("freq", [None, "ms"])
451
+ def test_series_period_index(freq):
452
+ # see gh-6407
453
+ #
454
+ # Cannot infer on PeriodIndex
455
+ msg = "cannot infer freq from a non-convertible dtype on a Series"
456
+ s = Series(period_range("2013", periods=10, freq=freq))
457
+
458
+ with pytest.raises(TypeError, match=msg):
459
+ frequencies.infer_freq(s)
460
+
461
+
462
+ @pytest.mark.parametrize("freq", ["ME", "ms", "s"])
463
+ def test_series_datetime_index(freq):
464
+ s = Series(date_range("20130101", periods=10, freq=freq))
465
+ inferred = frequencies.infer_freq(s)
466
+ assert inferred == freq
467
+
468
+
469
+ @pytest.mark.parametrize(
470
+ "offset_func",
471
+ [
472
+ _get_offset,
473
+ lambda freq: date_range("2011-01-01", periods=5, freq=freq),
474
+ ],
475
+ )
476
+ @pytest.mark.parametrize(
477
+ "freq",
478
+ [
479
+ "WEEKDAY",
480
+ "EOM",
481
+ "W@MON",
482
+ "W@TUE",
483
+ "W@WED",
484
+ "W@THU",
485
+ "W@FRI",
486
+ "W@SAT",
487
+ "W@SUN",
488
+ "QE@JAN",
489
+ "QE@FEB",
490
+ "QE@MAR",
491
+ "YE@JAN",
492
+ "YE@FEB",
493
+ "YE@MAR",
494
+ "YE@APR",
495
+ "YE@MAY",
496
+ "YE@JUN",
497
+ "YE@JUL",
498
+ "YE@AUG",
499
+ "YE@SEP",
500
+ "YE@OCT",
501
+ "YE@NOV",
502
+ "YE@DEC",
503
+ "YE@JAN",
504
+ "WOM@1MON",
505
+ "WOM@2MON",
506
+ "WOM@3MON",
507
+ "WOM@4MON",
508
+ "WOM@1TUE",
509
+ "WOM@2TUE",
510
+ "WOM@3TUE",
511
+ "WOM@4TUE",
512
+ "WOM@1WED",
513
+ "WOM@2WED",
514
+ "WOM@3WED",
515
+ "WOM@4WED",
516
+ "WOM@1THU",
517
+ "WOM@2THU",
518
+ "WOM@3THU",
519
+ "WOM@4THU",
520
+ "WOM@1FRI",
521
+ "WOM@2FRI",
522
+ "WOM@3FRI",
523
+ "WOM@4FRI",
524
+ ],
525
+ )
526
+ def test_legacy_offset_warnings(offset_func, freq):
527
+ with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):
528
+ offset_func(freq)
529
+
530
+
531
+ def test_ms_vs_capital_ms():
532
+ left = _get_offset("ms")
533
+ right = _get_offset("MS")
534
+
535
+ assert left == offsets.Milli()
536
+ assert right == offsets.MonthBegin()
537
+
538
+
539
+ def test_infer_freq_non_nano():
540
+ arr = np.arange(10).astype(np.int64).view("M8[s]")
541
+ dta = DatetimeArray._simple_new(arr, dtype=arr.dtype)
542
+ res = frequencies.infer_freq(dta)
543
+ assert res == "s"
544
+
545
+ arr2 = arr.view("m8[ms]")
546
+ tda = TimedeltaArray._simple_new(arr2, dtype=arr2.dtype)
547
+ res2 = frequencies.infer_freq(tda)
548
+ assert res2 == "ms"
549
+
550
+
551
+ def test_infer_freq_non_nano_tzaware(tz_aware_fixture):
552
+ tz = tz_aware_fixture
553
+
554
+ dti = date_range("2016-01-01", periods=365, freq="B", tz=tz)
555
+ dta = dti._data.as_unit("s")
556
+
557
+ res = frequencies.infer_freq(dta)
558
+ assert res == "B"
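The file above exercises frequency inference from many angles; the core contract is that infer_freq inspects the spacing of at least three datetimes and returns a frequency string, or None when no single frequency fits. A minimal sketch using the same cases as the tests (not part of the diff):

# Editor's sketch of the infer_freq contract.
from pandas import DatetimeIndex, date_range
from pandas.tseries import frequencies

assert frequencies.infer_freq(date_range("2013-01-01", "2013-01-10")) == "D"
assert frequencies.infer_freq(DatetimeIndex(["12/31/1998", "1/3/1999", "1/4/1999"])) is None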
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (193 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_business_day.cpython-310.pyc ADDED
Binary file (6.67 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_business_hour.cpython-310.pyc ADDED
Binary file (27.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_business_year.cpython-310.pyc ADDED
Binary file (4.63 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_custom_business_hour.cpython-310.pyc ADDED
Binary file (9.42 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_dst.cpython-310.pyc ADDED
Binary file (6.33 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_index.cpython-310.pyc ADDED
Binary file (1.15 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/tseries/offsets/__pycache__/test_month.cpython-310.pyc ADDED
Binary file (11.4 kB). View file