applied-ai-018 committed on
Commit 4545740 · verified · 1 Parent(s): fe9b727

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__init__.py +0 -0
  2. env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_constructors.py +179 -0
  3. env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_transpose.py +56 -0
  4. env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_unique.py +124 -0
  5. env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_value_counts.py +356 -0
  6. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__init__.py +0 -0
  7. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/__init__.cpython-310.pyc +0 -0
  8. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_duplicate_labels.cpython-310.pyc +0 -0
  9. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_finalize.cpython-310.pyc +0 -0
  10. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_frame.cpython-310.pyc +0 -0
  11. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_generic.cpython-310.pyc +0 -0
  12. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_label_or_level_utils.cpython-310.pyc +0 -0
  13. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_series.cpython-310.pyc +0 -0
  14. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_to_xarray.cpython-310.pyc +0 -0
  15. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_duplicate_labels.py +413 -0
  16. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_finalize.py +767 -0
  17. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_frame.py +209 -0
  18. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_generic.py +504 -0
  19. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_label_or_level_utils.py +336 -0
  20. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_series.py +159 -0
  21. env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_to_xarray.py +130 -0
  22. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__init__.py +0 -0
  23. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc +0 -0
  24. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/common.cpython-310.pyc +0 -0
  25. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc +0 -0
  26. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc +0 -0
  27. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc +0 -0
  28. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc +0 -0
  29. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc +0 -0
  30. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc +0 -0
  31. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/common.py +563 -0
  32. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/conftest.py +56 -0
  33. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__init__.py +0 -0
  34. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc +0 -0
  35. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc +0 -0
  36. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc +0 -0
  37. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc +0 -0
  38. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_legend.cpython-310.pyc +0 -0
  39. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_subplots.cpython-310.pyc +0 -0
  40. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_hist_box_by.cpython-310.pyc +0 -0
  41. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame.py +0 -0
  42. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_color.py +670 -0
  43. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py +72 -0
  44. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_legend.py +272 -0
  45. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_subplots.py +752 -0
  46. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_hist_box_by.py +342 -0
  47. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py +98 -0
  48. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_boxplot_method.py +761 -0
  49. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py +60 -0
  50. env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_converter.py +410 -0
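The modules below are standard pytest files vendored under env-llmeval. As a minimal sketch (not part of this commit), one of them could be run through pytest's public pytest.main API, assuming pytest and the vendored pandas are importable from this environment; the path is taken from the listing above.

import pytest

# Minimal sketch: run one of the uploaded pandas test modules.
# The relative path comes from the file listing above; adjust it to
# wherever the env-llmeval tree is checked out.
test_file = (
    "env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_constructors.py"
)

# Equivalent to `python -m pytest -q <path>`; returns a pytest exit code.
exit_code = pytest.main(["-q", test_file])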
env-llmeval/lib/python3.10/site-packages/pandas/tests/base/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_constructors.py ADDED
@@ -0,0 +1,179 @@
+from datetime import datetime
+import sys
+
+import numpy as np
+import pytest
+
+from pandas.compat import PYPY
+
+import pandas as pd
+from pandas import (
+    DataFrame,
+    Index,
+    Series,
+)
+import pandas._testing as tm
+from pandas.core.accessor import PandasDelegate
+from pandas.core.base import (
+    NoNewAttributesMixin,
+    PandasObject,
+)
+
+
+def series_via_frame_from_dict(x, **kwargs):
+    return DataFrame({"a": x}, **kwargs)["a"]
+
+
+def series_via_frame_from_scalar(x, **kwargs):
+    return DataFrame(x, **kwargs)[0]
+
+
+@pytest.fixture(
+    params=[
+        Series,
+        series_via_frame_from_dict,
+        series_via_frame_from_scalar,
+        Index,
+    ],
+    ids=["Series", "DataFrame-dict", "DataFrame-array", "Index"],
+)
+def constructor(request):
+    return request.param
+
+
+class TestPandasDelegate:
+    class Delegator:
+        _properties = ["prop"]
+        _methods = ["test_method"]
+
+        def _set_prop(self, value):
+            self.prop = value
+
+        def _get_prop(self):
+            return self.prop
+
+        prop = property(_get_prop, _set_prop, doc="foo property")
+
+        def test_method(self, *args, **kwargs):
+            """a test method"""
+
+    class Delegate(PandasDelegate, PandasObject):
+        def __init__(self, obj) -> None:
+            self.obj = obj
+
+    def test_invalid_delegation(self):
+        # these show that in order for the delegation to work
+        # the _delegate_* methods need to be overridden to not raise
+        # a TypeError
+
+        self.Delegate._add_delegate_accessors(
+            delegate=self.Delegator,
+            accessors=self.Delegator._properties,
+            typ="property",
+        )
+        self.Delegate._add_delegate_accessors(
+            delegate=self.Delegator, accessors=self.Delegator._methods, typ="method"
+        )
+
+        delegate = self.Delegate(self.Delegator())
+
+        msg = "You cannot access the property prop"
+        with pytest.raises(TypeError, match=msg):
+            delegate.prop
+
+        msg = "The property prop cannot be set"
+        with pytest.raises(TypeError, match=msg):
+            delegate.prop = 5
+
+        msg = "You cannot access the property prop"
+        with pytest.raises(TypeError, match=msg):
+            delegate.prop
+
+    @pytest.mark.skipif(PYPY, reason="not relevant for PyPy")
+    def test_memory_usage(self):
+        # Delegate does not implement memory_usage.
+        # Check that we fall back to in-built `__sizeof__`
+        # GH 12924
+        delegate = self.Delegate(self.Delegator())
+        sys.getsizeof(delegate)
+
+
+class TestNoNewAttributesMixin:
+    def test_mixin(self):
+        class T(NoNewAttributesMixin):
+            pass
+
+        t = T()
+        assert not hasattr(t, "__frozen")
+
+        t.a = "test"
+        assert t.a == "test"
+
+        t._freeze()
+        assert "__frozen" in dir(t)
+        assert getattr(t, "__frozen")
+        msg = "You cannot add any new attribute"
+        with pytest.raises(AttributeError, match=msg):
+            t.b = "test"
+
+        assert not hasattr(t, "b")
+
+
+class TestConstruction:
+    # test certain constructor behaviours on dtype inference across Series,
+    # Index and DataFrame
+
+    @pytest.mark.parametrize(
+        "a",
+        [
+            np.array(["2263-01-01"], dtype="datetime64[D]"),
+            np.array([datetime(2263, 1, 1)], dtype=object),
+            np.array([np.datetime64("2263-01-01", "D")], dtype=object),
+            np.array(["2263-01-01"], dtype=object),
+        ],
+        ids=[
+            "datetime64[D]",
+            "object-datetime.datetime",
+            "object-numpy-scalar",
+            "object-string",
+        ],
+    )
+    def test_constructor_datetime_outofbound(
+        self, a, constructor, request, using_infer_string
+    ):
+        # GH-26853 (+ bug GH-26206 out of bound non-ns unit)
+
+        # No dtype specified (dtype inference)
+        # datetime64[non-ns] raise error, other cases result in object dtype
+        # and preserve original data
+        if a.dtype.kind == "M":
+            # Can't fit in nanosecond bounds -> get the nearest supported unit
+            result = constructor(a)
+            assert result.dtype == "M8[s]"
+        else:
+            result = constructor(a)
+            if using_infer_string and "object-string" in request.node.callspec.id:
+                assert result.dtype == "string"
+            else:
+                assert result.dtype == "object"
+            tm.assert_numpy_array_equal(result.to_numpy(), a)
+
+        # Explicit dtype specified
+        # Forced conversion fails for all -> all cases raise error
+        msg = "Out of bounds|Out of bounds .* present at position 0"
+        with pytest.raises(pd.errors.OutOfBoundsDatetime, match=msg):
+            constructor(a, dtype="datetime64[ns]")
+
+    def test_constructor_datetime_nonns(self, constructor):
+        arr = np.array(["2020-01-01T00:00:00.000000"], dtype="datetime64[us]")
+        dta = pd.core.arrays.DatetimeArray._simple_new(arr, dtype=arr.dtype)
+        expected = constructor(dta)
+        assert expected.dtype == arr.dtype
+
+        result = constructor(arr)
+        tm.assert_equal(result, expected)
+
+        # https://github.com/pandas-dev/pandas/issues/34843
+        arr.flags.writeable = False
+        result = constructor(arr)
+        tm.assert_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_transpose.py ADDED
@@ -0,0 +1,56 @@
+import numpy as np
+import pytest
+
+from pandas import (
+    CategoricalDtype,
+    DataFrame,
+)
+import pandas._testing as tm
+
+
+def test_transpose(index_or_series_obj):
+    obj = index_or_series_obj
+    tm.assert_equal(obj.transpose(), obj)
+
+
+def test_transpose_non_default_axes(index_or_series_obj):
+    msg = "the 'axes' parameter is not supported"
+    obj = index_or_series_obj
+    with pytest.raises(ValueError, match=msg):
+        obj.transpose(1)
+    with pytest.raises(ValueError, match=msg):
+        obj.transpose(axes=1)
+
+
+def test_numpy_transpose(index_or_series_obj):
+    msg = "the 'axes' parameter is not supported"
+    obj = index_or_series_obj
+    tm.assert_equal(np.transpose(obj), obj)
+
+    with pytest.raises(ValueError, match=msg):
+        np.transpose(obj, axes=1)
+
+
+@pytest.mark.parametrize(
+    "data, transposed_data, index, columns, dtype",
+    [
+        ([[1], [2]], [[1, 2]], ["a", "a"], ["b"], int),
+        ([[1], [2]], [[1, 2]], ["a", "a"], ["b"], CategoricalDtype([1, 2])),
+        ([[1, 2]], [[1], [2]], ["b"], ["a", "a"], int),
+        ([[1, 2]], [[1], [2]], ["b"], ["a", "a"], CategoricalDtype([1, 2])),
+        ([[1, 2], [3, 4]], [[1, 3], [2, 4]], ["a", "a"], ["b", "b"], int),
+        (
+            [[1, 2], [3, 4]],
+            [[1, 3], [2, 4]],
+            ["a", "a"],
+            ["b", "b"],
+            CategoricalDtype([1, 2, 3, 4]),
+        ),
+    ],
+)
+def test_duplicate_labels(data, transposed_data, index, columns, dtype):
+    # GH 42380
+    df = DataFrame(data, index=index, columns=columns, dtype=dtype)
+    result = df.T
+    expected = DataFrame(transposed_data, index=columns, columns=index, dtype=dtype)
+    tm.assert_frame_equal(result, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_unique.py ADDED
@@ -0,0 +1,124 @@
+import numpy as np
+import pytest
+
+from pandas._config import using_pyarrow_string_dtype
+
+import pandas as pd
+import pandas._testing as tm
+from pandas.tests.base.common import allow_na_ops
+
+
+@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+def test_unique(index_or_series_obj):
+    obj = index_or_series_obj
+    obj = np.repeat(obj, range(1, len(obj) + 1))
+    result = obj.unique()
+
+    # dict.fromkeys preserves the order
+    unique_values = list(dict.fromkeys(obj.values))
+    if isinstance(obj, pd.MultiIndex):
+        expected = pd.MultiIndex.from_tuples(unique_values)
+        expected.names = obj.names
+        tm.assert_index_equal(result, expected, exact=True)
+    elif isinstance(obj, pd.Index):
+        expected = pd.Index(unique_values, dtype=obj.dtype)
+        if isinstance(obj.dtype, pd.DatetimeTZDtype):
+            expected = expected.normalize()
+        tm.assert_index_equal(result, expected, exact=True)
+    else:
+        expected = np.array(unique_values)
+        tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+@pytest.mark.parametrize("null_obj", [np.nan, None])
+def test_unique_null(null_obj, index_or_series_obj):
+    obj = index_or_series_obj
+
+    if not allow_na_ops(obj):
+        pytest.skip("type doesn't allow for NA operations")
+    elif len(obj) < 1:
+        pytest.skip("Test doesn't make sense on empty data")
+    elif isinstance(obj, pd.MultiIndex):
+        pytest.skip(f"MultiIndex can't hold '{null_obj}'")
+
+    values = obj._values
+    values[0:2] = null_obj
+
+    klass = type(obj)
+    repeated_values = np.repeat(values, range(1, len(values) + 1))
+    obj = klass(repeated_values, dtype=obj.dtype)
+    result = obj.unique()
+
+    unique_values_raw = dict.fromkeys(obj.values)
+    # because np.nan == np.nan is False, but None == None is True
+    # np.nan would be duplicated, whereas None wouldn't
+    unique_values_not_null = [val for val in unique_values_raw if not pd.isnull(val)]
+    unique_values = [null_obj] + unique_values_not_null
+
+    if isinstance(obj, pd.Index):
+        expected = pd.Index(unique_values, dtype=obj.dtype)
+        if isinstance(obj.dtype, pd.DatetimeTZDtype):
+            result = result.normalize()
+            expected = expected.normalize()
+        tm.assert_index_equal(result, expected, exact=True)
+    else:
+        expected = np.array(unique_values, dtype=obj.dtype)
+        tm.assert_numpy_array_equal(result, expected)
+
+
+def test_nunique(index_or_series_obj):
+    obj = index_or_series_obj
+    obj = np.repeat(obj, range(1, len(obj) + 1))
+    expected = len(obj.unique())
+    assert obj.nunique(dropna=False) == expected
+
+
+@pytest.mark.parametrize("null_obj", [np.nan, None])
+def test_nunique_null(null_obj, index_or_series_obj):
+    obj = index_or_series_obj
+
+    if not allow_na_ops(obj):
+        pytest.skip("type doesn't allow for NA operations")
+    elif isinstance(obj, pd.MultiIndex):
+        pytest.skip(f"MultiIndex can't hold '{null_obj}'")
+
+    values = obj._values
+    values[0:2] = null_obj
+
+    klass = type(obj)
+    repeated_values = np.repeat(values, range(1, len(values) + 1))
+    obj = klass(repeated_values, dtype=obj.dtype)
+
+    if isinstance(obj, pd.CategoricalIndex):
+        assert obj.nunique() == len(obj.categories)
+        assert obj.nunique(dropna=False) == len(obj.categories) + 1
+    else:
+        num_unique_values = len(obj.unique())
+        assert obj.nunique() == max(0, num_unique_values - 1)
+        assert obj.nunique(dropna=False) == max(0, num_unique_values)
+
+
+@pytest.mark.single_cpu
+@pytest.mark.xfail(using_pyarrow_string_dtype(), reason="decoding fails")
+def test_unique_bad_unicode(index_or_series):
+    # regression test for #34550
+    uval = "\ud83d"  # smiley emoji
+
+    obj = index_or_series([uval] * 2)
+    result = obj.unique()
+
+    if isinstance(obj, pd.Index):
+        expected = pd.Index(["\ud83d"], dtype=object)
+        tm.assert_index_equal(result, expected, exact=True)
+    else:
+        expected = np.array(["\ud83d"], dtype=object)
+        tm.assert_numpy_array_equal(result, expected)
+
+
+@pytest.mark.parametrize("dropna", [True, False])
+def test_nunique_dropna(dropna):
+    # GH37566
+    ser = pd.Series(["yes", "yes", pd.NA, np.nan, None, pd.NaT])
+    res = ser.nunique(dropna)
+    assert res == 1 if dropna else 5
env-llmeval/lib/python3.10/site-packages/pandas/tests/base/test_value_counts.py ADDED
@@ -0,0 +1,356 @@
+import collections
+from datetime import timedelta
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import (
+    DatetimeIndex,
+    Index,
+    Interval,
+    IntervalIndex,
+    MultiIndex,
+    Series,
+    Timedelta,
+    TimedeltaIndex,
+    array,
+)
+import pandas._testing as tm
+from pandas.tests.base.common import allow_na_ops
+
+
+@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+def test_value_counts(index_or_series_obj):
+    obj = index_or_series_obj
+    obj = np.repeat(obj, range(1, len(obj) + 1))
+    result = obj.value_counts()
+
+    counter = collections.Counter(obj)
+    expected = Series(dict(counter.most_common()), dtype=np.int64, name="count")
+
+    if obj.dtype != np.float16:
+        expected.index = expected.index.astype(obj.dtype)
+    else:
+        with pytest.raises(NotImplementedError, match="float16 indexes are not "):
+            expected.index.astype(obj.dtype)
+        return
+    if isinstance(expected.index, MultiIndex):
+        expected.index.names = obj.names
+    else:
+        expected.index.name = obj.name
+
+    if not isinstance(result.dtype, np.dtype):
+        if getattr(obj.dtype, "storage", "") == "pyarrow":
+            expected = expected.astype("int64[pyarrow]")
+        else:
+            # i.e IntegerDtype
+            expected = expected.astype("Int64")
+
+    # TODO(GH#32514): Order of entries with the same count is inconsistent
+    # on CI (gh-32449)
+    if obj.duplicated().any():
+        result = result.sort_index()
+        expected = expected.sort_index()
+    tm.assert_series_equal(result, expected)
+
+
+@pytest.mark.parametrize("null_obj", [np.nan, None])
+@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
+def test_value_counts_null(null_obj, index_or_series_obj):
+    orig = index_or_series_obj
+    obj = orig.copy()
+
+    if not allow_na_ops(obj):
+        pytest.skip("type doesn't allow for NA operations")
+    elif len(obj) < 1:
+        pytest.skip("Test doesn't make sense on empty data")
+    elif isinstance(orig, MultiIndex):
+        pytest.skip(f"MultiIndex can't hold '{null_obj}'")
+
+    values = obj._values
+    values[0:2] = null_obj
+
+    klass = type(obj)
+    repeated_values = np.repeat(values, range(1, len(values) + 1))
+    obj = klass(repeated_values, dtype=obj.dtype)
+
+    # because np.nan == np.nan is False, but None == None is True
+    # np.nan would be duplicated, whereas None wouldn't
+    counter = collections.Counter(obj.dropna())
+    expected = Series(dict(counter.most_common()), dtype=np.int64, name="count")
+
+    if obj.dtype != np.float16:
+        expected.index = expected.index.astype(obj.dtype)
+    else:
+        with pytest.raises(NotImplementedError, match="float16 indexes are not "):
+            expected.index.astype(obj.dtype)
+        return
+    expected.index.name = obj.name
+
+    result = obj.value_counts()
+    if obj.duplicated().any():
+        # TODO(GH#32514):
+        # Order of entries with the same count is inconsistent on CI (gh-32449)
+        expected = expected.sort_index()
+        result = result.sort_index()
+
+    if not isinstance(result.dtype, np.dtype):
+        if getattr(obj.dtype, "storage", "") == "pyarrow":
+            expected = expected.astype("int64[pyarrow]")
+        else:
+            # i.e IntegerDtype
+            expected = expected.astype("Int64")
+    tm.assert_series_equal(result, expected)
+
+    expected[null_obj] = 3
+
+    result = obj.value_counts(dropna=False)
+    if obj.duplicated().any():
+        # TODO(GH#32514):
+        # Order of entries with the same count is inconsistent on CI (gh-32449)
+        expected = expected.sort_index()
+        result = result.sort_index()
+    tm.assert_series_equal(result, expected)
+
+
+def test_value_counts_inferred(index_or_series, using_infer_string):
+    klass = index_or_series
+    s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
+    s = klass(s_values)
+    expected = Series([4, 3, 2, 1], index=["b", "a", "d", "c"], name="count")
+    tm.assert_series_equal(s.value_counts(), expected)
+
+    if isinstance(s, Index):
+        exp = Index(np.unique(np.array(s_values, dtype=np.object_)))
+        tm.assert_index_equal(s.unique(), exp)
+    else:
+        exp = np.unique(np.array(s_values, dtype=np.object_))
+        if using_infer_string:
+            exp = array(exp)
+        tm.assert_equal(s.unique(), exp)
+
+    assert s.nunique() == 4
+    # don't sort, have to sort after the fact as not sorting is
+    # platform-dep
+    hist = s.value_counts(sort=False).sort_values()
+    expected = Series([3, 1, 4, 2], index=list("acbd"), name="count").sort_values()
+    tm.assert_series_equal(hist, expected)
+
+    # sort ascending
+    hist = s.value_counts(ascending=True)
+    expected = Series([1, 2, 3, 4], index=list("cdab"), name="count")
+    tm.assert_series_equal(hist, expected)
+
+    # relative histogram.
+    hist = s.value_counts(normalize=True)
+    expected = Series(
+        [0.4, 0.3, 0.2, 0.1], index=["b", "a", "d", "c"], name="proportion"
+    )
+    tm.assert_series_equal(hist, expected)
+
+
+def test_value_counts_bins(index_or_series, using_infer_string):
+    klass = index_or_series
+    s_values = ["a", "b", "b", "b", "b", "c", "d", "d", "a", "a"]
+    s = klass(s_values)
+
+    # bins
+    msg = "bins argument only works with numeric data"
+    with pytest.raises(TypeError, match=msg):
+        s.value_counts(bins=1)
+
+    s1 = Series([1, 1, 2, 3])
+    res1 = s1.value_counts(bins=1)
+    exp1 = Series({Interval(0.997, 3.0): 4}, name="count")
+    tm.assert_series_equal(res1, exp1)
+    res1n = s1.value_counts(bins=1, normalize=True)
+    exp1n = Series({Interval(0.997, 3.0): 1.0}, name="proportion")
+    tm.assert_series_equal(res1n, exp1n)
+
+    if isinstance(s1, Index):
+        tm.assert_index_equal(s1.unique(), Index([1, 2, 3]))
+    else:
+        exp = np.array([1, 2, 3], dtype=np.int64)
+        tm.assert_numpy_array_equal(s1.unique(), exp)
+
+    assert s1.nunique() == 3
+
+    # these return the same
+    res4 = s1.value_counts(bins=4, dropna=True)
+    intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
+    exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]), name="count")
+    tm.assert_series_equal(res4, exp4)
+
+    res4 = s1.value_counts(bins=4, dropna=False)
+    intervals = IntervalIndex.from_breaks([0.997, 1.5, 2.0, 2.5, 3.0])
+    exp4 = Series([2, 1, 1, 0], index=intervals.take([0, 1, 3, 2]), name="count")
+    tm.assert_series_equal(res4, exp4)
+
+    res4n = s1.value_counts(bins=4, normalize=True)
+    exp4n = Series(
+        [0.5, 0.25, 0.25, 0], index=intervals.take([0, 1, 3, 2]), name="proportion"
+    )
+    tm.assert_series_equal(res4n, exp4n)
+
+    # handle NA's properly
+    s_values = ["a", "b", "b", "b", np.nan, np.nan, "d", "d", "a", "a", "b"]
+    s = klass(s_values)
+    expected = Series([4, 3, 2], index=["b", "a", "d"], name="count")
+    tm.assert_series_equal(s.value_counts(), expected)
+
+    if isinstance(s, Index):
+        exp = Index(["a", "b", np.nan, "d"])
+        tm.assert_index_equal(s.unique(), exp)
+    else:
+        exp = np.array(["a", "b", np.nan, "d"], dtype=object)
+        if using_infer_string:
+            exp = array(exp)
+        tm.assert_equal(s.unique(), exp)
+    assert s.nunique() == 3
+
+    s = klass({}) if klass is dict else klass({}, dtype=object)
+    expected = Series([], dtype=np.int64, name="count")
+    tm.assert_series_equal(s.value_counts(), expected, check_index_type=False)
+    # returned dtype differs depending on original
+    if isinstance(s, Index):
+        tm.assert_index_equal(s.unique(), Index([]), exact=False)
+    else:
+        tm.assert_numpy_array_equal(s.unique(), np.array([]), check_dtype=False)
+
+    assert s.nunique() == 0
+
+
+def test_value_counts_datetime64(index_or_series, unit):
+    klass = index_or_series
+
+    # GH 3002, datetime64[ns]
+    # don't test names though
+    df = pd.DataFrame(
+        {
+            "person_id": ["xxyyzz", "xxyyzz", "xxyyzz", "xxyyww", "foofoo", "foofoo"],
+            "dt": pd.to_datetime(
+                [
+                    "2010-01-01",
+                    "2010-01-01",
+                    "2010-01-01",
+                    "2009-01-01",
+                    "2008-09-09",
+                    "2008-09-09",
+                ]
+            ).as_unit(unit),
+            "food": ["PIE", "GUM", "EGG", "EGG", "PIE", "GUM"],
+        }
+    )
+
+    s = klass(df["dt"].copy())
+    s.name = None
+    idx = pd.to_datetime(
+        ["2010-01-01 00:00:00", "2008-09-09 00:00:00", "2009-01-01 00:00:00"]
+    ).as_unit(unit)
+    expected_s = Series([3, 2, 1], index=idx, name="count")
+    tm.assert_series_equal(s.value_counts(), expected_s)
+
+    expected = array(
+        np.array(
+            ["2010-01-01 00:00:00", "2009-01-01 00:00:00", "2008-09-09 00:00:00"],
+            dtype=f"datetime64[{unit}]",
+        )
+    )
+    result = s.unique()
+    if isinstance(s, Index):
+        tm.assert_index_equal(result, DatetimeIndex(expected))
+    else:
+        tm.assert_extension_array_equal(result, expected)
+
+    assert s.nunique() == 3
+
+    # with NaT
+    s = df["dt"].copy()
+    s = klass(list(s.values) + [pd.NaT] * 4)
+    if klass is Series:
+        s = s.dt.as_unit(unit)
+    else:
+        s = s.as_unit(unit)
+
+    result = s.value_counts()
+    assert result.index.dtype == f"datetime64[{unit}]"
+    tm.assert_series_equal(result, expected_s)
+
+    result = s.value_counts(dropna=False)
+    expected_s = pd.concat(
+        [
+            Series([4], index=DatetimeIndex([pd.NaT]).as_unit(unit), name="count"),
+            expected_s,
+        ]
+    )
+    tm.assert_series_equal(result, expected_s)
+
+    assert s.dtype == f"datetime64[{unit}]"
+    unique = s.unique()
+    assert unique.dtype == f"datetime64[{unit}]"
+
+    # numpy_array_equal cannot compare pd.NaT
+    if isinstance(s, Index):
+        exp_idx = DatetimeIndex(expected.tolist() + [pd.NaT]).as_unit(unit)
+        tm.assert_index_equal(unique, exp_idx)
+    else:
+        tm.assert_extension_array_equal(unique[:3], expected)
+        assert pd.isna(unique[3])
+
+    assert s.nunique() == 3
+    assert s.nunique(dropna=False) == 4
+
+
+def test_value_counts_timedelta64(index_or_series, unit):
+    # timedelta64[ns]
+    klass = index_or_series
+
+    day = Timedelta(timedelta(1)).as_unit(unit)
+    tdi = TimedeltaIndex([day], name="dt").as_unit(unit)
+
+    tdvals = np.zeros(6, dtype=f"m8[{unit}]") + day
+    td = klass(tdvals, name="dt")
+
+    result = td.value_counts()
+    expected_s = Series([6], index=tdi, name="count")
+    tm.assert_series_equal(result, expected_s)
+
+    expected = tdi
+    result = td.unique()
+    if isinstance(td, Index):
+        tm.assert_index_equal(result, expected)
+    else:
+        tm.assert_extension_array_equal(result, expected._values)
+
+    td2 = day + np.zeros(6, dtype=f"m8[{unit}]")
+    td2 = klass(td2, name="dt")
+    result2 = td2.value_counts()
+    tm.assert_series_equal(result2, expected_s)
+
+
+@pytest.mark.parametrize("dropna", [True, False])
+def test_value_counts_with_nan(dropna, index_or_series):
+    # GH31944
+    klass = index_or_series
+    values = [True, pd.NA, np.nan]
+    obj = klass(values)
+    res = obj.value_counts(dropna=dropna)
+    if dropna is True:
+        expected = Series([1], index=Index([True], dtype=obj.dtype), name="count")
+    else:
+        expected = Series([1, 1, 1], index=[True, pd.NA, np.nan], name="count")
+    tm.assert_series_equal(res, expected)
+
+
+def test_value_counts_object_inference_deprecated():
+    # GH#56161
+    dti = pd.date_range("2016-01-01", periods=3, tz="UTC")
+
+    idx = dti.astype(object)
+    msg = "The behavior of value_counts with object-dtype is deprecated"
+    with tm.assert_produces_warning(FutureWarning, match=msg):
+        res = idx.value_counts()
+
+    exp = dti.value_counts()
+    tm.assert_series_equal(res, exp)
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (185 Bytes).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_duplicate_labels.cpython-310.pyc ADDED
Binary file (11.9 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_finalize.cpython-310.pyc ADDED
Binary file (18.1 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_frame.cpython-310.pyc ADDED
Binary file (7.46 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_generic.cpython-310.pyc ADDED
Binary file (15.8 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_label_or_level_utils.cpython-310.pyc ADDED
Binary file (7.42 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_series.cpython-310.pyc ADDED
Binary file (5.95 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/__pycache__/test_to_xarray.cpython-310.pyc ADDED
Binary file (4.25 kB).
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_duplicate_labels.py ADDED
@@ -0,0 +1,413 @@
+"""Tests dealing with the NDFrame.allows_duplicates."""
+import operator
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+not_implemented = pytest.mark.xfail(reason="Not implemented.")
+
+# ----------------------------------------------------------------------------
+# Preservation
+
+
+class TestPreserves:
+    @pytest.mark.parametrize(
+        "cls, data",
+        [
+            (pd.Series, np.array([])),
+            (pd.Series, [1, 2]),
+            (pd.DataFrame, {}),
+            (pd.DataFrame, {"A": [1, 2]}),
+        ],
+    )
+    def test_construction_ok(self, cls, data):
+        result = cls(data)
+        assert result.flags.allows_duplicate_labels is True
+
+        result = cls(data).set_flags(allows_duplicate_labels=False)
+        assert result.flags.allows_duplicate_labels is False
+
+    @pytest.mark.parametrize(
+        "func",
+        [
+            operator.itemgetter(["a"]),
+            operator.methodcaller("add", 1),
+            operator.methodcaller("rename", str.upper),
+            operator.methodcaller("rename", "name"),
+            operator.methodcaller("abs"),
+            np.abs,
+        ],
+    )
+    def test_preserved_series(self, func):
+        s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False)
+        assert func(s).flags.allows_duplicate_labels is False
+
+    @pytest.mark.parametrize(
+        "other", [pd.Series(0, index=["a", "b", "c"]), pd.Series(0, index=["a", "b"])]
+    )
+    # TODO: frame
+    @not_implemented
+    def test_align(self, other):
+        s = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False)
+        a, b = s.align(other)
+        assert a.flags.allows_duplicate_labels is False
+        assert b.flags.allows_duplicate_labels is False
+
+    def test_preserved_frame(self):
+        df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags(
+            allows_duplicate_labels=False
+        )
+        assert df.loc[["a"]].flags.allows_duplicate_labels is False
+        assert df.loc[:, ["A", "B"]].flags.allows_duplicate_labels is False
+
+    def test_to_frame(self):
+        ser = pd.Series(dtype=float).set_flags(allows_duplicate_labels=False)
+        assert ser.to_frame().flags.allows_duplicate_labels is False
+
+    @pytest.mark.parametrize("func", ["add", "sub"])
+    @pytest.mark.parametrize("frame", [False, True])
+    @pytest.mark.parametrize("other", [1, pd.Series([1, 2], name="A")])
+    def test_binops(self, func, other, frame):
+        df = pd.Series([1, 2], name="A", index=["a", "b"]).set_flags(
+            allows_duplicate_labels=False
+        )
+        if frame:
+            df = df.to_frame()
+        if isinstance(other, pd.Series) and frame:
+            other = other.to_frame()
+        func = operator.methodcaller(func, other)
+        assert df.flags.allows_duplicate_labels is False
+        assert func(df).flags.allows_duplicate_labels is False
+
+    def test_preserve_getitem(self):
+        df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False)
+        assert df[["A"]].flags.allows_duplicate_labels is False
+        assert df["A"].flags.allows_duplicate_labels is False
+        assert df.loc[0].flags.allows_duplicate_labels is False
+        assert df.loc[[0]].flags.allows_duplicate_labels is False
+        assert df.loc[0, ["A"]].flags.allows_duplicate_labels is False
+
+    def test_ndframe_getitem_caching_issue(
+        self, request, using_copy_on_write, warn_copy_on_write
+    ):
+        if not (using_copy_on_write or warn_copy_on_write):
+            request.applymarker(pytest.mark.xfail(reason="Unclear behavior."))
+        # NDFrame.__getitem__ will cache the first df['A']. May need to
+        # invalidate that cache? Update the cached entries?
+        df = pd.DataFrame({"A": [0]}).set_flags(allows_duplicate_labels=False)
+        assert df["A"].flags.allows_duplicate_labels is False
+        df.flags.allows_duplicate_labels = True
+        assert df["A"].flags.allows_duplicate_labels is True
+
+    @pytest.mark.parametrize(
+        "objs, kwargs",
+        [
+            # Series
+            (
+                [
+                    pd.Series(1, index=["a", "b"]),
+                    pd.Series(2, index=["c", "d"]),
+                ],
+                {},
+            ),
+            (
+                [
+                    pd.Series(1, index=["a", "b"]),
+                    pd.Series(2, index=["a", "b"]),
+                ],
+                {"ignore_index": True},
+            ),
+            (
+                [
+                    pd.Series(1, index=["a", "b"]),
+                    pd.Series(2, index=["a", "b"]),
+                ],
+                {"axis": 1},
+            ),
+            # Frame
+            (
+                [
+                    pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),
+                    pd.DataFrame({"A": [1, 2]}, index=["c", "d"]),
+                ],
+                {},
+            ),
+            (
+                [
+                    pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),
+                    pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),
+                ],
+                {"ignore_index": True},
+            ),
+            (
+                [
+                    pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),
+                    pd.DataFrame({"B": [1, 2]}, index=["a", "b"]),
+                ],
+                {"axis": 1},
+            ),
+            # Series / Frame
+            (
+                [
+                    pd.DataFrame({"A": [1, 2]}, index=["a", "b"]),
+                    pd.Series([1, 2], index=["a", "b"], name="B"),
+                ],
+                {"axis": 1},
+            ),
+        ],
+    )
+    def test_concat(self, objs, kwargs):
+        objs = [x.set_flags(allows_duplicate_labels=False) for x in objs]
+        result = pd.concat(objs, **kwargs)
+        assert result.flags.allows_duplicate_labels is False
+
+    @pytest.mark.parametrize(
+        "left, right, expected",
+        [
+            # false false false
+            pytest.param(
+                pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags(
+                    allows_duplicate_labels=False
+                ),
+                pd.DataFrame({"B": [0, 1]}, index=["a", "d"]).set_flags(
+                    allows_duplicate_labels=False
+                ),
+                False,
+                marks=not_implemented,
+            ),
+            # false true false
+            pytest.param(
+                pd.DataFrame({"A": [0, 1]}, index=["a", "b"]).set_flags(
+                    allows_duplicate_labels=False
+                ),
+                pd.DataFrame({"B": [0, 1]}, index=["a", "d"]),
+                False,
+                marks=not_implemented,
+            ),
+            # true true true
+            (
+                pd.DataFrame({"A": [0, 1]}, index=["a", "b"]),
+                pd.DataFrame({"B": [0, 1]}, index=["a", "d"]),
+                True,
+            ),
+        ],
+    )
+    def test_merge(self, left, right, expected):
+        result = pd.merge(left, right, left_index=True, right_index=True)
+        assert result.flags.allows_duplicate_labels is expected
+
+    @not_implemented
+    def test_groupby(self):
+        # XXX: This is under tested
+        # TODO:
+        # - apply
+        # - transform
+        # - Should passing a grouper that disallows duplicates propagate?
+        df = pd.DataFrame({"A": [1, 2, 3]}).set_flags(allows_duplicate_labels=False)
+        result = df.groupby([0, 0, 1]).agg("count")
+        assert result.flags.allows_duplicate_labels is False
+
+    @pytest.mark.parametrize("frame", [True, False])
+    @not_implemented
+    def test_window(self, frame):
+        df = pd.Series(
+            1,
+            index=pd.date_range("2000", periods=12),
+            name="A",
+            allows_duplicate_labels=False,
+        )
+        if frame:
+            df = df.to_frame()
+        assert df.rolling(3).mean().flags.allows_duplicate_labels is False
+        assert df.ewm(3).mean().flags.allows_duplicate_labels is False
+        assert df.expanding(3).mean().flags.allows_duplicate_labels is False
+
+
+# ----------------------------------------------------------------------------
+# Raises
+
+
+class TestRaises:
+    @pytest.mark.parametrize(
+        "cls, axes",
+        [
+            (pd.Series, {"index": ["a", "a"], "dtype": float}),
+            (pd.DataFrame, {"index": ["a", "a"]}),
+            (pd.DataFrame, {"index": ["a", "a"], "columns": ["b", "b"]}),
+            (pd.DataFrame, {"columns": ["b", "b"]}),
+        ],
+    )
+    def test_set_flags_with_duplicates(self, cls, axes):
+        result = cls(**axes)
+        assert result.flags.allows_duplicate_labels is True
+
+        msg = "Index has duplicates."
+        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
+            cls(**axes).set_flags(allows_duplicate_labels=False)
+
+    @pytest.mark.parametrize(
+        "data",
+        [
+            pd.Series(index=[0, 0], dtype=float),
+            pd.DataFrame(index=[0, 0]),
+            pd.DataFrame(columns=[0, 0]),
+        ],
+    )
+    def test_setting_allows_duplicate_labels_raises(self, data):
+        msg = "Index has duplicates."
+        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
+            data.flags.allows_duplicate_labels = False
+
+        assert data.flags.allows_duplicate_labels is True
+
+    def test_series_raises(self):
+        a = pd.Series(0, index=["a", "b"])
+        b = pd.Series([0, 1], index=["a", "b"]).set_flags(allows_duplicate_labels=False)
+        msg = "Index has duplicates."
+        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
+            pd.concat([a, b])
+
+    @pytest.mark.parametrize(
+        "getter, target",
+        [
+            (operator.itemgetter(["A", "A"]), None),
+            # loc
+            (operator.itemgetter(["a", "a"]), "loc"),
+            pytest.param(operator.itemgetter(("a", ["A", "A"])), "loc"),
+            (operator.itemgetter((["a", "a"], "A")), "loc"),
+            # iloc
+            (operator.itemgetter([0, 0]), "iloc"),
+            pytest.param(operator.itemgetter((0, [0, 0])), "iloc"),
+            pytest.param(operator.itemgetter(([0, 0], 0)), "iloc"),
+        ],
+    )
+    def test_getitem_raises(self, getter, target):
+        df = pd.DataFrame({"A": [1, 2], "B": [3, 4]}, index=["a", "b"]).set_flags(
+            allows_duplicate_labels=False
+        )
+        if target:
+            # df, df.loc, or df.iloc
+            target = getattr(df, target)
+        else:
+            target = df
+
+        msg = "Index has duplicates."
+        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
+            getter(target)
+
+    @pytest.mark.parametrize(
+        "objs, kwargs",
+        [
+            (
+                [
+                    pd.Series(1, index=[0, 1], name="a"),
+                    pd.Series(2, index=[0, 1], name="a"),
+                ],
+                {"axis": 1},
+            )
+        ],
+    )
+    def test_concat_raises(self, objs, kwargs):
+        objs = [x.set_flags(allows_duplicate_labels=False) for x in objs]
+        msg = "Index has duplicates."
+        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
+            pd.concat(objs, **kwargs)
+
+    @not_implemented
+    def test_merge_raises(self):
+        a = pd.DataFrame({"A": [0, 1, 2]}, index=["a", "b", "c"]).set_flags(
+            allows_duplicate_labels=False
+        )
+        b = pd.DataFrame({"B": [0, 1, 2]}, index=["a", "b", "b"])
+        msg = "Index has duplicates."
+        with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
+            pd.merge(a, b, left_index=True, right_index=True)
+
+
+@pytest.mark.parametrize(
+    "idx",
+    [
+        pd.Index([1, 1]),
+        pd.Index(["a", "a"]),
+        pd.Index([1.1, 1.1]),
+        pd.PeriodIndex([pd.Period("2000", "D")] * 2),
+        pd.DatetimeIndex([pd.Timestamp("2000")] * 2),
+        pd.TimedeltaIndex([pd.Timedelta("1D")] * 2),
+        pd.CategoricalIndex(["a", "a"]),
+        pd.IntervalIndex([pd.Interval(0, 1)] * 2),
+        pd.MultiIndex.from_tuples([("a", 1), ("a", 1)]),
+    ],
+    ids=lambda x: type(x).__name__,
+)
+def test_raises_basic(idx):
+    msg = "Index has duplicates."
+    with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
+        pd.Series(1, index=idx).set_flags(allows_duplicate_labels=False)
+
+    with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
+        pd.DataFrame({"A": [1, 1]}, index=idx).set_flags(allows_duplicate_labels=False)
+
+    with pytest.raises(pd.errors.DuplicateLabelError, match=msg):
+        pd.DataFrame([[1, 2]], columns=idx).set_flags(allows_duplicate_labels=False)
+
+
+def test_format_duplicate_labels_message():
+    idx = pd.Index(["a", "b", "a", "b", "c"])
+    result = idx._format_duplicate_message()
+    expected = pd.DataFrame(
+        {"positions": [[0, 2], [1, 3]]}, index=pd.Index(["a", "b"], name="label")
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_format_duplicate_labels_message_multi():
+    idx = pd.MultiIndex.from_product([["A"], ["a", "b", "a", "b", "c"]])
+    result = idx._format_duplicate_message()
+    expected = pd.DataFrame(
+        {"positions": [[0, 2], [1, 3]]},
+        index=pd.MultiIndex.from_product([["A"], ["a", "b"]]),
+    )
+    tm.assert_frame_equal(result, expected)
+
+
+def test_dataframe_insert_raises():
+    df = pd.DataFrame({"A": [1, 2]}).set_flags(allows_duplicate_labels=False)
+    msg = "Cannot specify"
+    with pytest.raises(ValueError, match=msg):
+        df.insert(0, "A", [3, 4], allow_duplicates=True)
+
+
+@pytest.mark.parametrize(
+    "method, frame_only",
+    [
+        (operator.methodcaller("set_index", "A", inplace=True), True),
+        (operator.methodcaller("reset_index", inplace=True), True),
+        (operator.methodcaller("rename", lambda x: x, inplace=True), False),
+    ],
+)
+def test_inplace_raises(method, frame_only):
+    df = pd.DataFrame({"A": [0, 0], "B": [1, 2]}).set_flags(
+        allows_duplicate_labels=False
+    )
+    s = df["A"]
+    s.flags.allows_duplicate_labels = False
+    msg = "Cannot specify"
+
+    with pytest.raises(ValueError, match=msg):
+        method(df)
+    if not frame_only:
+        with pytest.raises(ValueError, match=msg):
+            method(s)
+
+
+def test_pickle():
+    a = pd.Series([1, 2]).set_flags(allows_duplicate_labels=False)
+    b = tm.round_trip_pickle(a)
+    tm.assert_series_equal(a, b)
+
+    a = pd.DataFrame({"A": []}).set_flags(allows_duplicate_labels=False)
+    b = tm.round_trip_pickle(a)
+    tm.assert_frame_equal(a, b)
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_finalize.py ADDED
@@ -0,0 +1,767 @@
+"""
+An exhaustive list of pandas methods exercising NDFrame.__finalize__.
+"""
+import operator
+import re
+
+import numpy as np
+import pytest
+
+import pandas as pd
+import pandas._testing as tm
+
+# TODO:
+# * Binary methods (mul, div, etc.)
+# * Binary outputs (align, etc.)
+# * top-level methods (concat, merge, get_dummies, etc.)
+# * window
+# * cumulative reductions
+
+not_implemented_mark = pytest.mark.xfail(reason="not implemented")
+
+mi = pd.MultiIndex.from_product([["a", "b"], [0, 1]], names=["A", "B"])
+
+frame_data = ({"A": [1]},)
+frame_mi_data = ({"A": [1, 2, 3, 4]}, mi)
+
+
+# Tuple of
+# - Callable: Constructor (Series, DataFrame)
+# - Tuple: Constructor args
+# - Callable: pass the constructed value with attrs set to this.
+
+_all_methods = [
+    (pd.Series, ([0],), operator.methodcaller("take", [])),
+    (pd.Series, ([0],), operator.methodcaller("__getitem__", [True])),
+    (pd.Series, ([0],), operator.methodcaller("repeat", 2)),
+    (pd.Series, ([0],), operator.methodcaller("reset_index")),
+    (pd.Series, ([0],), operator.methodcaller("reset_index", drop=True)),
+    (pd.Series, ([0],), operator.methodcaller("to_frame")),
+    (pd.Series, ([0, 0],), operator.methodcaller("drop_duplicates")),
+    (pd.Series, ([0, 0],), operator.methodcaller("duplicated")),
+    (pd.Series, ([0, 0],), operator.methodcaller("round")),
+    (pd.Series, ([0, 0],), operator.methodcaller("rename", lambda x: x + 1)),
+    (pd.Series, ([0, 0],), operator.methodcaller("rename", "name")),
+    (pd.Series, ([0, 0],), operator.methodcaller("set_axis", ["a", "b"])),
+    (pd.Series, ([0, 0],), operator.methodcaller("reindex", [1, 0])),
+    (pd.Series, ([0, 0],), operator.methodcaller("drop", [0])),
+    (pd.Series, (pd.array([0, pd.NA]),), operator.methodcaller("fillna", 0)),
+    (pd.Series, ([0, 0],), operator.methodcaller("replace", {0: 1})),
+    (pd.Series, ([0, 0],), operator.methodcaller("shift")),
+    (pd.Series, ([0, 0],), operator.methodcaller("isin", [0, 1])),
+    (pd.Series, ([0, 0],), operator.methodcaller("between", 0, 2)),
+    (pd.Series, ([0, 0],), operator.methodcaller("isna")),
+    (pd.Series, ([0, 0],), operator.methodcaller("isnull")),
+    (pd.Series, ([0, 0],), operator.methodcaller("notna")),
+    (pd.Series, ([0, 0],), operator.methodcaller("notnull")),
+    (pd.Series, ([1],), operator.methodcaller("add", pd.Series([1]))),
+    # TODO: mul, div, etc.
+    (
+        pd.Series,
+        ([0], pd.period_range("2000", periods=1)),
+        operator.methodcaller("to_timestamp"),
+    ),
+    (
+        pd.Series,
+        ([0], pd.date_range("2000", periods=1)),
+        operator.methodcaller("to_period"),
+    ),
+    pytest.param(
+        (
+            pd.DataFrame,
+            frame_data,
+            operator.methodcaller("dot", pd.DataFrame(index=["A"])),
+        ),
+        marks=pytest.mark.xfail(reason="Implement binary finalize"),
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("transpose")),
+    (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", "A")),
+    (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", ["A"])),
+    (pd.DataFrame, frame_data, operator.methodcaller("__getitem__", np.array([True]))),
+    (pd.DataFrame, ({("A", "a"): [1]},), operator.methodcaller("__getitem__", ["A"])),
+    (pd.DataFrame, frame_data, operator.methodcaller("query", "A == 1")),
+    (pd.DataFrame, frame_data, operator.methodcaller("eval", "A + 1", engine="python")),
+    (pd.DataFrame, frame_data, operator.methodcaller("select_dtypes", include="int")),
+    (pd.DataFrame, frame_data, operator.methodcaller("assign", b=1)),
+    (pd.DataFrame, frame_data, operator.methodcaller("set_axis", ["A"])),
+    (pd.DataFrame, frame_data, operator.methodcaller("reindex", [0, 1])),
+    (pd.DataFrame, frame_data, operator.methodcaller("drop", columns=["A"])),
+    (pd.DataFrame, frame_data, operator.methodcaller("drop", index=[0])),
+    (pd.DataFrame, frame_data, operator.methodcaller("rename", columns={"A": "a"})),
+    (pd.DataFrame, frame_data, operator.methodcaller("rename", index=lambda x: x)),
+    (pd.DataFrame, frame_data, operator.methodcaller("fillna", "A")),
+    (pd.DataFrame, frame_data, operator.methodcaller("fillna", method="ffill")),
+    (pd.DataFrame, frame_data, operator.methodcaller("set_index", "A")),
+    (pd.DataFrame, frame_data, operator.methodcaller("reset_index")),
+    (pd.DataFrame, frame_data, operator.methodcaller("isna")),
+    (pd.DataFrame, frame_data, operator.methodcaller("isnull")),
+    (pd.DataFrame, frame_data, operator.methodcaller("notna")),
+    (pd.DataFrame, frame_data, operator.methodcaller("notnull")),
+    (pd.DataFrame, frame_data, operator.methodcaller("dropna")),
+    (pd.DataFrame, frame_data, operator.methodcaller("drop_duplicates")),
+    (pd.DataFrame, frame_data, operator.methodcaller("duplicated")),
+    (pd.DataFrame, frame_data, operator.methodcaller("sort_values", by="A")),
+    (pd.DataFrame, frame_data, operator.methodcaller("sort_index")),
+    (pd.DataFrame, frame_data, operator.methodcaller("nlargest", 1, "A")),
+    (pd.DataFrame, frame_data, operator.methodcaller("nsmallest", 1, "A")),
+    (pd.DataFrame, frame_mi_data, operator.methodcaller("swaplevel")),
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("add", pd.DataFrame(*frame_data)),
+    ),
+    # TODO: div, mul, etc.
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("combine", pd.DataFrame(*frame_data), operator.add),
+    ),
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("combine_first", pd.DataFrame(*frame_data)),
+    ),
+    pytest.param(
+        (
+            pd.DataFrame,
+            frame_data,
+            operator.methodcaller("update", pd.DataFrame(*frame_data)),
+        ),
+        marks=not_implemented_mark,
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("pivot", columns="A")),
+    (
+        pd.DataFrame,
+        ({"A": [1], "B": [1]},),
+        operator.methodcaller("pivot_table", columns="A"),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [1], "B": [1]},),
+        operator.methodcaller("pivot_table", columns="A", aggfunc=["mean", "sum"]),
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("stack")),
+    (pd.DataFrame, frame_data, operator.methodcaller("explode", "A")),
+    (pd.DataFrame, frame_mi_data, operator.methodcaller("unstack")),
+    (
+        pd.DataFrame,
+        ({"A": ["a", "b", "c"], "B": [1, 3, 5], "C": [2, 4, 6]},),
+        operator.methodcaller("melt", id_vars=["A"], value_vars=["B"]),
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("map", lambda x: x)),
+    pytest.param(
+        (
+            pd.DataFrame,
+            frame_data,
+            operator.methodcaller("merge", pd.DataFrame({"A": [1]})),
+        ),
+        marks=not_implemented_mark,
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("round", 2)),
+    (pd.DataFrame, frame_data, operator.methodcaller("corr")),
+    pytest.param(
+        (pd.DataFrame, frame_data, operator.methodcaller("cov")),
+        marks=[
+            pytest.mark.filterwarnings("ignore::RuntimeWarning"),
+        ],
+    ),
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("corrwith", pd.DataFrame(*frame_data)),
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("count")),
+    (pd.DataFrame, frame_data, operator.methodcaller("nunique")),
+    (pd.DataFrame, frame_data, operator.methodcaller("idxmin")),
+    (pd.DataFrame, frame_data, operator.methodcaller("idxmax")),
+    (pd.DataFrame, frame_data, operator.methodcaller("mode")),
+    (pd.Series, [0], operator.methodcaller("mode")),
+    (pd.DataFrame, frame_data, operator.methodcaller("median")),
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("quantile", numeric_only=True),
+    ),
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("quantile", q=[0.25, 0.75], numeric_only=True),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [pd.Timedelta(days=1), pd.Timedelta(days=2)]},),
+        operator.methodcaller("quantile", numeric_only=False),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [np.datetime64("2022-01-01"), np.datetime64("2022-01-02")]},),
+        operator.methodcaller("quantile", numeric_only=True),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [1]}, [pd.Period("2000", "D")]),
+        operator.methodcaller("to_timestamp"),
+    ),
+    (
+        pd.DataFrame,
+        ({"A": [1]}, [pd.Timestamp("2000")]),
+        operator.methodcaller("to_period", freq="D"),
+    ),
+    (pd.DataFrame, frame_mi_data, operator.methodcaller("isin", [1])),
+    (pd.DataFrame, frame_mi_data, operator.methodcaller("isin", pd.Series([1]))),
+    (
+        pd.DataFrame,
+        frame_mi_data,
+        operator.methodcaller("isin", pd.DataFrame({"A": [1]})),
+    ),
+    (pd.DataFrame, frame_mi_data, operator.methodcaller("droplevel", "A")),
+    (pd.DataFrame, frame_data, operator.methodcaller("pop", "A")),
+    # Squeeze on columns, otherwise we'll end up with a scalar
+    (pd.DataFrame, frame_data, operator.methodcaller("squeeze", axis="columns")),
+    (pd.Series, ([1, 2],), operator.methodcaller("squeeze")),
+    (pd.Series, ([1, 2],), operator.methodcaller("rename_axis", index="a")),
+    (pd.DataFrame, frame_data, operator.methodcaller("rename_axis", columns="a")),
+    # Unary ops
+    (pd.DataFrame, frame_data, operator.neg),
+    (pd.Series, [1], operator.neg),
+    (pd.DataFrame, frame_data, operator.pos),
+    (pd.Series, [1], operator.pos),
+    (pd.DataFrame, frame_data, operator.inv),
+    (pd.Series, [1], operator.inv),
+    (pd.DataFrame, frame_data, abs),
+    (pd.Series, [1], abs),
+    (pd.DataFrame, frame_data, round),
+    (pd.Series, [1], round),
+    (pd.DataFrame, frame_data, operator.methodcaller("take", [0, 0])),
+    (pd.DataFrame, frame_mi_data, operator.methodcaller("xs", "a")),
+    (pd.Series, (1, mi), operator.methodcaller("xs", "a")),
+    (pd.DataFrame, frame_data, operator.methodcaller("get", "A")),
+    (
+        pd.DataFrame,
+        frame_data,
+        operator.methodcaller("reindex_like", pd.DataFrame({"A": [1, 2, 3]})),
+    ),
+    (
+        pd.Series,
+        frame_data,
+        operator.methodcaller("reindex_like", pd.Series([0, 1, 2])),
+    ),
+    (pd.DataFrame, frame_data, operator.methodcaller("add_prefix", "_")),
+    (pd.DataFrame, frame_data, operator.methodcaller("add_suffix", "_")),
+    (pd.Series, (1, ["a", "b"]), operator.methodcaller("add_prefix", "_")),
+    (pd.Series, (1, ["a", "b"]), operator.methodcaller("add_suffix", "_")),
+    (pd.Series, ([3, 2],), operator.methodcaller("sort_values")),
+    (pd.Series, ([1] * 10,), operator.methodcaller("head")),
+    (pd.DataFrame, ({"A": [1] * 10},), operator.methodcaller("head")),
+    (pd.Series, ([1] * 10,), operator.methodcaller("tail")),
+    (pd.DataFrame, ({"A": [1] * 10},), operator.methodcaller("tail")),
+    (pd.Series, ([1, 2],), operator.methodcaller("sample", n=2, replace=True)),
+    (pd.DataFrame, (frame_data,), operator.methodcaller("sample", n=2, replace=True)),
+    (pd.Series, ([1, 2],), operator.methodcaller("astype", float)),
+    (pd.DataFrame, frame_data, operator.methodcaller("astype", float)),
+    (pd.Series, ([1, 2],), operator.methodcaller("copy")),
+    (pd.DataFrame, frame_data, operator.methodcaller("copy")),
+    (pd.Series, ([1, 2], None, object), operator.methodcaller("infer_objects")),
+    (
+        pd.DataFrame,
+        ({"A": np.array([1, 2], dtype=object)},),
+        operator.methodcaller("infer_objects"),
+    ),
+    (pd.Series, ([1, 2],), operator.methodcaller("convert_dtypes")),
+    (pd.DataFrame, frame_data, operator.methodcaller("convert_dtypes")),
+    (pd.Series, ([1, None, 3],), operator.methodcaller("interpolate")),
+    (pd.DataFrame, ({"A": [1, None, 3]},), operator.methodcaller("interpolate")),
274
+ (pd.Series, ([1, 2],), operator.methodcaller("clip", lower=1)),
275
+ (pd.DataFrame, frame_data, operator.methodcaller("clip", lower=1)),
276
+ (
277
+ pd.Series,
278
+ (1, pd.date_range("2000", periods=4)),
279
+ operator.methodcaller("asfreq", "h"),
280
+ ),
281
+ (
282
+ pd.DataFrame,
283
+ ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
284
+ operator.methodcaller("asfreq", "h"),
285
+ ),
286
+ (
287
+ pd.Series,
288
+ (1, pd.date_range("2000", periods=4)),
289
+ operator.methodcaller("at_time", "12:00"),
290
+ ),
291
+ (
292
+ pd.DataFrame,
293
+ ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
294
+ operator.methodcaller("at_time", "12:00"),
295
+ ),
296
+ (
297
+ pd.Series,
298
+ (1, pd.date_range("2000", periods=4)),
299
+ operator.methodcaller("between_time", "12:00", "13:00"),
300
+ ),
301
+ (
302
+ pd.DataFrame,
303
+ ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
304
+ operator.methodcaller("between_time", "12:00", "13:00"),
305
+ ),
306
+ (
307
+ pd.Series,
308
+ (1, pd.date_range("2000", periods=4)),
309
+ operator.methodcaller("last", "3D"),
310
+ ),
311
+ (
312
+ pd.DataFrame,
313
+ ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
314
+ operator.methodcaller("last", "3D"),
315
+ ),
316
+ (pd.Series, ([1, 2],), operator.methodcaller("rank")),
317
+ (pd.DataFrame, frame_data, operator.methodcaller("rank")),
318
+ (pd.Series, ([1, 2],), operator.methodcaller("where", np.array([True, False]))),
319
+ (pd.DataFrame, frame_data, operator.methodcaller("where", np.array([[True]]))),
320
+ (pd.Series, ([1, 2],), operator.methodcaller("mask", np.array([True, False]))),
321
+ (pd.DataFrame, frame_data, operator.methodcaller("mask", np.array([[True]]))),
322
+ (pd.Series, ([1, 2],), operator.methodcaller("truncate", before=0)),
323
+ (pd.DataFrame, frame_data, operator.methodcaller("truncate", before=0)),
324
+ (
325
+ pd.Series,
326
+ (1, pd.date_range("2000", periods=4, tz="UTC")),
327
+ operator.methodcaller("tz_convert", "CET"),
328
+ ),
329
+ (
330
+ pd.DataFrame,
331
+ ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4, tz="UTC")),
332
+ operator.methodcaller("tz_convert", "CET"),
333
+ ),
334
+ (
335
+ pd.Series,
336
+ (1, pd.date_range("2000", periods=4)),
337
+ operator.methodcaller("tz_localize", "CET"),
338
+ ),
339
+ (
340
+ pd.DataFrame,
341
+ ({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
342
+ operator.methodcaller("tz_localize", "CET"),
343
+ ),
344
+ (pd.Series, ([1, 2],), operator.methodcaller("describe")),
345
+ (pd.DataFrame, frame_data, operator.methodcaller("describe")),
346
+ (pd.Series, ([1, 2],), operator.methodcaller("pct_change")),
347
+ (pd.DataFrame, frame_data, operator.methodcaller("pct_change")),
348
+ (pd.Series, ([1],), operator.methodcaller("transform", lambda x: x - x.min())),
349
+ (
350
+ pd.DataFrame,
351
+ frame_mi_data,
352
+ operator.methodcaller("transform", lambda x: x - x.min()),
353
+ ),
354
+ (pd.Series, ([1],), operator.methodcaller("apply", lambda x: x)),
355
+ (pd.DataFrame, frame_mi_data, operator.methodcaller("apply", lambda x: x)),
356
+ # Cumulative reductions
357
+ (pd.Series, ([1],), operator.methodcaller("cumsum")),
358
+ (pd.DataFrame, frame_data, operator.methodcaller("cumsum")),
359
+ (pd.Series, ([1],), operator.methodcaller("cummin")),
360
+ (pd.DataFrame, frame_data, operator.methodcaller("cummin")),
361
+ (pd.Series, ([1],), operator.methodcaller("cummax")),
362
+ (pd.DataFrame, frame_data, operator.methodcaller("cummax")),
363
+ (pd.Series, ([1],), operator.methodcaller("cumprod")),
364
+ (pd.DataFrame, frame_data, operator.methodcaller("cumprod")),
365
+ # Reductions
366
+ (pd.DataFrame, frame_data, operator.methodcaller("any")),
367
+ (pd.DataFrame, frame_data, operator.methodcaller("all")),
368
+ (pd.DataFrame, frame_data, operator.methodcaller("min")),
369
+ (pd.DataFrame, frame_data, operator.methodcaller("max")),
370
+ (pd.DataFrame, frame_data, operator.methodcaller("sum")),
371
+ (pd.DataFrame, frame_data, operator.methodcaller("std")),
372
+ (pd.DataFrame, frame_data, operator.methodcaller("mean")),
373
+ (pd.DataFrame, frame_data, operator.methodcaller("prod")),
374
+ (pd.DataFrame, frame_data, operator.methodcaller("sem")),
375
+ (pd.DataFrame, frame_data, operator.methodcaller("skew")),
376
+ (pd.DataFrame, frame_data, operator.methodcaller("kurt")),
377
+ ]
378
+
379
+
380
+ def idfn(x):
381
+ xpr = re.compile(r"'(.*)?'")
382
+ m = xpr.search(str(x))
383
+ if m:
384
+ return m.group(1)
385
+ else:
386
+ return str(x)
387
+
388
+
389
+ @pytest.fixture(params=_all_methods, ids=lambda x: idfn(x[-1]))
390
+ def ndframe_method(request):
391
+ """
392
+ An NDFrame method returning an NDFrame.
393
+ """
394
+ return request.param
395
+
396
+
397
+ @pytest.mark.filterwarnings(
398
+ "ignore:DataFrame.fillna with 'method' is deprecated:FutureWarning",
399
+ "ignore:last is deprecated:FutureWarning",
400
+ )
401
+ def test_finalize_called(ndframe_method):
402
+ cls, init_args, method = ndframe_method
403
+ ndframe = cls(*init_args)
404
+
405
+ ndframe.attrs = {"a": 1}
406
+ result = method(ndframe)
407
+
408
+ assert result.attrs == {"a": 1}
409
+
410
+
411
+ @pytest.mark.parametrize(
412
+ "data",
413
+ [
414
+ pd.Series(1, pd.date_range("2000", periods=4)),
415
+ pd.DataFrame({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
416
+ ],
417
+ )
418
+ def test_finalize_first(data):
419
+ deprecated_msg = "first is deprecated"
420
+
421
+ data.attrs = {"a": 1}
422
+ with tm.assert_produces_warning(FutureWarning, match=deprecated_msg):
423
+ result = data.first("3D")
424
+ assert result.attrs == {"a": 1}
425
+
426
+
427
+ @pytest.mark.parametrize(
428
+ "data",
429
+ [
430
+ pd.Series(1, pd.date_range("2000", periods=4)),
431
+ pd.DataFrame({"A": [1, 1, 1, 1]}, pd.date_range("2000", periods=4)),
432
+ ],
433
+ )
434
+ def test_finalize_last(data):
435
+ # GH 53710
436
+ deprecated_msg = "last is deprecated"
437
+
438
+ data.attrs = {"a": 1}
439
+ with tm.assert_produces_warning(FutureWarning, match=deprecated_msg):
440
+ result = data.last("3D")
441
+ assert result.attrs == {"a": 1}
442
+
443
+
444
+ @not_implemented_mark
445
+ def test_finalize_called_eval_numexpr():
446
+ pytest.importorskip("numexpr")
447
+ df = pd.DataFrame({"A": [1, 2]})
448
+ df.attrs["A"] = 1
449
+ result = df.eval("A + 1", engine="numexpr")
450
+ assert result.attrs == {"A": 1}
451
+
452
+
453
+ # ----------------------------------------------------------------------------
454
+ # Binary operations
455
+
456
+
457
+ @pytest.mark.parametrize("annotate", ["left", "right", "both"])
458
+ @pytest.mark.parametrize(
459
+ "args",
460
+ [
461
+ (1, pd.Series([1])),
462
+ (1, pd.DataFrame({"A": [1]})),
463
+ (pd.Series([1]), 1),
464
+ (pd.DataFrame({"A": [1]}), 1),
465
+ (pd.Series([1]), pd.Series([1])),
466
+ (pd.DataFrame({"A": [1]}), pd.DataFrame({"A": [1]})),
467
+ (pd.Series([1]), pd.DataFrame({"A": [1]})),
468
+ (pd.DataFrame({"A": [1]}), pd.Series([1])),
469
+ ],
470
+ ids=lambda x: f"({type(x[0]).__name__},{type(x[1]).__name__})",
471
+ )
472
+ def test_binops(request, args, annotate, all_binary_operators):
473
+ # This generates 624 tests... Is that needed?
474
+ left, right = args
475
+ if isinstance(left, (pd.DataFrame, pd.Series)):
476
+ left.attrs = {}
477
+ if isinstance(right, (pd.DataFrame, pd.Series)):
478
+ right.attrs = {}
479
+
480
+ if annotate == "left" and isinstance(left, int):
481
+ pytest.skip("left is an int and doesn't support .attrs")
482
+ if annotate == "right" and isinstance(right, int):
483
+ pytest.skip("right is an int and doesn't support .attrs")
484
+
485
+ if not (isinstance(left, int) or isinstance(right, int)) and annotate != "both":
486
+ if not all_binary_operators.__name__.startswith("r"):
487
+ if annotate == "right" and isinstance(left, type(right)):
488
+ request.applymarker(
489
+ pytest.mark.xfail(
490
+ reason=f"{all_binary_operators} doesn't work when right has "
491
+ f"attrs and both are {type(left)}"
492
+ )
493
+ )
494
+ if not isinstance(left, type(right)):
495
+ if annotate == "left" and isinstance(left, pd.Series):
496
+ request.applymarker(
497
+ pytest.mark.xfail(
498
+ reason=f"{all_binary_operators} doesn't work when the "
499
+ "objects are different Series has attrs"
500
+ )
501
+ )
502
+ elif annotate == "right" and isinstance(right, pd.Series):
503
+ request.applymarker(
504
+ pytest.mark.xfail(
505
+ reason=f"{all_binary_operators} doesn't work when the "
506
+ "objects are different Series has attrs"
507
+ )
508
+ )
509
+ else:
510
+ if annotate == "left" and isinstance(left, type(right)):
511
+ request.applymarker(
512
+ pytest.mark.xfail(
513
+ reason=f"{all_binary_operators} doesn't work when left has "
514
+ f"attrs and both are {type(left)}"
515
+ )
516
+ )
517
+ if not isinstance(left, type(right)):
518
+ if annotate == "right" and isinstance(right, pd.Series):
519
+ request.applymarker(
520
+ pytest.mark.xfail(
521
+ reason=f"{all_binary_operators} doesn't work when the "
522
+ "objects are different Series has attrs"
523
+ )
524
+ )
525
+ elif annotate == "left" and isinstance(left, pd.Series):
526
+ request.applymarker(
527
+ pytest.mark.xfail(
528
+ reason=f"{all_binary_operators} doesn't work when the "
529
+ "objects are different Series has attrs"
530
+ )
531
+ )
532
+ if annotate in {"left", "both"} and not isinstance(left, int):
533
+ left.attrs = {"a": 1}
534
+ if annotate in {"right", "both"} and not isinstance(right, int):
535
+ right.attrs = {"a": 1}
536
+
537
+ is_cmp = all_binary_operators in [
538
+ operator.eq,
539
+ operator.ne,
540
+ operator.gt,
541
+ operator.ge,
542
+ operator.lt,
543
+ operator.le,
544
+ ]
545
+ if is_cmp and isinstance(left, pd.DataFrame) and isinstance(right, pd.Series):
546
+ # in 2.0 silent alignment on comparisons was removed xref GH#28759
547
+ left, right = left.align(right, axis=1, copy=False)
548
+ elif is_cmp and isinstance(left, pd.Series) and isinstance(right, pd.DataFrame):
549
+ right, left = right.align(left, axis=1, copy=False)
550
+
551
+ result = all_binary_operators(left, right)
552
+ assert result.attrs == {"a": 1}
553
+
554
+
555
+ # ----------------------------------------------------------------------------
556
+ # Accessors
557
+
558
+
559
+ @pytest.mark.parametrize(
560
+ "method",
561
+ [
562
+ operator.methodcaller("capitalize"),
563
+ operator.methodcaller("casefold"),
564
+ operator.methodcaller("cat", ["a"]),
565
+ operator.methodcaller("contains", "a"),
566
+ operator.methodcaller("count", "a"),
567
+ operator.methodcaller("encode", "utf-8"),
568
+ operator.methodcaller("endswith", "a"),
569
+ operator.methodcaller("extract", r"(\w)(\d)"),
570
+ operator.methodcaller("extract", r"(\w)(\d)", expand=False),
571
+ operator.methodcaller("find", "a"),
572
+ operator.methodcaller("findall", "a"),
573
+ operator.methodcaller("get", 0),
574
+ operator.methodcaller("index", "a"),
575
+ operator.methodcaller("len"),
576
+ operator.methodcaller("ljust", 4),
577
+ operator.methodcaller("lower"),
578
+ operator.methodcaller("lstrip"),
579
+ operator.methodcaller("match", r"\w"),
580
+ operator.methodcaller("normalize", "NFC"),
581
+ operator.methodcaller("pad", 4),
582
+ operator.methodcaller("partition", "a"),
583
+ operator.methodcaller("repeat", 2),
584
+ operator.methodcaller("replace", "a", "b"),
585
+ operator.methodcaller("rfind", "a"),
586
+ operator.methodcaller("rindex", "a"),
587
+ operator.methodcaller("rjust", 4),
588
+ operator.methodcaller("rpartition", "a"),
589
+ operator.methodcaller("rstrip"),
590
+ operator.methodcaller("slice", 4),
591
+ operator.methodcaller("slice_replace", 1, repl="a"),
592
+ operator.methodcaller("startswith", "a"),
593
+ operator.methodcaller("strip"),
594
+ operator.methodcaller("swapcase"),
595
+ operator.methodcaller("translate", {"a": "b"}),
596
+ operator.methodcaller("upper"),
597
+ operator.methodcaller("wrap", 4),
598
+ operator.methodcaller("zfill", 4),
599
+ operator.methodcaller("isalnum"),
600
+ operator.methodcaller("isalpha"),
601
+ operator.methodcaller("isdigit"),
602
+ operator.methodcaller("isspace"),
603
+ operator.methodcaller("islower"),
604
+ operator.methodcaller("isupper"),
605
+ operator.methodcaller("istitle"),
606
+ operator.methodcaller("isnumeric"),
607
+ operator.methodcaller("isdecimal"),
608
+ operator.methodcaller("get_dummies"),
609
+ ],
610
+ ids=idfn,
611
+ )
612
+ def test_string_method(method):
613
+ s = pd.Series(["a1"])
614
+ s.attrs = {"a": 1}
615
+ result = method(s.str)
616
+ assert result.attrs == {"a": 1}
617
+
618
+
619
+ @pytest.mark.parametrize(
620
+ "method",
621
+ [
622
+ operator.methodcaller("to_period"),
623
+ operator.methodcaller("tz_localize", "CET"),
624
+ operator.methodcaller("normalize"),
625
+ operator.methodcaller("strftime", "%Y"),
626
+ operator.methodcaller("round", "h"),
627
+ operator.methodcaller("floor", "h"),
628
+ operator.methodcaller("ceil", "h"),
629
+ operator.methodcaller("month_name"),
630
+ operator.methodcaller("day_name"),
631
+ ],
632
+ ids=idfn,
633
+ )
634
+ def test_datetime_method(method):
635
+ s = pd.Series(pd.date_range("2000", periods=4))
636
+ s.attrs = {"a": 1}
637
+ result = method(s.dt)
638
+ assert result.attrs == {"a": 1}
639
+
640
+
641
+ @pytest.mark.parametrize(
642
+ "attr",
643
+ [
644
+ "date",
645
+ "time",
646
+ "timetz",
647
+ "year",
648
+ "month",
649
+ "day",
650
+ "hour",
651
+ "minute",
652
+ "second",
653
+ "microsecond",
654
+ "nanosecond",
655
+ "dayofweek",
656
+ "day_of_week",
657
+ "dayofyear",
658
+ "day_of_year",
659
+ "quarter",
660
+ "is_month_start",
661
+ "is_month_end",
662
+ "is_quarter_start",
663
+ "is_quarter_end",
664
+ "is_year_start",
665
+ "is_year_end",
666
+ "is_leap_year",
667
+ "daysinmonth",
668
+ "days_in_month",
669
+ ],
670
+ )
671
+ def test_datetime_property(attr):
672
+ s = pd.Series(pd.date_range("2000", periods=4))
673
+ s.attrs = {"a": 1}
674
+ result = getattr(s.dt, attr)
675
+ assert result.attrs == {"a": 1}
676
+
677
+
678
+ @pytest.mark.parametrize(
679
+ "attr", ["days", "seconds", "microseconds", "nanoseconds", "components"]
680
+ )
681
+ def test_timedelta_property(attr):
682
+ s = pd.Series(pd.timedelta_range("2000", periods=4))
683
+ s.attrs = {"a": 1}
684
+ result = getattr(s.dt, attr)
685
+ assert result.attrs == {"a": 1}
686
+
687
+
688
+ @pytest.mark.parametrize("method", [operator.methodcaller("total_seconds")])
689
+ def test_timedelta_methods(method):
690
+ s = pd.Series(pd.timedelta_range("2000", periods=4))
691
+ s.attrs = {"a": 1}
692
+ result = method(s.dt)
693
+ assert result.attrs == {"a": 1}
694
+
695
+
696
+ @pytest.mark.parametrize(
697
+ "method",
698
+ [
699
+ operator.methodcaller("add_categories", ["c"]),
700
+ operator.methodcaller("as_ordered"),
701
+ operator.methodcaller("as_unordered"),
702
+ lambda x: getattr(x, "codes"),
703
+ operator.methodcaller("remove_categories", "a"),
704
+ operator.methodcaller("remove_unused_categories"),
705
+ operator.methodcaller("rename_categories", {"a": "A", "b": "B"}),
706
+ operator.methodcaller("reorder_categories", ["b", "a"]),
707
+ operator.methodcaller("set_categories", ["A", "B"]),
708
+ ],
709
+ )
710
+ @not_implemented_mark
711
+ def test_categorical_accessor(method):
712
+ s = pd.Series(["a", "b"], dtype="category")
713
+ s.attrs = {"a": 1}
714
+ result = method(s.cat)
715
+ assert result.attrs == {"a": 1}
716
+
717
+
718
+ # ----------------------------------------------------------------------------
719
+ # Groupby
720
+
721
+
722
+ @pytest.mark.parametrize(
723
+ "obj", [pd.Series([0, 0]), pd.DataFrame({"A": [0, 1], "B": [1, 2]})]
724
+ )
725
+ @pytest.mark.parametrize(
726
+ "method",
727
+ [
728
+ operator.methodcaller("sum"),
729
+ lambda x: x.apply(lambda y: y),
730
+ lambda x: x.agg("sum"),
731
+ lambda x: x.agg("mean"),
732
+ lambda x: x.agg("median"),
733
+ ],
734
+ )
735
+ def test_groupby_finalize(obj, method):
736
+ obj.attrs = {"a": 1}
737
+ result = method(obj.groupby([0, 0], group_keys=False))
738
+ assert result.attrs == {"a": 1}
739
+
740
+
741
+ @pytest.mark.parametrize(
742
+ "obj", [pd.Series([0, 0]), pd.DataFrame({"A": [0, 1], "B": [1, 2]})]
743
+ )
744
+ @pytest.mark.parametrize(
745
+ "method",
746
+ [
747
+ lambda x: x.agg(["sum", "count"]),
748
+ lambda x: x.agg("std"),
749
+ lambda x: x.agg("var"),
750
+ lambda x: x.agg("sem"),
751
+ lambda x: x.agg("size"),
752
+ lambda x: x.agg("ohlc"),
753
+ ],
754
+ )
755
+ @not_implemented_mark
756
+ def test_groupby_finalize_not_implemented(obj, method):
757
+ obj.attrs = {"a": 1}
758
+ result = method(obj.groupby([0, 0]))
759
+ assert result.attrs == {"a": 1}
760
+
761
+
762
+ def test_finalize_frame_series_name():
763
+ # https://github.com/pandas-dev/pandas/pull/37186/files#r506978889
764
+ # ensure we don't copy the column `name` to the Series.
765
+ df = pd.DataFrame({"name": [1, 2]})
766
+ result = pd.Series([1, 2]).__finalize__(df)
767
+ assert result.name is None
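The cases above all funnel into `test_finalize_called`, which checks that user-set `attrs` survive the listed NDFrame operations via `__finalize__`. As a standalone illustration of the behaviour under test (a minimal sketch, not part of the diff):

import pandas as pd

df = pd.DataFrame({"A": [1, 2, 3]})
df.attrs = {"source": "sensor-1"}   # arbitrary user metadata

result = df.sort_values("A")        # the result is passed through __finalize__
assert result.attrs == {"source": "sensor-1"}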
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_frame.py ADDED
@@ -0,0 +1,209 @@
1
+ from copy import deepcopy
2
+ from operator import methodcaller
3
+
4
+ import numpy as np
5
+ import pytest
6
+
7
+ import pandas as pd
8
+ from pandas import (
9
+ DataFrame,
10
+ MultiIndex,
11
+ Series,
12
+ date_range,
13
+ )
14
+ import pandas._testing as tm
15
+
16
+
17
+ class TestDataFrame:
18
+ @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"])
19
+ def test_set_axis_name(self, func):
20
+ df = DataFrame([[1, 2], [3, 4]])
21
+
22
+ result = methodcaller(func, "foo")(df)
23
+ assert df.index.name is None
24
+ assert result.index.name == "foo"
25
+
26
+ result = methodcaller(func, "cols", axis=1)(df)
27
+ assert df.columns.name is None
28
+ assert result.columns.name == "cols"
29
+
30
+ @pytest.mark.parametrize("func", ["_set_axis_name", "rename_axis"])
31
+ def test_set_axis_name_mi(self, func):
32
+ df = DataFrame(
33
+ np.empty((3, 3)),
34
+ index=MultiIndex.from_tuples([("A", x) for x in list("aBc")]),
35
+ columns=MultiIndex.from_tuples([("C", x) for x in list("xyz")]),
36
+ )
37
+
38
+ level_names = ["L1", "L2"]
39
+
40
+ result = methodcaller(func, level_names)(df)
41
+ assert result.index.names == level_names
42
+ assert result.columns.names == [None, None]
43
+
44
+ result = methodcaller(func, level_names, axis=1)(df)
45
+ assert result.columns.names == ["L1", "L2"]
46
+ assert result.index.names == [None, None]
47
+
48
+ def test_nonzero_single_element(self):
49
+ # allow single item via bool method
50
+ msg_warn = (
51
+ "DataFrame.bool is now deprecated and will be removed "
52
+ "in future version of pandas"
53
+ )
54
+ df = DataFrame([[True]])
55
+ df1 = DataFrame([[False]])
56
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
57
+ assert df.bool()
58
+
59
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
60
+ assert not df1.bool()
61
+
62
+ df = DataFrame([[False, False]])
63
+ msg_err = "The truth value of a DataFrame is ambiguous"
64
+ with pytest.raises(ValueError, match=msg_err):
65
+ bool(df)
66
+
67
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
68
+ with pytest.raises(ValueError, match=msg_err):
69
+ df.bool()
70
+
71
+ def test_metadata_propagation_indiv_groupby(self):
72
+ # groupby
73
+ df = DataFrame(
74
+ {
75
+ "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
76
+ "B": ["one", "one", "two", "three", "two", "two", "one", "three"],
77
+ "C": np.random.default_rng(2).standard_normal(8),
78
+ "D": np.random.default_rng(2).standard_normal(8),
79
+ }
80
+ )
81
+ result = df.groupby("A").sum()
82
+ tm.assert_metadata_equivalent(df, result)
83
+
84
+ def test_metadata_propagation_indiv_resample(self):
85
+ # resample
86
+ df = DataFrame(
87
+ np.random.default_rng(2).standard_normal((1000, 2)),
88
+ index=date_range("20130101", periods=1000, freq="s"),
89
+ )
90
+ result = df.resample("1min")
91
+ tm.assert_metadata_equivalent(df, result)
92
+
93
+ def test_metadata_propagation_indiv(self, monkeypatch):
94
+ # merging with override
95
+ # GH 6923
96
+
97
+ def finalize(self, other, method=None, **kwargs):
98
+ for name in self._metadata:
99
+ if method == "merge":
100
+ left, right = other.left, other.right
101
+ value = getattr(left, name, "") + "|" + getattr(right, name, "")
102
+ object.__setattr__(self, name, value)
103
+ elif method == "concat":
104
+ value = "+".join(
105
+ [getattr(o, name) for o in other.objs if getattr(o, name, None)]
106
+ )
107
+ object.__setattr__(self, name, value)
108
+ else:
109
+ object.__setattr__(self, name, getattr(other, name, ""))
110
+
111
+ return self
112
+
113
+ with monkeypatch.context() as m:
114
+ m.setattr(DataFrame, "_metadata", ["filename"])
115
+ m.setattr(DataFrame, "__finalize__", finalize)
116
+
117
+ df1 = DataFrame(
118
+ np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["a", "b"]
119
+ )
120
+ df2 = DataFrame(
121
+ np.random.default_rng(2).integers(0, 4, (3, 2)), columns=["c", "d"]
122
+ )
123
+ DataFrame._metadata = ["filename"]
124
+ df1.filename = "fname1.csv"
125
+ df2.filename = "fname2.csv"
126
+
127
+ result = df1.merge(df2, left_on=["a"], right_on=["c"], how="inner")
128
+ assert result.filename == "fname1.csv|fname2.csv"
129
+
130
+ # concat
131
+ # GH#6927
132
+ df1 = DataFrame(
133
+ np.random.default_rng(2).integers(0, 4, (3, 2)), columns=list("ab")
134
+ )
135
+ df1.filename = "foo"
136
+
137
+ result = pd.concat([df1, df1])
138
+ assert result.filename == "foo+foo"
139
+
140
+ def test_set_attribute(self):
141
+ # Test for consistent setattr behavior when an attribute and a column
142
+ # have the same name (Issue #8994)
143
+ df = DataFrame({"x": [1, 2, 3]})
144
+
145
+ df.y = 2
146
+ df["y"] = [2, 4, 6]
147
+ df.y = 5
148
+
149
+ assert df.y == 5
150
+ tm.assert_series_equal(df["y"], Series([2, 4, 6], name="y"))
151
+
152
+ def test_deepcopy_empty(self):
153
+ # This test covers empty frame copying with non-empty column sets
154
+ # as reported in issue GH15370
155
+ empty_frame = DataFrame(data=[], index=[], columns=["A"])
156
+ empty_frame_copy = deepcopy(empty_frame)
157
+
158
+ tm.assert_frame_equal(empty_frame_copy, empty_frame)
159
+
160
+
161
+ # formerly in Generic but only test DataFrame
162
+ class TestDataFrame2:
163
+ @pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])
164
+ def test_validate_bool_args(self, value):
165
+ df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
166
+
167
+ msg = 'For argument "inplace" expected type bool, received type'
168
+ with pytest.raises(ValueError, match=msg):
169
+ df.copy().rename_axis(mapper={"a": "x", "b": "y"}, axis=1, inplace=value)
170
+
171
+ with pytest.raises(ValueError, match=msg):
172
+ df.copy().drop("a", axis=1, inplace=value)
173
+
174
+ with pytest.raises(ValueError, match=msg):
175
+ df.copy().fillna(value=0, inplace=value)
176
+
177
+ with pytest.raises(ValueError, match=msg):
178
+ df.copy().replace(to_replace=1, value=7, inplace=value)
179
+
180
+ with pytest.raises(ValueError, match=msg):
181
+ df.copy().interpolate(inplace=value)
182
+
183
+ with pytest.raises(ValueError, match=msg):
184
+ df.copy()._where(cond=df.a > 2, inplace=value)
185
+
186
+ with pytest.raises(ValueError, match=msg):
187
+ df.copy().mask(cond=df.a > 2, inplace=value)
188
+
189
+ def test_unexpected_keyword(self):
190
+ # GH8597
191
+ df = DataFrame(
192
+ np.random.default_rng(2).standard_normal((5, 2)), columns=["jim", "joe"]
193
+ )
194
+ ca = pd.Categorical([0, 0, 2, 2, 3, np.nan])
195
+ ts = df["joe"].copy()
196
+ ts[2] = np.nan
197
+
198
+ msg = "unexpected keyword"
199
+ with pytest.raises(TypeError, match=msg):
200
+ df.drop("joe", axis=1, in_place=True)
201
+
202
+ with pytest.raises(TypeError, match=msg):
203
+ df.reindex([1, 0], inplace=True)
204
+
205
+ with pytest.raises(TypeError, match=msg):
206
+ ca.fillna(0, inplace=True)
207
+
208
+ with pytest.raises(TypeError, match=msg):
209
+ ts.fillna(0, in_place=True)
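For context on the `_metadata` / `__finalize__` machinery that `test_metadata_propagation_indiv` monkeypatches above, the supported subclassing pattern looks roughly like the sketch below; `TaggedFrame` and `filename` are illustrative names, not part of the diff.

import pandas as pd

class TaggedFrame(pd.DataFrame):
    # attribute names listed in _metadata are copied onto results by the
    # default __finalize__ implementation
    _metadata = ["filename"]

    @property
    def _constructor(self):
        return TaggedFrame

df = TaggedFrame({"a": [1, 2, 3]})
df.filename = "fname1.csv"
print(df.head().filename)           # expected to carry "fname1.csv" through head()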
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_generic.py ADDED
@@ -0,0 +1,504 @@
1
+ from copy import (
2
+ copy,
3
+ deepcopy,
4
+ )
5
+
6
+ import numpy as np
7
+ import pytest
8
+
9
+ from pandas.core.dtypes.common import is_scalar
10
+
11
+ from pandas import (
12
+ DataFrame,
13
+ Index,
14
+ Series,
15
+ date_range,
16
+ )
17
+ import pandas._testing as tm
18
+
19
+ # ----------------------------------------------------------------------
20
+ # Generic types test cases
21
+
22
+
23
+ def construct(box, shape, value=None, dtype=None, **kwargs):
24
+ """
25
+ construct an object for the given shape
26
+ if value is specified use that if its a scalar
27
+ if value is an array, repeat it as needed
28
+ """
29
+ if isinstance(shape, int):
30
+ shape = tuple([shape] * box._AXIS_LEN)
31
+ if value is not None:
32
+ if is_scalar(value):
33
+ if value == "empty":
34
+ arr = None
35
+ dtype = np.float64
36
+
37
+ # remove the info axis
38
+ kwargs.pop(box._info_axis_name, None)
39
+ else:
40
+ arr = np.empty(shape, dtype=dtype)
41
+ arr.fill(value)
42
+ else:
43
+ fshape = np.prod(shape)
44
+ arr = value.ravel()
45
+ new_shape = fshape / arr.shape[0]
46
+ if fshape % arr.shape[0] != 0:
47
+ raise Exception("invalid value passed in construct")
48
+
49
+ arr = np.repeat(arr, new_shape).reshape(shape)
50
+ else:
51
+ arr = np.random.default_rng(2).standard_normal(shape)
52
+ return box(arr, dtype=dtype, **kwargs)
53
+
54
+
55
+ class TestGeneric:
56
+ @pytest.mark.parametrize(
57
+ "func",
58
+ [
59
+ str.lower,
60
+ {x: x.lower() for x in list("ABCD")},
61
+ Series({x: x.lower() for x in list("ABCD")}),
62
+ ],
63
+ )
64
+ def test_rename(self, frame_or_series, func):
65
+ # single axis
66
+ idx = list("ABCD")
67
+
68
+ for axis in frame_or_series._AXIS_ORDERS:
69
+ kwargs = {axis: idx}
70
+ obj = construct(frame_or_series, 4, **kwargs)
71
+
72
+ # rename a single axis
73
+ result = obj.rename(**{axis: func})
74
+ expected = obj.copy()
75
+ setattr(expected, axis, list("abcd"))
76
+ tm.assert_equal(result, expected)
77
+
78
+ def test_get_numeric_data(self, frame_or_series):
79
+ n = 4
80
+ kwargs = {
81
+ frame_or_series._get_axis_name(i): list(range(n))
82
+ for i in range(frame_or_series._AXIS_LEN)
83
+ }
84
+
85
+ # get the numeric data
86
+ o = construct(frame_or_series, n, **kwargs)
87
+ result = o._get_numeric_data()
88
+ tm.assert_equal(result, o)
89
+
90
+ # non-inclusion
91
+ result = o._get_bool_data()
92
+ expected = construct(frame_or_series, n, value="empty", **kwargs)
93
+ if isinstance(o, DataFrame):
94
+ # preserve columns dtype
95
+ expected.columns = o.columns[:0]
96
+ # https://github.com/pandas-dev/pandas/issues/50862
97
+ tm.assert_equal(result.reset_index(drop=True), expected)
98
+
99
+ # get the bool data
100
+ arr = np.array([True, True, False, True])
101
+ o = construct(frame_or_series, n, value=arr, **kwargs)
102
+ result = o._get_numeric_data()
103
+ tm.assert_equal(result, o)
104
+
105
+ def test_nonzero(self, frame_or_series):
106
+ # GH 4633
107
+ # look at the boolean/nonzero behavior for objects
108
+ obj = construct(frame_or_series, shape=4)
109
+ msg = f"The truth value of a {frame_or_series.__name__} is ambiguous"
110
+ with pytest.raises(ValueError, match=msg):
111
+ bool(obj == 0)
112
+ with pytest.raises(ValueError, match=msg):
113
+ bool(obj == 1)
114
+ with pytest.raises(ValueError, match=msg):
115
+ bool(obj)
116
+
117
+ obj = construct(frame_or_series, shape=4, value=1)
118
+ with pytest.raises(ValueError, match=msg):
119
+ bool(obj == 0)
120
+ with pytest.raises(ValueError, match=msg):
121
+ bool(obj == 1)
122
+ with pytest.raises(ValueError, match=msg):
123
+ bool(obj)
124
+
125
+ obj = construct(frame_or_series, shape=4, value=np.nan)
126
+ with pytest.raises(ValueError, match=msg):
127
+ bool(obj == 0)
128
+ with pytest.raises(ValueError, match=msg):
129
+ bool(obj == 1)
130
+ with pytest.raises(ValueError, match=msg):
131
+ bool(obj)
132
+
133
+ # empty
134
+ obj = construct(frame_or_series, shape=0)
135
+ with pytest.raises(ValueError, match=msg):
136
+ bool(obj)
137
+
138
+ # invalid behaviors
139
+
140
+ obj1 = construct(frame_or_series, shape=4, value=1)
141
+ obj2 = construct(frame_or_series, shape=4, value=1)
142
+
143
+ with pytest.raises(ValueError, match=msg):
144
+ if obj1:
145
+ pass
146
+
147
+ with pytest.raises(ValueError, match=msg):
148
+ obj1 and obj2
149
+ with pytest.raises(ValueError, match=msg):
150
+ obj1 or obj2
151
+ with pytest.raises(ValueError, match=msg):
152
+ not obj1
153
+
154
+ def test_frame_or_series_compound_dtypes(self, frame_or_series):
155
+ # see gh-5191
156
+ # Compound dtypes should raise NotImplementedError.
157
+
158
+ def f(dtype):
159
+ return construct(frame_or_series, shape=3, value=1, dtype=dtype)
160
+
161
+ msg = (
162
+ "compound dtypes are not implemented "
163
+ f"in the {frame_or_series.__name__} constructor"
164
+ )
165
+
166
+ with pytest.raises(NotImplementedError, match=msg):
167
+ f([("A", "datetime64[h]"), ("B", "str"), ("C", "int32")])
168
+
169
+ # these work (though results may be unexpected)
170
+ f("int64")
171
+ f("float64")
172
+ f("M8[ns]")
173
+
174
+ def test_metadata_propagation(self, frame_or_series):
175
+ # check that the metadata matches up on the resulting ops
176
+
177
+ o = construct(frame_or_series, shape=3)
178
+ o.name = "foo"
179
+ o2 = construct(frame_or_series, shape=3)
180
+ o2.name = "bar"
181
+
182
+ # ----------
183
+ # preserving
184
+ # ----------
185
+
186
+ # simple ops with scalars
187
+ for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
188
+ result = getattr(o, op)(1)
189
+ tm.assert_metadata_equivalent(o, result)
190
+
191
+ # ops with like
192
+ for op in ["__add__", "__sub__", "__truediv__", "__mul__"]:
193
+ result = getattr(o, op)(o)
194
+ tm.assert_metadata_equivalent(o, result)
195
+
196
+ # simple boolean
197
+ for op in ["__eq__", "__le__", "__ge__"]:
198
+ v1 = getattr(o, op)(o)
199
+ tm.assert_metadata_equivalent(o, v1)
200
+ tm.assert_metadata_equivalent(o, v1 & v1)
201
+ tm.assert_metadata_equivalent(o, v1 | v1)
202
+
203
+ # combine_first
204
+ result = o.combine_first(o2)
205
+ tm.assert_metadata_equivalent(o, result)
206
+
207
+ # ---------------------------
208
+ # non-preserving (by default)
209
+ # ---------------------------
210
+
211
+ # add non-like
212
+ result = o + o2
213
+ tm.assert_metadata_equivalent(result)
214
+
215
+ # simple boolean
216
+ for op in ["__eq__", "__le__", "__ge__"]:
217
+ # this is a name matching op
218
+ v1 = getattr(o, op)(o)
219
+ v2 = getattr(o, op)(o2)
220
+ tm.assert_metadata_equivalent(v2)
221
+ tm.assert_metadata_equivalent(v1 & v2)
222
+ tm.assert_metadata_equivalent(v1 | v2)
223
+
224
+ def test_size_compat(self, frame_or_series):
225
+ # GH8846
226
+ # size property should be defined
227
+
228
+ o = construct(frame_or_series, shape=10)
229
+ assert o.size == np.prod(o.shape)
230
+ assert o.size == 10 ** len(o.axes)
231
+
232
+ def test_split_compat(self, frame_or_series):
233
+ # xref GH8846
234
+ o = construct(frame_or_series, shape=10)
235
+ with tm.assert_produces_warning(
236
+ FutureWarning, match=".swapaxes' is deprecated", check_stacklevel=False
237
+ ):
238
+ assert len(np.array_split(o, 5)) == 5
239
+ assert len(np.array_split(o, 2)) == 2
240
+
241
+ # See gh-12301
242
+ def test_stat_unexpected_keyword(self, frame_or_series):
243
+ obj = construct(frame_or_series, 5)
244
+ starwars = "Star Wars"
245
+ errmsg = "unexpected keyword"
246
+
247
+ with pytest.raises(TypeError, match=errmsg):
248
+ obj.max(epic=starwars) # stat_function
249
+ with pytest.raises(TypeError, match=errmsg):
250
+ obj.var(epic=starwars) # stat_function_ddof
251
+ with pytest.raises(TypeError, match=errmsg):
252
+ obj.sum(epic=starwars) # cum_function
253
+ with pytest.raises(TypeError, match=errmsg):
254
+ obj.any(epic=starwars) # logical_function
255
+
256
+ @pytest.mark.parametrize("func", ["sum", "cumsum", "any", "var"])
257
+ def test_api_compat(self, func, frame_or_series):
258
+ # GH 12021
259
+ # compat for __name__, __qualname__
260
+
261
+ obj = construct(frame_or_series, 5)
262
+ f = getattr(obj, func)
263
+ assert f.__name__ == func
264
+ assert f.__qualname__.endswith(func)
265
+
266
+ def test_stat_non_defaults_args(self, frame_or_series):
267
+ obj = construct(frame_or_series, 5)
268
+ out = np.array([0])
269
+ errmsg = "the 'out' parameter is not supported"
270
+
271
+ with pytest.raises(ValueError, match=errmsg):
272
+ obj.max(out=out) # stat_function
273
+ with pytest.raises(ValueError, match=errmsg):
274
+ obj.var(out=out) # stat_function_ddof
275
+ with pytest.raises(ValueError, match=errmsg):
276
+ obj.sum(out=out) # cum_function
277
+ with pytest.raises(ValueError, match=errmsg):
278
+ obj.any(out=out) # logical_function
279
+
280
+ def test_truncate_out_of_bounds(self, frame_or_series):
281
+ # GH11382
282
+
283
+ # small
284
+ shape = [2000] + ([1] * (frame_or_series._AXIS_LEN - 1))
285
+ small = construct(frame_or_series, shape, dtype="int8", value=1)
286
+ tm.assert_equal(small.truncate(), small)
287
+ tm.assert_equal(small.truncate(before=0, after=3e3), small)
288
+ tm.assert_equal(small.truncate(before=-1, after=2e3), small)
289
+
290
+ # big
291
+ shape = [2_000_000] + ([1] * (frame_or_series._AXIS_LEN - 1))
292
+ big = construct(frame_or_series, shape, dtype="int8", value=1)
293
+ tm.assert_equal(big.truncate(), big)
294
+ tm.assert_equal(big.truncate(before=0, after=3e6), big)
295
+ tm.assert_equal(big.truncate(before=-1, after=2e6), big)
296
+
297
+ @pytest.mark.parametrize(
298
+ "func",
299
+ [copy, deepcopy, lambda x: x.copy(deep=False), lambda x: x.copy(deep=True)],
300
+ )
301
+ @pytest.mark.parametrize("shape", [0, 1, 2])
302
+ def test_copy_and_deepcopy(self, frame_or_series, shape, func):
303
+ # GH 15444
304
+ obj = construct(frame_or_series, shape)
305
+ obj_copy = func(obj)
306
+ assert obj_copy is not obj
307
+ tm.assert_equal(obj_copy, obj)
308
+
309
+ def test_data_deprecated(self, frame_or_series):
310
+ obj = frame_or_series()
311
+ msg = "(Series|DataFrame)._data is deprecated"
312
+ with tm.assert_produces_warning(DeprecationWarning, match=msg):
313
+ mgr = obj._data
314
+ assert mgr is obj._mgr
315
+
316
+
317
+ class TestNDFrame:
318
+ # tests that don't fit elsewhere
319
+
320
+ @pytest.mark.parametrize(
321
+ "ser",
322
+ [
323
+ Series(range(10), dtype=np.float64),
324
+ Series([str(i) for i in range(10)], dtype=object),
325
+ ],
326
+ )
327
+ def test_squeeze_series_noop(self, ser):
328
+ # noop
329
+ tm.assert_series_equal(ser.squeeze(), ser)
330
+
331
+ def test_squeeze_frame_noop(self):
332
+ # noop
333
+ df = DataFrame(np.eye(2))
334
+ tm.assert_frame_equal(df.squeeze(), df)
335
+
336
+ def test_squeeze_frame_reindex(self):
337
+ # squeezing
338
+ df = DataFrame(
339
+ np.random.default_rng(2).standard_normal((10, 4)),
340
+ columns=Index(list("ABCD"), dtype=object),
341
+ index=date_range("2000-01-01", periods=10, freq="B"),
342
+ ).reindex(columns=["A"])
343
+ tm.assert_series_equal(df.squeeze(), df["A"])
344
+
345
+ def test_squeeze_0_len_dim(self):
346
+ # don't fail with 0 length dimensions GH11229 & GH8999
347
+ empty_series = Series([], name="five", dtype=np.float64)
348
+ empty_frame = DataFrame([empty_series])
349
+ tm.assert_series_equal(empty_series, empty_series.squeeze())
350
+ tm.assert_series_equal(empty_series, empty_frame.squeeze())
351
+
352
+ def test_squeeze_axis(self):
353
+ # axis argument
354
+ df = DataFrame(
355
+ np.random.default_rng(2).standard_normal((1, 4)),
356
+ columns=Index(list("ABCD"), dtype=object),
357
+ index=date_range("2000-01-01", periods=1, freq="B"),
358
+ ).iloc[:, :1]
359
+ assert df.shape == (1, 1)
360
+ tm.assert_series_equal(df.squeeze(axis=0), df.iloc[0])
361
+ tm.assert_series_equal(df.squeeze(axis="index"), df.iloc[0])
362
+ tm.assert_series_equal(df.squeeze(axis=1), df.iloc[:, 0])
363
+ tm.assert_series_equal(df.squeeze(axis="columns"), df.iloc[:, 0])
364
+ assert df.squeeze() == df.iloc[0, 0]
365
+ msg = "No axis named 2 for object type DataFrame"
366
+ with pytest.raises(ValueError, match=msg):
367
+ df.squeeze(axis=2)
368
+ msg = "No axis named x for object type DataFrame"
369
+ with pytest.raises(ValueError, match=msg):
370
+ df.squeeze(axis="x")
371
+
372
+ def test_squeeze_axis_len_3(self):
373
+ df = DataFrame(
374
+ np.random.default_rng(2).standard_normal((3, 4)),
375
+ columns=Index(list("ABCD"), dtype=object),
376
+ index=date_range("2000-01-01", periods=3, freq="B"),
377
+ )
378
+ tm.assert_frame_equal(df.squeeze(axis=0), df)
379
+
380
+ def test_numpy_squeeze(self):
381
+ s = Series(range(2), dtype=np.float64)
382
+ tm.assert_series_equal(np.squeeze(s), s)
383
+
384
+ df = DataFrame(
385
+ np.random.default_rng(2).standard_normal((10, 4)),
386
+ columns=Index(list("ABCD"), dtype=object),
387
+ index=date_range("2000-01-01", periods=10, freq="B"),
388
+ ).reindex(columns=["A"])
389
+ tm.assert_series_equal(np.squeeze(df), df["A"])
390
+
391
+ @pytest.mark.parametrize(
392
+ "ser",
393
+ [
394
+ Series(range(10), dtype=np.float64),
395
+ Series([str(i) for i in range(10)], dtype=object),
396
+ ],
397
+ )
398
+ def test_transpose_series(self, ser):
399
+ # calls implementation in pandas/core/base.py
400
+ tm.assert_series_equal(ser.transpose(), ser)
401
+
402
+ def test_transpose_frame(self):
403
+ df = DataFrame(
404
+ np.random.default_rng(2).standard_normal((10, 4)),
405
+ columns=Index(list("ABCD"), dtype=object),
406
+ index=date_range("2000-01-01", periods=10, freq="B"),
407
+ )
408
+ tm.assert_frame_equal(df.transpose().transpose(), df)
409
+
410
+ def test_numpy_transpose(self, frame_or_series):
411
+ obj = DataFrame(
412
+ np.random.default_rng(2).standard_normal((10, 4)),
413
+ columns=Index(list("ABCD"), dtype=object),
414
+ index=date_range("2000-01-01", periods=10, freq="B"),
415
+ )
416
+ obj = tm.get_obj(obj, frame_or_series)
417
+
418
+ if frame_or_series is Series:
419
+ # 1D -> np.transpose is no-op
420
+ tm.assert_series_equal(np.transpose(obj), obj)
421
+
422
+ # round-trip preserved
423
+ tm.assert_equal(np.transpose(np.transpose(obj)), obj)
424
+
425
+ msg = "the 'axes' parameter is not supported"
426
+ with pytest.raises(ValueError, match=msg):
427
+ np.transpose(obj, axes=1)
428
+
429
+ @pytest.mark.parametrize(
430
+ "ser",
431
+ [
432
+ Series(range(10), dtype=np.float64),
433
+ Series([str(i) for i in range(10)], dtype=object),
434
+ ],
435
+ )
436
+ def test_take_series(self, ser):
437
+ indices = [1, 5, -2, 6, 3, -1]
438
+ out = ser.take(indices)
439
+ expected = Series(
440
+ data=ser.values.take(indices),
441
+ index=ser.index.take(indices),
442
+ dtype=ser.dtype,
443
+ )
444
+ tm.assert_series_equal(out, expected)
445
+
446
+ def test_take_frame(self):
447
+ indices = [1, 5, -2, 6, 3, -1]
448
+ df = DataFrame(
449
+ np.random.default_rng(2).standard_normal((10, 4)),
450
+ columns=Index(list("ABCD"), dtype=object),
451
+ index=date_range("2000-01-01", periods=10, freq="B"),
452
+ )
453
+ out = df.take(indices)
454
+ expected = DataFrame(
455
+ data=df.values.take(indices, axis=0),
456
+ index=df.index.take(indices),
457
+ columns=df.columns,
458
+ )
459
+ tm.assert_frame_equal(out, expected)
460
+
461
+ def test_take_invalid_kwargs(self, frame_or_series):
462
+ indices = [-3, 2, 0, 1]
463
+
464
+ obj = DataFrame(range(5))
465
+ obj = tm.get_obj(obj, frame_or_series)
466
+
467
+ msg = r"take\(\) got an unexpected keyword argument 'foo'"
468
+ with pytest.raises(TypeError, match=msg):
469
+ obj.take(indices, foo=2)
470
+
471
+ msg = "the 'out' parameter is not supported"
472
+ with pytest.raises(ValueError, match=msg):
473
+ obj.take(indices, out=indices)
474
+
475
+ msg = "the 'mode' parameter is not supported"
476
+ with pytest.raises(ValueError, match=msg):
477
+ obj.take(indices, mode="clip")
478
+
479
+ def test_axis_classmethods(self, frame_or_series):
480
+ box = frame_or_series
481
+ obj = box(dtype=object)
482
+ values = box._AXIS_TO_AXIS_NUMBER.keys()
483
+ for v in values:
484
+ assert obj._get_axis_number(v) == box._get_axis_number(v)
485
+ assert obj._get_axis_name(v) == box._get_axis_name(v)
486
+ assert obj._get_block_manager_axis(v) == box._get_block_manager_axis(v)
487
+
488
+ def test_flags_identity(self, frame_or_series):
489
+ obj = Series([1, 2])
490
+ if frame_or_series is DataFrame:
491
+ obj = obj.to_frame()
492
+
493
+ assert obj.flags is obj.flags
494
+ obj2 = obj.copy()
495
+ assert obj2.flags is not obj.flags
496
+
497
+ def test_bool_dep(self) -> None:
498
+ # GH-51749
499
+ msg_warn = (
500
+ "DataFrame.bool is now deprecated and will be removed "
501
+ "in future version of pandas"
502
+ )
503
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
504
+ DataFrame({"col": [False]}).bool()
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_label_or_level_utils.py ADDED
@@ -0,0 +1,336 @@
1
+ import pytest
2
+
3
+ from pandas.core.dtypes.missing import array_equivalent
4
+
5
+ import pandas as pd
6
+
7
+
8
+ # Fixtures
9
+ # ========
10
+ @pytest.fixture
11
+ def df():
12
+ """DataFrame with columns 'L1', 'L2', and 'L3'"""
13
+ return pd.DataFrame({"L1": [1, 2, 3], "L2": [11, 12, 13], "L3": ["A", "B", "C"]})
14
+
15
+
16
+ @pytest.fixture(params=[[], ["L1"], ["L1", "L2"], ["L1", "L2", "L3"]])
17
+ def df_levels(request, df):
18
+ """DataFrame with columns or index levels 'L1', 'L2', and 'L3'"""
19
+ levels = request.param
20
+
21
+ if levels:
22
+ df = df.set_index(levels)
23
+
24
+ return df
25
+
26
+
27
+ @pytest.fixture
28
+ def df_ambig(df):
29
+ """DataFrame with levels 'L1' and 'L2' and labels 'L1' and 'L3'"""
30
+ df = df.set_index(["L1", "L2"])
31
+
32
+ df["L1"] = df["L3"]
33
+
34
+ return df
35
+
36
+
37
+ @pytest.fixture
38
+ def df_duplabels(df):
39
+ """DataFrame with level 'L1' and labels 'L2', 'L3', and 'L2'"""
40
+ df = df.set_index(["L1"])
41
+ df = pd.concat([df, df["L2"]], axis=1)
42
+
43
+ return df
44
+
45
+
46
+ # Test is label/level reference
47
+ # =============================
48
+ def get_labels_levels(df_levels):
49
+ expected_labels = list(df_levels.columns)
50
+ expected_levels = [name for name in df_levels.index.names if name is not None]
51
+ return expected_labels, expected_levels
52
+
53
+
54
+ def assert_label_reference(frame, labels, axis):
55
+ for label in labels:
56
+ assert frame._is_label_reference(label, axis=axis)
57
+ assert not frame._is_level_reference(label, axis=axis)
58
+ assert frame._is_label_or_level_reference(label, axis=axis)
59
+
60
+
61
+ def assert_level_reference(frame, levels, axis):
62
+ for level in levels:
63
+ assert frame._is_level_reference(level, axis=axis)
64
+ assert not frame._is_label_reference(level, axis=axis)
65
+ assert frame._is_label_or_level_reference(level, axis=axis)
66
+
67
+
68
+ # DataFrame
69
+ # ---------
70
+ def test_is_level_or_label_reference_df_simple(df_levels, axis):
71
+ axis = df_levels._get_axis_number(axis)
72
+ # Compute expected labels and levels
73
+ expected_labels, expected_levels = get_labels_levels(df_levels)
74
+
75
+ # Transpose frame if axis == 1
76
+ if axis == 1:
77
+ df_levels = df_levels.T
78
+
79
+ # Perform checks
80
+ assert_level_reference(df_levels, expected_levels, axis=axis)
81
+ assert_label_reference(df_levels, expected_labels, axis=axis)
82
+
83
+
84
+ def test_is_level_reference_df_ambig(df_ambig, axis):
85
+ axis = df_ambig._get_axis_number(axis)
86
+
87
+ # Transpose frame if axis == 1
88
+ if axis == 1:
89
+ df_ambig = df_ambig.T
90
+
91
+ # df has both an on-axis level and off-axis label named L1
92
+ # Therefore L1 should reference the label, not the level
93
+ assert_label_reference(df_ambig, ["L1"], axis=axis)
94
+
95
+ # df has an on-axis level named L2 and it is not ambiguous
96
+ # Therefore L2 is a level reference
97
+ assert_level_reference(df_ambig, ["L2"], axis=axis)
98
+
99
+ # df has a column named L3 and it is not a level reference
100
+ assert_label_reference(df_ambig, ["L3"], axis=axis)
101
+
102
+
103
+ # Series
104
+ # ------
105
+ def test_is_level_reference_series_simple_axis0(df):
106
+ # Make series with L1 as index
107
+ s = df.set_index("L1").L2
108
+ assert_level_reference(s, ["L1"], axis=0)
109
+ assert not s._is_level_reference("L2")
110
+
111
+ # Make series with L1 and L2 as index
112
+ s = df.set_index(["L1", "L2"]).L3
113
+ assert_level_reference(s, ["L1", "L2"], axis=0)
114
+ assert not s._is_level_reference("L3")
115
+
116
+
117
+ def test_is_level_reference_series_axis1_error(df):
118
+ # Make series with L1 as index
119
+ s = df.set_index("L1").L2
120
+
121
+ with pytest.raises(ValueError, match="No axis named 1"):
122
+ s._is_level_reference("L1", axis=1)
123
+
124
+
125
+ # Test _check_label_or_level_ambiguity_df
126
+ # =======================================
127
+
128
+
129
+ # DataFrame
130
+ # ---------
131
+ def test_check_label_or_level_ambiguity_df(df_ambig, axis):
132
+ axis = df_ambig._get_axis_number(axis)
133
+ # Transpose frame if axis == 1
134
+ if axis == 1:
135
+ df_ambig = df_ambig.T
136
+ msg = "'L1' is both a column level and an index label"
137
+
138
+ else:
139
+ msg = "'L1' is both an index level and a column label"
140
+ # df_ambig has both an on-axis level and off-axis label named L1
141
+ # Therefore, L1 is ambiguous.
142
+ with pytest.raises(ValueError, match=msg):
143
+ df_ambig._check_label_or_level_ambiguity("L1", axis=axis)
144
+
145
+ # df_ambig has an on-axis level named L2, and it is not ambiguous.
146
+ df_ambig._check_label_or_level_ambiguity("L2", axis=axis)
147
+
148
+ # df_ambig has an off-axis label named L3, and it is not ambiguous
149
+ assert not df_ambig._check_label_or_level_ambiguity("L3", axis=axis)
150
+
151
+
152
+ # Series
153
+ # ------
154
+ def test_check_label_or_level_ambiguity_series(df):
155
+ # A series has no columns and therefore references are never ambiguous
156
+
157
+ # Make series with L1 as index
158
+ s = df.set_index("L1").L2
159
+ s._check_label_or_level_ambiguity("L1", axis=0)
160
+ s._check_label_or_level_ambiguity("L2", axis=0)
161
+
162
+ # Make series with L1 and L2 as index
163
+ s = df.set_index(["L1", "L2"]).L3
164
+ s._check_label_or_level_ambiguity("L1", axis=0)
165
+ s._check_label_or_level_ambiguity("L2", axis=0)
166
+ s._check_label_or_level_ambiguity("L3", axis=0)
167
+
168
+
169
+ def test_check_label_or_level_ambiguity_series_axis1_error(df):
170
+ # Make series with L1 as index
171
+ s = df.set_index("L1").L2
172
+
173
+ with pytest.raises(ValueError, match="No axis named 1"):
174
+ s._check_label_or_level_ambiguity("L1", axis=1)
175
+
176
+
177
+ # Test _get_label_or_level_values
178
+ # ===============================
179
+ def assert_label_values(frame, labels, axis):
180
+ axis = frame._get_axis_number(axis)
181
+ for label in labels:
182
+ if axis == 0:
183
+ expected = frame[label]._values
184
+ else:
185
+ expected = frame.loc[label]._values
186
+
187
+ result = frame._get_label_or_level_values(label, axis=axis)
188
+ assert array_equivalent(expected, result)
189
+
190
+
191
+ def assert_level_values(frame, levels, axis):
192
+ axis = frame._get_axis_number(axis)
193
+ for level in levels:
194
+ if axis == 0:
195
+ expected = frame.index.get_level_values(level=level)._values
196
+ else:
197
+ expected = frame.columns.get_level_values(level=level)._values
198
+
199
+ result = frame._get_label_or_level_values(level, axis=axis)
200
+ assert array_equivalent(expected, result)
201
+
202
+
203
+ # DataFrame
204
+ # ---------
205
+ def test_get_label_or_level_values_df_simple(df_levels, axis):
206
+ # Compute expected labels and levels
207
+ expected_labels, expected_levels = get_labels_levels(df_levels)
208
+
209
+ axis = df_levels._get_axis_number(axis)
210
+ # Transpose frame if axis == 1
211
+ if axis == 1:
212
+ df_levels = df_levels.T
213
+
214
+ # Perform checks
215
+ assert_label_values(df_levels, expected_labels, axis=axis)
216
+ assert_level_values(df_levels, expected_levels, axis=axis)
217
+
218
+
219
+ def test_get_label_or_level_values_df_ambig(df_ambig, axis):
220
+ axis = df_ambig._get_axis_number(axis)
221
+ # Transpose frame if axis == 1
222
+ if axis == 1:
223
+ df_ambig = df_ambig.T
224
+
225
+ # df has an on-axis level named L2, and it is not ambiguous.
226
+ assert_level_values(df_ambig, ["L2"], axis=axis)
227
+
228
+ # df has an off-axis label named L3, and it is not ambiguous.
229
+ assert_label_values(df_ambig, ["L3"], axis=axis)
230
+
231
+
232
+ def test_get_label_or_level_values_df_duplabels(df_duplabels, axis):
233
+ axis = df_duplabels._get_axis_number(axis)
234
+ # Transpose frame if axis == 1
235
+ if axis == 1:
236
+ df_duplabels = df_duplabels.T
237
+
238
+ # df has unambiguous level 'L1'
239
+ assert_level_values(df_duplabels, ["L1"], axis=axis)
240
+
241
+ # df has unique label 'L3'
242
+ assert_label_values(df_duplabels, ["L3"], axis=axis)
243
+
244
+ # df has duplicate labels 'L2'
245
+ if axis == 0:
246
+ expected_msg = "The column label 'L2' is not unique"
247
+ else:
248
+ expected_msg = "The index label 'L2' is not unique"
249
+
250
+ with pytest.raises(ValueError, match=expected_msg):
251
+ assert_label_values(df_duplabels, ["L2"], axis=axis)
252
+
253
+
254
+ # Series
255
+ # ------
256
+ def test_get_label_or_level_values_series_axis0(df):
257
+ # Make series with L1 as index
258
+ s = df.set_index("L1").L2
259
+ assert_level_values(s, ["L1"], axis=0)
260
+
261
+ # Make series with L1 and L2 as index
262
+ s = df.set_index(["L1", "L2"]).L3
263
+ assert_level_values(s, ["L1", "L2"], axis=0)
264
+
265
+
266
+ def test_get_label_or_level_values_series_axis1_error(df):
267
+ # Make series with L1 as index
268
+ s = df.set_index("L1").L2
269
+
270
+ with pytest.raises(ValueError, match="No axis named 1"):
271
+ s._get_label_or_level_values("L1", axis=1)
272
+
273
+
274
+ # Test _drop_labels_or_levels
275
+ # ===========================
276
+ def assert_labels_dropped(frame, labels, axis):
277
+ axis = frame._get_axis_number(axis)
278
+ for label in labels:
279
+ df_dropped = frame._drop_labels_or_levels(label, axis=axis)
280
+
281
+ if axis == 0:
282
+ assert label in frame.columns
283
+ assert label not in df_dropped.columns
284
+ else:
285
+ assert label in frame.index
286
+ assert label not in df_dropped.index
287
+
288
+
289
+ def assert_levels_dropped(frame, levels, axis):
290
+ axis = frame._get_axis_number(axis)
291
+ for level in levels:
292
+ df_dropped = frame._drop_labels_or_levels(level, axis=axis)
293
+
294
+ if axis == 0:
295
+ assert level in frame.index.names
296
+ assert level not in df_dropped.index.names
297
+ else:
298
+ assert level in frame.columns.names
299
+ assert level not in df_dropped.columns.names
300
+
301
+
302
+ # DataFrame
303
+ # ---------
304
+ def test_drop_labels_or_levels_df(df_levels, axis):
305
+ # Compute expected labels and levels
306
+ expected_labels, expected_levels = get_labels_levels(df_levels)
307
+
308
+ axis = df_levels._get_axis_number(axis)
309
+ # Transpose frame if axis == 1
310
+ if axis == 1:
311
+ df_levels = df_levels.T
312
+
313
+ # Perform checks
314
+ assert_labels_dropped(df_levels, expected_labels, axis=axis)
315
+ assert_levels_dropped(df_levels, expected_levels, axis=axis)
316
+
317
+ with pytest.raises(ValueError, match="not valid labels or levels"):
318
+ df_levels._drop_labels_or_levels("L4", axis=axis)
319
+
320
+
321
+ # Series
322
+ # ------
323
+ def test_drop_labels_or_levels_series(df):
324
+ # Make series with L1 as index
325
+ s = df.set_index("L1").L2
326
+ assert_levels_dropped(s, ["L1"], axis=0)
327
+
328
+ with pytest.raises(ValueError, match="not valid labels or levels"):
329
+ s._drop_labels_or_levels("L4", axis=0)
330
+
331
+ # Make series with L1 and L2 as index
332
+ s = df.set_index(["L1", "L2"]).L3
333
+ assert_levels_dropped(s, ["L1", "L2"], axis=0)
334
+
335
+ with pytest.raises(ValueError, match="not valid labels or levels"):
336
+ s._drop_labels_or_levels("L4", axis=0)
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_series.py ADDED
@@ -0,0 +1,159 @@
1
+ from operator import methodcaller
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ import pandas as pd
7
+ from pandas import (
8
+ MultiIndex,
9
+ Series,
10
+ date_range,
11
+ )
12
+ import pandas._testing as tm
13
+
14
+
15
+ class TestSeries:
16
+ @pytest.mark.parametrize("func", ["rename_axis", "_set_axis_name"])
17
+ def test_set_axis_name_mi(self, func):
18
+ ser = Series(
19
+ [11, 21, 31],
20
+ index=MultiIndex.from_tuples(
21
+ [("A", x) for x in ["a", "B", "c"]], names=["l1", "l2"]
22
+ ),
23
+ )
24
+
25
+ result = methodcaller(func, ["L1", "L2"])(ser)
26
+ assert ser.index.name is None
27
+ assert ser.index.names == ["l1", "l2"]
28
+ assert result.index.name is None
29
+ assert result.index.names == ["L1", "L2"]
30
+
31
+ def test_set_axis_name_raises(self):
32
+ ser = Series([1])
33
+ msg = "No axis named 1 for object type Series"
34
+ with pytest.raises(ValueError, match=msg):
35
+ ser._set_axis_name(name="a", axis=1)
36
+
37
+ def test_get_bool_data_preserve_dtype(self):
38
+ ser = Series([True, False, True])
39
+ result = ser._get_bool_data()
40
+ tm.assert_series_equal(result, ser)
41
+
42
+ def test_nonzero_single_element(self):
43
+ # allow single item via bool method
44
+ msg_warn = (
45
+ "Series.bool is now deprecated and will be removed "
46
+ "in future version of pandas"
47
+ )
48
+ ser = Series([True])
49
+ ser1 = Series([False])
50
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
51
+ assert ser.bool()
52
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
53
+ assert not ser1.bool()
54
+
55
+ @pytest.mark.parametrize("data", [np.nan, pd.NaT, True, False])
56
+ def test_nonzero_single_element_raise_1(self, data):
57
+ # single item nan to raise
58
+ series = Series([data])
59
+
60
+ msg = "The truth value of a Series is ambiguous"
61
+ with pytest.raises(ValueError, match=msg):
62
+ bool(series)
63
+
64
+ @pytest.mark.parametrize("data", [np.nan, pd.NaT])
65
+ def test_nonzero_single_element_raise_2(self, data):
66
+ msg_warn = (
67
+ "Series.bool is now deprecated and will be removed "
68
+ "in future version of pandas"
69
+ )
70
+ msg_err = "bool cannot act on a non-boolean single element Series"
71
+ series = Series([data])
72
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
73
+ with pytest.raises(ValueError, match=msg_err):
74
+ series.bool()
75
+
76
+ @pytest.mark.parametrize("data", [(True, True), (False, False)])
77
+ def test_nonzero_multiple_element_raise(self, data):
78
+ # multiple bool are still an error
79
+ msg_warn = (
80
+ "Series.bool is now deprecated and will be removed "
81
+ "in future version of pandas"
82
+ )
83
+ msg_err = "The truth value of a Series is ambiguous"
84
+ series = Series([data])
85
+ with pytest.raises(ValueError, match=msg_err):
86
+ bool(series)
87
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
88
+ with pytest.raises(ValueError, match=msg_err):
89
+ series.bool()
90
+
91
+ @pytest.mark.parametrize("data", [1, 0, "a", 0.0])
92
+ def test_nonbool_single_element_raise(self, data):
93
+ # single non-bool are an error
94
+ msg_warn = (
95
+ "Series.bool is now deprecated and will be removed "
96
+ "in future version of pandas"
97
+ )
98
+ msg_err1 = "The truth value of a Series is ambiguous"
99
+ msg_err2 = "bool cannot act on a non-boolean single element Series"
100
+ series = Series([data])
101
+ with pytest.raises(ValueError, match=msg_err1):
102
+ bool(series)
103
+ with tm.assert_produces_warning(FutureWarning, match=msg_warn):
104
+ with pytest.raises(ValueError, match=msg_err2):
105
+ series.bool()
106
+
107
+ def test_metadata_propagation_indiv_resample(self):
108
+ # resample
109
+ ts = Series(
110
+ np.random.default_rng(2).random(1000),
111
+ index=date_range("20130101", periods=1000, freq="s"),
112
+ name="foo",
113
+ )
114
+ result = ts.resample("1min").mean()
115
+ tm.assert_metadata_equivalent(ts, result)
116
+
117
+ result = ts.resample("1min").min()
118
+ tm.assert_metadata_equivalent(ts, result)
119
+
120
+ result = ts.resample("1min").apply(lambda x: x.sum())
121
+ tm.assert_metadata_equivalent(ts, result)
122
+
123
+ def test_metadata_propagation_indiv(self, monkeypatch):
124
+ # check that the metadata matches up on the resulting ops
125
+
126
+ ser = Series(range(3), range(3))
127
+ ser.name = "foo"
128
+ ser2 = Series(range(3), range(3))
129
+ ser2.name = "bar"
130
+
131
+ result = ser.T
132
+ tm.assert_metadata_equivalent(ser, result)
133
+
134
+ def finalize(self, other, method=None, **kwargs):
135
+ for name in self._metadata:
136
+ if method == "concat" and name == "filename":
137
+ value = "+".join(
138
+ [
139
+ getattr(obj, name)
140
+ for obj in other.objs
141
+ if getattr(obj, name, None)
142
+ ]
143
+ )
144
+ object.__setattr__(self, name, value)
145
+ else:
146
+ object.__setattr__(self, name, getattr(other, name, None))
147
+
148
+ return self
149
+
150
+ with monkeypatch.context() as m:
151
+ m.setattr(Series, "_metadata", ["name", "filename"])
152
+ m.setattr(Series, "__finalize__", finalize)
153
+
154
+ ser.filename = "foo"
155
+ ser2.filename = "bar"
156
+
157
+ result = pd.concat([ser, ser2])
158
+ assert result.filename == "foo+bar"
159
+ assert result.name is None
env-llmeval/lib/python3.10/site-packages/pandas/tests/generic/test_to_xarray.py ADDED
@@ -0,0 +1,130 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ from pandas import (
5
+ Categorical,
6
+ DataFrame,
7
+ MultiIndex,
8
+ Series,
9
+ date_range,
10
+ )
11
+ import pandas._testing as tm
12
+
13
+ pytest.importorskip("xarray")
14
+
15
+
16
+ class TestDataFrameToXArray:
17
+ @pytest.fixture
18
+ def df(self):
19
+ return DataFrame(
20
+ {
21
+ "a": list("abcd"),
22
+ "b": list(range(1, 5)),
23
+ "c": np.arange(3, 7).astype("u1"),
24
+ "d": np.arange(4.0, 8.0, dtype="float64"),
25
+ "e": [True, False, True, False],
26
+ "f": Categorical(list("abcd")),
27
+ "g": date_range("20130101", periods=4),
28
+ "h": date_range("20130101", periods=4, tz="US/Eastern"),
29
+ }
30
+ )
31
+
32
+ def test_to_xarray_index_types(self, index_flat, df, using_infer_string):
33
+ index = index_flat
34
+ # MultiIndex is tested in test_to_xarray_with_multiindex
35
+ if len(index) == 0:
36
+ pytest.skip("Test doesn't make sense for empty index")
37
+
38
+ from xarray import Dataset
39
+
40
+ df.index = index[:4]
41
+ df.index.name = "foo"
42
+ df.columns.name = "bar"
43
+ result = df.to_xarray()
44
+ assert result.sizes["foo"] == 4
45
+ assert len(result.coords) == 1
46
+ assert len(result.data_vars) == 8
47
+ tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
48
+ assert isinstance(result, Dataset)
49
+
50
+ # idempotency
51
+ # datetimes w/tz are preserved
52
+ # column names are lost
53
+ expected = df.copy()
54
+ expected["f"] = expected["f"].astype(
55
+ object if not using_infer_string else "string[pyarrow_numpy]"
56
+ )
57
+ expected.columns.name = None
58
+ tm.assert_frame_equal(result.to_dataframe(), expected)
59
+
60
+ def test_to_xarray_empty(self, df):
61
+ from xarray import Dataset
62
+
63
+ df.index.name = "foo"
64
+ result = df[0:0].to_xarray()
65
+ assert result.sizes["foo"] == 0
66
+ assert isinstance(result, Dataset)
67
+
68
+ def test_to_xarray_with_multiindex(self, df, using_infer_string):
69
+ from xarray import Dataset
70
+
71
+ # MultiIndex
72
+ df.index = MultiIndex.from_product([["a"], range(4)], names=["one", "two"])
73
+ result = df.to_xarray()
74
+ assert result.sizes["one"] == 1
75
+ assert result.sizes["two"] == 4
76
+ assert len(result.coords) == 2
77
+ assert len(result.data_vars) == 8
78
+ tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
79
+ assert isinstance(result, Dataset)
80
+
81
+ result = result.to_dataframe()
82
+ expected = df.copy()
83
+ expected["f"] = expected["f"].astype(
84
+ object if not using_infer_string else "string[pyarrow_numpy]"
85
+ )
86
+ expected.columns.name = None
87
+ tm.assert_frame_equal(result, expected)
88
+
89
+
90
+ class TestSeriesToXArray:
91
+ def test_to_xarray_index_types(self, index_flat):
92
+ index = index_flat
93
+ # MultiIndex is tested in test_to_xarray_with_multiindex
94
+
95
+ from xarray import DataArray
96
+
97
+ ser = Series(range(len(index)), index=index, dtype="int64")
98
+ ser.index.name = "foo"
99
+ result = ser.to_xarray()
100
+ repr(result)
101
+ assert len(result) == len(index)
102
+ assert len(result.coords) == 1
103
+ tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
104
+ assert isinstance(result, DataArray)
105
+
106
+ # idempotency
107
+ tm.assert_series_equal(result.to_series(), ser)
108
+
109
+ def test_to_xarray_empty(self):
110
+ from xarray import DataArray
111
+
112
+ ser = Series([], dtype=object)
113
+ ser.index.name = "foo"
114
+ result = ser.to_xarray()
115
+ assert len(result) == 0
116
+ assert len(result.coords) == 1
117
+ tm.assert_almost_equal(list(result.coords.keys()), ["foo"])
118
+ assert isinstance(result, DataArray)
119
+
120
+ def test_to_xarray_with_multiindex(self):
121
+ from xarray import DataArray
122
+
123
+ mi = MultiIndex.from_product([["a", "b"], range(3)], names=["one", "two"])
124
+ ser = Series(range(6), dtype="int64", index=mi)
125
+ result = ser.to_xarray()
126
+ assert len(result) == 2
127
+ tm.assert_almost_equal(list(result.coords.keys()), ["one", "two"])
128
+ assert isinstance(result, DataArray)
129
+ res = result.to_series()
130
+ tm.assert_series_equal(res, ser)
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/common.cpython-310.pyc ADDED
Binary file (14.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (1.32 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_backend.cpython-310.pyc ADDED
Binary file (3.51 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_common.cpython-310.pyc ADDED
Binary file (2.36 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_converter.cpython-310.pyc ADDED
Binary file (13.3 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_datetimelike.cpython-310.pyc ADDED
Binary file (58.7 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/__pycache__/test_groupby.cpython-310.pyc ADDED
Binary file (6.94 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/common.py ADDED
@@ -0,0 +1,563 @@
1
+ """
2
+ Module consolidating common testing functions for checking plotting.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ from typing import TYPE_CHECKING
8
+
9
+ import numpy as np
10
+
11
+ from pandas.core.dtypes.api import is_list_like
12
+
13
+ import pandas as pd
14
+ from pandas import Series
15
+ import pandas._testing as tm
16
+
17
+ if TYPE_CHECKING:
18
+ from collections.abc import Sequence
19
+
20
+ from matplotlib.axes import Axes
21
+
22
+
23
+ def _check_legend_labels(axes, labels=None, visible=True):
24
+ """
25
+ Check each axes has expected legend labels
26
+
27
+ Parameters
28
+ ----------
29
+ axes : matplotlib Axes object, or its list-like
30
+ labels : list-like
31
+ expected legend labels
32
+ visible : bool
33
+ expected legend visibility. labels are checked only when visible is
34
+ True
35
+ """
36
+ if visible and (labels is None):
37
+ raise ValueError("labels must be specified when visible is True")
38
+ axes = _flatten_visible(axes)
39
+ for ax in axes:
40
+ if visible:
41
+ assert ax.get_legend() is not None
42
+ _check_text_labels(ax.get_legend().get_texts(), labels)
43
+ else:
44
+ assert ax.get_legend() is None
45
+
46
+
47
+ def _check_legend_marker(ax, expected_markers=None, visible=True):
48
+ """
49
+ Check ax has expected legend markers
50
+
51
+ Parameters
52
+ ----------
53
+ ax : matplotlib Axes object
54
+ expected_markers : list-like
55
+ expected legend markers
56
+ visible : bool
57
+ expected legend visibility. labels are checked only when visible is
58
+ True
59
+ """
60
+ if visible and (expected_markers is None):
61
+ raise ValueError("Markers must be specified when visible is True")
62
+ if visible:
63
+ handles, _ = ax.get_legend_handles_labels()
64
+ markers = [handle.get_marker() for handle in handles]
65
+ assert markers == expected_markers
66
+ else:
67
+ assert ax.get_legend() is None
68
+
69
+
70
+ def _check_data(xp, rs):
71
+ """
72
+ Check each axes has identical lines
73
+
74
+ Parameters
75
+ ----------
76
+ xp : matplotlib Axes object
77
+ rs : matplotlib Axes object
78
+ """
79
+ import matplotlib.pyplot as plt
80
+
81
+ xp_lines = xp.get_lines()
82
+ rs_lines = rs.get_lines()
83
+
84
+ assert len(xp_lines) == len(rs_lines)
85
+ for xpl, rsl in zip(xp_lines, rs_lines):
86
+ xpdata = xpl.get_xydata()
87
+ rsdata = rsl.get_xydata()
88
+ tm.assert_almost_equal(xpdata, rsdata)
89
+
90
+ plt.close("all")
91
+
92
+
93
+ def _check_visible(collections, visible=True):
94
+ """
95
+ Check each artist is visible or not
96
+
97
+ Parameters
98
+ ----------
99
+ collections : matplotlib Artist or its list-like
100
+ target Artist or its list or collection
101
+ visible : bool
102
+ expected visibility
103
+ """
104
+ from matplotlib.collections import Collection
105
+
106
+ if not isinstance(collections, Collection) and not is_list_like(collections):
107
+ collections = [collections]
108
+
109
+ for patch in collections:
110
+ assert patch.get_visible() == visible
111
+
112
+
113
+ def _check_patches_all_filled(axes: Axes | Sequence[Axes], filled: bool = True) -> None:
114
+ """
115
+ Check for each artist whether it is filled or not
116
+
117
+ Parameters
118
+ ----------
119
+ axes : matplotlib Axes object, or its list-like
120
+ filled : bool
121
+ expected filling
122
+ """
123
+
124
+ axes = _flatten_visible(axes)
125
+ for ax in axes:
126
+ for patch in ax.patches:
127
+ assert patch.fill == filled
128
+
129
+
130
+ def _get_colors_mapped(series, colors):
131
+ unique = series.unique()
132
+ # the lengths of unique and colors can differ
133
+ # depending on slice value
134
+ mapped = dict(zip(unique, colors))
135
+ return [mapped[v] for v in series.values]
136
+
137
+
138
+ def _check_colors(collections, linecolors=None, facecolors=None, mapping=None):
139
+ """
140
+ Check each artist has expected line colors and face colors
141
+
142
+ Parameters
143
+ ----------
144
+ collections : list-like
145
+ list or collection of target artist
146
+ linecolors : list-like which has the same length as collections
147
+ list of expected line colors
148
+ facecolors : list-like which has the same length as collections
149
+ list of expected face colors
150
+ mapping : Series
151
+ Series used for color grouping key
152
+ used for andrews_curves, parallel_coordinates, radviz tests
153
+ """
154
+ from matplotlib import colors
155
+ from matplotlib.collections import (
156
+ Collection,
157
+ LineCollection,
158
+ PolyCollection,
159
+ )
160
+ from matplotlib.lines import Line2D
161
+
162
+ conv = colors.ColorConverter
163
+ if linecolors is not None:
164
+ if mapping is not None:
165
+ linecolors = _get_colors_mapped(mapping, linecolors)
166
+ linecolors = linecolors[: len(collections)]
167
+
168
+ assert len(collections) == len(linecolors)
169
+ for patch, color in zip(collections, linecolors):
170
+ if isinstance(patch, Line2D):
171
+ result = patch.get_color()
172
+ # Line2D may contain a string color expression
173
+ result = conv.to_rgba(result)
174
+ elif isinstance(patch, (PolyCollection, LineCollection)):
175
+ result = tuple(patch.get_edgecolor()[0])
176
+ else:
177
+ result = patch.get_edgecolor()
178
+
179
+ expected = conv.to_rgba(color)
180
+ assert result == expected
181
+
182
+ if facecolors is not None:
183
+ if mapping is not None:
184
+ facecolors = _get_colors_mapped(mapping, facecolors)
185
+ facecolors = facecolors[: len(collections)]
186
+
187
+ assert len(collections) == len(facecolors)
188
+ for patch, color in zip(collections, facecolors):
189
+ if isinstance(patch, Collection):
190
+ # returned as list of np.array
191
+ result = patch.get_facecolor()[0]
192
+ else:
193
+ result = patch.get_facecolor()
194
+
195
+ if isinstance(result, np.ndarray):
196
+ result = tuple(result)
197
+
198
+ expected = conv.to_rgba(color)
199
+ assert result == expected
200
+
201
+
202
+ def _check_text_labels(texts, expected):
203
+ """
204
+ Check each text has expected labels
205
+
206
+ Parameters
207
+ ----------
208
+ texts : matplotlib Text object, or its list-like
209
+ target text, or its list
210
+ expected : str or list-like which has the same length as texts
211
+ expected text label, or its list
212
+ """
213
+ if not is_list_like(texts):
214
+ assert texts.get_text() == expected
215
+ else:
216
+ labels = [t.get_text() for t in texts]
217
+ assert len(labels) == len(expected)
218
+ for label, e in zip(labels, expected):
219
+ assert label == e
220
+
221
+
222
+ def _check_ticks_props(axes, xlabelsize=None, xrot=None, ylabelsize=None, yrot=None):
223
+ """
224
+ Check each axes has expected tick properties
225
+
226
+ Parameters
227
+ ----------
228
+ axes : matplotlib Axes object, or its list-like
229
+ xlabelsize : number
230
+ expected xticks font size
231
+ xrot : number
232
+ expected xticks rotation
233
+ ylabelsize : number
234
+ expected yticks font size
235
+ yrot : number
236
+ expected yticks rotation
237
+ """
238
+ from matplotlib.ticker import NullFormatter
239
+
240
+ axes = _flatten_visible(axes)
241
+ for ax in axes:
242
+ if xlabelsize is not None or xrot is not None:
243
+ if isinstance(ax.xaxis.get_minor_formatter(), NullFormatter):
244
+ # If minor ticks have a NullFormatter, rot / fontsize are not
245
+ # retained
246
+ labels = ax.get_xticklabels()
247
+ else:
248
+ labels = ax.get_xticklabels() + ax.get_xticklabels(minor=True)
249
+
250
+ for label in labels:
251
+ if xlabelsize is not None:
252
+ tm.assert_almost_equal(label.get_fontsize(), xlabelsize)
253
+ if xrot is not None:
254
+ tm.assert_almost_equal(label.get_rotation(), xrot)
255
+
256
+ if ylabelsize is not None or yrot is not None:
257
+ if isinstance(ax.yaxis.get_minor_formatter(), NullFormatter):
258
+ labels = ax.get_yticklabels()
259
+ else:
260
+ labels = ax.get_yticklabels() + ax.get_yticklabels(minor=True)
261
+
262
+ for label in labels:
263
+ if ylabelsize is not None:
264
+ tm.assert_almost_equal(label.get_fontsize(), ylabelsize)
265
+ if yrot is not None:
266
+ tm.assert_almost_equal(label.get_rotation(), yrot)
267
+
268
+
269
+ def _check_ax_scales(axes, xaxis="linear", yaxis="linear"):
270
+ """
271
+ Check each axes has expected scales
272
+
273
+ Parameters
274
+ ----------
275
+ axes : matplotlib Axes object, or its list-like
276
+ xaxis : {'linear', 'log'}
277
+ expected xaxis scale
278
+ yaxis : {'linear', 'log'}
279
+ expected yaxis scale
280
+ """
281
+ axes = _flatten_visible(axes)
282
+ for ax in axes:
283
+ assert ax.xaxis.get_scale() == xaxis
284
+ assert ax.yaxis.get_scale() == yaxis
285
+
286
+
287
+ def _check_axes_shape(axes, axes_num=None, layout=None, figsize=None):
288
+ """
289
+ Check expected number of axes is drawn in expected layout
290
+
291
+ Parameters
292
+ ----------
293
+ axes : matplotlib Axes object, or its list-like
294
+ axes_num : number
295
+ expected number of axes. Unnecessary axes should be set to
296
+ invisible.
297
+ layout : tuple
298
+ expected layout, (expected number of rows, columns)
299
+ figsize : tuple
300
+ expected figsize. default is matplotlib default
301
+ """
302
+ from pandas.plotting._matplotlib.tools import flatten_axes
303
+
304
+ if figsize is None:
305
+ figsize = (6.4, 4.8)
306
+ visible_axes = _flatten_visible(axes)
307
+
308
+ if axes_num is not None:
309
+ assert len(visible_axes) == axes_num
310
+ for ax in visible_axes:
311
+ # check something drawn on visible axes
312
+ assert len(ax.get_children()) > 0
313
+
314
+ if layout is not None:
315
+ x_set = set()
316
+ y_set = set()
317
+ for ax in flatten_axes(axes):
318
+ # check axes coordinates to estimate layout
319
+ points = ax.get_position().get_points()
320
+ x_set.add(points[0][0])
321
+ y_set.add(points[0][1])
322
+ result = (len(y_set), len(x_set))
323
+ assert result == layout
324
+
325
+ tm.assert_numpy_array_equal(
326
+ visible_axes[0].figure.get_size_inches(),
327
+ np.array(figsize, dtype=np.float64),
328
+ )
329
+
330
+
331
+ def _flatten_visible(axes: Axes | Sequence[Axes]) -> Sequence[Axes]:
332
+ """
333
+ Flatten axes, and filter only visible
334
+
335
+ Parameters
336
+ ----------
337
+ axes : matplotlib Axes object, or its list-like
338
+
339
+ """
340
+ from pandas.plotting._matplotlib.tools import flatten_axes
341
+
342
+ axes_ndarray = flatten_axes(axes)
343
+ axes = [ax for ax in axes_ndarray if ax.get_visible()]
344
+ return axes
345
+
346
+
347
+ def _check_has_errorbars(axes, xerr=0, yerr=0):
348
+ """
349
+ Check axes has expected number of errorbars
350
+
351
+ Parameters
352
+ ----------
353
+ axes : matplotlib Axes object, or its list-like
354
+ xerr : number
355
+ expected number of x errorbar
356
+ yerr : number
357
+ expected number of y errorbar
358
+ """
359
+ axes = _flatten_visible(axes)
360
+ for ax in axes:
361
+ containers = ax.containers
362
+ xerr_count = 0
363
+ yerr_count = 0
364
+ for c in containers:
365
+ has_xerr = getattr(c, "has_xerr", False)
366
+ has_yerr = getattr(c, "has_yerr", False)
367
+ if has_xerr:
368
+ xerr_count += 1
369
+ if has_yerr:
370
+ yerr_count += 1
371
+ assert xerr == xerr_count
372
+ assert yerr == yerr_count
373
+
374
+
375
+ def _check_box_return_type(
376
+ returned, return_type, expected_keys=None, check_ax_title=True
377
+ ):
378
+ """
379
+ Check box returned type is correct
380
+
381
+ Parameters
382
+ ----------
383
+ returned : object to be tested, returned from boxplot
384
+ return_type : str
385
+ return_type passed to boxplot
386
+ expected_keys : list-like, optional
387
+ group labels in subplot case. If not passed,
388
+ the function checks assuming boxplot uses single ax
389
+ check_ax_title : bool
390
+ Whether to check the ax.title is the same as expected_key
391
+ Intended to be checked by calling from ``boxplot``.
392
+ Normal ``plot`` doesn't attach ``ax.title``, it must be disabled.
393
+ """
394
+ from matplotlib.axes import Axes
395
+
396
+ types = {"dict": dict, "axes": Axes, "both": tuple}
397
+ if expected_keys is None:
398
+ # should be fixed when the returning default is changed
399
+ if return_type is None:
400
+ return_type = "dict"
401
+
402
+ assert isinstance(returned, types[return_type])
403
+ if return_type == "both":
404
+ assert isinstance(returned.ax, Axes)
405
+ assert isinstance(returned.lines, dict)
406
+ else:
407
+ # should be fixed when the returning default is changed
408
+ if return_type is None:
409
+ for r in _flatten_visible(returned):
410
+ assert isinstance(r, Axes)
411
+ return
412
+
413
+ assert isinstance(returned, Series)
414
+
415
+ assert sorted(returned.keys()) == sorted(expected_keys)
416
+ for key, value in returned.items():
417
+ assert isinstance(value, types[return_type])
418
+ # check returned dict has correct mapping
419
+ if return_type == "axes":
420
+ if check_ax_title:
421
+ assert value.get_title() == key
422
+ elif return_type == "both":
423
+ if check_ax_title:
424
+ assert value.ax.get_title() == key
425
+ assert isinstance(value.ax, Axes)
426
+ assert isinstance(value.lines, dict)
427
+ elif return_type == "dict":
428
+ line = value["medians"][0]
429
+ axes = line.axes
430
+ if check_ax_title:
431
+ assert axes.get_title() == key
432
+ else:
433
+ raise AssertionError
434
+
435
+
436
+ def _check_grid_settings(obj, kinds, kws={}):
437
+ # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
438
+
439
+ import matplotlib as mpl
440
+
441
+ def is_grid_on():
442
+ xticks = mpl.pyplot.gca().xaxis.get_major_ticks()
443
+ yticks = mpl.pyplot.gca().yaxis.get_major_ticks()
444
+ xoff = all(not g.gridline.get_visible() for g in xticks)
445
+ yoff = all(not g.gridline.get_visible() for g in yticks)
446
+
447
+ return not (xoff and yoff)
448
+
449
+ spndx = 1
450
+ for kind in kinds:
451
+ mpl.pyplot.subplot(1, 4 * len(kinds), spndx)
452
+ spndx += 1
453
+ mpl.rc("axes", grid=False)
454
+ obj.plot(kind=kind, **kws)
455
+ assert not is_grid_on()
456
+ mpl.pyplot.clf()
457
+
458
+ mpl.pyplot.subplot(1, 4 * len(kinds), spndx)
459
+ spndx += 1
460
+ mpl.rc("axes", grid=True)
461
+ obj.plot(kind=kind, grid=False, **kws)
462
+ assert not is_grid_on()
463
+ mpl.pyplot.clf()
464
+
465
+ if kind not in ["pie", "hexbin", "scatter"]:
466
+ mpl.pyplot.subplot(1, 4 * len(kinds), spndx)
467
+ spndx += 1
468
+ mpl.rc("axes", grid=True)
469
+ obj.plot(kind=kind, **kws)
470
+ assert is_grid_on()
471
+ mpl.pyplot.clf()
472
+
473
+ mpl.pyplot.subplot(1, 4 * len(kinds), spndx)
474
+ spndx += 1
475
+ mpl.rc("axes", grid=False)
476
+ obj.plot(kind=kind, grid=True, **kws)
477
+ assert is_grid_on()
478
+ mpl.pyplot.clf()
479
+
480
+
481
+ def _unpack_cycler(rcParams, field="color"):
482
+ """
483
+ Auxiliary function for correctly unpacking cycler after MPL >= 1.5
484
+ """
485
+ return [v[field] for v in rcParams["axes.prop_cycle"]]
486
+
487
+
488
+ def get_x_axis(ax):
489
+ return ax._shared_axes["x"]
490
+
491
+
492
+ def get_y_axis(ax):
493
+ return ax._shared_axes["y"]
494
+
495
+
496
+ def _check_plot_works(f, default_axes=False, **kwargs):
497
+ """
498
+ Create plot and ensure that plot return object is valid.
499
+
500
+ Parameters
501
+ ----------
502
+ f : func
503
+ Plotting function.
504
+ default_axes : bool, optional
505
+ If False (default):
506
+ - If `ax` not in `kwargs`, then create subplot(211) and plot there
507
+ - Create new subplot(212) and plot there as well
508
+ - Mind special corner case for bootstrap_plot (see `_gen_two_subplots`)
509
+ If True:
510
+ - Simply run plotting function with kwargs provided
511
+ - All required axes instances will be created automatically
512
+ - It is recommended to use it when the plotting function
513
+ creates multiple axes itself. It helps avoid warnings like
514
+ 'UserWarning: To output multiple subplots,
515
+ the figure containing the passed axes is being cleared'
516
+ **kwargs
517
+ Keyword arguments passed to the plotting function.
518
+
519
+ Returns
520
+ -------
521
+ Plot object returned by the last plotting.
522
+ """
523
+ import matplotlib.pyplot as plt
524
+
525
+ if default_axes:
526
+ gen_plots = _gen_default_plot
527
+ else:
528
+ gen_plots = _gen_two_subplots
529
+
530
+ ret = None
531
+ try:
532
+ fig = kwargs.get("figure", plt.gcf())
533
+ plt.clf()
534
+
535
+ for ret in gen_plots(f, fig, **kwargs):
536
+ tm.assert_is_valid_plot_return_object(ret)
537
+
538
+ finally:
539
+ plt.close(fig)
540
+
541
+ return ret
542
+
543
+
544
+ def _gen_default_plot(f, fig, **kwargs):
545
+ """
546
+ Create plot in a default way.
547
+ """
548
+ yield f(**kwargs)
549
+
550
+
551
+ def _gen_two_subplots(f, fig, **kwargs):
552
+ """
553
+ Create plot on two subplots forcefully created.
554
+ """
555
+ if "ax" not in kwargs:
556
+ fig.add_subplot(211)
557
+ yield f(**kwargs)
558
+
559
+ if f is pd.plotting.bootstrap_plot:
560
+ assert "ax" not in kwargs
561
+ else:
562
+ kwargs["ax"] = fig.add_subplot(212)
563
+ yield f(**kwargs)
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/conftest.py ADDED
@@ -0,0 +1,56 @@
1
+ import gc
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas import (
7
+ DataFrame,
8
+ to_datetime,
9
+ )
10
+
11
+
12
+ @pytest.fixture(autouse=True)
13
+ def mpl_cleanup():
14
+ # matplotlib/testing/decorators.py#L24
15
+ # 1) Resets units registry
16
+ # 2) Resets rc_context
17
+ # 3) Closes all figures
18
+ mpl = pytest.importorskip("matplotlib")
19
+ mpl_units = pytest.importorskip("matplotlib.units")
20
+ plt = pytest.importorskip("matplotlib.pyplot")
21
+ orig_units_registry = mpl_units.registry.copy()
22
+ with mpl.rc_context():
23
+ mpl.use("template")
24
+ yield
25
+ mpl_units.registry.clear()
26
+ mpl_units.registry.update(orig_units_registry)
27
+ plt.close("all")
28
+ # https://matplotlib.org/stable/users/prev_whats_new/whats_new_3.6.0.html#garbage-collection-is-no-longer-run-on-figure-close # noqa: E501
29
+ gc.collect(1)
30
+
31
+
32
+ @pytest.fixture
33
+ def hist_df():
34
+ n = 50
35
+ rng = np.random.default_rng(10)
36
+ gender = rng.choice(["Male", "Female"], size=n)
37
+ classroom = rng.choice(["A", "B", "C"], size=n)
38
+
39
+ hist_df = DataFrame(
40
+ {
41
+ "gender": gender,
42
+ "classroom": classroom,
43
+ "height": rng.normal(66, 4, size=n),
44
+ "weight": rng.normal(161, 32, size=n),
45
+ "category": rng.integers(4, size=n),
46
+ "datetime": to_datetime(
47
+ rng.integers(
48
+ 812419200000000000,
49
+ 819331200000000000,
50
+ size=n,
51
+ dtype=np.int64,
52
+ )
53
+ ),
54
+ }
55
+ )
56
+ return hist_df
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (192 Bytes). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame.cpython-310.pyc ADDED
Binary file (89.5 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_color.cpython-310.pyc ADDED
Binary file (31.9 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_groupby.cpython-310.pyc ADDED
Binary file (1.96 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_legend.cpython-310.pyc ADDED
Binary file (8.67 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_frame_subplots.cpython-310.pyc ADDED
Binary file (23.8 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/__pycache__/test_hist_box_by.cpython-310.pyc ADDED
Binary file (8.81 kB). View file
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_color.py ADDED
@@ -0,0 +1,670 @@
1
+ """ Test cases for DataFrame.plot """
2
+ import re
3
+
4
+ import numpy as np
5
+ import pytest
6
+
7
+ import pandas as pd
8
+ from pandas import DataFrame
9
+ import pandas._testing as tm
10
+ from pandas.tests.plotting.common import (
11
+ _check_colors,
12
+ _check_plot_works,
13
+ _unpack_cycler,
14
+ )
15
+ from pandas.util.version import Version
16
+
17
+ mpl = pytest.importorskip("matplotlib")
18
+ plt = pytest.importorskip("matplotlib.pyplot")
19
+ cm = pytest.importorskip("matplotlib.cm")
20
+
21
+
22
+ def _check_colors_box(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
23
+ if fliers_c is None:
24
+ fliers_c = "k"
25
+ _check_colors(bp["boxes"], linecolors=[box_c] * len(bp["boxes"]))
26
+ _check_colors(bp["whiskers"], linecolors=[whiskers_c] * len(bp["whiskers"]))
27
+ _check_colors(bp["medians"], linecolors=[medians_c] * len(bp["medians"]))
28
+ _check_colors(bp["fliers"], linecolors=[fliers_c] * len(bp["fliers"]))
29
+ _check_colors(bp["caps"], linecolors=[caps_c] * len(bp["caps"]))
30
+
31
+
32
+ class TestDataFrameColor:
33
+ @pytest.mark.parametrize(
34
+ "color", ["C0", "C1", "C2", "C3", "C4", "C5", "C6", "C7", "C8", "C9"]
35
+ )
36
+ def test_mpl2_color_cycle_str(self, color):
37
+ # GH 15516
38
+ df = DataFrame(
39
+ np.random.default_rng(2).standard_normal((10, 3)), columns=["a", "b", "c"]
40
+ )
41
+ _check_plot_works(df.plot, color=color)
42
+
43
+ def test_color_single_series_list(self):
44
+ # GH 3486
45
+ df = DataFrame({"A": [1, 2, 3]})
46
+ _check_plot_works(df.plot, color=["red"])
47
+
48
+ @pytest.mark.parametrize("color", [(1, 0, 0), (1, 0, 0, 0.5)])
49
+ def test_rgb_tuple_color(self, color):
50
+ # GH 16695
51
+ df = DataFrame({"x": [1, 2], "y": [3, 4]})
52
+ _check_plot_works(df.plot, x="x", y="y", color=color)
53
+
54
+ def test_color_empty_string(self):
55
+ df = DataFrame(np.random.default_rng(2).standard_normal((10, 2)))
56
+ with pytest.raises(ValueError, match="Invalid color argument:"):
57
+ df.plot(color="")
58
+
59
+ def test_color_and_style_arguments(self):
60
+ df = DataFrame({"x": [1, 2], "y": [3, 4]})
61
+ # passing both 'color' and 'style' arguments should be allowed
62
+ # if there is no color symbol in the style strings:
63
+ ax = df.plot(color=["red", "black"], style=["-", "--"])
64
+ # check that the linestyles are correctly set:
65
+ linestyle = [line.get_linestyle() for line in ax.lines]
66
+ assert linestyle == ["-", "--"]
67
+ # check that the colors are correctly set:
68
+ color = [line.get_color() for line in ax.lines]
69
+ assert color == ["red", "black"]
70
+ # passing both 'color' and 'style' arguments should not be allowed
71
+ # if there is a color symbol in the style strings:
72
+ msg = (
73
+ "Cannot pass 'style' string with a color symbol and 'color' keyword "
74
+ "argument. Please use one or the other or pass 'style' without a color "
75
+ "symbol"
76
+ )
77
+ with pytest.raises(ValueError, match=msg):
78
+ df.plot(color=["red", "black"], style=["k-", "r--"])
79
+
80
+ @pytest.mark.parametrize(
81
+ "color, expected",
82
+ [
83
+ ("green", ["green"] * 4),
84
+ (["yellow", "red", "green", "blue"], ["yellow", "red", "green", "blue"]),
85
+ ],
86
+ )
87
+ def test_color_and_marker(self, color, expected):
88
+ # GH 21003
89
+ df = DataFrame(np.random.default_rng(2).random((7, 4)))
90
+ ax = df.plot(color=color, style="d--")
91
+ # check colors
92
+ result = [i.get_color() for i in ax.lines]
93
+ assert result == expected
94
+ # check markers and linestyles
95
+ assert all(i.get_linestyle() == "--" for i in ax.lines)
96
+ assert all(i.get_marker() == "d" for i in ax.lines)
97
+
98
+ def test_bar_colors(self):
99
+ default_colors = _unpack_cycler(plt.rcParams)
100
+
101
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
102
+ ax = df.plot.bar()
103
+ _check_colors(ax.patches[::5], facecolors=default_colors[:5])
104
+
105
+ def test_bar_colors_custom(self):
106
+ custom_colors = "rgcby"
107
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
108
+ ax = df.plot.bar(color=custom_colors)
109
+ _check_colors(ax.patches[::5], facecolors=custom_colors)
110
+
111
+ @pytest.mark.parametrize("colormap", ["jet", cm.jet])
112
+ def test_bar_colors_cmap(self, colormap):
113
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
114
+
115
+ ax = df.plot.bar(colormap=colormap)
116
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
117
+ _check_colors(ax.patches[::5], facecolors=rgba_colors)
118
+
119
+ def test_bar_colors_single_col(self):
120
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
121
+ ax = df.loc[:, [0]].plot.bar(color="DodgerBlue")
122
+ _check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
123
+
124
+ def test_bar_colors_green(self):
125
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
126
+ ax = df.plot(kind="bar", color="green")
127
+ _check_colors(ax.patches[::5], facecolors=["green"] * 5)
128
+
129
+ def test_bar_user_colors(self):
130
+ df = DataFrame(
131
+ {"A": range(4), "B": range(1, 5), "color": ["red", "blue", "blue", "red"]}
132
+ )
133
+ # This should *only* work when `y` is specified, else
134
+ # we use one color per column
135
+ ax = df.plot.bar(y="A", color=df["color"])
136
+ result = [p.get_facecolor() for p in ax.patches]
137
+ expected = [
138
+ (1.0, 0.0, 0.0, 1.0),
139
+ (0.0, 0.0, 1.0, 1.0),
140
+ (0.0, 0.0, 1.0, 1.0),
141
+ (1.0, 0.0, 0.0, 1.0),
142
+ ]
143
+ assert result == expected
144
+
145
+ def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
146
+ # addressing issue #10611, to ensure colorbar does not
147
+ # interfere with x-axis label and ticklabels with
148
+ # ipython inline backend.
149
+ random_array = np.random.default_rng(2).random((10, 3))
150
+ df = DataFrame(random_array, columns=["A label", "B label", "C label"])
151
+
152
+ ax1 = df.plot.scatter(x="A label", y="B label")
153
+ ax2 = df.plot.scatter(x="A label", y="B label", c="C label")
154
+
155
+ vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()]
156
+ vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()]
157
+ assert vis1 == vis2
158
+
159
+ vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()]
160
+ vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()]
161
+ assert vis1 == vis2
162
+
163
+ assert (
164
+ ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible()
165
+ )
166
+
167
+ def test_if_hexbin_xaxis_label_is_visible(self):
168
+ # addressing issue #10678, to ensure colorbar does not
169
+ # interfere with x-axis label and ticklabels with
170
+ # ipython inline backend.
171
+ random_array = np.random.default_rng(2).random((10, 3))
172
+ df = DataFrame(random_array, columns=["A label", "B label", "C label"])
173
+
174
+ ax = df.plot.hexbin("A label", "B label", gridsize=12)
175
+ assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels())
176
+ assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels())
177
+ assert ax.xaxis.get_label().get_visible()
178
+
179
+ def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
180
+ random_array = np.random.default_rng(2).random((10, 3))
181
+ df = DataFrame(random_array, columns=["A label", "B label", "C label"])
182
+
183
+ fig, axes = plt.subplots(1, 2)
184
+ df.plot.scatter("A label", "B label", c="C label", ax=axes[0])
185
+ df.plot.scatter("A label", "B label", c="C label", ax=axes[1])
186
+ plt.tight_layout()
187
+
188
+ points = np.array([ax.get_position().get_points() for ax in fig.axes])
189
+ axes_x_coords = points[:, :, 0]
190
+ parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :]
191
+ colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :]
192
+ assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all()
193
+
194
+ @pytest.mark.parametrize("cmap", [None, "Greys"])
195
+ def test_scatter_with_c_column_name_with_colors(self, cmap):
196
+ # https://github.com/pandas-dev/pandas/issues/34316
197
+
198
+ df = DataFrame(
199
+ [[5.1, 3.5], [4.9, 3.0], [7.0, 3.2], [6.4, 3.2], [5.9, 3.0]],
200
+ columns=["length", "width"],
201
+ )
202
+ df["species"] = ["r", "r", "g", "g", "b"]
203
+ if cmap is not None:
204
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
205
+ ax = df.plot.scatter(x=0, y=1, cmap=cmap, c="species")
206
+ else:
207
+ ax = df.plot.scatter(x=0, y=1, c="species", cmap=cmap)
208
+ assert ax.collections[0].colorbar is None
209
+
210
+ def test_scatter_colors(self):
211
+ df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]})
212
+ with pytest.raises(TypeError, match="Specify exactly one of `c` and `color`"):
213
+ df.plot.scatter(x="a", y="b", c="c", color="green")
214
+
215
+ def test_scatter_colors_not_raising_warnings(self):
216
+ # GH-53908. Do not raise UserWarning: No data for colormapping
217
+ # provided via 'c'. Parameters 'cmap' will be ignored
218
+ df = DataFrame({"x": [1, 2, 3], "y": [1, 2, 3]})
219
+ with tm.assert_produces_warning(None):
220
+ df.plot.scatter(x="x", y="y", c="b")
221
+
222
+ def test_scatter_colors_default(self):
223
+ df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]})
224
+ default_colors = _unpack_cycler(mpl.pyplot.rcParams)
225
+
226
+ ax = df.plot.scatter(x="a", y="b", c="c")
227
+ tm.assert_numpy_array_equal(
228
+ ax.collections[0].get_facecolor()[0],
229
+ np.array(mpl.colors.ColorConverter.to_rgba(default_colors[0])),
230
+ )
231
+
232
+ def test_scatter_colors_white(self):
233
+ df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]})
234
+ ax = df.plot.scatter(x="a", y="b", color="white")
235
+ tm.assert_numpy_array_equal(
236
+ ax.collections[0].get_facecolor()[0],
237
+ np.array([1, 1, 1, 1], dtype=np.float64),
238
+ )
239
+
240
+ def test_scatter_colorbar_different_cmap(self):
241
+ # GH 33389
242
+ df = DataFrame({"x": [1, 2, 3], "y": [1, 3, 2], "c": [1, 2, 3]})
243
+ df["x2"] = df["x"] + 1
244
+
245
+ _, ax = plt.subplots()
246
+ df.plot("x", "y", c="c", kind="scatter", cmap="cividis", ax=ax)
247
+ df.plot("x2", "y", c="c", kind="scatter", cmap="magma", ax=ax)
248
+
249
+ assert ax.collections[0].cmap.name == "cividis"
250
+ assert ax.collections[1].cmap.name == "magma"
251
+
252
+ def test_line_colors(self):
253
+ custom_colors = "rgcby"
254
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
255
+
256
+ ax = df.plot(color=custom_colors)
257
+ _check_colors(ax.get_lines(), linecolors=custom_colors)
258
+
259
+ plt.close("all")
260
+
261
+ ax2 = df.plot(color=custom_colors)
262
+ lines2 = ax2.get_lines()
263
+
264
+ for l1, l2 in zip(ax.get_lines(), lines2):
265
+ assert l1.get_color() == l2.get_color()
266
+
267
+ @pytest.mark.parametrize("colormap", ["jet", cm.jet])
268
+ def test_line_colors_cmap(self, colormap):
269
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
270
+ ax = df.plot(colormap=colormap)
271
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
272
+ _check_colors(ax.get_lines(), linecolors=rgba_colors)
273
+
274
+ def test_line_colors_single_col(self):
275
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
276
+ # make color a list if plotting one column frame
277
+ # handles cases like df.plot(color='DodgerBlue')
278
+ ax = df.loc[:, [0]].plot(color="DodgerBlue")
279
+ _check_colors(ax.lines, linecolors=["DodgerBlue"])
280
+
281
+ def test_line_colors_single_color(self):
282
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
283
+ ax = df.plot(color="red")
284
+ _check_colors(ax.get_lines(), linecolors=["red"] * 5)
285
+
286
+ def test_line_colors_hex(self):
287
+ # GH 10299
288
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
289
+ custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
290
+ ax = df.plot(color=custom_colors)
291
+ _check_colors(ax.get_lines(), linecolors=custom_colors)
292
+
293
+ def test_dont_modify_colors(self):
294
+ colors = ["r", "g", "b"]
295
+ DataFrame(np.random.default_rng(2).random((10, 2))).plot(color=colors)
296
+ assert len(colors) == 3
297
+
298
+ def test_line_colors_and_styles_subplots(self):
299
+ # GH 9894
300
+ default_colors = _unpack_cycler(mpl.pyplot.rcParams)
301
+
302
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
303
+
304
+ axes = df.plot(subplots=True)
305
+ for ax, c in zip(axes, list(default_colors)):
306
+ _check_colors(ax.get_lines(), linecolors=[c])
307
+
308
+ @pytest.mark.parametrize("color", ["k", "green"])
309
+ def test_line_colors_and_styles_subplots_single_color_str(self, color):
310
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
311
+ axes = df.plot(subplots=True, color=color)
312
+ for ax in axes:
313
+ _check_colors(ax.get_lines(), linecolors=[color])
314
+
315
+ @pytest.mark.parametrize("color", ["rgcby", list("rgcby")])
316
+ def test_line_colors_and_styles_subplots_custom_colors(self, color):
317
+ # GH 9894
318
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
319
+ axes = df.plot(color=color, subplots=True)
320
+ for ax, c in zip(axes, list(color)):
321
+ _check_colors(ax.get_lines(), linecolors=[c])
322
+
323
+ def test_line_colors_and_styles_subplots_colormap_hex(self):
324
+ # GH 9894
325
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
326
+ # GH 10299
327
+ custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
328
+ axes = df.plot(color=custom_colors, subplots=True)
329
+ for ax, c in zip(axes, list(custom_colors)):
330
+ _check_colors(ax.get_lines(), linecolors=[c])
331
+
332
+ @pytest.mark.parametrize("cmap", ["jet", cm.jet])
333
+ def test_line_colors_and_styles_subplots_colormap_subplot(self, cmap):
334
+ # GH 9894
335
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
336
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
337
+ axes = df.plot(colormap=cmap, subplots=True)
338
+ for ax, c in zip(axes, rgba_colors):
339
+ _check_colors(ax.get_lines(), linecolors=[c])
340
+
341
+ def test_line_colors_and_styles_subplots_single_col(self):
342
+ # GH 9894
343
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
344
+ # make color a list if plotting one column frame
345
+ # handles cases like df.plot(color='DodgerBlue')
346
+ axes = df.loc[:, [0]].plot(color="DodgerBlue", subplots=True)
347
+ _check_colors(axes[0].lines, linecolors=["DodgerBlue"])
348
+
349
+ def test_line_colors_and_styles_subplots_single_char(self):
350
+ # GH 9894
351
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
352
+ # single character style
353
+ axes = df.plot(style="r", subplots=True)
354
+ for ax in axes:
355
+ _check_colors(ax.get_lines(), linecolors=["r"])
356
+
357
+ def test_line_colors_and_styles_subplots_list_styles(self):
358
+ # GH 9894
359
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
360
+ # list of styles
361
+ styles = list("rgcby")
362
+ axes = df.plot(style=styles, subplots=True)
363
+ for ax, c in zip(axes, styles):
364
+ _check_colors(ax.get_lines(), linecolors=[c])
365
+
366
+ def test_area_colors(self):
367
+ from matplotlib.collections import PolyCollection
368
+
369
+ custom_colors = "rgcby"
370
+ df = DataFrame(np.random.default_rng(2).random((5, 5)))
371
+
372
+ ax = df.plot.area(color=custom_colors)
373
+ _check_colors(ax.get_lines(), linecolors=custom_colors)
374
+ poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
375
+ _check_colors(poly, facecolors=custom_colors)
376
+
377
+ handles, _ = ax.get_legend_handles_labels()
378
+ _check_colors(handles, facecolors=custom_colors)
379
+
380
+ for h in handles:
381
+ assert h.get_alpha() is None
382
+
383
+ def test_area_colors_poly(self):
384
+ from matplotlib import cm
385
+ from matplotlib.collections import PolyCollection
386
+
387
+ df = DataFrame(np.random.default_rng(2).random((5, 5)))
388
+ ax = df.plot.area(colormap="jet")
389
+ jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
390
+ _check_colors(ax.get_lines(), linecolors=jet_colors)
391
+ poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
392
+ _check_colors(poly, facecolors=jet_colors)
393
+
394
+ handles, _ = ax.get_legend_handles_labels()
395
+ _check_colors(handles, facecolors=jet_colors)
396
+ for h in handles:
397
+ assert h.get_alpha() is None
398
+
399
+ def test_area_colors_stacked_false(self):
400
+ from matplotlib import cm
401
+ from matplotlib.collections import PolyCollection
402
+
403
+ df = DataFrame(np.random.default_rng(2).random((5, 5)))
404
+ jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
405
+ # When stacked=False, alpha is set to 0.5
406
+ ax = df.plot.area(colormap=cm.jet, stacked=False)
407
+ _check_colors(ax.get_lines(), linecolors=jet_colors)
408
+ poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
409
+ jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
410
+ _check_colors(poly, facecolors=jet_with_alpha)
411
+
412
+ handles, _ = ax.get_legend_handles_labels()
413
+ linecolors = jet_with_alpha
414
+ _check_colors(handles[: len(jet_colors)], linecolors=linecolors)
415
+ for h in handles:
416
+ assert h.get_alpha() == 0.5
417
+
418
+ def test_hist_colors(self):
419
+ default_colors = _unpack_cycler(mpl.pyplot.rcParams)
420
+
421
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
422
+ ax = df.plot.hist()
423
+ _check_colors(ax.patches[::10], facecolors=default_colors[:5])
424
+
425
+ def test_hist_colors_single_custom(self):
426
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
427
+ custom_colors = "rgcby"
428
+ ax = df.plot.hist(color=custom_colors)
429
+ _check_colors(ax.patches[::10], facecolors=custom_colors)
430
+
431
+ @pytest.mark.parametrize("colormap", ["jet", cm.jet])
432
+ def test_hist_colors_cmap(self, colormap):
433
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
434
+ ax = df.plot.hist(colormap=colormap)
435
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
436
+ _check_colors(ax.patches[::10], facecolors=rgba_colors)
437
+
438
+ def test_hist_colors_single_col(self):
439
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
440
+ ax = df.loc[:, [0]].plot.hist(color="DodgerBlue")
441
+ _check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
442
+
443
+ def test_hist_colors_single_color(self):
444
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
445
+ ax = df.plot(kind="hist", color="green")
446
+ _check_colors(ax.patches[::10], facecolors=["green"] * 5)
447
+
448
+ def test_kde_colors(self):
449
+ pytest.importorskip("scipy")
450
+ custom_colors = "rgcby"
451
+ df = DataFrame(np.random.default_rng(2).random((5, 5)))
452
+
453
+ ax = df.plot.kde(color=custom_colors)
454
+ _check_colors(ax.get_lines(), linecolors=custom_colors)
455
+
456
+ @pytest.mark.parametrize("colormap", ["jet", cm.jet])
457
+ def test_kde_colors_cmap(self, colormap):
458
+ pytest.importorskip("scipy")
459
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
460
+ ax = df.plot.kde(colormap=colormap)
461
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
462
+ _check_colors(ax.get_lines(), linecolors=rgba_colors)
463
+
464
+ def test_kde_colors_and_styles_subplots(self):
465
+ pytest.importorskip("scipy")
466
+ default_colors = _unpack_cycler(mpl.pyplot.rcParams)
467
+
468
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
469
+
470
+ axes = df.plot(kind="kde", subplots=True)
471
+ for ax, c in zip(axes, list(default_colors)):
472
+ _check_colors(ax.get_lines(), linecolors=[c])
473
+
474
+ @pytest.mark.parametrize("colormap", ["k", "red"])
475
+ def test_kde_colors_and_styles_subplots_single_col_str(self, colormap):
476
+ pytest.importorskip("scipy")
477
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
478
+ axes = df.plot(kind="kde", color=colormap, subplots=True)
479
+ for ax in axes:
480
+ _check_colors(ax.get_lines(), linecolors=[colormap])
481
+
482
+ def test_kde_colors_and_styles_subplots_custom_color(self):
483
+ pytest.importorskip("scipy")
484
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
485
+ custom_colors = "rgcby"
486
+ axes = df.plot(kind="kde", color=custom_colors, subplots=True)
487
+ for ax, c in zip(axes, list(custom_colors)):
488
+ _check_colors(ax.get_lines(), linecolors=[c])
489
+
490
+ @pytest.mark.parametrize("colormap", ["jet", cm.jet])
491
+ def test_kde_colors_and_styles_subplots_cmap(self, colormap):
492
+ pytest.importorskip("scipy")
493
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
494
+ rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
495
+ axes = df.plot(kind="kde", colormap=colormap, subplots=True)
496
+ for ax, c in zip(axes, rgba_colors):
497
+ _check_colors(ax.get_lines(), linecolors=[c])
498
+
499
+ def test_kde_colors_and_styles_subplots_single_col(self):
500
+ pytest.importorskip("scipy")
501
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
502
+ # make color a list if plotting one column frame
503
+ # handles cases like df.plot(color='DodgerBlue')
504
+ axes = df.loc[:, [0]].plot(kind="kde", color="DodgerBlue", subplots=True)
505
+ _check_colors(axes[0].lines, linecolors=["DodgerBlue"])
506
+
507
+ def test_kde_colors_and_styles_subplots_single_char(self):
508
+ pytest.importorskip("scipy")
509
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
510
+ # list of styles
511
+ # single character style
512
+ axes = df.plot(kind="kde", style="r", subplots=True)
513
+ for ax in axes:
514
+ _check_colors(ax.get_lines(), linecolors=["r"])
515
+
516
+ def test_kde_colors_and_styles_subplots_list(self):
517
+ pytest.importorskip("scipy")
518
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
519
+ # list of styles
520
+ styles = list("rgcby")
521
+ axes = df.plot(kind="kde", style=styles, subplots=True)
522
+ for ax, c in zip(axes, styles):
523
+ _check_colors(ax.get_lines(), linecolors=[c])
524
+
525
+ def test_boxplot_colors(self):
526
+ default_colors = _unpack_cycler(mpl.pyplot.rcParams)
527
+
528
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
529
+ bp = df.plot.box(return_type="dict")
530
+ _check_colors_box(
531
+ bp,
532
+ default_colors[0],
533
+ default_colors[0],
534
+ default_colors[2],
535
+ default_colors[0],
536
+ )
537
+
538
+ def test_boxplot_colors_dict_colors(self):
539
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
540
+ dict_colors = {
541
+ "boxes": "#572923",
542
+ "whiskers": "#982042",
543
+ "medians": "#804823",
544
+ "caps": "#123456",
545
+ }
546
+ bp = df.plot.box(color=dict_colors, sym="r+", return_type="dict")
547
+ _check_colors_box(
548
+ bp,
549
+ dict_colors["boxes"],
550
+ dict_colors["whiskers"],
551
+ dict_colors["medians"],
552
+ dict_colors["caps"],
553
+ "r",
554
+ )
555
+
556
+ def test_boxplot_colors_default_color(self):
557
+ default_colors = _unpack_cycler(mpl.pyplot.rcParams)
558
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
559
+ # partial colors
560
+ dict_colors = {"whiskers": "c", "medians": "m"}
561
+ bp = df.plot.box(color=dict_colors, return_type="dict")
562
+ _check_colors_box(bp, default_colors[0], "c", "m", default_colors[0])
563
+
564
+ @pytest.mark.parametrize("colormap", ["jet", cm.jet])
565
+ def test_boxplot_colors_cmap(self, colormap):
566
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
567
+ bp = df.plot.box(colormap=colormap, return_type="dict")
568
+ jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)]
569
+ _check_colors_box(
570
+ bp, jet_colors[0], jet_colors[0], jet_colors[2], jet_colors[0]
571
+ )
572
+
573
+ def test_boxplot_colors_single(self):
574
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
575
+ # string color is applied to all artists except fliers
576
+ bp = df.plot.box(color="DodgerBlue", return_type="dict")
577
+ _check_colors_box(bp, "DodgerBlue", "DodgerBlue", "DodgerBlue", "DodgerBlue")
578
+
579
+ def test_boxplot_colors_tuple(self):
580
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
581
+ # tuple is also applied to all artists except fliers
582
+ bp = df.plot.box(color=(0, 1, 0), sym="#123456", return_type="dict")
583
+ _check_colors_box(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), "#123456")
584
+
585
+ def test_boxplot_colors_invalid(self):
586
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
587
+ msg = re.escape(
588
+ "color dict contains invalid key 'xxxx'. The key must be either "
589
+ "['boxes', 'whiskers', 'medians', 'caps']"
590
+ )
591
+ with pytest.raises(ValueError, match=msg):
592
+ # Color contains invalid key results in ValueError
593
+ df.plot.box(color={"boxes": "red", "xxxx": "blue"})
594
+
595
+ def test_default_color_cycle(self):
596
+ import cycler
597
+
598
+ colors = list("rgbk")
599
+ plt.rcParams["axes.prop_cycle"] = cycler.cycler("color", colors)
600
+
601
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 3)))
602
+ ax = df.plot()
603
+
604
+ expected = _unpack_cycler(plt.rcParams)[:3]
605
+ _check_colors(ax.get_lines(), linecolors=expected)
606
+
607
+ def test_no_color_bar(self):
608
+ df = DataFrame(
609
+ {
610
+ "A": np.random.default_rng(2).uniform(size=20),
611
+ "B": np.random.default_rng(2).uniform(size=20),
612
+ "C": np.arange(20) + np.random.default_rng(2).uniform(size=20),
613
+ }
614
+ )
615
+ ax = df.plot.hexbin(x="A", y="B", colorbar=None)
616
+ assert ax.collections[0].colorbar is None
617
+
618
+ def test_mixing_cmap_and_colormap_raises(self):
619
+ df = DataFrame(
620
+ {
621
+ "A": np.random.default_rng(2).uniform(size=20),
622
+ "B": np.random.default_rng(2).uniform(size=20),
623
+ "C": np.arange(20) + np.random.default_rng(2).uniform(size=20),
624
+ }
625
+ )
626
+ msg = "Only specify one of `cmap` and `colormap`"
627
+ with pytest.raises(TypeError, match=msg):
628
+ df.plot.hexbin(x="A", y="B", cmap="YlGn", colormap="BuGn")
629
+
630
+ def test_passed_bar_colors(self):
631
+ color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
632
+ colormap = mpl.colors.ListedColormap(color_tuples)
633
+ barplot = DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap)
634
+ assert color_tuples == [c.get_facecolor() for c in barplot.patches]
635
+
636
+ def test_rcParams_bar_colors(self):
637
+ color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
638
+ with mpl.rc_context(rc={"axes.prop_cycle": mpl.cycler("color", color_tuples)}):
639
+ barplot = DataFrame([[1, 2, 3]]).plot(kind="bar")
640
+ assert color_tuples == [c.get_facecolor() for c in barplot.patches]
641
+
642
+ def test_colors_of_columns_with_same_name(self):
643
+ # ISSUE 11136 -> https://github.com/pandas-dev/pandas/issues/11136
644
+ # Create a DataFrame with duplicate column labels and check the colors assigned to them.
645
+ df = DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]})
646
+ df1 = DataFrame({"a": [2, 4, 6]})
647
+ df_concat = pd.concat([df, df1], axis=1)
648
+ result = df_concat.plot()
649
+ legend = result.get_legend()
650
+ if Version(mpl.__version__) < Version("3.7"):
651
+ handles = legend.legendHandles
652
+ else:
653
+ handles = legend.legend_handles
654
+ for legend, line in zip(handles, result.lines):
655
+ assert legend.get_color() == line.get_color()
656
+
657
+ def test_invalid_colormap(self):
658
+ df = DataFrame(
659
+ np.random.default_rng(2).standard_normal((3, 2)), columns=["A", "B"]
660
+ )
661
+ msg = "(is not a valid value)|(is not a known colormap)"
662
+ with pytest.raises((ValueError, KeyError), match=msg):
663
+ df.plot(colormap="invalid_colormap")
664
+
665
+ def test_dataframe_none_color(self):
666
+ # GH51953
667
+ df = DataFrame([[1, 2, 3]])
668
+ ax = df.plot(color=None)
669
+ expected = _unpack_cycler(mpl.pyplot.rcParams)[:3]
670
+ _check_colors(ax.get_lines(), linecolors=expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_groupby.py ADDED
@@ -0,0 +1,72 @@
+ """ Test cases for DataFrame.plot """
+
+ import pytest
+
+ from pandas import DataFrame
+ from pandas.tests.plotting.common import _check_visible
+
+ pytest.importorskip("matplotlib")
+
+
+ class TestDataFramePlotsGroupby:
+     def _assert_ytickslabels_visibility(self, axes, expected):
+         for ax, exp in zip(axes, expected):
+             _check_visible(ax.get_yticklabels(), visible=exp)
+
+     def _assert_xtickslabels_visibility(self, axes, expected):
+         for ax, exp in zip(axes, expected):
+             _check_visible(ax.get_xticklabels(), visible=exp)
+
+     @pytest.mark.parametrize(
+         "kwargs, expected",
+         [
+             # behavior without keyword
+             ({}, [True, False, True, False]),
+             # set sharey=True should be identical
+             ({"sharey": True}, [True, False, True, False]),
+             # sharey=False, all yticklabels should be visible
+             ({"sharey": False}, [True, True, True, True]),
+         ],
+     )
+     def test_groupby_boxplot_sharey(self, kwargs, expected):
+         # https://github.com/pandas-dev/pandas/issues/20968
+         # sharey can now be switched; check whether the right
+         # pair of axes is turned on or off
+         df = DataFrame(
+             {
+                 "a": [-1.43, -0.15, -3.70, -1.43, -0.14],
+                 "b": [0.56, 0.84, 0.29, 0.56, 0.85],
+                 "c": [0, 1, 2, 3, 1],
+             },
+             index=[0, 1, 2, 3, 4],
+         )
+         axes = df.groupby("c").boxplot(**kwargs)
+         self._assert_ytickslabels_visibility(axes, expected)
+
+     @pytest.mark.parametrize(
+         "kwargs, expected",
+         [
+             # behavior without keyword
+             ({}, [True, True, True, True]),
+             # set sharex=False should be identical
+             ({"sharex": False}, [True, True, True, True]),
+             # sharex=True, xticklabels should be visible
+             # only for bottom plots
+             ({"sharex": True}, [False, False, True, True]),
+         ],
+     )
+     def test_groupby_boxplot_sharex(self, kwargs, expected):
+         # https://github.com/pandas-dev/pandas/issues/20968
+         # sharex can now be switched; check whether the right
+         # pair of axes is turned on or off
+
+         df = DataFrame(
+             {
+                 "a": [-1.43, -0.15, -3.70, -1.43, -0.14],
+                 "b": [0.56, 0.84, 0.29, 0.56, 0.85],
+                 "c": [0, 1, 2, 3, 1],
+             },
+             index=[0, 1, 2, 3, 4],
+         )
+         axes = df.groupby("c").boxplot(**kwargs)
+         self._assert_xtickslabels_visibility(axes, expected)
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_legend.py ADDED
@@ -0,0 +1,272 @@
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas.util._test_decorators as td
5
+
6
+ from pandas import (
7
+ DataFrame,
8
+ date_range,
9
+ )
10
+ from pandas.tests.plotting.common import (
11
+ _check_legend_labels,
12
+ _check_legend_marker,
13
+ _check_text_labels,
14
+ )
15
+ from pandas.util.version import Version
16
+
17
+ mpl = pytest.importorskip("matplotlib")
18
+
19
+
20
+ class TestFrameLegend:
21
+ @pytest.mark.xfail(
22
+ reason=(
23
+ "Open bug in matplotlib "
24
+ "https://github.com/matplotlib/matplotlib/issues/11357"
25
+ )
26
+ )
27
+ def test_mixed_yerr(self):
28
+ # https://github.com/pandas-dev/pandas/issues/39522
29
+ from matplotlib.collections import LineCollection
30
+ from matplotlib.lines import Line2D
31
+
32
+ df = DataFrame([{"x": 1, "a": 1, "b": 1}, {"x": 2, "a": 2, "b": 3}])
33
+
34
+ ax = df.plot("x", "a", c="orange", yerr=0.1, label="orange")
35
+ df.plot("x", "b", c="blue", yerr=None, ax=ax, label="blue")
36
+
37
+ legend = ax.get_legend()
38
+ if Version(mpl.__version__) < Version("3.7"):
39
+ result_handles = legend.legendHandles
40
+ else:
41
+ result_handles = legend.legend_handles
42
+
43
+ assert isinstance(result_handles[0], LineCollection)
44
+ assert isinstance(result_handles[1], Line2D)
45
+
46
+ def test_legend_false(self):
47
+ # https://github.com/pandas-dev/pandas/issues/40044
48
+ df = DataFrame({"a": [1, 1], "b": [2, 3]})
49
+ df2 = DataFrame({"d": [2.5, 2.5]})
50
+
51
+ ax = df.plot(legend=True, color={"a": "blue", "b": "green"}, secondary_y="b")
52
+ df2.plot(legend=True, color={"d": "red"}, ax=ax)
53
+ legend = ax.get_legend()
54
+ if Version(mpl.__version__) < Version("3.7"):
55
+ handles = legend.legendHandles
56
+ else:
57
+ handles = legend.legend_handles
58
+ result = [handle.get_color() for handle in handles]
59
+ expected = ["blue", "green", "red"]
60
+ assert result == expected
61
+
62
+ @pytest.mark.parametrize("kind", ["line", "bar", "barh", "kde", "area", "hist"])
63
+ def test_df_legend_labels(self, kind):
64
+ pytest.importorskip("scipy")
65
+ df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"])
66
+ df2 = DataFrame(
67
+ np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"]
68
+ )
69
+ df3 = DataFrame(
70
+ np.random.default_rng(2).random((3, 3)), columns=["g", "h", "i"]
71
+ )
72
+ df4 = DataFrame(
73
+ np.random.default_rng(2).random((3, 3)), columns=["j", "k", "l"]
74
+ )
75
+
76
+ ax = df.plot(kind=kind, legend=True)
77
+ _check_legend_labels(ax, labels=df.columns)
78
+
79
+ ax = df2.plot(kind=kind, legend=False, ax=ax)
80
+ _check_legend_labels(ax, labels=df.columns)
81
+
82
+ ax = df3.plot(kind=kind, legend=True, ax=ax)
83
+ _check_legend_labels(ax, labels=df.columns.union(df3.columns))
84
+
85
+ ax = df4.plot(kind=kind, legend="reverse", ax=ax)
86
+ expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns))
87
+ _check_legend_labels(ax, labels=expected)
88
+
89
+ def test_df_legend_labels_secondary_y(self):
90
+ pytest.importorskip("scipy")
91
+ df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"])
92
+ df2 = DataFrame(
93
+ np.random.default_rng(2).random((3, 3)), columns=["d", "e", "f"]
94
+ )
95
+ df3 = DataFrame(
96
+ np.random.default_rng(2).random((3, 3)), columns=["g", "h", "i"]
97
+ )
98
+ # Secondary Y
99
+ ax = df.plot(legend=True, secondary_y="b")
100
+ _check_legend_labels(ax, labels=["a", "b (right)", "c"])
101
+ ax = df2.plot(legend=False, ax=ax)
102
+ _check_legend_labels(ax, labels=["a", "b (right)", "c"])
103
+ ax = df3.plot(kind="bar", legend=True, secondary_y="h", ax=ax)
104
+ _check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h (right)", "i"])
105
+
106
+ def test_df_legend_labels_time_series(self):
107
+ # Time Series
108
+ pytest.importorskip("scipy")
109
+ ind = date_range("1/1/2014", periods=3)
110
+ df = DataFrame(
111
+ np.random.default_rng(2).standard_normal((3, 3)),
112
+ columns=["a", "b", "c"],
113
+ index=ind,
114
+ )
115
+ df2 = DataFrame(
116
+ np.random.default_rng(2).standard_normal((3, 3)),
117
+ columns=["d", "e", "f"],
118
+ index=ind,
119
+ )
120
+ df3 = DataFrame(
121
+ np.random.default_rng(2).standard_normal((3, 3)),
122
+ columns=["g", "h", "i"],
123
+ index=ind,
124
+ )
125
+ ax = df.plot(legend=True, secondary_y="b")
126
+ _check_legend_labels(ax, labels=["a", "b (right)", "c"])
127
+ ax = df2.plot(legend=False, ax=ax)
128
+ _check_legend_labels(ax, labels=["a", "b (right)", "c"])
129
+ ax = df3.plot(legend=True, ax=ax)
130
+ _check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h", "i"])
131
+
132
+ def test_df_legend_labels_time_series_scatter(self):
133
+ # Time Series
134
+ pytest.importorskip("scipy")
135
+ ind = date_range("1/1/2014", periods=3)
136
+ df = DataFrame(
137
+ np.random.default_rng(2).standard_normal((3, 3)),
138
+ columns=["a", "b", "c"],
139
+ index=ind,
140
+ )
141
+ df2 = DataFrame(
142
+ np.random.default_rng(2).standard_normal((3, 3)),
143
+ columns=["d", "e", "f"],
144
+ index=ind,
145
+ )
146
+ df3 = DataFrame(
147
+ np.random.default_rng(2).standard_normal((3, 3)),
148
+ columns=["g", "h", "i"],
149
+ index=ind,
150
+ )
151
+ # scatter
152
+ ax = df.plot.scatter(x="a", y="b", label="data1")
153
+ _check_legend_labels(ax, labels=["data1"])
154
+ ax = df2.plot.scatter(x="d", y="e", legend=False, label="data2", ax=ax)
155
+ _check_legend_labels(ax, labels=["data1"])
156
+ ax = df3.plot.scatter(x="g", y="h", label="data3", ax=ax)
157
+ _check_legend_labels(ax, labels=["data1", "data3"])
158
+
159
+ def test_df_legend_labels_time_series_no_mutate(self):
160
+ pytest.importorskip("scipy")
161
+ ind = date_range("1/1/2014", periods=3)
162
+ df = DataFrame(
163
+ np.random.default_rng(2).standard_normal((3, 3)),
164
+ columns=["a", "b", "c"],
165
+ index=ind,
166
+ )
167
+ # ensure label args pass through and
168
+ # index name does not mutate
169
+ # column names don't mutate
170
+ df5 = df.set_index("a")
171
+ ax = df5.plot(y="b")
172
+ _check_legend_labels(ax, labels=["b"])
173
+ ax = df5.plot(y="b", label="LABEL_b")
174
+ _check_legend_labels(ax, labels=["LABEL_b"])
175
+ _check_text_labels(ax.xaxis.get_label(), "a")
176
+ ax = df5.plot(y="c", label="LABEL_c", ax=ax)
177
+ _check_legend_labels(ax, labels=["LABEL_b", "LABEL_c"])
178
+ assert df5.columns.tolist() == ["b", "c"]
179
+
180
+ def test_missing_marker_multi_plots_on_same_ax(self):
181
+ # GH 18222
182
+ df = DataFrame(data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=["x", "r", "g", "b"])
183
+ _, ax = mpl.pyplot.subplots(nrows=1, ncols=3)
184
+ # Left plot
185
+ df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[0])
186
+ df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[0])
187
+ df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[0])
188
+ _check_legend_labels(ax[0], labels=["r", "g", "b"])
189
+ _check_legend_marker(ax[0], expected_markers=["o", "x", "o"])
190
+ # Center plot
191
+ df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[1])
192
+ df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[1])
193
+ df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[1])
194
+ _check_legend_labels(ax[1], labels=["b", "r", "g"])
195
+ _check_legend_marker(ax[1], expected_markers=["o", "o", "x"])
196
+ # Right plot
197
+ df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[2])
198
+ df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[2])
199
+ df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[2])
200
+ _check_legend_labels(ax[2], labels=["g", "b", "r"])
201
+ _check_legend_marker(ax[2], expected_markers=["x", "o", "o"])
202
+
203
+ def test_legend_name(self):
204
+ multi = DataFrame(
205
+ np.random.default_rng(2).standard_normal((4, 4)),
206
+ columns=[np.array(["a", "a", "b", "b"]), np.array(["x", "y", "x", "y"])],
207
+ )
208
+ multi.columns.names = ["group", "individual"]
209
+
210
+ ax = multi.plot()
211
+ leg_title = ax.legend_.get_title()
212
+ _check_text_labels(leg_title, "group,individual")
213
+
214
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
215
+ ax = df.plot(legend=True, ax=ax)
216
+ leg_title = ax.legend_.get_title()
217
+ _check_text_labels(leg_title, "group,individual")
218
+
219
+ df.columns.name = "new"
220
+ ax = df.plot(legend=False, ax=ax)
221
+ leg_title = ax.legend_.get_title()
222
+ _check_text_labels(leg_title, "group,individual")
223
+
224
+ ax = df.plot(legend=True, ax=ax)
225
+ leg_title = ax.legend_.get_title()
226
+ _check_text_labels(leg_title, "new")
227
+
228
+ @pytest.mark.parametrize(
229
+ "kind",
230
+ [
231
+ "line",
232
+ "bar",
233
+ "barh",
234
+ pytest.param("kde", marks=td.skip_if_no("scipy")),
235
+ "area",
236
+ "hist",
237
+ ],
238
+ )
239
+ def test_no_legend(self, kind):
240
+ df = DataFrame(np.random.default_rng(2).random((3, 3)), columns=["a", "b", "c"])
241
+ ax = df.plot(kind=kind, legend=False)
242
+ _check_legend_labels(ax, visible=False)
243
+
244
+ def test_missing_markers_legend(self):
245
+ # 14958
246
+ df = DataFrame(
247
+ np.random.default_rng(2).standard_normal((8, 3)), columns=["A", "B", "C"]
248
+ )
249
+ ax = df.plot(y=["A"], marker="x", linestyle="solid")
250
+ df.plot(y=["B"], marker="o", linestyle="dotted", ax=ax)
251
+ df.plot(y=["C"], marker="<", linestyle="dotted", ax=ax)
252
+
253
+ _check_legend_labels(ax, labels=["A", "B", "C"])
254
+ _check_legend_marker(ax, expected_markers=["x", "o", "<"])
255
+
256
+ def test_missing_markers_legend_using_style(self):
257
+ # 14563
258
+ df = DataFrame(
259
+ {
260
+ "A": [1, 2, 3, 4, 5, 6],
261
+ "B": [2, 4, 1, 3, 2, 4],
262
+ "C": [3, 3, 2, 6, 4, 2],
263
+ "X": [1, 2, 3, 4, 5, 6],
264
+ }
265
+ )
266
+
267
+ _, ax = mpl.pyplot.subplots()
268
+ for kind in "ABC":
269
+ df.plot("X", kind, label=kind, ax=ax, style=".")
270
+
271
+ _check_legend_labels(ax, labels=["A", "B", "C"])
272
+ _check_legend_marker(ax, expected_markers=[".", ".", "."])
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_frame_subplots.py ADDED
@@ -0,0 +1,752 @@
1
+ """ Test cases for DataFrame.plot """
2
+
3
+ import string
4
+
5
+ import numpy as np
6
+ import pytest
7
+
8
+ from pandas.compat import is_platform_linux
9
+ from pandas.compat.numpy import np_version_gte1p24
10
+
11
+ import pandas as pd
12
+ from pandas import (
13
+ DataFrame,
14
+ Series,
15
+ date_range,
16
+ )
17
+ import pandas._testing as tm
18
+ from pandas.tests.plotting.common import (
19
+ _check_axes_shape,
20
+ _check_box_return_type,
21
+ _check_legend_labels,
22
+ _check_ticks_props,
23
+ _check_visible,
24
+ _flatten_visible,
25
+ )
26
+
27
+ from pandas.io.formats.printing import pprint_thing
28
+
29
+ mpl = pytest.importorskip("matplotlib")
30
+ plt = pytest.importorskip("matplotlib.pyplot")
31
+
32
+
33
+ class TestDataFramePlotsSubplots:
34
+ @pytest.mark.slow
35
+ @pytest.mark.parametrize("kind", ["bar", "barh", "line", "area"])
36
+ def test_subplots(self, kind):
37
+ df = DataFrame(
38
+ np.random.default_rng(2).random((10, 3)),
39
+ index=list(string.ascii_letters[:10]),
40
+ )
41
+
42
+ axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
43
+ _check_axes_shape(axes, axes_num=3, layout=(3, 1))
44
+ assert axes.shape == (3,)
45
+
46
+ for ax, column in zip(axes, df.columns):
47
+ _check_legend_labels(ax, labels=[pprint_thing(column)])
48
+
49
+ for ax in axes[:-2]:
50
+ _check_visible(ax.xaxis) # xaxis must be visible for grid
51
+ _check_visible(ax.get_xticklabels(), visible=False)
52
+ if kind != "bar":
53
+ # change https://github.com/pandas-dev/pandas/issues/26714
54
+ _check_visible(ax.get_xticklabels(minor=True), visible=False)
55
+ _check_visible(ax.xaxis.get_label(), visible=False)
56
+ _check_visible(ax.get_yticklabels())
57
+
58
+ _check_visible(axes[-1].xaxis)
59
+ _check_visible(axes[-1].get_xticklabels())
60
+ _check_visible(axes[-1].get_xticklabels(minor=True))
61
+ _check_visible(axes[-1].xaxis.get_label())
62
+ _check_visible(axes[-1].get_yticklabels())
63
+
64
+ @pytest.mark.slow
65
+ @pytest.mark.parametrize("kind", ["bar", "barh", "line", "area"])
66
+ def test_subplots_no_share_x(self, kind):
67
+ df = DataFrame(
68
+ np.random.default_rng(2).random((10, 3)),
69
+ index=list(string.ascii_letters[:10]),
70
+ )
71
+ axes = df.plot(kind=kind, subplots=True, sharex=False)
72
+ for ax in axes:
73
+ _check_visible(ax.xaxis)
74
+ _check_visible(ax.get_xticklabels())
75
+ _check_visible(ax.get_xticklabels(minor=True))
76
+ _check_visible(ax.xaxis.get_label())
77
+ _check_visible(ax.get_yticklabels())
78
+
79
+ @pytest.mark.slow
80
+ @pytest.mark.parametrize("kind", ["bar", "barh", "line", "area"])
81
+ def test_subplots_no_legend(self, kind):
82
+ df = DataFrame(
83
+ np.random.default_rng(2).random((10, 3)),
84
+ index=list(string.ascii_letters[:10]),
85
+ )
86
+ axes = df.plot(kind=kind, subplots=True, legend=False)
87
+ for ax in axes:
88
+ assert ax.get_legend() is None
89
+
90
+ @pytest.mark.parametrize("kind", ["line", "area"])
91
+ def test_subplots_timeseries(self, kind):
92
+ idx = date_range(start="2014-07-01", freq="ME", periods=10)
93
+ df = DataFrame(np.random.default_rng(2).random((10, 3)), index=idx)
94
+
95
+ axes = df.plot(kind=kind, subplots=True, sharex=True)
96
+ _check_axes_shape(axes, axes_num=3, layout=(3, 1))
97
+
98
+ for ax in axes[:-2]:
99
+ # GH 7801
100
+ _check_visible(ax.xaxis) # xaxis must be visible for grid
101
+ _check_visible(ax.get_xticklabels(), visible=False)
102
+ _check_visible(ax.get_xticklabels(minor=True), visible=False)
103
+ _check_visible(ax.xaxis.get_label(), visible=False)
104
+ _check_visible(ax.get_yticklabels())
105
+
106
+ _check_visible(axes[-1].xaxis)
107
+ _check_visible(axes[-1].get_xticklabels())
108
+ _check_visible(axes[-1].get_xticklabels(minor=True))
109
+ _check_visible(axes[-1].xaxis.get_label())
110
+ _check_visible(axes[-1].get_yticklabels())
111
+ _check_ticks_props(axes, xrot=0)
112
+
113
+ @pytest.mark.parametrize("kind", ["line", "area"])
114
+ def test_subplots_timeseries_rot(self, kind):
115
+ idx = date_range(start="2014-07-01", freq="ME", periods=10)
116
+ df = DataFrame(np.random.default_rng(2).random((10, 3)), index=idx)
117
+ axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
118
+ for ax in axes:
119
+ _check_visible(ax.xaxis)
120
+ _check_visible(ax.get_xticklabels())
121
+ _check_visible(ax.get_xticklabels(minor=True))
122
+ _check_visible(ax.xaxis.get_label())
123
+ _check_visible(ax.get_yticklabels())
124
+ _check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
125
+
126
+ @pytest.mark.parametrize(
127
+ "col", ["numeric", "timedelta", "datetime_no_tz", "datetime_all_tz"]
128
+ )
129
+ def test_subplots_timeseries_y_axis(self, col):
130
+ # GH16953
131
+ data = {
132
+ "numeric": np.array([1, 2, 5]),
133
+ "timedelta": [
134
+ pd.Timedelta(-10, unit="s"),
135
+ pd.Timedelta(10, unit="m"),
136
+ pd.Timedelta(10, unit="h"),
137
+ ],
138
+ "datetime_no_tz": [
139
+ pd.to_datetime("2017-08-01 00:00:00"),
140
+ pd.to_datetime("2017-08-01 02:00:00"),
141
+ pd.to_datetime("2017-08-02 00:00:00"),
142
+ ],
143
+ "datetime_all_tz": [
144
+ pd.to_datetime("2017-08-01 00:00:00", utc=True),
145
+ pd.to_datetime("2017-08-01 02:00:00", utc=True),
146
+ pd.to_datetime("2017-08-02 00:00:00", utc=True),
147
+ ],
148
+ "text": ["This", "should", "fail"],
149
+ }
150
+ testdata = DataFrame(data)
151
+
152
+ ax = testdata.plot(y=col)
153
+ result = ax.get_lines()[0].get_data()[1]
154
+ expected = testdata[col].values
155
+ assert (result == expected).all()
156
+
157
+ def test_subplots_timeseries_y_text_error(self):
158
+ # GH16953
159
+ data = {
160
+ "numeric": np.array([1, 2, 5]),
161
+ "text": ["This", "should", "fail"],
162
+ }
163
+ testdata = DataFrame(data)
164
+ msg = "no numeric data to plot"
165
+ with pytest.raises(TypeError, match=msg):
166
+ testdata.plot(y="text")
167
+
168
+ @pytest.mark.xfail(reason="not supported for period, categorical, datetime_mixed_tz")
169
+ def test_subplots_timeseries_y_axis_not_supported(self):
170
+ """
171
+ This test will fail for:
172
+ period:
173
+ since period isn't yet implemented in ``select_dtypes``
174
+ and because it will need a custom value converter +
175
+ tick formatter (as was done for x-axis plots)
176
+
177
+ categorical:
178
+ because it will need a custom value converter +
179
+ tick formatter (also doesn't work for x-axis, as of now)
180
+
181
+ datetime_mixed_tz:
182
+ because of the way pandas handles a ``Series`` of
+ ``datetime`` objects with different timezones;
+ generally, converting the ``datetime`` objects to a tz-aware
+ form could help with this problem
186
+ """
187
+ data = {
188
+ "numeric": np.array([1, 2, 5]),
189
+ "period": [
190
+ pd.Period("2017-08-01 00:00:00", freq="H"),
191
+ pd.Period("2017-08-01 02:00", freq="H"),
192
+ pd.Period("2017-08-02 00:00:00", freq="H"),
193
+ ],
194
+ "categorical": pd.Categorical(
195
+ ["c", "b", "a"], categories=["a", "b", "c"], ordered=False
196
+ ),
197
+ "datetime_mixed_tz": [
198
+ pd.to_datetime("2017-08-01 00:00:00", utc=True),
199
+ pd.to_datetime("2017-08-01 02:00:00"),
200
+ pd.to_datetime("2017-08-02 00:00:00"),
201
+ ],
202
+ }
203
+ testdata = DataFrame(data)
204
+ ax_period = testdata.plot(x="numeric", y="period")
205
+ assert (
206
+ ax_period.get_lines()[0].get_data()[1] == testdata["period"].values
207
+ ).all()
208
+ ax_categorical = testdata.plot(x="numeric", y="categorical")
209
+ assert (
210
+ ax_categorical.get_lines()[0].get_data()[1]
211
+ == testdata["categorical"].values
212
+ ).all()
213
+ ax_datetime_mixed_tz = testdata.plot(x="numeric", y="datetime_mixed_tz")
214
+ assert (
215
+ ax_datetime_mixed_tz.get_lines()[0].get_data()[1]
216
+ == testdata["datetime_mixed_tz"].values
217
+ ).all()
218
+
219
+ @pytest.mark.parametrize(
220
+ "layout, exp_layout",
221
+ [
222
+ [(2, 2), (2, 2)],
223
+ [(-1, 2), (2, 2)],
224
+ [(2, -1), (2, 2)],
225
+ [(1, 4), (1, 4)],
226
+ [(-1, 4), (1, 4)],
227
+ [(4, -1), (4, 1)],
228
+ ],
229
+ )
230
+ def test_subplots_layout_multi_column(self, layout, exp_layout):
231
+ # GH 6667
232
+ df = DataFrame(
233
+ np.random.default_rng(2).random((10, 3)),
234
+ index=list(string.ascii_letters[:10]),
235
+ )
236
+
237
+ axes = df.plot(subplots=True, layout=layout)
238
+ _check_axes_shape(axes, axes_num=3, layout=exp_layout)
239
+ assert axes.shape == exp_layout
240
+
241
+ def test_subplots_layout_multi_column_error(self):
242
+ # GH 6667
243
+ df = DataFrame(
244
+ np.random.default_rng(2).random((10, 3)),
245
+ index=list(string.ascii_letters[:10]),
246
+ )
247
+ msg = "Layout of 1x1 must be larger than required size 3"
248
+
249
+ with pytest.raises(ValueError, match=msg):
250
+ df.plot(subplots=True, layout=(1, 1))
251
+
252
+ msg = "At least one dimension of layout must be positive"
253
+ with pytest.raises(ValueError, match=msg):
254
+ df.plot(subplots=True, layout=(-1, -1))
255
+
256
+ @pytest.mark.parametrize(
257
+ "kwargs, expected_axes_num, expected_layout, expected_shape",
258
+ [
259
+ ({}, 1, (1, 1), (1,)),
260
+ ({"layout": (3, 3)}, 1, (3, 3), (3, 3)),
261
+ ],
262
+ )
263
+ def test_subplots_layout_single_column(
264
+ self, kwargs, expected_axes_num, expected_layout, expected_shape
265
+ ):
266
+ # GH 6667
267
+ df = DataFrame(
268
+ np.random.default_rng(2).random((10, 1)),
269
+ index=list(string.ascii_letters[:10]),
270
+ )
271
+ axes = df.plot(subplots=True, **kwargs)
272
+ _check_axes_shape(
273
+ axes,
274
+ axes_num=expected_axes_num,
275
+ layout=expected_layout,
276
+ )
277
+ assert axes.shape == expected_shape
278
+
279
+ @pytest.mark.slow
280
+ @pytest.mark.parametrize("idx", [range(5), date_range("1/1/2000", periods=5)])
281
+ def test_subplots_warnings(self, idx):
282
+ # GH 9464
283
+ with tm.assert_produces_warning(None):
284
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 4)), index=idx)
285
+ df.plot(subplots=True, layout=(3, 2))
286
+
287
+ def test_subplots_multiple_axes(self):
288
+ # GH 5353, 6970, GH 7069
289
+ fig, axes = mpl.pyplot.subplots(2, 3)
290
+ df = DataFrame(
291
+ np.random.default_rng(2).random((10, 3)),
292
+ index=list(string.ascii_letters[:10]),
293
+ )
294
+
295
+ returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
296
+ _check_axes_shape(returned, axes_num=3, layout=(1, 3))
297
+ assert returned.shape == (3,)
298
+ assert returned[0].figure is fig
299
+ # draw on second row
300
+ returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
301
+ _check_axes_shape(returned, axes_num=3, layout=(1, 3))
302
+ assert returned.shape == (3,)
303
+ assert returned[0].figure is fig
304
+ _check_axes_shape(axes, axes_num=6, layout=(2, 3))
305
+
306
+ def test_subplots_multiple_axes_error(self):
307
+ # GH 5353, 6970, GH 7069
308
+ df = DataFrame(
309
+ np.random.default_rng(2).random((10, 3)),
310
+ index=list(string.ascii_letters[:10]),
311
+ )
312
+ msg = "The number of passed axes must be 3, the same as the output plot"
313
+ _, axes = mpl.pyplot.subplots(2, 3)
314
+
315
+ with pytest.raises(ValueError, match=msg):
316
+ # pass different number of axes from required
317
+ df.plot(subplots=True, ax=axes)
318
+
319
+ @pytest.mark.parametrize(
320
+ "layout, exp_layout",
321
+ [
322
+ [(2, 1), (2, 2)],
323
+ [(2, -1), (2, 2)],
324
+ [(-1, 2), (2, 2)],
325
+ ],
326
+ )
327
+ def test_subplots_multiple_axes_2_dim(self, layout, exp_layout):
328
+ # GH 5353, 6970, GH 7069
329
+ # pass 2-dim axes and invalid layout
330
+ # invalid lauout should not affect to input and return value
331
+ # (show warning is tested in
332
+ # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes
333
+ _, axes = mpl.pyplot.subplots(2, 2)
334
+ df = DataFrame(
335
+ np.random.default_rng(2).random((10, 4)),
336
+ index=list(string.ascii_letters[:10]),
337
+ )
338
+ with tm.assert_produces_warning(UserWarning):
339
+ returned = df.plot(
340
+ subplots=True, ax=axes, layout=layout, sharex=False, sharey=False
341
+ )
342
+ _check_axes_shape(returned, axes_num=4, layout=exp_layout)
343
+ assert returned.shape == (4,)
344
+
345
+ def test_subplots_multiple_axes_single_col(self):
346
+ # GH 5353, 6970, GH 7069
347
+ # single column
348
+ _, axes = mpl.pyplot.subplots(1, 1)
349
+ df = DataFrame(
350
+ np.random.default_rng(2).random((10, 1)),
351
+ index=list(string.ascii_letters[:10]),
352
+ )
353
+
354
+ axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
355
+ _check_axes_shape(axes, axes_num=1, layout=(1, 1))
356
+ assert axes.shape == (1,)
357
+
358
+ def test_subplots_ts_share_axes(self):
359
+ # GH 3964
360
+ _, axes = mpl.pyplot.subplots(3, 3, sharex=True, sharey=True)
361
+ mpl.pyplot.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
362
+ df = DataFrame(
363
+ np.random.default_rng(2).standard_normal((10, 9)),
364
+ index=date_range(start="2014-07-01", freq="ME", periods=10),
365
+ )
366
+ for i, ax in enumerate(axes.ravel()):
367
+ df[i].plot(ax=ax, fontsize=5)
368
+
369
+ # Rows other than bottom should not be visible
370
+ for ax in axes[0:-1].ravel():
371
+ _check_visible(ax.get_xticklabels(), visible=False)
372
+
373
+ # Bottom row should be visible
374
+ for ax in axes[-1].ravel():
375
+ _check_visible(ax.get_xticklabels(), visible=True)
376
+
377
+ # First column should be visible
378
+ for ax in axes[[0, 1, 2], [0]].ravel():
379
+ _check_visible(ax.get_yticklabels(), visible=True)
380
+
381
+ # Other columns should not be visible
382
+ for ax in axes[[0, 1, 2], [1]].ravel():
383
+ _check_visible(ax.get_yticklabels(), visible=False)
384
+ for ax in axes[[0, 1, 2], [2]].ravel():
385
+ _check_visible(ax.get_yticklabels(), visible=False)
386
+
387
+ def test_subplots_sharex_axes_existing_axes(self):
388
+ # GH 9158
389
+ d = {"A": [1.0, 2.0, 3.0, 4.0], "B": [4.0, 3.0, 2.0, 1.0], "C": [5, 1, 3, 4]}
390
+ df = DataFrame(d, index=date_range("2014 10 11", "2014 10 14"))
391
+
392
+ axes = df[["A", "B"]].plot(subplots=True)
393
+ df["C"].plot(ax=axes[0], secondary_y=True)
394
+
395
+ _check_visible(axes[0].get_xticklabels(), visible=False)
396
+ _check_visible(axes[1].get_xticklabels(), visible=True)
397
+ for ax in axes.ravel():
398
+ _check_visible(ax.get_yticklabels(), visible=True)
399
+
400
+ def test_subplots_dup_columns(self):
401
+ # GH 10962
402
+ df = DataFrame(np.random.default_rng(2).random((5, 5)), columns=list("aaaaa"))
403
+ axes = df.plot(subplots=True)
404
+ for ax in axes:
405
+ _check_legend_labels(ax, labels=["a"])
406
+ assert len(ax.lines) == 1
407
+
408
+ def test_subplots_dup_columns_secondary_y(self):
409
+ # GH 10962
410
+ df = DataFrame(np.random.default_rng(2).random((5, 5)), columns=list("aaaaa"))
411
+ axes = df.plot(subplots=True, secondary_y="a")
412
+ for ax in axes:
413
+ # (right) is only attached when subplots=False
414
+ _check_legend_labels(ax, labels=["a"])
415
+ assert len(ax.lines) == 1
416
+
417
+ def test_subplots_dup_columns_secondary_y_no_subplot(self):
418
+ # GH 10962
419
+ df = DataFrame(np.random.default_rng(2).random((5, 5)), columns=list("aaaaa"))
420
+ ax = df.plot(secondary_y="a")
421
+ _check_legend_labels(ax, labels=["a (right)"] * 5)
422
+ assert len(ax.lines) == 0
423
+ assert len(ax.right_ax.lines) == 5
424
+
425
+ @pytest.mark.xfail(
426
+ np_version_gte1p24 and is_platform_linux(),
427
+ reason="Weird rounding problems",
428
+ strict=False,
429
+ )
430
+ def test_bar_log_no_subplots(self):
431
+ # GH3254, GH3298 matplotlib/matplotlib#1882, #1892
432
+ # regressions in 1.2.1
433
+ expected = np.array([0.1, 1.0, 10.0, 100])
434
+
435
+ # no subplots
436
+ df = DataFrame({"A": [3] * 5, "B": list(range(1, 6))}, index=range(5))
437
+ ax = df.plot.bar(grid=True, log=True)
438
+ tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
439
+
440
+ @pytest.mark.xfail(
441
+ np_version_gte1p24 and is_platform_linux(),
442
+ reason="Weird rounding problems",
443
+ strict=False,
444
+ )
445
+ def test_bar_log_subplots(self):
446
+ expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4])
447
+
448
+ ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
449
+ log=True, subplots=True
450
+ )
451
+
452
+ tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
453
+ tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
454
+
455
+ def test_boxplot_subplots_return_type_default(self, hist_df):
456
+ df = hist_df
457
+
458
+ # normal style: return_type=None
459
+ result = df.plot.box(subplots=True)
460
+ assert isinstance(result, Series)
461
+ _check_box_return_type(
462
+ result, None, expected_keys=["height", "weight", "category"]
463
+ )
464
+
465
+ @pytest.mark.parametrize("rt", ["dict", "axes", "both"])
466
+ def test_boxplot_subplots_return_type(self, hist_df, rt):
467
+ df = hist_df
468
+ returned = df.plot.box(return_type=rt, subplots=True)
469
+ _check_box_return_type(
470
+ returned,
471
+ rt,
472
+ expected_keys=["height", "weight", "category"],
473
+ check_ax_title=False,
474
+ )
475
+
476
+ def test_df_subplots_patterns_minorticks(self):
477
+ # GH 10657
478
+ df = DataFrame(
479
+ np.random.default_rng(2).standard_normal((10, 2)),
480
+ index=date_range("1/1/2000", periods=10),
481
+ columns=list("AB"),
482
+ )
483
+
484
+ # shared subplots
485
+ _, axes = plt.subplots(2, 1, sharex=True)
486
+ axes = df.plot(subplots=True, ax=axes)
487
+ for ax in axes:
488
+ assert len(ax.lines) == 1
489
+ _check_visible(ax.get_yticklabels(), visible=True)
490
+ # xaxis of 1st ax must be hidden
491
+ _check_visible(axes[0].get_xticklabels(), visible=False)
492
+ _check_visible(axes[0].get_xticklabels(minor=True), visible=False)
493
+ _check_visible(axes[1].get_xticklabels(), visible=True)
494
+ _check_visible(axes[1].get_xticklabels(minor=True), visible=True)
495
+
496
+ def test_df_subplots_patterns_minorticks_1st_ax_hidden(self):
497
+ # GH 10657
498
+ df = DataFrame(
499
+ np.random.default_rng(2).standard_normal((10, 2)),
500
+ index=date_range("1/1/2000", periods=10),
501
+ columns=list("AB"),
502
+ )
503
+ _, axes = plt.subplots(2, 1)
504
+ with tm.assert_produces_warning(UserWarning):
505
+ axes = df.plot(subplots=True, ax=axes, sharex=True)
506
+ for ax in axes:
507
+ assert len(ax.lines) == 1
508
+ _check_visible(ax.get_yticklabels(), visible=True)
509
+ # xaxis of 1st ax must be hidden
510
+ _check_visible(axes[0].get_xticklabels(), visible=False)
511
+ _check_visible(axes[0].get_xticklabels(minor=True), visible=False)
512
+ _check_visible(axes[1].get_xticklabels(), visible=True)
513
+ _check_visible(axes[1].get_xticklabels(minor=True), visible=True)
514
+
515
+ def test_df_subplots_patterns_minorticks_not_shared(self):
516
+ # GH 10657
517
+ df = DataFrame(
518
+ np.random.default_rng(2).standard_normal((10, 2)),
519
+ index=date_range("1/1/2000", periods=10),
520
+ columns=list("AB"),
521
+ )
522
+ # not shared
523
+ _, axes = plt.subplots(2, 1)
524
+ axes = df.plot(subplots=True, ax=axes)
525
+ for ax in axes:
526
+ assert len(ax.lines) == 1
527
+ _check_visible(ax.get_yticklabels(), visible=True)
528
+ _check_visible(ax.get_xticklabels(), visible=True)
529
+ _check_visible(ax.get_xticklabels(minor=True), visible=True)
530
+
531
+ def test_subplots_sharex_false(self):
532
+ # test when sharex is set to False, two plots should have different
533
+ # labels, GH 25160
534
+ df = DataFrame(np.random.default_rng(2).random((10, 2)))
535
+ df.iloc[5:, 1] = np.nan
536
+ df.iloc[:5, 0] = np.nan
537
+
538
+ _, axs = mpl.pyplot.subplots(2, 1)
539
+ df.plot.line(ax=axs, subplots=True, sharex=False)
540
+
541
+ expected_ax1 = np.arange(4.5, 10, 0.5)
542
+ expected_ax2 = np.arange(-0.5, 5, 0.5)
543
+
544
+ tm.assert_numpy_array_equal(axs[0].get_xticks(), expected_ax1)
545
+ tm.assert_numpy_array_equal(axs[1].get_xticks(), expected_ax2)
546
+
547
+ def test_subplots_constrained_layout(self):
548
+ # GH 25261
549
+ idx = date_range(start="now", periods=10)
550
+ df = DataFrame(np.random.default_rng(2).random((10, 3)), index=idx)
551
+ kwargs = {}
552
+ if hasattr(mpl.pyplot.Figure, "get_constrained_layout"):
553
+ kwargs["constrained_layout"] = True
554
+ _, axes = mpl.pyplot.subplots(2, **kwargs)
555
+ with tm.assert_produces_warning(None):
556
+ df.plot(ax=axes[0])
557
+ with tm.ensure_clean(return_filelike=True) as path:
558
+ mpl.pyplot.savefig(path)
559
+
560
+ @pytest.mark.parametrize(
561
+ "index_name, old_label, new_label",
562
+ [
563
+ (None, "", "new"),
564
+ ("old", "old", "new"),
565
+ (None, "", ""),
566
+ (None, "", 1),
567
+ (None, "", [1, 2]),
568
+ ],
569
+ )
570
+ @pytest.mark.parametrize("kind", ["line", "area", "bar"])
571
+ def test_xlabel_ylabel_dataframe_subplots(
572
+ self, kind, index_name, old_label, new_label
573
+ ):
574
+ # GH 9093
575
+ df = DataFrame([[1, 2], [2, 5]], columns=["Type A", "Type B"])
576
+ df.index.name = index_name
577
+
578
+ # default is the ylabel is not shown and xlabel is index name
579
+ axes = df.plot(kind=kind, subplots=True)
580
+ assert all(ax.get_ylabel() == "" for ax in axes)
581
+ assert all(ax.get_xlabel() == old_label for ax in axes)
582
+
583
+ # old xlabel will be overridden and assigned ylabel will be used as ylabel
584
+ axes = df.plot(kind=kind, ylabel=new_label, xlabel=new_label, subplots=True)
585
+ assert all(ax.get_ylabel() == str(new_label) for ax in axes)
586
+ assert all(ax.get_xlabel() == str(new_label) for ax in axes)
587
+
588
+ @pytest.mark.parametrize(
589
+ "kwargs",
590
+ [
591
+ # stacked center
592
+ {"kind": "bar", "stacked": True},
593
+ {"kind": "bar", "stacked": True, "width": 0.9},
594
+ {"kind": "barh", "stacked": True},
595
+ {"kind": "barh", "stacked": True, "width": 0.9},
596
+ # center
597
+ {"kind": "bar", "stacked": False},
598
+ {"kind": "bar", "stacked": False, "width": 0.9},
599
+ {"kind": "barh", "stacked": False},
600
+ {"kind": "barh", "stacked": False, "width": 0.9},
601
+ # subplots center
602
+ {"kind": "bar", "subplots": True},
603
+ {"kind": "bar", "subplots": True, "width": 0.9},
604
+ {"kind": "barh", "subplots": True},
605
+ {"kind": "barh", "subplots": True, "width": 0.9},
606
+ # align edge
607
+ {"kind": "bar", "stacked": True, "align": "edge"},
608
+ {"kind": "bar", "stacked": True, "width": 0.9, "align": "edge"},
609
+ {"kind": "barh", "stacked": True, "align": "edge"},
610
+ {"kind": "barh", "stacked": True, "width": 0.9, "align": "edge"},
611
+ {"kind": "bar", "stacked": False, "align": "edge"},
612
+ {"kind": "bar", "stacked": False, "width": 0.9, "align": "edge"},
613
+ {"kind": "barh", "stacked": False, "align": "edge"},
614
+ {"kind": "barh", "stacked": False, "width": 0.9, "align": "edge"},
615
+ {"kind": "bar", "subplots": True, "align": "edge"},
616
+ {"kind": "bar", "subplots": True, "width": 0.9, "align": "edge"},
617
+ {"kind": "barh", "subplots": True, "align": "edge"},
618
+ {"kind": "barh", "subplots": True, "width": 0.9, "align": "edge"},
619
+ ],
620
+ )
621
+ def test_bar_align_multiple_columns(self, kwargs):
622
+ # GH2157
623
+ df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
624
+ self._check_bar_alignment(df, **kwargs)
625
+
626
+ @pytest.mark.parametrize(
627
+ "kwargs",
628
+ [
629
+ {"kind": "bar", "stacked": False},
630
+ {"kind": "bar", "stacked": True},
631
+ {"kind": "barh", "stacked": False},
632
+ {"kind": "barh", "stacked": True},
633
+ {"kind": "bar", "subplots": True},
634
+ {"kind": "barh", "subplots": True},
635
+ ],
636
+ )
637
+ def test_bar_align_single_column(self, kwargs):
638
+ df = DataFrame(np.random.default_rng(2).standard_normal(5))
639
+ self._check_bar_alignment(df, **kwargs)
640
+
641
+ @pytest.mark.parametrize(
642
+ "kwargs",
643
+ [
644
+ {"kind": "bar", "stacked": False},
645
+ {"kind": "bar", "stacked": True},
646
+ {"kind": "barh", "stacked": False},
647
+ {"kind": "barh", "stacked": True},
648
+ {"kind": "bar", "subplots": True},
649
+ {"kind": "barh", "subplots": True},
650
+ ],
651
+ )
652
+ def test_bar_barwidth_position(self, kwargs):
653
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
654
+ self._check_bar_alignment(df, width=0.9, position=0.2, **kwargs)
655
+
656
+ @pytest.mark.parametrize("w", [1, 1.0])
657
+ def test_bar_barwidth_position_int(self, w):
658
+ # GH 12979
659
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
660
+ ax = df.plot.bar(stacked=True, width=w)
661
+ ticks = ax.xaxis.get_ticklocs()
662
+ tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))
663
+ assert ax.get_xlim() == (-0.75, 4.75)
664
+ # check left-edge of bars
665
+ assert ax.patches[0].get_x() == -0.5
666
+ assert ax.patches[-1].get_x() == 3.5
667
+
668
+ @pytest.mark.parametrize(
669
+ "kind, kwargs",
670
+ [
671
+ ["bar", {"stacked": True}],
672
+ ["barh", {"stacked": False}],
673
+ ["barh", {"stacked": True}],
674
+ ["bar", {"subplots": True}],
675
+ ["barh", {"subplots": True}],
676
+ ],
677
+ )
678
+ def test_bar_barwidth_position_int_width_1(self, kind, kwargs):
679
+ # GH 12979
680
+ df = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
681
+ self._check_bar_alignment(df, kind=kind, width=1, **kwargs)
682
+
683
+ def _check_bar_alignment(
684
+ self,
685
+ df,
686
+ kind="bar",
687
+ stacked=False,
688
+ subplots=False,
689
+ align="center",
690
+ width=0.5,
691
+ position=0.5,
692
+ ):
693
+ axes = df.plot(
694
+ kind=kind,
695
+ stacked=stacked,
696
+ subplots=subplots,
697
+ align=align,
698
+ width=width,
699
+ position=position,
700
+ grid=True,
701
+ )
702
+
703
+ axes = _flatten_visible(axes)
704
+
705
+ for ax in axes:
706
+ if kind == "bar":
707
+ axis = ax.xaxis
708
+ ax_min, ax_max = ax.get_xlim()
709
+ min_edge = min(p.get_x() for p in ax.patches)
710
+ max_edge = max(p.get_x() + p.get_width() for p in ax.patches)
711
+ elif kind == "barh":
712
+ axis = ax.yaxis
713
+ ax_min, ax_max = ax.get_ylim()
714
+ min_edge = min(p.get_y() for p in ax.patches)
715
+ max_edge = max(p.get_y() + p.get_height() for p in ax.patches)
716
+ else:
717
+ raise ValueError
718
+
719
+ # GH 7498
720
+ # compare margins between lim and bar edges
721
+ tm.assert_almost_equal(ax_min, min_edge - 0.25)
722
+ tm.assert_almost_equal(ax_max, max_edge + 0.25)
723
+
724
+ p = ax.patches[0]
725
+ if kind == "bar" and (stacked is True or subplots is True):
726
+ edge = p.get_x()
727
+ center = edge + p.get_width() * position
728
+ elif kind == "bar" and stacked is False:
729
+ center = p.get_x() + p.get_width() * len(df.columns) * position
730
+ edge = p.get_x()
731
+ elif kind == "barh" and (stacked is True or subplots is True):
732
+ center = p.get_y() + p.get_height() * position
733
+ edge = p.get_y()
734
+ elif kind == "barh" and stacked is False:
735
+ center = p.get_y() + p.get_height() * len(df.columns) * position
736
+ edge = p.get_y()
737
+ else:
738
+ raise ValueError
739
+
740
+ # Check the ticks locates on integer
741
+ assert (axis.get_ticklocs() == np.arange(len(df))).all()
742
+
743
+ if align == "center":
744
+ # Check whether the bar locates on center
745
+ tm.assert_almost_equal(axis.get_ticklocs()[0], center)
746
+ elif align == "edge":
747
+ # Check whether the bar's edge starts from the tick
748
+ tm.assert_almost_equal(axis.get_ticklocs()[0], edge)
749
+ else:
750
+ raise ValueError
751
+
752
+ return axes
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/frame/test_hist_box_by.py ADDED
@@ -0,0 +1,342 @@
1
+ import re
2
+
3
+ import numpy as np
4
+ import pytest
5
+
6
+ from pandas import DataFrame
7
+ import pandas._testing as tm
8
+ from pandas.tests.plotting.common import (
9
+ _check_axes_shape,
10
+ _check_plot_works,
11
+ get_x_axis,
12
+ get_y_axis,
13
+ )
14
+
15
+ pytest.importorskip("matplotlib")
16
+
17
+
18
+ @pytest.fixture
19
+ def hist_df():
20
+ df = DataFrame(
21
+ np.random.default_rng(2).standard_normal((30, 2)), columns=["A", "B"]
22
+ )
23
+ df["C"] = np.random.default_rng(2).choice(["a", "b", "c"], 30)
24
+ df["D"] = np.random.default_rng(2).choice(["a", "b", "c"], 30)
25
+ return df
26
+
27
+
28
+ class TestHistWithBy:
29
+ @pytest.mark.slow
30
+ @pytest.mark.parametrize(
31
+ "by, column, titles, legends",
32
+ [
33
+ ("C", "A", ["a", "b", "c"], [["A"]] * 3),
34
+ ("C", ["A", "B"], ["a", "b", "c"], [["A", "B"]] * 3),
35
+ ("C", None, ["a", "b", "c"], [["A", "B"]] * 3),
36
+ (
37
+ ["C", "D"],
38
+ "A",
39
+ [
40
+ "(a, a)",
41
+ "(b, b)",
42
+ "(c, c)",
43
+ ],
44
+ [["A"]] * 3,
45
+ ),
46
+ (
47
+ ["C", "D"],
48
+ ["A", "B"],
49
+ [
50
+ "(a, a)",
51
+ "(b, b)",
52
+ "(c, c)",
53
+ ],
54
+ [["A", "B"]] * 3,
55
+ ),
56
+ (
57
+ ["C", "D"],
58
+ None,
59
+ [
60
+ "(a, a)",
61
+ "(b, b)",
62
+ "(c, c)",
63
+ ],
64
+ [["A", "B"]] * 3,
65
+ ),
66
+ ],
67
+ )
68
+ def test_hist_plot_by_argument(self, by, column, titles, legends, hist_df):
69
+ # GH 15079
70
+ axes = _check_plot_works(
71
+ hist_df.plot.hist, column=column, by=by, default_axes=True
72
+ )
73
+ result_titles = [ax.get_title() for ax in axes]
74
+ result_legends = [
75
+ [legend.get_text() for legend in ax.get_legend().texts] for ax in axes
76
+ ]
77
+
78
+ assert result_legends == legends
79
+ assert result_titles == titles
80
+
81
+ @pytest.mark.parametrize(
82
+ "by, column, titles, legends",
83
+ [
84
+ (0, "A", ["a", "b", "c"], [["A"]] * 3),
85
+ (0, None, ["a", "b", "c"], [["A", "B"]] * 3),
86
+ (
87
+ [0, "D"],
88
+ "A",
89
+ [
90
+ "(a, a)",
91
+ "(b, b)",
92
+ "(c, c)",
93
+ ],
94
+ [["A"]] * 3,
95
+ ),
96
+ ],
97
+ )
98
+ def test_hist_plot_by_0(self, by, column, titles, legends, hist_df):
99
+ # GH 15079
100
+ df = hist_df.copy()
101
+ df = df.rename(columns={"C": 0})
102
+
103
+ axes = _check_plot_works(df.plot.hist, default_axes=True, column=column, by=by)
104
+ result_titles = [ax.get_title() for ax in axes]
105
+ result_legends = [
106
+ [legend.get_text() for legend in ax.get_legend().texts] for ax in axes
107
+ ]
108
+
109
+ assert result_legends == legends
110
+ assert result_titles == titles
111
+
112
+ @pytest.mark.parametrize(
113
+ "by, column",
114
+ [
115
+ ([], ["A"]),
116
+ ([], ["A", "B"]),
117
+ ((), None),
118
+ ((), ["A", "B"]),
119
+ ],
120
+ )
121
+ def test_hist_plot_empty_list_string_tuple_by(self, by, column, hist_df):
122
+ # GH 15079
123
+ msg = "No group keys passed"
124
+ with pytest.raises(ValueError, match=msg):
125
+ _check_plot_works(
126
+ hist_df.plot.hist, default_axes=True, column=column, by=by
127
+ )
128
+
129
+ @pytest.mark.slow
130
+ @pytest.mark.parametrize(
131
+ "by, column, layout, axes_num",
132
+ [
133
+ (["C"], "A", (2, 2), 3),
134
+ ("C", "A", (2, 2), 3),
135
+ (["C"], ["A"], (1, 3), 3),
136
+ ("C", None, (3, 1), 3),
137
+ ("C", ["A", "B"], (3, 1), 3),
138
+ (["C", "D"], "A", (9, 1), 3),
139
+ (["C", "D"], "A", (3, 3), 3),
140
+ (["C", "D"], ["A"], (5, 2), 3),
141
+ (["C", "D"], ["A", "B"], (9, 1), 3),
142
+ (["C", "D"], None, (9, 1), 3),
143
+ (["C", "D"], ["A", "B"], (5, 2), 3),
144
+ ],
145
+ )
146
+ def test_hist_plot_layout_with_by(self, by, column, layout, axes_num, hist_df):
147
+ # GH 15079
148
+ # _check_plot_works adds an ax, so catch the warning; see GH #13188
149
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
150
+ axes = _check_plot_works(
151
+ hist_df.plot.hist, column=column, by=by, layout=layout
152
+ )
153
+ _check_axes_shape(axes, axes_num=axes_num, layout=layout)
154
+
155
+ @pytest.mark.parametrize(
156
+ "msg, by, layout",
157
+ [
158
+ ("larger than required size", ["C", "D"], (1, 1)),
159
+ (re.escape("Layout must be a tuple of (rows, columns)"), "C", (1,)),
160
+ ("At least one dimension of layout must be positive", "C", (-1, -1)),
161
+ ],
162
+ )
163
+ def test_hist_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df):
164
+ # GH 15079, test if error is raised when invalid layout is given
165
+
166
+ with pytest.raises(ValueError, match=msg):
167
+ hist_df.plot.hist(column=["A", "B"], by=by, layout=layout)
168
+
169
+ @pytest.mark.slow
170
+ def test_axis_share_x_with_by(self, hist_df):
171
+ # GH 15079
172
+ ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharex=True)
173
+
174
+ # share x
175
+ assert get_x_axis(ax1).joined(ax1, ax2)
176
+ assert get_x_axis(ax2).joined(ax1, ax2)
177
+ assert get_x_axis(ax3).joined(ax1, ax3)
178
+ assert get_x_axis(ax3).joined(ax2, ax3)
179
+
180
+ # don't share y
181
+ assert not get_y_axis(ax1).joined(ax1, ax2)
182
+ assert not get_y_axis(ax2).joined(ax1, ax2)
183
+ assert not get_y_axis(ax3).joined(ax1, ax3)
184
+ assert not get_y_axis(ax3).joined(ax2, ax3)
185
+
186
+ @pytest.mark.slow
187
+ def test_axis_share_y_with_by(self, hist_df):
188
+ # GH 15079
189
+ ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharey=True)
190
+
191
+ # share y
192
+ assert get_y_axis(ax1).joined(ax1, ax2)
193
+ assert get_y_axis(ax2).joined(ax1, ax2)
194
+ assert get_y_axis(ax3).joined(ax1, ax3)
195
+ assert get_y_axis(ax3).joined(ax2, ax3)
196
+
197
+ # don't share x
198
+ assert not get_x_axis(ax1).joined(ax1, ax2)
199
+ assert not get_x_axis(ax2).joined(ax1, ax2)
200
+ assert not get_x_axis(ax3).joined(ax1, ax3)
201
+ assert not get_x_axis(ax3).joined(ax2, ax3)
202
+
203
+ @pytest.mark.parametrize("figsize", [(12, 8), (20, 10)])
204
+ def test_figure_shape_hist_with_by(self, figsize, hist_df):
205
+ # GH 15079
206
+ axes = hist_df.plot.hist(column="A", by="C", figsize=figsize)
207
+ _check_axes_shape(axes, axes_num=3, figsize=figsize)
208
+
209
+
210
+ class TestBoxWithBy:
211
+ @pytest.mark.parametrize(
212
+ "by, column, titles, xticklabels",
213
+ [
214
+ ("C", "A", ["A"], [["a", "b", "c"]]),
215
+ (
216
+ ["C", "D"],
217
+ "A",
218
+ ["A"],
219
+ [
220
+ [
221
+ "(a, a)",
222
+ "(b, b)",
223
+ "(c, c)",
224
+ ]
225
+ ],
226
+ ),
227
+ ("C", ["A", "B"], ["A", "B"], [["a", "b", "c"]] * 2),
228
+ (
229
+ ["C", "D"],
230
+ ["A", "B"],
231
+ ["A", "B"],
232
+ [
233
+ [
234
+ "(a, a)",
235
+ "(b, b)",
236
+ "(c, c)",
237
+ ]
238
+ ]
239
+ * 2,
240
+ ),
241
+ (["C"], None, ["A", "B"], [["a", "b", "c"]] * 2),
242
+ ],
243
+ )
244
+ def test_box_plot_by_argument(self, by, column, titles, xticklabels, hist_df):
245
+ # GH 15079
246
+ axes = _check_plot_works(
247
+ hist_df.plot.box, default_axes=True, column=column, by=by
248
+ )
249
+ result_titles = [ax.get_title() for ax in axes]
250
+ result_xticklabels = [
251
+ [label.get_text() for label in ax.get_xticklabels()] for ax in axes
252
+ ]
253
+
254
+ assert result_xticklabels == xticklabels
255
+ assert result_titles == titles
256
+
257
+ @pytest.mark.parametrize(
258
+ "by, column, titles, xticklabels",
259
+ [
260
+ (0, "A", ["A"], [["a", "b", "c"]]),
261
+ (
262
+ [0, "D"],
263
+ "A",
264
+ ["A"],
265
+ [
266
+ [
267
+ "(a, a)",
268
+ "(b, b)",
269
+ "(c, c)",
270
+ ]
271
+ ],
272
+ ),
273
+ (0, None, ["A", "B"], [["a", "b", "c"]] * 2),
274
+ ],
275
+ )
276
+ def test_box_plot_by_0(self, by, column, titles, xticklabels, hist_df):
277
+ # GH 15079
278
+ df = hist_df.copy()
279
+ df = df.rename(columns={"C": 0})
280
+
281
+ axes = _check_plot_works(df.plot.box, default_axes=True, column=column, by=by)
282
+ result_titles = [ax.get_title() for ax in axes]
283
+ result_xticklabels = [
284
+ [label.get_text() for label in ax.get_xticklabels()] for ax in axes
285
+ ]
286
+
287
+ assert result_xticklabels == xticklabels
288
+ assert result_titles == titles
289
+
290
+ @pytest.mark.parametrize(
291
+ "by, column",
292
+ [
293
+ ([], ["A"]),
294
+ ((), "A"),
295
+ ([], None),
296
+ ((), ["A", "B"]),
297
+ ],
298
+ )
299
+ def test_box_plot_with_none_empty_list_by(self, by, column, hist_df):
300
+ # GH 15079
301
+ msg = "No group keys passed"
302
+ with pytest.raises(ValueError, match=msg):
303
+ _check_plot_works(hist_df.plot.box, default_axes=True, column=column, by=by)
304
+
305
+ @pytest.mark.slow
306
+ @pytest.mark.parametrize(
307
+ "by, column, layout, axes_num",
308
+ [
309
+ (["C"], "A", (1, 1), 1),
310
+ ("C", "A", (1, 1), 1),
311
+ ("C", None, (2, 1), 2),
312
+ ("C", ["A", "B"], (1, 2), 2),
313
+ (["C", "D"], "A", (1, 1), 1),
314
+ (["C", "D"], None, (1, 2), 2),
315
+ ],
316
+ )
317
+ def test_box_plot_layout_with_by(self, by, column, layout, axes_num, hist_df):
318
+ # GH 15079
319
+ axes = _check_plot_works(
320
+ hist_df.plot.box, default_axes=True, column=column, by=by, layout=layout
321
+ )
322
+ _check_axes_shape(axes, axes_num=axes_num, layout=layout)
323
+
324
+ @pytest.mark.parametrize(
325
+ "msg, by, layout",
326
+ [
327
+ ("larger than required size", ["C", "D"], (1, 1)),
328
+ (re.escape("Layout must be a tuple of (rows, columns)"), "C", (1,)),
329
+ ("At least one dimension of layout must be positive", "C", (-1, -1)),
330
+ ],
331
+ )
332
+ def test_box_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df):
333
+ # GH 15079, test if error is raised when invalid layout is given
334
+
335
+ with pytest.raises(ValueError, match=msg):
336
+ hist_df.plot.box(column=["A", "B"], by=by, layout=layout)
337
+
338
+ @pytest.mark.parametrize("figsize", [(12, 8), (20, 10)])
339
+ def test_figure_shape_box_with_by(self, figsize, hist_df):
340
+ # GH 15079
341
+ axes = hist_df.plot.box(column="A", by="C", figsize=figsize)
342
+ _check_axes_shape(axes, axes_num=1, figsize=figsize)
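For context on what the hist/box "by" tests above exercise, here is a minimal usage sketch. It is not part of the diff, assumes matplotlib is installed, and the toy DataFrame only mimics the shape of the hist_df fixture.

import numpy as np
import pandas as pd

# Toy frame standing in for the hist_df fixture used above (illustrative only).
df = pd.DataFrame(
    {
        "A": np.random.default_rng(0).standard_normal(30),
        "B": np.random.default_rng(1).standard_normal(30),
        "C": list("abc") * 10,
    }
)

# One histogram subplot per group of column "C", with a shared y axis.
hist_axes = df.plot.hist(column="A", by="C", sharey=True)

# The same "by" grouping works for box plots; layout controls the subplot grid.
box_axes = df.plot.box(column=["A", "B"], by="C", layout=(1, 2))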
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_backend.py ADDED
@@ -0,0 +1,98 @@
1
+ import sys
2
+ import types
3
+
4
+ import pytest
5
+
6
+ import pandas.util._test_decorators as td
7
+
8
+ import pandas
9
+
10
+
11
+ @pytest.fixture
12
+ def dummy_backend():
13
+ db = types.ModuleType("pandas_dummy_backend")
14
+ setattr(db, "plot", lambda *args, **kwargs: "used_dummy")
15
+ return db
16
+
17
+
18
+ @pytest.fixture
19
+ def restore_backend():
20
+ """Restore the plotting backend to matplotlib"""
21
+ with pandas.option_context("plotting.backend", "matplotlib"):
22
+ yield
23
+
24
+
25
+ def test_backend_is_not_module():
26
+ msg = "Could not find plotting backend 'not_an_existing_module'."
27
+ with pytest.raises(ValueError, match=msg):
28
+ pandas.set_option("plotting.backend", "not_an_existing_module")
29
+
30
+ assert pandas.options.plotting.backend == "matplotlib"
31
+
32
+
33
+ def test_backend_is_correct(monkeypatch, restore_backend, dummy_backend):
34
+ monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend)
35
+
36
+ pandas.set_option("plotting.backend", "pandas_dummy_backend")
37
+ assert pandas.get_option("plotting.backend") == "pandas_dummy_backend"
38
+ assert (
39
+ pandas.plotting._core._get_plot_backend("pandas_dummy_backend") is dummy_backend
40
+ )
41
+
42
+
43
+ def test_backend_can_be_set_in_plot_call(monkeypatch, restore_backend, dummy_backend):
44
+ monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend)
45
+ df = pandas.DataFrame([1, 2, 3])
46
+
47
+ assert pandas.get_option("plotting.backend") == "matplotlib"
48
+ assert df.plot(backend="pandas_dummy_backend") == "used_dummy"
49
+
50
+
51
+ def test_register_entrypoint(restore_backend, tmp_path, monkeypatch, dummy_backend):
52
+ monkeypatch.syspath_prepend(tmp_path)
53
+ monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend)
54
+
55
+ dist_info = tmp_path / "my_backend-0.0.0.dist-info"
56
+ dist_info.mkdir()
57
+ # entry_point name should not match module name - otherwise pandas will
58
+ # fall back to backend lookup by module name
59
+ (dist_info / "entry_points.txt").write_bytes(
60
+ b"[pandas_plotting_backends]\nmy_ep_backend = pandas_dummy_backend\n"
61
+ )
62
+
63
+ assert pandas.plotting._core._get_plot_backend("my_ep_backend") is dummy_backend
64
+
65
+ with pandas.option_context("plotting.backend", "my_ep_backend"):
66
+ assert pandas.plotting._core._get_plot_backend() is dummy_backend
67
+
68
+
69
+ def test_setting_backend_without_plot_raises(monkeypatch):
70
+ # GH-28163
71
+ module = types.ModuleType("pandas_plot_backend")
72
+ monkeypatch.setitem(sys.modules, "pandas_plot_backend", module)
73
+
74
+ assert pandas.options.plotting.backend == "matplotlib"
75
+ with pytest.raises(
76
+ ValueError, match="Could not find plotting backend 'pandas_plot_backend'."
77
+ ):
78
+ pandas.set_option("plotting.backend", "pandas_plot_backend")
79
+
80
+ assert pandas.options.plotting.backend == "matplotlib"
81
+
82
+
83
+ @td.skip_if_installed("matplotlib")
84
+ def test_no_matplotlib_ok():
85
+ msg = (
86
+ 'matplotlib is required for plotting when the default backend "matplotlib" is '
87
+ "selected."
88
+ )
89
+ with pytest.raises(ImportError, match=msg):
90
+ pandas.plotting._core._get_plot_backend("matplotlib")
91
+
92
+
93
+ def test_extra_kinds_ok(monkeypatch, restore_backend, dummy_backend):
94
+ # https://github.com/pandas-dev/pandas/pull/28647
95
+ monkeypatch.setitem(sys.modules, "pandas_dummy_backend", dummy_backend)
96
+ pandas.set_option("plotting.backend", "pandas_dummy_backend")
97
+ df = pandas.DataFrame({"A": [1, 2, 3]})
98
+ df.plot(kind="not a real kind")
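As a rough sketch of what the dummy-backend fixture and the entry-point test above simulate (the names "my_backend" and "my_ep_backend" are illustrative, not part of the diff): a third-party plotting backend is a module exposing a plot callable, optionally advertised through the "pandas_plotting_backends" entry-point group.

# Contents of a hypothetical backend module, e.g. my_backend.py (illustrative).
def plot(data, kind=None, **kwargs):
    # pandas forwards the Series/DataFrame plus every keyword to the backend.
    return f"plotted {type(data).__name__} as {kind!r}"

# A packaged backend would advertise itself in pyproject.toml, roughly:
#
#   [project.entry-points."pandas_plotting_backends"]
#   my_ep_backend = "my_backend"
#
# after which either of these selects it:
#
#   pandas.set_option("plotting.backend", "my_ep_backend")
#   df.plot(backend="my_ep_backend")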
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_boxplot_method.py ADDED
@@ -0,0 +1,761 @@
1
+ """Test cases for the .boxplot method."""
2
+
3
+ import itertools
4
+ import string
5
+
6
+ import numpy as np
7
+ import pytest
8
+
9
+ from pandas import (
10
+ DataFrame,
11
+ MultiIndex,
12
+ Series,
13
+ date_range,
14
+ plotting,
15
+ timedelta_range,
16
+ )
17
+ import pandas._testing as tm
18
+ from pandas.tests.plotting.common import (
19
+ _check_axes_shape,
20
+ _check_box_return_type,
21
+ _check_plot_works,
22
+ _check_ticks_props,
23
+ _check_visible,
24
+ )
25
+
26
+ from pandas.io.formats.printing import pprint_thing
27
+
28
+ mpl = pytest.importorskip("matplotlib")
29
+ plt = pytest.importorskip("matplotlib.pyplot")
30
+
31
+
32
+ def _check_ax_limits(col, ax):
33
+ y_min, y_max = ax.get_ylim()
34
+ assert y_min <= col.min()
35
+ assert y_max >= col.max()
36
+
37
+
38
+ class TestDataFramePlots:
39
+ def test_stacked_boxplot_set_axis(self):
40
+ # GH2980
41
+ import matplotlib.pyplot as plt
42
+
43
+ n = 80
44
+ df = DataFrame(
45
+ {
46
+ "Clinical": np.random.default_rng(2).choice([0, 1, 2, 3], n),
47
+ "Confirmed": np.random.default_rng(2).choice([0, 1, 2, 3], n),
48
+ "Discarded": np.random.default_rng(2).choice([0, 1, 2, 3], n),
49
+ },
50
+ index=np.arange(0, n),
51
+ )
52
+ ax = df.plot(kind="bar", stacked=True)
53
+ assert [int(x.get_text()) for x in ax.get_xticklabels()] == df.index.to_list()
54
+ ax.set_xticks(np.arange(0, 80, 10))
55
+ plt.draw() # Update changes
56
+ assert [int(x.get_text()) for x in ax.get_xticklabels()] == list(
57
+ np.arange(0, 80, 10)
58
+ )
59
+
60
+ @pytest.mark.slow
61
+ @pytest.mark.parametrize(
62
+ "kwargs, warn",
63
+ [
64
+ [{"return_type": "dict"}, None],
65
+ [{"column": ["one", "two"]}, None],
66
+ [{"column": ["one", "two"], "by": "indic"}, UserWarning],
67
+ [{"column": ["one"], "by": ["indic", "indic2"]}, None],
68
+ [{"by": "indic"}, UserWarning],
69
+ [{"by": ["indic", "indic2"]}, UserWarning],
70
+ [{"notch": 1}, None],
71
+ [{"by": "indic", "notch": 1}, UserWarning],
72
+ ],
73
+ )
74
+ def test_boxplot_legacy1(self, kwargs, warn):
75
+ df = DataFrame(
76
+ np.random.default_rng(2).standard_normal((6, 4)),
77
+ index=list(string.ascii_letters[:6]),
78
+ columns=["one", "two", "three", "four"],
79
+ )
80
+ df["indic"] = ["foo", "bar"] * 3
81
+ df["indic2"] = ["foo", "bar", "foo"] * 2
82
+
83
+ # _check_plot_works can add an ax so catch warning. see GH #13188
84
+ with tm.assert_produces_warning(warn, check_stacklevel=False):
85
+ _check_plot_works(df.boxplot, **kwargs)
86
+
87
+ def test_boxplot_legacy1_series(self):
88
+ ser = Series(np.random.default_rng(2).standard_normal(6))
89
+ _check_plot_works(plotting._core.boxplot, data=ser, return_type="dict")
90
+
91
+ def test_boxplot_legacy2(self):
92
+ df = DataFrame(
93
+ np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"]
94
+ )
95
+ df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
96
+ df["Y"] = Series(["A"] * 10)
97
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
98
+ _check_plot_works(df.boxplot, by="X")
99
+
100
+ def test_boxplot_legacy2_with_ax(self):
101
+ df = DataFrame(
102
+ np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"]
103
+ )
104
+ df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
105
+ df["Y"] = Series(["A"] * 10)
106
+ # When ax is supplied and required number of axes is 1,
107
+ # passed ax should be used:
108
+ _, ax = mpl.pyplot.subplots()
109
+ axes = df.boxplot("Col1", by="X", ax=ax)
110
+ ax_axes = ax.axes
111
+ assert ax_axes is axes
112
+
113
+ def test_boxplot_legacy2_with_ax_return_type(self):
114
+ df = DataFrame(
115
+ np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"]
116
+ )
117
+ df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
118
+ df["Y"] = Series(["A"] * 10)
119
+ fig, ax = mpl.pyplot.subplots()
120
+ axes = df.groupby("Y").boxplot(ax=ax, return_type="axes")
121
+ ax_axes = ax.axes
122
+ assert ax_axes is axes["A"]
123
+
124
+ def test_boxplot_legacy2_with_multi_col(self):
125
+ df = DataFrame(
126
+ np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"]
127
+ )
128
+ df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
129
+ df["Y"] = Series(["A"] * 10)
130
+ # Multiple columns with an ax argument should use same figure
131
+ fig, ax = mpl.pyplot.subplots()
132
+ with tm.assert_produces_warning(UserWarning):
133
+ axes = df.boxplot(
134
+ column=["Col1", "Col2"], by="X", ax=ax, return_type="axes"
135
+ )
136
+ assert axes["Col1"].get_figure() is fig
137
+
138
+ def test_boxplot_legacy2_by_none(self):
139
+ df = DataFrame(
140
+ np.random.default_rng(2).random((10, 2)), columns=["Col1", "Col2"]
141
+ )
142
+ df["X"] = Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
143
+ df["Y"] = Series(["A"] * 10)
144
+ # When by is None, check that all relevant lines are present in the
145
+ # dict
146
+ _, ax = mpl.pyplot.subplots()
147
+ d = df.boxplot(ax=ax, return_type="dict")
148
+ lines = list(itertools.chain.from_iterable(d.values()))
149
+ assert len(ax.get_lines()) == len(lines)
150
+
151
+ def test_boxplot_return_type_none(self, hist_df):
152
+ # GH 12216; return_type=None & by=None -> axes
153
+ result = hist_df.boxplot()
154
+ assert isinstance(result, mpl.pyplot.Axes)
155
+
156
+ def test_boxplot_return_type_legacy(self):
157
+ # API change in https://github.com/pandas-dev/pandas/pull/7096
158
+
159
+ df = DataFrame(
160
+ np.random.default_rng(2).standard_normal((6, 4)),
161
+ index=list(string.ascii_letters[:6]),
162
+ columns=["one", "two", "three", "four"],
163
+ )
164
+ msg = "return_type must be {'axes', 'dict', 'both'}"
165
+ with pytest.raises(ValueError, match=msg):
166
+ df.boxplot(return_type="NOT_A_TYPE")
167
+
168
+ result = df.boxplot()
169
+ _check_box_return_type(result, "axes")
170
+
171
+ @pytest.mark.parametrize("return_type", ["dict", "axes", "both"])
172
+ def test_boxplot_return_type_legacy_return_type(self, return_type):
173
+ # API change in https://github.com/pandas-dev/pandas/pull/7096
174
+
175
+ df = DataFrame(
176
+ np.random.default_rng(2).standard_normal((6, 4)),
177
+ index=list(string.ascii_letters[:6]),
178
+ columns=["one", "two", "three", "four"],
179
+ )
180
+ with tm.assert_produces_warning(False):
181
+ result = df.boxplot(return_type=return_type)
182
+ _check_box_return_type(result, return_type)
183
+
184
+ def test_boxplot_axis_limits(self, hist_df):
185
+ df = hist_df.copy()
186
+ df["age"] = np.random.default_rng(2).integers(1, 20, df.shape[0])
187
+ # One full row
188
+ height_ax, weight_ax = df.boxplot(["height", "weight"], by="category")
189
+ _check_ax_limits(df["height"], height_ax)
190
+ _check_ax_limits(df["weight"], weight_ax)
191
+ assert weight_ax._sharey == height_ax
192
+
193
+ def test_boxplot_axis_limits_two_rows(self, hist_df):
194
+ df = hist_df.copy()
195
+ df["age"] = np.random.default_rng(2).integers(1, 20, df.shape[0])
196
+ # Two rows, one partial
197
+ p = df.boxplot(["height", "weight", "age"], by="category")
198
+ height_ax, weight_ax, age_ax = p[0, 0], p[0, 1], p[1, 0]
199
+ dummy_ax = p[1, 1]
200
+
201
+ _check_ax_limits(df["height"], height_ax)
202
+ _check_ax_limits(df["weight"], weight_ax)
203
+ _check_ax_limits(df["age"], age_ax)
204
+ assert weight_ax._sharey == height_ax
205
+ assert age_ax._sharey == height_ax
206
+ assert dummy_ax._sharey is None
207
+
208
+ def test_boxplot_empty_column(self):
209
+ df = DataFrame(np.random.default_rng(2).standard_normal((20, 4)))
210
+ df.loc[:, 0] = np.nan
211
+ _check_plot_works(df.boxplot, return_type="axes")
212
+
213
+ def test_figsize(self):
214
+ df = DataFrame(
215
+ np.random.default_rng(2).random((10, 5)), columns=["A", "B", "C", "D", "E"]
216
+ )
217
+ result = df.boxplot(return_type="axes", figsize=(12, 8))
218
+ assert result.figure.bbox_inches.width == 12
219
+ assert result.figure.bbox_inches.height == 8
220
+
221
+ def test_fontsize(self):
222
+ df = DataFrame({"a": [1, 2, 3, 4, 5, 6]})
223
+ _check_ticks_props(df.boxplot("a", fontsize=16), xlabelsize=16, ylabelsize=16)
224
+
225
+ def test_boxplot_numeric_data(self):
226
+ # GH 22799
227
+ df = DataFrame(
228
+ {
229
+ "a": date_range("2012-01-01", periods=100),
230
+ "b": np.random.default_rng(2).standard_normal(100),
231
+ "c": np.random.default_rng(2).standard_normal(100) + 2,
232
+ "d": date_range("2012-01-01", periods=100).astype(str),
233
+ "e": date_range("2012-01-01", periods=100, tz="UTC"),
234
+ "f": timedelta_range("1 days", periods=100),
235
+ }
236
+ )
237
+ ax = df.plot(kind="box")
238
+ assert [x.get_text() for x in ax.get_xticklabels()] == ["b", "c"]
239
+
240
+ @pytest.mark.parametrize(
241
+ "colors_kwd, expected",
242
+ [
243
+ (
244
+ {"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"},
245
+ {"boxes": "r", "whiskers": "b", "medians": "g", "caps": "c"},
246
+ ),
247
+ ({"boxes": "r"}, {"boxes": "r"}),
248
+ ("r", {"boxes": "r", "whiskers": "r", "medians": "r", "caps": "r"}),
249
+ ],
250
+ )
251
+ def test_color_kwd(self, colors_kwd, expected):
252
+ # GH: 26214
253
+ df = DataFrame(np.random.default_rng(2).random((10, 2)))
254
+ result = df.boxplot(color=colors_kwd, return_type="dict")
255
+ for k, v in expected.items():
256
+ assert result[k][0].get_color() == v
257
+
258
+ @pytest.mark.parametrize(
259
+ "scheme,expected",
260
+ [
261
+ (
262
+ "dark_background",
263
+ {
264
+ "boxes": "#8dd3c7",
265
+ "whiskers": "#8dd3c7",
266
+ "medians": "#bfbbd9",
267
+ "caps": "#8dd3c7",
268
+ },
269
+ ),
270
+ (
271
+ "default",
272
+ {
273
+ "boxes": "#1f77b4",
274
+ "whiskers": "#1f77b4",
275
+ "medians": "#2ca02c",
276
+ "caps": "#1f77b4",
277
+ },
278
+ ),
279
+ ],
280
+ )
281
+ def test_colors_in_theme(self, scheme, expected):
282
+ # GH: 40769
283
+ df = DataFrame(np.random.default_rng(2).random((10, 2)))
284
+ import matplotlib.pyplot as plt
285
+
286
+ plt.style.use(scheme)
287
+ result = df.plot.box(return_type="dict")
288
+ for k, v in expected.items():
289
+ assert result[k][0].get_color() == v
290
+
291
+ @pytest.mark.parametrize(
292
+ "dict_colors, msg",
293
+ [({"boxes": "r", "invalid_key": "r"}, "invalid key 'invalid_key'")],
294
+ )
295
+ def test_color_kwd_errors(self, dict_colors, msg):
296
+ # GH: 26214
297
+ df = DataFrame(np.random.default_rng(2).random((10, 2)))
298
+ with pytest.raises(ValueError, match=msg):
299
+ df.boxplot(color=dict_colors, return_type="dict")
300
+
301
+ @pytest.mark.parametrize(
302
+ "props, expected",
303
+ [
304
+ ("boxprops", "boxes"),
305
+ ("whiskerprops", "whiskers"),
306
+ ("capprops", "caps"),
307
+ ("medianprops", "medians"),
308
+ ],
309
+ )
310
+ def test_specified_props_kwd(self, props, expected):
311
+ # GH 30346
312
+ df = DataFrame({k: np.random.default_rng(2).random(10) for k in "ABC"})
313
+ kwd = {props: {"color": "C1"}}
314
+ result = df.boxplot(return_type="dict", **kwd)
315
+
316
+ assert result[expected][0].get_color() == "C1"
317
+
318
+ @pytest.mark.parametrize("vert", [True, False])
319
+ def test_plot_xlabel_ylabel(self, vert):
320
+ df = DataFrame(
321
+ {
322
+ "a": np.random.default_rng(2).standard_normal(10),
323
+ "b": np.random.default_rng(2).standard_normal(10),
324
+ "group": np.random.default_rng(2).choice(["group1", "group2"], 10),
325
+ }
326
+ )
327
+ xlabel, ylabel = "x", "y"
328
+ ax = df.plot(kind="box", vert=vert, xlabel=xlabel, ylabel=ylabel)
329
+ assert ax.get_xlabel() == xlabel
330
+ assert ax.get_ylabel() == ylabel
331
+
332
+ @pytest.mark.parametrize("vert", [True, False])
333
+ def test_plot_box(self, vert):
334
+ # GH 54941
335
+ rng = np.random.default_rng(2)
336
+ df1 = DataFrame(rng.integers(0, 100, size=(100, 4)), columns=list("ABCD"))
337
+ df2 = DataFrame(rng.integers(0, 100, size=(100, 4)), columns=list("ABCD"))
338
+
339
+ xlabel, ylabel = "x", "y"
340
+ _, axs = plt.subplots(ncols=2, figsize=(10, 7), sharey=True)
341
+ df1.plot.box(ax=axs[0], vert=vert, xlabel=xlabel, ylabel=ylabel)
342
+ df2.plot.box(ax=axs[1], vert=vert, xlabel=xlabel, ylabel=ylabel)
343
+ for ax in axs:
344
+ assert ax.get_xlabel() == xlabel
345
+ assert ax.get_ylabel() == ylabel
346
+ mpl.pyplot.close()
347
+
348
+ @pytest.mark.parametrize("vert", [True, False])
349
+ def test_boxplot_xlabel_ylabel(self, vert):
350
+ df = DataFrame(
351
+ {
352
+ "a": np.random.default_rng(2).standard_normal(10),
353
+ "b": np.random.default_rng(2).standard_normal(10),
354
+ "group": np.random.default_rng(2).choice(["group1", "group2"], 10),
355
+ }
356
+ )
357
+ xlabel, ylabel = "x", "y"
358
+ ax = df.boxplot(vert=vert, xlabel=xlabel, ylabel=ylabel)
359
+ assert ax.get_xlabel() == xlabel
360
+ assert ax.get_ylabel() == ylabel
361
+
362
+ @pytest.mark.parametrize("vert", [True, False])
363
+ def test_boxplot_group_xlabel_ylabel(self, vert):
364
+ df = DataFrame(
365
+ {
366
+ "a": np.random.default_rng(2).standard_normal(10),
367
+ "b": np.random.default_rng(2).standard_normal(10),
368
+ "group": np.random.default_rng(2).choice(["group1", "group2"], 10),
369
+ }
370
+ )
371
+ xlabel, ylabel = "x", "y"
372
+ ax = df.boxplot(by="group", vert=vert, xlabel=xlabel, ylabel=ylabel)
373
+ for subplot in ax:
374
+ assert subplot.get_xlabel() == xlabel
375
+ assert subplot.get_ylabel() == ylabel
376
+ mpl.pyplot.close()
377
+
378
+ @pytest.mark.parametrize("vert", [True, False])
379
+ def test_boxplot_group_no_xlabel_ylabel(self, vert):
380
+ df = DataFrame(
381
+ {
382
+ "a": np.random.default_rng(2).standard_normal(10),
383
+ "b": np.random.default_rng(2).standard_normal(10),
384
+ "group": np.random.default_rng(2).choice(["group1", "group2"], 10),
385
+ }
386
+ )
387
+ ax = df.boxplot(by="group", vert=vert)
388
+ for subplot in ax:
389
+ target_label = subplot.get_xlabel() if vert else subplot.get_ylabel()
390
+ assert target_label == pprint_thing(["group"])
391
+ mpl.pyplot.close()
392
+
393
+
394
+ class TestDataFrameGroupByPlots:
395
+ def test_boxplot_legacy1(self, hist_df):
396
+ grouped = hist_df.groupby(by="gender")
397
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
398
+ axes = _check_plot_works(grouped.boxplot, return_type="axes")
399
+ _check_axes_shape(list(axes.values), axes_num=2, layout=(1, 2))
400
+
401
+ def test_boxplot_legacy1_return_type(self, hist_df):
402
+ grouped = hist_df.groupby(by="gender")
403
+ axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
404
+ _check_axes_shape(axes, axes_num=1, layout=(1, 1))
405
+
406
+ @pytest.mark.slow
407
+ def test_boxplot_legacy2(self):
408
+ tuples = zip(string.ascii_letters[:10], range(10))
409
+ df = DataFrame(
410
+ np.random.default_rng(2).random((10, 3)),
411
+ index=MultiIndex.from_tuples(tuples),
412
+ )
413
+ grouped = df.groupby(level=1)
414
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
415
+ axes = _check_plot_works(grouped.boxplot, return_type="axes")
416
+ _check_axes_shape(list(axes.values), axes_num=10, layout=(4, 3))
417
+
418
+ @pytest.mark.slow
419
+ def test_boxplot_legacy2_return_type(self):
420
+ tuples = zip(string.ascii_letters[:10], range(10))
421
+ df = DataFrame(
422
+ np.random.default_rng(2).random((10, 3)),
423
+ index=MultiIndex.from_tuples(tuples),
424
+ )
425
+ grouped = df.groupby(level=1)
426
+ axes = _check_plot_works(grouped.boxplot, subplots=False, return_type="axes")
427
+ _check_axes_shape(axes, axes_num=1, layout=(1, 1))
428
+
429
+ @pytest.mark.parametrize(
430
+ "subplots, warn, axes_num, layout",
431
+ [[True, UserWarning, 3, (2, 2)], [False, None, 1, (1, 1)]],
432
+ )
433
+ def test_boxplot_legacy3(self, subplots, warn, axes_num, layout):
434
+ tuples = zip(string.ascii_letters[:10], range(10))
435
+ df = DataFrame(
436
+ np.random.default_rng(2).random((10, 3)),
437
+ index=MultiIndex.from_tuples(tuples),
438
+ )
439
+ msg = "DataFrame.groupby with axis=1 is deprecated"
440
+ with tm.assert_produces_warning(FutureWarning, match=msg):
441
+ grouped = df.unstack(level=1).groupby(level=0, axis=1)
442
+ with tm.assert_produces_warning(warn, check_stacklevel=False):
443
+ axes = _check_plot_works(
444
+ grouped.boxplot, subplots=subplots, return_type="axes"
445
+ )
446
+ _check_axes_shape(axes, axes_num=axes_num, layout=layout)
447
+
448
+ def test_grouped_plot_fignums(self):
449
+ n = 10
450
+ weight = Series(np.random.default_rng(2).normal(166, 20, size=n))
451
+ height = Series(np.random.default_rng(2).normal(60, 10, size=n))
452
+ gender = np.random.default_rng(2).choice(["male", "female"], size=n)
453
+ df = DataFrame({"height": height, "weight": weight, "gender": gender})
454
+ gb = df.groupby("gender")
455
+
456
+ res = gb.plot()
457
+ assert len(mpl.pyplot.get_fignums()) == 2
458
+ assert len(res) == 2
459
+ plt.close("all")
460
+
461
+ res = gb.boxplot(return_type="axes")
462
+ assert len(mpl.pyplot.get_fignums()) == 1
463
+ assert len(res) == 2
464
+
465
+ def test_grouped_plot_fignums_excluded_col(self):
466
+ n = 10
467
+ weight = Series(np.random.default_rng(2).normal(166, 20, size=n))
468
+ height = Series(np.random.default_rng(2).normal(60, 10, size=n))
469
+ gender = np.random.default_rng(2).choice(["male", "female"], size=n)
470
+ df = DataFrame({"height": height, "weight": weight, "gender": gender})
471
+ # now works with GH 5610 as gender is excluded
472
+ df.groupby("gender").hist()
473
+
474
+ @pytest.mark.slow
475
+ def test_grouped_box_return_type(self, hist_df):
476
+ df = hist_df
477
+
478
+ # old style: return_type=None
479
+ result = df.boxplot(by="gender")
480
+ assert isinstance(result, np.ndarray)
481
+ _check_box_return_type(
482
+ result, None, expected_keys=["height", "weight", "category"]
483
+ )
484
+
485
+ @pytest.mark.slow
486
+ def test_grouped_box_return_type_groupby(self, hist_df):
487
+ df = hist_df
488
+ # now for groupby
489
+ result = df.groupby("gender").boxplot(return_type="dict")
490
+ _check_box_return_type(result, "dict", expected_keys=["Male", "Female"])
491
+
492
+ @pytest.mark.slow
493
+ @pytest.mark.parametrize("return_type", ["dict", "axes", "both"])
494
+ def test_grouped_box_return_type_arg(self, hist_df, return_type):
495
+ df = hist_df
496
+
497
+ returned = df.groupby("classroom").boxplot(return_type=return_type)
498
+ _check_box_return_type(returned, return_type, expected_keys=["A", "B", "C"])
499
+
500
+ returned = df.boxplot(by="classroom", return_type=return_type)
501
+ _check_box_return_type(
502
+ returned, return_type, expected_keys=["height", "weight", "category"]
503
+ )
504
+
505
+ @pytest.mark.slow
506
+ @pytest.mark.parametrize("return_type", ["dict", "axes", "both"])
507
+ def test_grouped_box_return_type_arg_duplcate_cats(self, return_type):
508
+ columns2 = "X B C D A".split()
509
+ df2 = DataFrame(
510
+ np.random.default_rng(2).standard_normal((6, 5)), columns=columns2
511
+ )
512
+ categories2 = "A B".split()
513
+ df2["category"] = categories2 * 3
514
+
515
+ returned = df2.groupby("category").boxplot(return_type=return_type)
516
+ _check_box_return_type(returned, return_type, expected_keys=categories2)
517
+
518
+ returned = df2.boxplot(by="category", return_type=return_type)
519
+ _check_box_return_type(returned, return_type, expected_keys=columns2)
520
+
521
+ @pytest.mark.slow
522
+ def test_grouped_box_layout_too_small(self, hist_df):
523
+ df = hist_df
524
+
525
+ msg = "Layout of 1x1 must be larger than required size 2"
526
+ with pytest.raises(ValueError, match=msg):
527
+ df.boxplot(column=["weight", "height"], by=df.gender, layout=(1, 1))
528
+
529
+ @pytest.mark.slow
530
+ def test_grouped_box_layout_needs_by(self, hist_df):
531
+ df = hist_df
532
+ msg = "The 'layout' keyword is not supported when 'by' is None"
533
+ with pytest.raises(ValueError, match=msg):
534
+ df.boxplot(
535
+ column=["height", "weight", "category"],
536
+ layout=(2, 1),
537
+ return_type="dict",
538
+ )
539
+
540
+ @pytest.mark.slow
541
+ def test_grouped_box_layout_positive_layout(self, hist_df):
542
+ df = hist_df
543
+ msg = "At least one dimension of layout must be positive"
544
+ with pytest.raises(ValueError, match=msg):
545
+ df.boxplot(column=["weight", "height"], by=df.gender, layout=(-1, -1))
546
+
547
+ @pytest.mark.slow
548
+ @pytest.mark.parametrize(
549
+ "gb_key, axes_num, rows",
550
+ [["gender", 2, 1], ["category", 4, 2], ["classroom", 3, 2]],
551
+ )
552
+ def test_grouped_box_layout_positive_layout_axes(
553
+ self, hist_df, gb_key, axes_num, rows
554
+ ):
555
+ df = hist_df
556
+ # _check_plot_works adds an ax so catch warning. see GH #13188, GH #6769
557
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
558
+ _check_plot_works(
559
+ df.groupby(gb_key).boxplot, column="height", return_type="dict"
560
+ )
561
+ _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=axes_num, layout=(rows, 2))
562
+
563
+ @pytest.mark.slow
564
+ @pytest.mark.parametrize(
565
+ "col, visible", [["height", False], ["weight", True], ["category", True]]
566
+ )
567
+ def test_grouped_box_layout_visible(self, hist_df, col, visible):
568
+ df = hist_df
569
+ # GH 5897
570
+ axes = df.boxplot(
571
+ column=["height", "weight", "category"], by="gender", return_type="axes"
572
+ )
573
+ _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(2, 2))
574
+ ax = axes[col]
575
+ _check_visible(ax.get_xticklabels(), visible=visible)
576
+ _check_visible([ax.xaxis.get_label()], visible=visible)
577
+
578
+ @pytest.mark.slow
579
+ def test_grouped_box_layout_shape(self, hist_df):
580
+ df = hist_df
581
+ df.groupby("classroom").boxplot(
582
+ column=["height", "weight", "category"], return_type="dict"
583
+ )
584
+ _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(2, 2))
585
+
586
+ @pytest.mark.slow
587
+ @pytest.mark.parametrize("cols", [2, -1])
588
+ def test_grouped_box_layout_works(self, hist_df, cols):
589
+ df = hist_df
590
+ with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
591
+ _check_plot_works(
592
+ df.groupby("category").boxplot,
593
+ column="height",
594
+ layout=(3, cols),
595
+ return_type="dict",
596
+ )
597
+ _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=4, layout=(3, 2))
598
+
599
+ @pytest.mark.slow
600
+ @pytest.mark.parametrize("rows, res", [[4, 4], [-1, 3]])
601
+ def test_grouped_box_layout_axes_shape_rows(self, hist_df, rows, res):
602
+ df = hist_df
603
+ df.boxplot(
604
+ column=["height", "weight", "category"], by="gender", layout=(rows, 1)
605
+ )
606
+ _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(res, 1))
607
+
608
+ @pytest.mark.slow
609
+ @pytest.mark.parametrize("cols, res", [[4, 4], [-1, 3]])
610
+ def test_grouped_box_layout_axes_shape_cols_groupby(self, hist_df, cols, res):
611
+ df = hist_df
612
+ df.groupby("classroom").boxplot(
613
+ column=["height", "weight", "category"],
614
+ layout=(1, cols),
615
+ return_type="dict",
616
+ )
617
+ _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=3, layout=(1, res))
618
+
619
+ @pytest.mark.slow
620
+ def test_grouped_box_multiple_axes(self, hist_df):
621
+ # GH 6970, GH 7069
622
+ df = hist_df
623
+
624
+ # check warning to ignore sharex / sharey
625
+ # this check should be done in the first function which
626
+ # passes multiple axes to plot, hist or boxplot
627
+ # location should be changed if another test is added
628
+ # which comes earlier in alphabetical order
629
+ with tm.assert_produces_warning(UserWarning):
630
+ _, axes = mpl.pyplot.subplots(2, 2)
631
+ df.groupby("category").boxplot(column="height", return_type="axes", ax=axes)
632
+ _check_axes_shape(mpl.pyplot.gcf().axes, axes_num=4, layout=(2, 2))
633
+
634
+ @pytest.mark.slow
635
+ def test_grouped_box_multiple_axes_on_fig(self, hist_df):
636
+ # GH 6970, GH 7069
637
+ df = hist_df
638
+ fig, axes = mpl.pyplot.subplots(2, 3)
639
+ with tm.assert_produces_warning(UserWarning):
640
+ returned = df.boxplot(
641
+ column=["height", "weight", "category"],
642
+ by="gender",
643
+ return_type="axes",
644
+ ax=axes[0],
645
+ )
646
+ returned = np.array(list(returned.values))
647
+ _check_axes_shape(returned, axes_num=3, layout=(1, 3))
648
+ tm.assert_numpy_array_equal(returned, axes[0])
649
+ assert returned[0].figure is fig
650
+
651
+ # draw on second row
652
+ with tm.assert_produces_warning(UserWarning):
653
+ returned = df.groupby("classroom").boxplot(
654
+ column=["height", "weight", "category"], return_type="axes", ax=axes[1]
655
+ )
656
+ returned = np.array(list(returned.values))
657
+ _check_axes_shape(returned, axes_num=3, layout=(1, 3))
658
+ tm.assert_numpy_array_equal(returned, axes[1])
659
+ assert returned[0].figure is fig
660
+
661
+ @pytest.mark.slow
662
+ def test_grouped_box_multiple_axes_ax_error(self, hist_df):
663
+ # GH 6970, GH 7069
664
+ df = hist_df
665
+ msg = "The number of passed axes must be 3, the same as the output plot"
666
+ with pytest.raises(ValueError, match=msg):
667
+ fig, axes = mpl.pyplot.subplots(2, 3)
668
+ # pass different number of axes from required
669
+ with tm.assert_produces_warning(UserWarning):
670
+ axes = df.groupby("classroom").boxplot(ax=axes)
671
+
672
+ def test_fontsize(self):
673
+ df = DataFrame({"a": [1, 2, 3, 4, 5, 6], "b": [0, 0, 0, 1, 1, 1]})
674
+ _check_ticks_props(
675
+ df.boxplot("a", by="b", fontsize=16), xlabelsize=16, ylabelsize=16
676
+ )
677
+
678
+ @pytest.mark.parametrize(
679
+ "col, expected_xticklabel",
680
+ [
681
+ ("v", ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]),
682
+ (["v"], ["(a, v)", "(b, v)", "(c, v)", "(d, v)", "(e, v)"]),
683
+ ("v1", ["(a, v1)", "(b, v1)", "(c, v1)", "(d, v1)", "(e, v1)"]),
684
+ (
685
+ ["v", "v1"],
686
+ [
687
+ "(a, v)",
688
+ "(a, v1)",
689
+ "(b, v)",
690
+ "(b, v1)",
691
+ "(c, v)",
692
+ "(c, v1)",
693
+ "(d, v)",
694
+ "(d, v1)",
695
+ "(e, v)",
696
+ "(e, v1)",
697
+ ],
698
+ ),
699
+ (
700
+ None,
701
+ [
702
+ "(a, v)",
703
+ "(a, v1)",
704
+ "(b, v)",
705
+ "(b, v1)",
706
+ "(c, v)",
707
+ "(c, v1)",
708
+ "(d, v)",
709
+ "(d, v1)",
710
+ "(e, v)",
711
+ "(e, v1)",
712
+ ],
713
+ ),
714
+ ],
715
+ )
716
+ def test_groupby_boxplot_subplots_false(self, col, expected_xticklabel):
717
+ # GH 16748
718
+ df = DataFrame(
719
+ {
720
+ "cat": np.random.default_rng(2).choice(list("abcde"), 100),
721
+ "v": np.random.default_rng(2).random(100),
722
+ "v1": np.random.default_rng(2).random(100),
723
+ }
724
+ )
725
+ grouped = df.groupby("cat")
726
+
727
+ axes = _check_plot_works(
728
+ grouped.boxplot, subplots=False, column=col, return_type="axes"
729
+ )
730
+
731
+ result_xticklabel = [x.get_text() for x in axes.get_xticklabels()]
732
+ assert expected_xticklabel == result_xticklabel
733
+
734
+ def test_groupby_boxplot_object(self, hist_df):
735
+ # GH 43480
736
+ df = hist_df.astype("object")
737
+ grouped = df.groupby("gender")
738
+ msg = "boxplot method requires numerical columns, nothing to plot"
739
+ with pytest.raises(ValueError, match=msg):
740
+ _check_plot_works(grouped.boxplot, subplots=False)
741
+
742
+ def test_boxplot_multiindex_column(self):
743
+ # GH 16748
744
+ arrays = [
745
+ ["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
746
+ ["one", "two", "one", "two", "one", "two", "one", "two"],
747
+ ]
748
+ tuples = list(zip(*arrays))
749
+ index = MultiIndex.from_tuples(tuples, names=["first", "second"])
750
+ df = DataFrame(
751
+ np.random.default_rng(2).standard_normal((3, 8)),
752
+ index=["A", "B", "C"],
753
+ columns=index,
754
+ )
755
+
756
+ col = [("bar", "one"), ("bar", "two")]
757
+ axes = _check_plot_works(df.boxplot, column=col, return_type="axes")
758
+
759
+ expected_xticklabel = ["(bar, one)", "(bar, two)"]
760
+ result_xticklabel = [x.get_text() for x in axes.get_xticklabels()]
761
+ assert expected_xticklabel == result_xticklabel
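A brief usage sketch of the DataFrame.boxplot options the tests above revolve around; it is not part of the diff and assumes matplotlib is installed.

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
df = pd.DataFrame(
    {
        "height": rng.normal(170, 10, 20),
        "weight": rng.normal(70, 8, 20),
        "gender": ["Male", "Female"] * 10,
    }
)

# return_type picks what boxplot hands back: "axes" (matplotlib Axes),
# "dict" (the Line2D artists keyed by boxes/whiskers/medians/caps), or "both".
ax = df.boxplot(column=["height", "weight"], return_type="axes")

# Grouping with by= draws one subplot per plotted column, split by group.
axes = df.boxplot(column=["height", "weight"], by="gender")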
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_common.py ADDED
@@ -0,0 +1,60 @@
1
+ import pytest
2
+
3
+ from pandas import DataFrame
4
+ from pandas.tests.plotting.common import (
5
+ _check_plot_works,
6
+ _check_ticks_props,
7
+ _gen_two_subplots,
8
+ )
9
+
10
+ plt = pytest.importorskip("matplotlib.pyplot")
11
+
12
+
13
+ class TestCommon:
14
+ def test__check_ticks_props(self):
15
+ # GH 34768
16
+ df = DataFrame({"b": [0, 1, 0], "a": [1, 2, 3]})
17
+ ax = _check_plot_works(df.plot, rot=30)
18
+ ax.yaxis.set_tick_params(rotation=30)
19
+ msg = "expected 0.00000 but got "
20
+ with pytest.raises(AssertionError, match=msg):
21
+ _check_ticks_props(ax, xrot=0)
22
+ with pytest.raises(AssertionError, match=msg):
23
+ _check_ticks_props(ax, xlabelsize=0)
24
+ with pytest.raises(AssertionError, match=msg):
25
+ _check_ticks_props(ax, yrot=0)
26
+ with pytest.raises(AssertionError, match=msg):
27
+ _check_ticks_props(ax, ylabelsize=0)
28
+
29
+ def test__gen_two_subplots_with_ax(self):
30
+ fig = plt.gcf()
31
+ gen = _gen_two_subplots(f=lambda **kwargs: None, fig=fig, ax="test")
32
+ # On the first yield, no subplot should be added since ax was passed
33
+ next(gen)
34
+ assert fig.get_axes() == []
35
+ # On the second, the one axis should match fig.subplot(2, 1, 2)
36
+ next(gen)
37
+ axes = fig.get_axes()
38
+ assert len(axes) == 1
39
+ subplot_geometry = list(axes[0].get_subplotspec().get_geometry()[:-1])
40
+ subplot_geometry[-1] += 1
41
+ assert subplot_geometry == [2, 1, 2]
42
+
43
+ def test_colorbar_layout(self):
44
+ fig = plt.figure()
45
+
46
+ axes = fig.subplot_mosaic(
47
+ """
48
+ AB
49
+ CC
50
+ """
51
+ )
52
+
53
+ x = [1, 2, 3]
54
+ y = [1, 2, 3]
55
+
56
+ cs0 = axes["A"].scatter(x, y)
57
+ axes["B"].scatter(x, y)
58
+
59
+ fig.colorbar(cs0, ax=[axes["A"], axes["B"]], location="right")
60
+ DataFrame(x).plot(ax=axes["C"])
env-llmeval/lib/python3.10/site-packages/pandas/tests/plotting/test_converter.py ADDED
@@ -0,0 +1,410 @@
1
+ from datetime import (
2
+ date,
3
+ datetime,
4
+ )
5
+ import subprocess
6
+ import sys
7
+
8
+ import numpy as np
9
+ import pytest
10
+
11
+ import pandas._config.config as cf
12
+
13
+ from pandas._libs.tslibs import to_offset
14
+
15
+ from pandas import (
16
+ Index,
17
+ Period,
18
+ PeriodIndex,
19
+ Series,
20
+ Timestamp,
21
+ arrays,
22
+ date_range,
23
+ )
24
+ import pandas._testing as tm
25
+
26
+ from pandas.plotting import (
27
+ deregister_matplotlib_converters,
28
+ register_matplotlib_converters,
29
+ )
30
+ from pandas.tseries.offsets import (
31
+ Day,
32
+ Micro,
33
+ Milli,
34
+ Second,
35
+ )
36
+
37
+ try:
38
+ from pandas.plotting._matplotlib import converter
39
+ except ImportError:
40
+ # try / except, rather than skip, to avoid internal refactoring
41
+ # causing an improper skip
42
+ pass
43
+
44
+ pytest.importorskip("matplotlib.pyplot")
45
+ dates = pytest.importorskip("matplotlib.dates")
46
+
47
+
48
+ @pytest.mark.single_cpu
49
+ def test_registry_mpl_resets():
50
+ # Check that Matplotlib converters are properly reset (see issue #27481)
51
+ code = (
52
+ "import matplotlib.units as units; "
53
+ "import matplotlib.dates as mdates; "
54
+ "n_conv = len(units.registry); "
55
+ "import pandas as pd; "
56
+ "pd.plotting.register_matplotlib_converters(); "
57
+ "pd.plotting.deregister_matplotlib_converters(); "
58
+ "assert len(units.registry) == n_conv"
59
+ )
60
+ call = [sys.executable, "-c", code]
61
+ subprocess.check_output(call)
62
+
63
+
64
+ def test_time2num_accepts_unicode():
65
+ assert converter.time2num("00:01") == converter.time2num("00:01")
66
+
67
+
68
+ class TestRegistration:
69
+ @pytest.mark.single_cpu
70
+ def test_dont_register_by_default(self):
71
+ # Run in subprocess to ensure a clean state
72
+ code = (
73
+ "import matplotlib.units; "
74
+ "import pandas as pd; "
75
+ "units = dict(matplotlib.units.registry); "
76
+ "assert pd.Timestamp not in units"
77
+ )
78
+ call = [sys.executable, "-c", code]
79
+ assert subprocess.check_call(call) == 0
80
+
81
+ def test_registering_no_warning(self):
82
+ plt = pytest.importorskip("matplotlib.pyplot")
83
+ s = Series(range(12), index=date_range("2017", periods=12))
84
+ _, ax = plt.subplots()
85
+
86
+ # Set to the "warn" state, in case this isn't the first test run
87
+ register_matplotlib_converters()
88
+ ax.plot(s.index, s.values)
89
+ plt.close()
90
+
91
+ def test_pandas_plots_register(self):
92
+ plt = pytest.importorskip("matplotlib.pyplot")
93
+ s = Series(range(12), index=date_range("2017", periods=12))
94
+ # Set to the "warn" state, in case this isn't the first test run
95
+ with tm.assert_produces_warning(None) as w:
96
+ s.plot()
97
+
98
+ try:
99
+ assert len(w) == 0
100
+ finally:
101
+ plt.close()
102
+
103
+ def test_matplotlib_formatters(self):
104
+ units = pytest.importorskip("matplotlib.units")
105
+
106
+ # Can't make any assertion about the start state.
107
+ # We check that toggling converters off removes it, and toggling it
108
+ # on restores it.
109
+
110
+ with cf.option_context("plotting.matplotlib.register_converters", True):
111
+ with cf.option_context("plotting.matplotlib.register_converters", False):
112
+ assert Timestamp not in units.registry
113
+ assert Timestamp in units.registry
114
+
115
+ def test_option_no_warning(self):
116
+ pytest.importorskip("matplotlib.pyplot")
117
+ ctx = cf.option_context("plotting.matplotlib.register_converters", False)
118
+ plt = pytest.importorskip("matplotlib.pyplot")
119
+ s = Series(range(12), index=date_range("2017", periods=12))
120
+ _, ax = plt.subplots()
121
+
122
+ # Test without registering first, no warning
123
+ with ctx:
124
+ ax.plot(s.index, s.values)
125
+
126
+ # Now test with registering
127
+ register_matplotlib_converters()
128
+ with ctx:
129
+ ax.plot(s.index, s.values)
130
+ plt.close()
131
+
132
+ def test_registry_resets(self):
133
+ units = pytest.importorskip("matplotlib.units")
134
+ dates = pytest.importorskip("matplotlib.dates")
135
+
136
+ # make a copy, to reset to
137
+ original = dict(units.registry)
138
+
139
+ try:
140
+ # get to a known state
141
+ units.registry.clear()
142
+ date_converter = dates.DateConverter()
143
+ units.registry[datetime] = date_converter
144
+ units.registry[date] = date_converter
145
+
146
+ register_matplotlib_converters()
147
+ assert units.registry[date] is not date_converter
148
+ deregister_matplotlib_converters()
149
+ assert units.registry[date] is date_converter
150
+
151
+ finally:
152
+ # restore original state
153
+ units.registry.clear()
154
+ for k, v in original.items():
155
+ units.registry[k] = v
156
+
157
+
158
+ class TestDateTimeConverter:
159
+ @pytest.fixture
160
+ def dtc(self):
161
+ return converter.DatetimeConverter()
162
+
163
+ def test_convert_accepts_unicode(self, dtc):
164
+ r1 = dtc.convert("2000-01-01 12:22", None, None)
165
+ r2 = dtc.convert("2000-01-01 12:22", None, None)
166
+ assert r1 == r2, "DatetimeConverter.convert should accept unicode"
167
+
168
+ def test_conversion(self, dtc):
169
+ rs = dtc.convert(["2012-1-1"], None, None)[0]
170
+ xp = dates.date2num(datetime(2012, 1, 1))
171
+ assert rs == xp
172
+
173
+ rs = dtc.convert("2012-1-1", None, None)
174
+ assert rs == xp
175
+
176
+ rs = dtc.convert(date(2012, 1, 1), None, None)
177
+ assert rs == xp
178
+
179
+ rs = dtc.convert("2012-1-1", None, None)
180
+ assert rs == xp
181
+
182
+ rs = dtc.convert(Timestamp("2012-1-1"), None, None)
183
+ assert rs == xp
184
+
185
+ # also testing datetime64 dtype (GH8614)
186
+ rs = dtc.convert("2012-01-01", None, None)
187
+ assert rs == xp
188
+
189
+ rs = dtc.convert("2012-01-01 00:00:00+0000", None, None)
190
+ assert rs == xp
191
+
192
+ rs = dtc.convert(
193
+ np.array(["2012-01-01 00:00:00+0000", "2012-01-02 00:00:00+0000"]),
194
+ None,
195
+ None,
196
+ )
197
+ assert rs[0] == xp
198
+
199
+ # we have a tz-aware date (constructed to that when we turn to utc it
200
+ # is the same as our sample)
201
+ ts = Timestamp("2012-01-01").tz_localize("UTC").tz_convert("US/Eastern")
202
+ rs = dtc.convert(ts, None, None)
203
+ assert rs == xp
204
+
205
+ rs = dtc.convert(ts.to_pydatetime(), None, None)
206
+ assert rs == xp
207
+
208
+ rs = dtc.convert(Index([ts - Day(1), ts]), None, None)
209
+ assert rs[1] == xp
210
+
211
+ rs = dtc.convert(Index([ts - Day(1), ts]).to_pydatetime(), None, None)
212
+ assert rs[1] == xp
213
+
214
+ def test_conversion_float(self, dtc):
215
+ rtol = 0.5 * 10**-9
216
+
217
+ rs = dtc.convert(Timestamp("2012-1-1 01:02:03", tz="UTC"), None, None)
218
+ xp = converter.mdates.date2num(Timestamp("2012-1-1 01:02:03", tz="UTC"))
219
+ tm.assert_almost_equal(rs, xp, rtol=rtol)
220
+
221
+ rs = dtc.convert(
222
+ Timestamp("2012-1-1 09:02:03", tz="Asia/Hong_Kong"), None, None
223
+ )
224
+ tm.assert_almost_equal(rs, xp, rtol=rtol)
225
+
226
+ rs = dtc.convert(datetime(2012, 1, 1, 1, 2, 3), None, None)
227
+ tm.assert_almost_equal(rs, xp, rtol=rtol)
228
+
229
+ @pytest.mark.parametrize(
230
+ "values",
231
+ [
232
+ [date(1677, 1, 1), date(1677, 1, 2)],
233
+ [datetime(1677, 1, 1, 12), datetime(1677, 1, 2, 12)],
234
+ ],
235
+ )
236
+ def test_conversion_outofbounds_datetime(self, dtc, values):
237
+ # 2579
238
+ rs = dtc.convert(values, None, None)
239
+ xp = converter.mdates.date2num(values)
240
+ tm.assert_numpy_array_equal(rs, xp)
241
+ rs = dtc.convert(values[0], None, None)
242
+ xp = converter.mdates.date2num(values[0])
243
+ assert rs == xp
244
+
245
+ @pytest.mark.parametrize(
246
+ "time,format_expected",
247
+ [
248
+ (0, "00:00"), # time2num(datetime.time.min)
249
+ (86399.999999, "23:59:59.999999"), # time2num(datetime.time.max)
250
+ (90000, "01:00"),
251
+ (3723, "01:02:03"),
252
+ (39723.2, "11:02:03.200"),
253
+ ],
254
+ )
255
+ def test_time_formatter(self, time, format_expected):
256
+ # issue 18478
257
+ result = converter.TimeFormatter(None)(time)
258
+ assert result == format_expected
259
+
260
+ @pytest.mark.parametrize("freq", ("B", "ms", "s"))
261
+ def test_dateindex_conversion(self, freq, dtc):
262
+ rtol = 10**-9
263
+ dateindex = date_range("2020-01-01", periods=10, freq=freq)
264
+ rs = dtc.convert(dateindex, None, None)
265
+ xp = converter.mdates.date2num(dateindex._mpl_repr())
266
+ tm.assert_almost_equal(rs, xp, rtol=rtol)
267
+
268
+ @pytest.mark.parametrize("offset", [Second(), Milli(), Micro(50)])
269
+ def test_resolution(self, offset, dtc):
270
+ # Matplotlib's time representation using floats cannot distinguish
271
+ # intervals smaller than ~10 microseconds in the common range of years.
272
+ ts1 = Timestamp("2012-1-1")
273
+ ts2 = ts1 + offset
274
+ val1 = dtc.convert(ts1, None, None)
275
+ val2 = dtc.convert(ts2, None, None)
276
+ if not val1 < val2:
277
+ raise AssertionError(f"{val1} is not less than {val2}.")
278
+
279
+ def test_convert_nested(self, dtc):
280
+ inner = [Timestamp("2017-01-01"), Timestamp("2017-01-02")]
281
+ data = [inner, inner]
282
+ result = dtc.convert(data, None, None)
283
+ expected = [dtc.convert(x, None, None) for x in data]
284
+ assert (np.array(result) == expected).all()
285
+
286
+
287
+ class TestPeriodConverter:
288
+ @pytest.fixture
289
+ def pc(self):
290
+ return converter.PeriodConverter()
291
+
292
+ @pytest.fixture
293
+ def axis(self):
294
+ class Axis:
295
+ pass
296
+
297
+ axis = Axis()
298
+ axis.freq = "D"
299
+ return axis
300
+
301
+ def test_convert_accepts_unicode(self, pc, axis):
302
+ r1 = pc.convert("2012-1-1", None, axis)
303
+ r2 = pc.convert("2012-1-1", None, axis)
304
+ assert r1 == r2
305
+
306
+ def test_conversion(self, pc, axis):
307
+ rs = pc.convert(["2012-1-1"], None, axis)[0]
308
+ xp = Period("2012-1-1").ordinal
309
+ assert rs == xp
310
+
311
+ rs = pc.convert("2012-1-1", None, axis)
312
+ assert rs == xp
313
+
314
+ rs = pc.convert([date(2012, 1, 1)], None, axis)[0]
315
+ assert rs == xp
316
+
317
+ rs = pc.convert(date(2012, 1, 1), None, axis)
318
+ assert rs == xp
319
+
320
+ rs = pc.convert([Timestamp("2012-1-1")], None, axis)[0]
321
+ assert rs == xp
322
+
323
+ rs = pc.convert(Timestamp("2012-1-1"), None, axis)
324
+ assert rs == xp
325
+
326
+ rs = pc.convert("2012-01-01", None, axis)
327
+ assert rs == xp
328
+
329
+ rs = pc.convert("2012-01-01 00:00:00+0000", None, axis)
330
+ assert rs == xp
331
+
332
+ rs = pc.convert(
333
+ np.array(
334
+ ["2012-01-01 00:00:00", "2012-01-02 00:00:00"],
335
+ dtype="datetime64[ns]",
336
+ ),
337
+ None,
338
+ axis,
339
+ )
340
+ assert rs[0] == xp
341
+
342
+ def test_integer_passthrough(self, pc, axis):
343
+ # GH9012
344
+ rs = pc.convert([0, 1], None, axis)
345
+ xp = [0, 1]
346
+ assert rs == xp
347
+
348
+ def test_convert_nested(self, pc, axis):
349
+ data = ["2012-1-1", "2012-1-2"]
350
+ r1 = pc.convert([data, data], None, axis)
351
+ r2 = [pc.convert(data, None, axis) for _ in range(2)]
352
+ assert r1 == r2
353
+
354
+
355
+ class TestTimeDeltaConverter:
356
+ """Test timedelta converter"""
357
+
358
+ @pytest.mark.parametrize(
359
+ "x, decimal, format_expected",
360
+ [
361
+ (0.0, 0, "00:00:00"),
362
+ (3972320000000, 1, "01:06:12.3"),
363
+ (713233432000000, 2, "8 days 06:07:13.43"),
364
+ (32423432000000, 4, "09:00:23.4320"),
365
+ ],
366
+ )
367
+ def test_format_timedelta_ticks(self, x, decimal, format_expected):
368
+ tdc = converter.TimeSeries_TimedeltaFormatter
369
+ result = tdc.format_timedelta_ticks(x, pos=None, n_decimals=decimal)
370
+ assert result == format_expected
371
+
372
+ @pytest.mark.parametrize("view_interval", [(1, 2), (2, 1)])
373
+ def test_call_w_different_view_intervals(self, view_interval, monkeypatch):
374
+ # previously broke on reversed xlims; see GH37454
375
+ class mock_axis:
376
+ def get_view_interval(self):
377
+ return view_interval
378
+
379
+ tdc = converter.TimeSeries_TimedeltaFormatter()
380
+ monkeypatch.setattr(tdc, "axis", mock_axis())
381
+ tdc(0.0, 0)
382
+
383
+
384
+ @pytest.mark.parametrize("year_span", [11.25, 30, 80, 150, 400, 800, 1500, 2500, 3500])
385
+ # The range is limited to 11.25 at the bottom by if statements in
386
+ # the _quarterly_finder() function
387
+ def test_quarterly_finder(year_span):
388
+ vmin = -1000
389
+ vmax = vmin + year_span * 4
390
+ span = vmax - vmin + 1
391
+ if span < 45:
392
+ pytest.skip("the quarterly finder is only invoked if the span is >= 45")
393
+ nyears = span / 4
394
+ (min_anndef, maj_anndef) = converter._get_default_annual_spacing(nyears)
395
+ result = converter._quarterly_finder(vmin, vmax, to_offset("QE"))
396
+ quarters = PeriodIndex(
397
+ arrays.PeriodArray(np.array([x[0] for x in result]), dtype="period[Q]")
398
+ )
399
+ majors = np.array([x[1] for x in result])
400
+ minors = np.array([x[2] for x in result])
401
+ major_quarters = quarters[majors]
402
+ minor_quarters = quarters[minors]
403
+ check_major_years = major_quarters.year % maj_anndef == 0
404
+ check_minor_years = minor_quarters.year % min_anndef == 0
405
+ check_major_quarters = major_quarters.quarter == 1
406
+ check_minor_quarters = minor_quarters.quarter == 1
407
+ assert np.all(check_major_years)
408
+ assert np.all(check_minor_years)
409
+ assert np.all(check_major_quarters)
410
+ assert np.all(check_minor_quarters)
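To close, a minimal sketch of the converter registration exercised by TestRegistration above; not part of the diff, and it assumes matplotlib is installed.

import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import (
    deregister_matplotlib_converters,
    register_matplotlib_converters,
)

s = pd.Series(range(12), index=pd.date_range("2017", periods=12))

# Register pandas' Timestamp/Period converters with matplotlib's unit
# registry so plain matplotlib calls understand pandas datetime-likes.
register_matplotlib_converters()
fig, ax = plt.subplots()
ax.plot(s.index, s.values)

# Undo the registration, restoring whatever converters were present before.
deregister_matplotlib_converters()
plt.close(fig)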