applied-ai-018 commited on
Commit
ec0dc24
·
verified ·
1 Parent(s): 4753e7c

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/__init__.cpython-310.pyc +0 -0
  2. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/conftest.cpython-310.pyc +0 -0
  3. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_arrow.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_categorical.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_common.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_datetime.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_extension.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_interval.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_masked.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_numpy.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_period.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_sparse.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_string.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/__init__.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/accumulate.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/base.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/casting.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/constructors.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dim2.cpython-310.pyc +0 -0
  20. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/getitem.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/groupby.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/index.cpython-310.pyc +0 -0
  24. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/interface.cpython-310.pyc +0 -0
  25. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/io.cpython-310.pyc +0 -0
  26. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/methods.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/missing.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/ops.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/printing.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reduce.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reshaping.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/setitem.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/casting.py +87 -0
  34. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/dim2.py +345 -0
  35. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/getitem.py +469 -0
  36. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/date/__init__.py +6 -0
  37. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/date/__pycache__/__init__.cpython-310.pyc +0 -0
  38. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/date/__pycache__/array.cpython-310.pyc +0 -0
  39. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/date/array.py +188 -0
  40. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/decimal/__init__.py +8 -0
  41. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/__init__.cpython-310.pyc +0 -0
  42. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/array.cpython-310.pyc +0 -0
  43. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/test_decimal.cpython-310.pyc +0 -0
  44. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/decimal/array.py +311 -0
  45. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/decimal/test_decimal.py +567 -0
  46. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/json/__init__.py +7 -0
  47. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/json/__pycache__/__init__.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/json/__pycache__/array.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/json/__pycache__/test_json.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/json/array.py +256 -0
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (195 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/conftest.cpython-310.pyc ADDED
Binary file (6.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_arrow.cpython-310.pyc ADDED
Binary file (90.3 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_categorical.cpython-310.pyc ADDED
Binary file (7.31 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_common.cpython-310.pyc ADDED
Binary file (3.79 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_datetime.cpython-310.pyc ADDED
Binary file (5.47 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_extension.cpython-310.pyc ADDED
Binary file (1.26 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_interval.cpython-310.pyc ADDED
Binary file (3.87 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_masked.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_numpy.cpython-310.pyc ADDED
Binary file (15.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_period.cpython-310.pyc ADDED
Binary file (4.66 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_sparse.cpython-310.pyc ADDED
Binary file (17.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/__pycache__/test_string.cpython-310.pyc ADDED
Binary file (7.91 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (3.81 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/accumulate.cpython-310.pyc ADDED
Binary file (1.65 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/base.cpython-310.pyc ADDED
Binary file (358 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/casting.cpython-310.pyc ADDED
Binary file (3.69 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/constructors.cpython-310.pyc ADDED
Binary file (5.76 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dim2.cpython-310.pyc ADDED
Binary file (9.39 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/dtype.cpython-310.pyc ADDED
Binary file (5.34 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/getitem.cpython-310.pyc ADDED
Binary file (14.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/groupby.cpython-310.pyc ADDED
Binary file (5.56 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/index.cpython-310.pyc ADDED
Binary file (999 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/interface.cpython-310.pyc ADDED
Binary file (4.35 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/io.cpython-310.pyc ADDED
Binary file (1.39 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/methods.cpython-310.pyc ADDED
Binary file (23.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/missing.cpython-310.pyc ADDED
Binary file (6.14 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/ops.cpython-310.pyc ADDED
Binary file (8.95 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/printing.cpython-310.pyc ADDED
Binary file (1.86 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reduce.cpython-310.pyc ADDED
Binary file (4.38 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/reshaping.cpython-310.pyc ADDED
Binary file (11.6 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/__pycache__/setitem.cpython-310.pyc ADDED
Binary file (15.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/casting.py ADDED
@@ -0,0 +1,87 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas.util._test_decorators as td
5
+
6
+ import pandas as pd
7
+ import pandas._testing as tm
8
+ from pandas.core.internals.blocks import NumpyBlock
9
+
10
+
11
class BaseCastingTests:
    """Casting to and from ExtensionDtypes"""

    def test_astype_object_series(self, all_data):
        """astype(object) on a Series yields an object-dtype, ndarray-backed result."""
        ser = pd.Series(all_data, name="A")
        result = ser.astype(object)
        assert result.dtype == np.dtype(object)
        # Only block-based managers expose `.blocks`; skip this check otherwise.
        if hasattr(result._mgr, "blocks"):
            blk = result._mgr.blocks[0]
            assert isinstance(blk, NumpyBlock)
            assert blk.is_object
        assert isinstance(result._mgr.array, np.ndarray)
        assert result._mgr.array.dtype == np.dtype(object)

    def test_astype_object_frame(self, all_data):
        """astype(object) on a single-column DataFrame yields an object ndarray block."""
        df = pd.DataFrame({"A": all_data})

        result = df.astype(object)
        # Only block-based managers expose `.blocks`; skip this check otherwise.
        if hasattr(result._mgr, "blocks"):
            blk = result._mgr.blocks[0]
            assert isinstance(blk, NumpyBlock), type(blk)
            assert blk.is_object
        assert isinstance(result._mgr.arrays[0], np.ndarray)
        assert result._mgr.arrays[0].dtype == np.dtype(object)

        # check that we can compare the dtypes
        comp = result.dtypes == df.dtypes
        assert not comp.any()

    def test_tolist(self, data):
        """Series.tolist() matches list() over the raw ExtensionArray."""
        result = pd.Series(data).tolist()
        expected = list(data)
        assert result == expected

    def test_astype_str(self, data):
        """astype(str) applies str() element-wise."""
        result = pd.Series(data[:5]).astype(str)
        expected = pd.Series([str(x) for x in data[:5]], dtype=str)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "nullable_string_dtype",
        [
            "string[python]",
            pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")),
        ],
    )
    def test_astype_string(self, data, nullable_string_dtype):
        """Casting to a nullable string dtype stringifies (decoding bytes)."""
        # GH-33465, GH#45326 as of 2.0 we decode bytes instead of calling str(obj)
        result = pd.Series(data[:5]).astype(nullable_string_dtype)
        expected = pd.Series(
            [str(x) if not isinstance(x, bytes) else x.decode() for x in data[:5]],
            dtype=nullable_string_dtype,
        )
        tm.assert_series_equal(result, expected)

    def test_to_numpy(self, data):
        """to_numpy() agrees with np.asarray, on the array and via a Series."""
        expected = np.asarray(data)

        result = data.to_numpy()
        tm.assert_equal(result, expected)

        result = pd.Series(data).to_numpy()
        tm.assert_equal(result, expected)

    def test_astype_empty_dataframe(self, dtype):
        """astype on an empty DataFrame is a no-op."""
        # https://github.com/pandas-dev/pandas/issues/33113
        df = pd.DataFrame()
        result = df.astype(dtype)
        tm.assert_frame_equal(result, df)

    @pytest.mark.parametrize("copy", [True, False])
    def test_astype_own_type(self, data, copy):
        # ensure that astype returns the original object for equal dtype and copy=False
        # https://github.com/pandas-dev/pandas/issues/28488
        result = data.astype(data.dtype, copy=copy)
        assert (result is data) is (not copy)
        tm.assert_extension_array_equal(result, data)
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/dim2.py ADDED
@@ -0,0 +1,345 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Tests for 2D compatibility.
3
+ """
4
+ import numpy as np
5
+ import pytest
6
+
7
+ from pandas._libs.missing import is_matching_na
8
+
9
+ from pandas.core.dtypes.common import (
10
+ is_bool_dtype,
11
+ is_integer_dtype,
12
+ )
13
+
14
+ import pandas as pd
15
+ import pandas._testing as tm
16
+ from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE
17
+
18
+
19
class Dim2CompatTests:
    """Tests of 2D compatibility for ExtensionArray subclasses.

    Exercises reshape/transpose/indexing/reduction behavior of EAs that
    declare ``dtype._supports_2d``.
    """

    # Note: these are ONLY for ExtensionArray subclasses that support 2D arrays.
    # i.e. not for pyarrow-backed EAs.

    @pytest.fixture(autouse=True)
    def skip_if_doesnt_support_2d(self, dtype, request):
        # Auto-applied fixture: skip Dim2CompatTests-defined tests for 1D-only EAs.
        if not dtype._supports_2d:
            node = request.node
            # In cases where we are mixed in to ExtensionTests, we only want to
            # skip tests that are defined in Dim2CompatTests
            test_func = node._obj
            if test_func.__qualname__.startswith("Dim2CompatTests"):
                # TODO: is there a less hacky way of checking this?
                pytest.skip(f"{dtype} does not support 2D.")

    def test_transpose(self, data):
        """.T reverses the 2D shape."""
        arr2d = data.repeat(2).reshape(-1, 2)
        shape = arr2d.shape
        assert shape[0] != shape[-1]  # otherwise the rest of the test is useless

        assert arr2d.T.shape == shape[::-1]

    def test_frame_from_2d_array(self, data):
        """A DataFrame built from a 2D EA matches one built column-by-column."""
        arr2d = data.repeat(2).reshape(-1, 2)

        df = pd.DataFrame(arr2d)
        expected = pd.DataFrame({0: arr2d[:, 0], 1: arr2d[:, 1]})
        tm.assert_frame_equal(df, expected)

    def test_swapaxes(self, data):
        """swapaxes(0, 1) is equivalent to transposing."""
        arr2d = data.repeat(2).reshape(-1, 2)

        result = arr2d.swapaxes(0, 1)
        expected = arr2d.T
        tm.assert_extension_array_equal(result, expected)

    def test_delete_2d(self, data):
        """delete works along both axes of a 2D EA."""
        arr2d = data.repeat(3).reshape(-1, 3)

        # axis = 0
        result = arr2d.delete(1, axis=0)
        expected = data.delete(1).repeat(3).reshape(-1, 3)
        tm.assert_extension_array_equal(result, expected)

        # axis = 1
        result = arr2d.delete(1, axis=1)
        expected = data.repeat(2).reshape(-1, 2)
        tm.assert_extension_array_equal(result, expected)

    def test_take_2d(self, data):
        """take along axis=0 of a column vector matches 1D take then reshape."""
        arr2d = data.reshape(-1, 1)

        result = arr2d.take([0, 0, -1], axis=0)

        expected = data.take([0, 0, -1]).reshape(-1, 1)
        tm.assert_extension_array_equal(result, expected)

    def test_repr_2d(self, data):
        """repr of a 2D EA mentions the class name exactly once."""
        # this could fail in a corner case where an element contained the name
        res = repr(data.reshape(1, -1))
        assert res.count(f"<{type(data).__name__}") == 1

        res = repr(data.reshape(-1, 1))
        assert res.count(f"<{type(data).__name__}") == 1

    def test_reshape(self, data):
        """reshape accepts both varargs and tuple shapes; invalid shapes raise."""
        arr2d = data.reshape(-1, 1)
        assert arr2d.shape == (data.size, 1)
        assert len(arr2d) == len(data)

        arr2d = data.reshape((-1, 1))
        assert arr2d.shape == (data.size, 1)
        assert len(arr2d) == len(data)

        with pytest.raises(ValueError):
            data.reshape((data.size, 2))
        with pytest.raises(ValueError):
            data.reshape(data.size, 2)

    def test_getitem_2d(self, data):
        """__getitem__ on 2D arrays: scalar rows, slices, and np.newaxis."""
        arr2d = data.reshape(1, -1)

        result = arr2d[0]
        tm.assert_extension_array_equal(result, data)

        with pytest.raises(IndexError):
            arr2d[1]

        with pytest.raises(IndexError):
            arr2d[-2]

        result = arr2d[:]
        tm.assert_extension_array_equal(result, arr2d)

        result = arr2d[:, :]
        tm.assert_extension_array_equal(result, arr2d)

        result = arr2d[:, 0]
        expected = data[[0]]
        tm.assert_extension_array_equal(result, expected)

        # dimension-expanding getitem on 1D
        result = data[:, np.newaxis]
        tm.assert_extension_array_equal(result, arr2d.T)

    def test_iter_2d(self, data):
        """Iterating a 2D EA yields 1D rows of the same type and dtype."""
        arr2d = data.reshape(1, -1)

        objs = list(iter(arr2d))
        assert len(objs) == arr2d.shape[0]

        for obj in objs:
            assert isinstance(obj, type(data))
            assert obj.dtype == data.dtype
            assert obj.ndim == 1
            assert len(obj) == arr2d.shape[1]

    def test_tolist_2d(self, data):
        """tolist on a 2D EA returns a list of row lists."""
        arr2d = data.reshape(1, -1)

        result = arr2d.tolist()
        expected = [data.tolist()]

        assert isinstance(result, list)
        assert all(isinstance(x, list) for x in result)

        assert result == expected

    def test_concat_2d(self, data):
        """_concat_same_type supports axis=0 and axis=1; axis=2 is invalid."""
        left = type(data)._concat_same_type([data, data]).reshape(-1, 2)
        right = left.copy()

        # axis=0
        result = left._concat_same_type([left, right], axis=0)
        expected = data._concat_same_type([data] * 4).reshape(-1, 2)
        tm.assert_extension_array_equal(result, expected)

        # axis=1
        result = left._concat_same_type([left, right], axis=1)
        assert result.shape == (len(data), 4)
        tm.assert_extension_array_equal(result[:, :2], left)
        tm.assert_extension_array_equal(result[:, 2:], right)

        # axis > 1 -> invalid
        msg = "axis 2 is out of bounds for array of dimension 2"
        with pytest.raises(ValueError, match=msg):
            left._concat_same_type([left, right], axis=2)

    @pytest.mark.parametrize("method", ["backfill", "pad"])
    def test_fillna_2d_method(self, data_missing, method):
        """_pad_or_backfill on 2D matches the 1D fill repeated column-wise."""
        # pad_or_backfill is always along axis=0
        arr = data_missing.repeat(2).reshape(2, 2)
        assert arr[0].isna().all()
        assert not arr[1].isna().any()

        result = arr._pad_or_backfill(method=method, limit=None)

        expected = data_missing._pad_or_backfill(method=method).repeat(2).reshape(2, 2)
        tm.assert_extension_array_equal(result, expected)

        # Reverse so that backfill is not a no-op.
        arr2 = arr[::-1]
        assert not arr2[0].isna().any()
        assert arr2[1].isna().all()

        result2 = arr2._pad_or_backfill(method=method, limit=None)

        expected2 = (
            data_missing[::-1]._pad_or_backfill(method=method).repeat(2).reshape(2, 2)
        )
        tm.assert_extension_array_equal(result2, expected2)

    @pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
    def test_reductions_2d_axis_none(self, data, method):
        """A full (axis=None) 2D reduction matches the 1D reduction, including
        raising the same exception type when the 1D reduction is invalid."""
        arr2d = data.reshape(1, -1)

        err_expected = None
        err_result = None
        try:
            expected = getattr(data, method)()
        except Exception as err:
            # if the 1D reduction is invalid, the 2D reduction should be as well
            err_expected = err
            try:
                result = getattr(arr2d, method)(axis=None)
            except Exception as err2:
                err_result = err2
        else:
            result = getattr(arr2d, method)(axis=None)

        if err_result is not None or err_expected is not None:
            assert type(err_result) == type(err_expected)
            return

        assert is_matching_na(result, expected) or result == expected

    @pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
    @pytest.mark.parametrize("min_count", [0, 1])
    def test_reductions_2d_axis0(self, data, method, min_count):
        """Column-wise (axis=0) reductions of a one-row 2D array."""
        if min_count == 1 and method not in ["sum", "prod"]:
            pytest.skip(f"min_count not relevant for {method}")

        arr2d = data.reshape(1, -1)

        kwargs = {}
        if method in ["std", "var"]:
            # pass ddof=0 so we get all-zero std instead of all-NA std
            kwargs["ddof"] = 0
        elif method in ["prod", "sum"]:
            kwargs["min_count"] = min_count

        try:
            result = getattr(arr2d, method)(axis=0, **kwargs)
        except Exception as err:
            try:
                getattr(data, method)()
            except Exception as err2:
                assert type(err) == type(err2)
                return
            else:
                raise AssertionError("Both reductions should raise or neither")

        def get_reduction_result_dtype(dtype):
            # windows and 32bit builds will in some cases have int32/uint32
            # where other builds will have int64/uint64.
            if dtype.itemsize == 8:
                return dtype
            elif dtype.kind in "ib":
                return NUMPY_INT_TO_DTYPE[np.dtype(int)]
            else:
                # i.e. dtype.kind == "u"
                return NUMPY_INT_TO_DTYPE[np.dtype("uint")]

        if method in ["sum", "prod"]:
            # std and var are not dtype-preserving
            expected = data
            if data.dtype.kind in "iub":
                dtype = get_reduction_result_dtype(data.dtype)
                expected = data.astype(dtype)
                assert dtype == expected.dtype

            if min_count == 0:
                fill_value = 1 if method == "prod" else 0
                expected = expected.fillna(fill_value)

            tm.assert_extension_array_equal(result, expected)
        elif method == "median":
            # std and var are not dtype-preserving
            expected = data
            tm.assert_extension_array_equal(result, expected)
        elif method in ["mean", "std", "var"]:
            if is_integer_dtype(data) or is_bool_dtype(data):
                data = data.astype("Float64")
            if method == "mean":
                tm.assert_extension_array_equal(result, data)
            else:
                tm.assert_extension_array_equal(result, data - data)

    @pytest.mark.parametrize("method", ["mean", "median", "var", "std", "sum", "prod"])
    def test_reductions_2d_axis1(self, data, method):
        """Row-wise (axis=1) reductions of a one-row 2D array."""
        arr2d = data.reshape(1, -1)

        try:
            result = getattr(arr2d, method)(axis=1)
        except Exception as err:
            try:
                getattr(data, method)()
            except Exception as err2:
                assert type(err) == type(err2)
                return
            else:
                raise AssertionError("Both reductions should raise or neither")

        # not necessarily type/dtype-preserving, so weaker assertions
        assert result.shape == (1,)
        expected_scalar = getattr(data, method)()
        res = result[0]
        assert is_matching_na(res, expected_scalar) or res == expected_scalar
298
+
299
+
300
class NDArrayBacked2DTests(Dim2CompatTests):
    # More specific tests for NDArrayBackedExtensionArray subclasses

    def test_copy_order(self, data):
        """copy(order=...) on the backing ndarray follows numpy semantics."""
        # We should be matching numpy semantics for the "order" keyword in 'copy'
        arr2d = data.repeat(2).reshape(-1, 2)
        assert arr2d._ndarray.flags["C_CONTIGUOUS"]

        # default copy is C-contiguous
        res = arr2d.copy()
        assert res._ndarray.flags["C_CONTIGUOUS"]

        # copying a strided view re-compacts to C order
        res = arr2d[::2, ::2].copy()
        assert res._ndarray.flags["C_CONTIGUOUS"]

        res = arr2d.copy("F")
        assert not res._ndarray.flags["C_CONTIGUOUS"]
        assert res._ndarray.flags["F_CONTIGUOUS"]

        # "K" keeps the source layout: C source stays C ...
        res = arr2d.copy("K")
        assert res._ndarray.flags["C_CONTIGUOUS"]

        # ... and a transposed (F-layout) source stays F
        res = arr2d.T.copy("K")
        assert not res._ndarray.flags["C_CONTIGUOUS"]
        assert res._ndarray.flags["F_CONTIGUOUS"]

        # order not accepted by numpy
        msg = r"order must be one of 'C', 'F', 'A', or 'K' \(got 'Q'\)"
        with pytest.raises(ValueError, match=msg):
            arr2d.copy("Q")

        # neither contiguity
        arr_nc = arr2d[::2]
        assert not arr_nc._ndarray.flags["C_CONTIGUOUS"]
        assert not arr_nc._ndarray.flags["F_CONTIGUOUS"]

        assert arr_nc.copy()._ndarray.flags["C_CONTIGUOUS"]
        assert not arr_nc.copy()._ndarray.flags["F_CONTIGUOUS"]

        assert arr_nc.copy("C")._ndarray.flags["C_CONTIGUOUS"]
        assert not arr_nc.copy("C")._ndarray.flags["F_CONTIGUOUS"]

        assert not arr_nc.copy("F")._ndarray.flags["C_CONTIGUOUS"]
        assert arr_nc.copy("F")._ndarray.flags["F_CONTIGUOUS"]

        assert arr_nc.copy("K")._ndarray.flags["C_CONTIGUOUS"]
        assert not arr_nc.copy("K")._ndarray.flags["F_CONTIGUOUS"]
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/base/getitem.py ADDED
@@ -0,0 +1,469 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pytest
3
+
4
+ import pandas as pd
5
+ import pandas._testing as tm
6
+
7
+
8
+ class BaseGetitemTests:
9
+ """Tests for ExtensionArray.__getitem__."""
10
+
11
+ def test_iloc_series(self, data):
12
+ ser = pd.Series(data)
13
+ result = ser.iloc[:4]
14
+ expected = pd.Series(data[:4])
15
+ tm.assert_series_equal(result, expected)
16
+
17
+ result = ser.iloc[[0, 1, 2, 3]]
18
+ tm.assert_series_equal(result, expected)
19
+
20
def test_iloc_frame(self, data):
    """iloc on a DataFrame with an EA column: list/slice column indexers
    return frames, a scalar column indexer returns a Series."""
    df = pd.DataFrame({"A": data, "B": np.arange(len(data), dtype="int64")})
    expected = pd.DataFrame({"A": data[:4]})

    # slice -> frame
    result = df.iloc[:4, [0]]
    tm.assert_frame_equal(result, expected)

    # sequence -> frame
    result = df.iloc[[0, 1, 2, 3], [0]]
    tm.assert_frame_equal(result, expected)

    expected = pd.Series(data[:4], name="A")

    # slice -> series
    result = df.iloc[:4, 0]
    tm.assert_series_equal(result, expected)

    # sequence -> series
    result = df.iloc[:4, 0]
    tm.assert_series_equal(result, expected)

    # GH#32959 slice columns with step
    result = df.iloc[:, ::2]
    tm.assert_frame_equal(result, df[["A"]])
    result = df[["B", "A"]].iloc[:, ::2]
    tm.assert_frame_equal(result, df[["B"]])
47
+
48
def test_iloc_frame_single_block(self, data):
    """Column slices of a single-EA-block frame preserve the block (GH#32959)."""
    # GH#32959 null slice along index, slice along columns with single-block
    df = pd.DataFrame({"A": data})

    result = df.iloc[:, :]
    tm.assert_frame_equal(result, df)

    result = df.iloc[:, :1]
    tm.assert_frame_equal(result, df)

    # over-long slices clamp to the available single column
    result = df.iloc[:, :2]
    tm.assert_frame_equal(result, df)

    result = df.iloc[:, ::2]
    tm.assert_frame_equal(result, df)

    # an out-of-range start yields an empty (zero-column) frame
    result = df.iloc[:, 1:2]
    tm.assert_frame_equal(result, df.iloc[:, :0])

    result = df.iloc[:, -1:]
    tm.assert_frame_equal(result, df)
69
+
70
+ def test_loc_series(self, data):
71
+ ser = pd.Series(data)
72
+ result = ser.loc[:3]
73
+ expected = pd.Series(data[:4])
74
+ tm.assert_series_equal(result, expected)
75
+
76
+ result = ser.loc[[0, 1, 2, 3]]
77
+ tm.assert_series_equal(result, expected)
78
+
79
def test_loc_frame(self, data):
    """loc on a DataFrame with an EA column: list column indexers return
    frames, a scalar column label returns a Series; slices are inclusive."""
    df = pd.DataFrame({"A": data, "B": np.arange(len(data), dtype="int64")})
    expected = pd.DataFrame({"A": data[:4]})

    # slice -> frame
    result = df.loc[:3, ["A"]]
    tm.assert_frame_equal(result, expected)

    # sequence -> frame
    result = df.loc[[0, 1, 2, 3], ["A"]]
    tm.assert_frame_equal(result, expected)

    expected = pd.Series(data[:4], name="A")

    # slice -> series
    result = df.loc[:3, "A"]
    tm.assert_series_equal(result, expected)

    # sequence -> series
    result = df.loc[:3, "A"]
    tm.assert_series_equal(result, expected)
100
+
101
def test_loc_iloc_frame_single_dtype(self, data):
    """Row selection from a single-dtype frame returns a Series, not a scalar."""
    # GH#27110 bug in ExtensionBlock.iget caused df.iloc[n] to incorrectly
    # return a scalar
    df = pd.DataFrame({"A": data})
    expected = pd.Series([data[2]], index=["A"], name=2, dtype=data.dtype)

    result = df.loc[2]
    tm.assert_series_equal(result, expected)

    expected = pd.Series(
        [data[-1]], index=["A"], name=len(data) - 1, dtype=data.dtype
    )
    result = df.iloc[-1]
    tm.assert_series_equal(result, expected)
115
+
116
+ def test_getitem_scalar(self, data):
117
+ result = data[0]
118
+ assert isinstance(result, data.dtype.type)
119
+
120
+ result = pd.Series(data)[0]
121
+ assert isinstance(result, data.dtype.type)
122
+
123
def test_getitem_invalid(self, data):
    """Invalid keys and out-of-bounds positions raise IndexError with a
    recognizable message across the various EA backends."""
    # TODO: box over scalar, [scalar], (scalar,)?

    msg = (
        r"only integers, slices \(`:`\), ellipsis \(`...`\), numpy.newaxis "
        r"\(`None`\) and integer or boolean arrays are valid indices"
    )
    with pytest.raises(IndexError, match=msg):
        data["foo"]
    with pytest.raises(IndexError, match=msg):
        data[2.5]

    ub = len(data)
    # each backend words its out-of-bounds error differently; accept any
    msg = "|".join(
        [
            "list index out of range",  # json
            "index out of bounds",  # pyarrow
            "Out of bounds access",  # Sparse
            f"loc must be an integer between -{ub} and {ub}",  # Sparse
            f"index {ub+1} is out of bounds for axis 0 with size {ub}",
            f"index -{ub+1} is out of bounds for axis 0 with size {ub}",
        ]
    )
    with pytest.raises(IndexError, match=msg):
        data[ub + 1]
    with pytest.raises(IndexError, match=msg):
        data[-ub - 1]
150
+
151
def test_getitem_scalar_na(self, data_missing, na_cmp, na_value):
    """Indexing a missing position returns the dtype's NA value
    (compared via the fixture-provided na_cmp)."""
    result = data_missing[0]
    assert na_cmp(result, na_value)
154
+
155
+ def test_getitem_empty(self, data):
156
+ # Indexing with empty list
157
+ result = data[[]]
158
+ assert len(result) == 0
159
+ assert isinstance(result, type(data))
160
+
161
+ expected = data[np.array([], dtype="int64")]
162
+ tm.assert_extension_array_equal(result, expected)
163
+
164
+ def test_getitem_mask(self, data):
165
+ # Empty mask, raw array
166
+ mask = np.zeros(len(data), dtype=bool)
167
+ result = data[mask]
168
+ assert len(result) == 0
169
+ assert isinstance(result, type(data))
170
+
171
+ # Empty mask, in series
172
+ mask = np.zeros(len(data), dtype=bool)
173
+ result = pd.Series(data)[mask]
174
+ assert len(result) == 0
175
+ assert result.dtype == data.dtype
176
+
177
+ # non-empty mask, raw array
178
+ mask[0] = True
179
+ result = data[mask]
180
+ assert len(result) == 1
181
+ assert isinstance(result, type(data))
182
+
183
+ # non-empty mask, in series
184
+ result = pd.Series(data)[mask]
185
+ assert len(result) == 1
186
+ assert result.dtype == data.dtype
187
+
188
+ def test_getitem_mask_raises(self, data):
189
+ mask = np.array([True, False])
190
+ msg = f"Boolean index has wrong length: 2 instead of {len(data)}"
191
+ with pytest.raises(IndexError, match=msg):
192
+ data[mask]
193
+
194
+ mask = pd.array(mask, dtype="boolean")
195
+ with pytest.raises(IndexError, match=msg):
196
+ data[mask]
197
+
198
+ def test_getitem_boolean_array_mask(self, data):
199
+ mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
200
+ result = data[mask]
201
+ assert len(result) == 0
202
+ assert isinstance(result, type(data))
203
+
204
+ result = pd.Series(data)[mask]
205
+ assert len(result) == 0
206
+ assert result.dtype == data.dtype
207
+
208
+ mask[:5] = True
209
+ expected = data.take([0, 1, 2, 3, 4])
210
+ result = data[mask]
211
+ tm.assert_extension_array_equal(result, expected)
212
+
213
+ expected = pd.Series(expected)
214
+ result = pd.Series(data)[mask]
215
+ tm.assert_series_equal(result, expected)
216
+
217
+ def test_getitem_boolean_na_treated_as_false(self, data):
218
+ # https://github.com/pandas-dev/pandas/issues/31503
219
+ mask = pd.array(np.zeros(data.shape, dtype="bool"), dtype="boolean")
220
+ mask[:2] = pd.NA
221
+ mask[2:4] = True
222
+
223
+ result = data[mask]
224
+ expected = data[mask.fillna(False)]
225
+
226
+ tm.assert_extension_array_equal(result, expected)
227
+
228
+ s = pd.Series(data)
229
+
230
+ result = s[mask]
231
+ expected = s[mask.fillna(False)]
232
+
233
+ tm.assert_series_equal(result, expected)
234
+
235
+ @pytest.mark.parametrize(
236
+ "idx",
237
+ [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],
238
+ ids=["list", "integer-array", "numpy-array"],
239
+ )
240
+ def test_getitem_integer_array(self, data, idx):
241
+ result = data[idx]
242
+ assert len(result) == 3
243
+ assert isinstance(result, type(data))
244
+ expected = data.take([0, 1, 2])
245
+ tm.assert_extension_array_equal(result, expected)
246
+
247
+ expected = pd.Series(expected)
248
+ result = pd.Series(data)[idx]
249
+ tm.assert_series_equal(result, expected)
250
+
251
+ @pytest.mark.parametrize(
252
+ "idx",
253
+ [[0, 1, 2, pd.NA], pd.array([0, 1, 2, pd.NA], dtype="Int64")],
254
+ ids=["list", "integer-array"],
255
+ )
256
+ def test_getitem_integer_with_missing_raises(self, data, idx):
257
+ msg = "Cannot index with an integer indexer containing NA values"
258
+ with pytest.raises(ValueError, match=msg):
259
+ data[idx]
260
+
261
+ @pytest.mark.xfail(
262
+ reason="Tries label-based and raises KeyError; "
263
+ "in some cases raises when calling np.asarray"
264
+ )
265
+ @pytest.mark.parametrize(
266
+ "idx",
267
+ [[0, 1, 2, pd.NA], pd.array([0, 1, 2, pd.NA], dtype="Int64")],
268
+ ids=["list", "integer-array"],
269
+ )
270
+ def test_getitem_series_integer_with_missing_raises(self, data, idx):
271
+ msg = "Cannot index with an integer indexer containing NA values"
272
+ # TODO: this raises KeyError about labels not found (it tries label-based)
273
+
274
+ ser = pd.Series(data, index=[chr(100 + i) for i in range(len(data))])
275
+ with pytest.raises(ValueError, match=msg):
276
+ ser[idx]
277
+
278
+ def test_getitem_slice(self, data):
279
+ # getitem[slice] should return an array
280
+ result = data[slice(0)] # empty
281
+ assert isinstance(result, type(data))
282
+
283
+ result = data[slice(1)] # scalar
284
+ assert isinstance(result, type(data))
285
+
286
+ def test_getitem_ellipsis_and_slice(self, data):
287
+ # GH#40353 this is called from slice_block_rows
288
+ result = data[..., :]
289
+ tm.assert_extension_array_equal(result, data)
290
+
291
+ result = data[:, ...]
292
+ tm.assert_extension_array_equal(result, data)
293
+
294
+ result = data[..., :3]
295
+ tm.assert_extension_array_equal(result, data[:3])
296
+
297
+ result = data[:3, ...]
298
+ tm.assert_extension_array_equal(result, data[:3])
299
+
300
+ result = data[..., ::2]
301
+ tm.assert_extension_array_equal(result, data[::2])
302
+
303
+ result = data[::2, ...]
304
+ tm.assert_extension_array_equal(result, data[::2])
305
+
306
+ def test_get(self, data):
307
+ # GH 20882
308
+ s = pd.Series(data, index=[2 * i for i in range(len(data))])
309
+ assert s.get(4) == s.iloc[2]
310
+
311
+ result = s.get([4, 6])
312
+ expected = s.iloc[[2, 3]]
313
+ tm.assert_series_equal(result, expected)
314
+
315
+ result = s.get(slice(2))
316
+ expected = s.iloc[[0, 1]]
317
+ tm.assert_series_equal(result, expected)
318
+
319
+ assert s.get(-1) is None
320
+ assert s.get(s.index.max() + 1) is None
321
+
322
+ s = pd.Series(data[:6], index=list("abcdef"))
323
+ assert s.get("c") == s.iloc[2]
324
+
325
+ result = s.get(slice("b", "d"))
326
+ expected = s.iloc[[1, 2, 3]]
327
+ tm.assert_series_equal(result, expected)
328
+
329
+ result = s.get("Z")
330
+ assert result is None
331
+
332
+ msg = "Series.__getitem__ treating keys as positions is deprecated"
333
+ with tm.assert_produces_warning(FutureWarning, match=msg):
334
+ assert s.get(4) == s.iloc[4]
335
+ assert s.get(-1) == s.iloc[-1]
336
+ assert s.get(len(s)) is None
337
+
338
+ # GH 21257
339
+ s = pd.Series(data)
340
+ with tm.assert_produces_warning(None):
341
+ # GH#45324 make sure we aren't giving a spurious FutureWarning
342
+ s2 = s[::2]
343
+ assert s2.get(1) is None
344
+
345
+ def test_take_sequence(self, data):
346
+ result = pd.Series(data)[[0, 1, 3]]
347
+ assert result.iloc[0] == data[0]
348
+ assert result.iloc[1] == data[1]
349
+ assert result.iloc[2] == data[3]
350
+
351
+ def test_take(self, data, na_value, na_cmp):
352
+ result = data.take([0, -1])
353
+ assert result.dtype == data.dtype
354
+ assert result[0] == data[0]
355
+ assert result[1] == data[-1]
356
+
357
+ result = data.take([0, -1], allow_fill=True, fill_value=na_value)
358
+ assert result[0] == data[0]
359
+ assert na_cmp(result[1], na_value)
360
+
361
+ with pytest.raises(IndexError, match="out of bounds"):
362
+ data.take([len(data) + 1])
363
+
364
+ def test_take_empty(self, data, na_value, na_cmp):
365
+ empty = data[:0]
366
+
367
+ result = empty.take([-1], allow_fill=True)
368
+ assert na_cmp(result[0], na_value)
369
+
370
+ msg = "cannot do a non-empty take from an empty axes|out of bounds"
371
+
372
+ with pytest.raises(IndexError, match=msg):
373
+ empty.take([-1])
374
+
375
+ with pytest.raises(IndexError, match="cannot do a non-empty take"):
376
+ empty.take([0, 1])
377
+
378
+ def test_take_negative(self, data):
379
+ # https://github.com/pandas-dev/pandas/issues/20640
380
+ n = len(data)
381
+ result = data.take([0, -n, n - 1, -1])
382
+ expected = data.take([0, 0, n - 1, n - 1])
383
+ tm.assert_extension_array_equal(result, expected)
384
+
385
+ def test_take_non_na_fill_value(self, data_missing):
386
+ fill_value = data_missing[1] # valid
387
+ na = data_missing[0]
388
+
389
+ arr = data_missing._from_sequence(
390
+ [na, fill_value, na], dtype=data_missing.dtype
391
+ )
392
+ result = arr.take([-1, 1], fill_value=fill_value, allow_fill=True)
393
+ expected = arr.take([1, 1])
394
+ tm.assert_extension_array_equal(result, expected)
395
+
396
+ def test_take_pandas_style_negative_raises(self, data, na_value):
397
+ with pytest.raises(ValueError, match=""):
398
+ data.take([0, -2], fill_value=na_value, allow_fill=True)
399
+
400
+ @pytest.mark.parametrize("allow_fill", [True, False])
401
+ def test_take_out_of_bounds_raises(self, data, allow_fill):
402
+ arr = data[:3]
403
+
404
+ with pytest.raises(IndexError, match="out of bounds|out-of-bounds"):
405
+ arr.take(np.asarray([0, 3]), allow_fill=allow_fill)
406
+
407
+ def test_take_series(self, data):
408
+ s = pd.Series(data)
409
+ result = s.take([0, -1])
410
+ expected = pd.Series(
411
+ data._from_sequence([data[0], data[len(data) - 1]], dtype=s.dtype),
412
+ index=[0, len(data) - 1],
413
+ )
414
+ tm.assert_series_equal(result, expected)
415
+
416
+ def test_reindex(self, data, na_value):
417
+ s = pd.Series(data)
418
+ result = s.reindex([0, 1, 3])
419
+ expected = pd.Series(data.take([0, 1, 3]), index=[0, 1, 3])
420
+ tm.assert_series_equal(result, expected)
421
+
422
+ n = len(data)
423
+ result = s.reindex([-1, 0, n])
424
+ expected = pd.Series(
425
+ data._from_sequence([na_value, data[0], na_value], dtype=s.dtype),
426
+ index=[-1, 0, n],
427
+ )
428
+ tm.assert_series_equal(result, expected)
429
+
430
+ result = s.reindex([n, n + 1])
431
+ expected = pd.Series(
432
+ data._from_sequence([na_value, na_value], dtype=s.dtype), index=[n, n + 1]
433
+ )
434
+ tm.assert_series_equal(result, expected)
435
+
436
+ def test_reindex_non_na_fill_value(self, data_missing):
437
+ valid = data_missing[1]
438
+ na = data_missing[0]
439
+
440
+ arr = data_missing._from_sequence([na, valid], dtype=data_missing.dtype)
441
+ ser = pd.Series(arr)
442
+ result = ser.reindex([0, 1, 2], fill_value=valid)
443
+ expected = pd.Series(
444
+ data_missing._from_sequence([na, valid, valid], dtype=data_missing.dtype)
445
+ )
446
+
447
+ tm.assert_series_equal(result, expected)
448
+
449
+ def test_loc_len1(self, data):
450
+ # see GH-27785 take_nd with indexer of len 1 resulting in wrong ndim
451
+ df = pd.DataFrame({"A": data})
452
+ res = df.loc[[0], "A"]
453
+ assert res.ndim == 1
454
+ assert res._mgr.arrays[0].ndim == 1
455
+ if hasattr(res._mgr, "blocks"):
456
+ assert res._mgr._block.ndim == 1
457
+
458
+ def test_item(self, data):
459
+ # https://github.com/pandas-dev/pandas/pull/30175
460
+ s = pd.Series(data)
461
+ result = s[:1].item()
462
+ assert result == data[0]
463
+
464
+ msg = "can only convert an array of size 1 to a Python scalar"
465
+ with pytest.raises(ValueError, match=msg):
466
+ s[:0].item()
467
+
468
+ with pytest.raises(ValueError, match=msg):
469
+ s.item()
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/date/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from pandas.tests.extension.date.array import (
2
+ DateArray,
3
+ DateDtype,
4
+ )
5
+
6
+ __all__ = ["DateArray", "DateDtype"]
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/date/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (316 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/date/__pycache__/array.cpython-310.pyc ADDED
Binary file (6.22 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/date/array.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import datetime as dt
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ Any,
7
+ cast,
8
+ )
9
+
10
+ import numpy as np
11
+
12
+ from pandas.core.dtypes.dtypes import register_extension_dtype
13
+
14
+ from pandas.api.extensions import (
15
+ ExtensionArray,
16
+ ExtensionDtype,
17
+ )
18
+ from pandas.api.types import pandas_dtype
19
+
20
+ if TYPE_CHECKING:
21
+ from collections.abc import Sequence
22
+
23
+ from pandas._typing import (
24
+ Dtype,
25
+ PositionalIndexer,
26
+ )
27
+
28
+
29
+ @register_extension_dtype
30
+ class DateDtype(ExtensionDtype):
31
+ @property
32
+ def type(self):
33
+ return dt.date
34
+
35
+ @property
36
+ def name(self):
37
+ return "DateDtype"
38
+
39
+ @classmethod
40
+ def construct_from_string(cls, string: str):
41
+ if not isinstance(string, str):
42
+ raise TypeError(
43
+ f"'construct_from_string' expects a string, got {type(string)}"
44
+ )
45
+
46
+ if string == cls.__name__:
47
+ return cls()
48
+ else:
49
+ raise TypeError(f"Cannot construct a '{cls.__name__}' from '{string}'")
50
+
51
+ @classmethod
52
+ def construct_array_type(cls):
53
+ return DateArray
54
+
55
+ @property
56
+ def na_value(self):
57
+ return dt.date.min
58
+
59
+ def __repr__(self) -> str:
60
+ return self.name
61
+
62
+
63
+ class DateArray(ExtensionArray):
64
+ def __init__(
65
+ self,
66
+ dates: (
67
+ dt.date
68
+ | Sequence[dt.date]
69
+ | tuple[np.ndarray, np.ndarray, np.ndarray]
70
+ | np.ndarray
71
+ ),
72
+ ) -> None:
73
+ if isinstance(dates, dt.date):
74
+ self._year = np.array([dates.year])
75
+ self._month = np.array([dates.month])
76
+ self._day = np.array([dates.year])
77
+ return
78
+
79
+ ldates = len(dates)
80
+ if isinstance(dates, list):
81
+ # pre-allocate the arrays since we know the size before hand
82
+ self._year = np.zeros(ldates, dtype=np.uint16) # 65535 (0, 9999)
83
+ self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31)
84
+ self._day = np.zeros(ldates, dtype=np.uint8) # 255 (1, 12)
85
+ # populate them
86
+ for i, (y, m, d) in enumerate(
87
+ (date.year, date.month, date.day) for date in dates
88
+ ):
89
+ self._year[i] = y
90
+ self._month[i] = m
91
+ self._day[i] = d
92
+
93
+ elif isinstance(dates, tuple):
94
+ # only support triples
95
+ if ldates != 3:
96
+ raise ValueError("only triples are valid")
97
+ # check if all elements have the same type
98
+ if any(not isinstance(x, np.ndarray) for x in dates):
99
+ raise TypeError("invalid type")
100
+ ly, lm, ld = (len(cast(np.ndarray, d)) for d in dates)
101
+ if not ly == lm == ld:
102
+ raise ValueError(
103
+ f"tuple members must have the same length: {(ly, lm, ld)}"
104
+ )
105
+ self._year = dates[0].astype(np.uint16)
106
+ self._month = dates[1].astype(np.uint8)
107
+ self._day = dates[2].astype(np.uint8)
108
+
109
+ elif isinstance(dates, np.ndarray) and dates.dtype == "U10":
110
+ self._year = np.zeros(ldates, dtype=np.uint16) # 65535 (0, 9999)
111
+ self._month = np.zeros(ldates, dtype=np.uint8) # 255 (1, 31)
112
+ self._day = np.zeros(ldates, dtype=np.uint8) # 255 (1, 12)
113
+
114
+ # error: "object_" object is not iterable
115
+ obj = np.char.split(dates, sep="-")
116
+ for (i,), (y, m, d) in np.ndenumerate(obj): # type: ignore[misc]
117
+ self._year[i] = int(y)
118
+ self._month[i] = int(m)
119
+ self._day[i] = int(d)
120
+
121
+ else:
122
+ raise TypeError(f"{type(dates)} is not supported")
123
+
124
+ @property
125
+ def dtype(self) -> ExtensionDtype:
126
+ return DateDtype()
127
+
128
+ def astype(self, dtype, copy=True):
129
+ dtype = pandas_dtype(dtype)
130
+
131
+ if isinstance(dtype, DateDtype):
132
+ data = self.copy() if copy else self
133
+ else:
134
+ data = self.to_numpy(dtype=dtype, copy=copy, na_value=dt.date.min)
135
+
136
+ return data
137
+
138
+ @property
139
+ def nbytes(self) -> int:
140
+ return self._year.nbytes + self._month.nbytes + self._day.nbytes
141
+
142
+ def __len__(self) -> int:
143
+ return len(self._year) # all 3 arrays are enforced to have the same length
144
+
145
+ def __getitem__(self, item: PositionalIndexer):
146
+ if isinstance(item, int):
147
+ return dt.date(self._year[item], self._month[item], self._day[item])
148
+ else:
149
+ raise NotImplementedError("only ints are supported as indexes")
150
+
151
+ def __setitem__(self, key: int | slice | np.ndarray, value: Any) -> None:
152
+ if not isinstance(key, int):
153
+ raise NotImplementedError("only ints are supported as indexes")
154
+
155
+ if not isinstance(value, dt.date):
156
+ raise TypeError("you can only set datetime.date types")
157
+
158
+ self._year[key] = value.year
159
+ self._month[key] = value.month
160
+ self._day[key] = value.day
161
+
162
+ def __repr__(self) -> str:
163
+ return f"DateArray{list(zip(self._year, self._month, self._day))}"
164
+
165
+ def copy(self) -> DateArray:
166
+ return DateArray((self._year.copy(), self._month.copy(), self._day.copy()))
167
+
168
+ def isna(self) -> np.ndarray:
169
+ return np.logical_and(
170
+ np.logical_and(
171
+ self._year == dt.date.min.year, self._month == dt.date.min.month
172
+ ),
173
+ self._day == dt.date.min.day,
174
+ )
175
+
176
+ @classmethod
177
+ def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy=False):
178
+ if isinstance(scalars, dt.date):
179
+ raise TypeError
180
+ elif isinstance(scalars, DateArray):
181
+ if dtype is not None:
182
+ return scalars.astype(dtype, copy=copy)
183
+ if copy:
184
+ return scalars.copy()
185
+ return scalars[:]
186
+ elif isinstance(scalars, np.ndarray):
187
+ scalars = scalars.astype("U10") # 10 chars for yyyy-mm-dd
188
+ return DateArray(scalars)
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/decimal/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ from pandas.tests.extension.decimal.array import (
2
+ DecimalArray,
3
+ DecimalDtype,
4
+ make_data,
5
+ to_decimal,
6
+ )
7
+
8
+ __all__ = ["DecimalArray", "DecimalDtype", "to_decimal", "make_data"]
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (381 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/array.cpython-310.pyc ADDED
Binary file (10.9 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/decimal/__pycache__/test_decimal.cpython-310.pyc ADDED
Binary file (18.7 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/decimal/array.py ADDED
@@ -0,0 +1,311 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import decimal
4
+ import numbers
5
+ import sys
6
+ from typing import TYPE_CHECKING
7
+
8
+ import numpy as np
9
+
10
+ from pandas.core.dtypes.base import ExtensionDtype
11
+ from pandas.core.dtypes.common import (
12
+ is_dtype_equal,
13
+ is_float,
14
+ is_integer,
15
+ pandas_dtype,
16
+ )
17
+
18
+ import pandas as pd
19
+ from pandas.api.extensions import (
20
+ no_default,
21
+ register_extension_dtype,
22
+ )
23
+ from pandas.api.types import (
24
+ is_list_like,
25
+ is_scalar,
26
+ )
27
+ from pandas.core import arraylike
28
+ from pandas.core.algorithms import value_counts_internal as value_counts
29
+ from pandas.core.arraylike import OpsMixin
30
+ from pandas.core.arrays import (
31
+ ExtensionArray,
32
+ ExtensionScalarOpsMixin,
33
+ )
34
+ from pandas.core.indexers import check_array_indexer
35
+
36
+ if TYPE_CHECKING:
37
+ from pandas._typing import type_t
38
+
39
+
40
+ @register_extension_dtype
41
+ class DecimalDtype(ExtensionDtype):
42
+ type = decimal.Decimal
43
+ name = "decimal"
44
+ na_value = decimal.Decimal("NaN")
45
+ _metadata = ("context",)
46
+
47
+ def __init__(self, context=None) -> None:
48
+ self.context = context or decimal.getcontext()
49
+
50
+ def __repr__(self) -> str:
51
+ return f"DecimalDtype(context={self.context})"
52
+
53
+ @classmethod
54
+ def construct_array_type(cls) -> type_t[DecimalArray]:
55
+ """
56
+ Return the array type associated with this dtype.
57
+
58
+ Returns
59
+ -------
60
+ type
61
+ """
62
+ return DecimalArray
63
+
64
+ @property
65
+ def _is_numeric(self) -> bool:
66
+ return True
67
+
68
+
69
+ class DecimalArray(OpsMixin, ExtensionScalarOpsMixin, ExtensionArray):
70
+ __array_priority__ = 1000
71
+
72
+ def __init__(self, values, dtype=None, copy=False, context=None) -> None:
73
+ for i, val in enumerate(values):
74
+ if is_float(val) or is_integer(val):
75
+ if np.isnan(val):
76
+ values[i] = DecimalDtype.na_value
77
+ else:
78
+ # error: Argument 1 has incompatible type "float | int |
79
+ # integer[Any]"; expected "Decimal | float | str | tuple[int,
80
+ # Sequence[int], int]"
81
+ values[i] = DecimalDtype.type(val) # type: ignore[arg-type]
82
+ elif not isinstance(val, decimal.Decimal):
83
+ raise TypeError("All values must be of type " + str(decimal.Decimal))
84
+ values = np.asarray(values, dtype=object)
85
+
86
+ self._data = values
87
+ # Some aliases for common attribute names to ensure pandas supports
88
+ # these
89
+ self._items = self.data = self._data
90
+ # those aliases are currently not working due to assumptions
91
+ # in internal code (GH-20735)
92
+ # self._values = self.values = self.data
93
+ self._dtype = DecimalDtype(context)
94
+
95
+ @property
96
+ def dtype(self):
97
+ return self._dtype
98
+
99
+ @classmethod
100
+ def _from_sequence(cls, scalars, *, dtype=None, copy=False):
101
+ return cls(scalars)
102
+
103
+ @classmethod
104
+ def _from_sequence_of_strings(cls, strings, dtype=None, copy=False):
105
+ return cls._from_sequence(
106
+ [decimal.Decimal(x) for x in strings], dtype=dtype, copy=copy
107
+ )
108
+
109
+ @classmethod
110
+ def _from_factorized(cls, values, original):
111
+ return cls(values)
112
+
113
+ _HANDLED_TYPES = (decimal.Decimal, numbers.Number, np.ndarray)
114
+
115
+ def to_numpy(
116
+ self,
117
+ dtype=None,
118
+ copy: bool = False,
119
+ na_value: object = no_default,
120
+ decimals=None,
121
+ ) -> np.ndarray:
122
+ result = np.asarray(self, dtype=dtype)
123
+ if decimals is not None:
124
+ result = np.asarray([round(x, decimals) for x in result])
125
+ return result
126
+
127
+ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
128
+ #
129
+ if not all(
130
+ isinstance(t, self._HANDLED_TYPES + (DecimalArray,)) for t in inputs
131
+ ):
132
+ return NotImplemented
133
+
134
+ result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
135
+ self, ufunc, method, *inputs, **kwargs
136
+ )
137
+ if result is not NotImplemented:
138
+ # e.g. test_array_ufunc_series_scalar_other
139
+ return result
140
+
141
+ if "out" in kwargs:
142
+ return arraylike.dispatch_ufunc_with_out(
143
+ self, ufunc, method, *inputs, **kwargs
144
+ )
145
+
146
+ inputs = tuple(x._data if isinstance(x, DecimalArray) else x for x in inputs)
147
+ result = getattr(ufunc, method)(*inputs, **kwargs)
148
+
149
+ if method == "reduce":
150
+ result = arraylike.dispatch_reduction_ufunc(
151
+ self, ufunc, method, *inputs, **kwargs
152
+ )
153
+ if result is not NotImplemented:
154
+ return result
155
+
156
+ def reconstruct(x):
157
+ if isinstance(x, (decimal.Decimal, numbers.Number)):
158
+ return x
159
+ else:
160
+ return type(self)._from_sequence(x, dtype=self.dtype)
161
+
162
+ if ufunc.nout > 1:
163
+ return tuple(reconstruct(x) for x in result)
164
+ else:
165
+ return reconstruct(result)
166
+
167
+ def __getitem__(self, item):
168
+ if isinstance(item, numbers.Integral):
169
+ return self._data[item]
170
+ else:
171
+ # array, slice.
172
+ item = pd.api.indexers.check_array_indexer(self, item)
173
+ return type(self)(self._data[item])
174
+
175
+ def take(self, indexer, allow_fill=False, fill_value=None):
176
+ from pandas.api.extensions import take
177
+
178
+ data = self._data
179
+ if allow_fill and fill_value is None:
180
+ fill_value = self.dtype.na_value
181
+
182
+ result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill)
183
+ return self._from_sequence(result, dtype=self.dtype)
184
+
185
+ def copy(self):
186
+ return type(self)(self._data.copy(), dtype=self.dtype)
187
+
188
+ def astype(self, dtype, copy=True):
189
+ if is_dtype_equal(dtype, self._dtype):
190
+ if not copy:
191
+ return self
192
+ dtype = pandas_dtype(dtype)
193
+ if isinstance(dtype, type(self.dtype)):
194
+ return type(self)(self._data, copy=copy, context=dtype.context)
195
+
196
+ return super().astype(dtype, copy=copy)
197
+
198
+ def __setitem__(self, key, value) -> None:
199
+ if is_list_like(value):
200
+ if is_scalar(key):
201
+ raise ValueError("setting an array element with a sequence.")
202
+ value = [decimal.Decimal(v) for v in value]
203
+ else:
204
+ value = decimal.Decimal(value)
205
+
206
+ key = check_array_indexer(self, key)
207
+ self._data[key] = value
208
+
209
+ def __len__(self) -> int:
210
+ return len(self._data)
211
+
212
+ def __contains__(self, item) -> bool | np.bool_:
213
+ if not isinstance(item, decimal.Decimal):
214
+ return False
215
+ elif item.is_nan():
216
+ return self.isna().any()
217
+ else:
218
+ return super().__contains__(item)
219
+
220
+ @property
221
+ def nbytes(self) -> int:
222
+ n = len(self)
223
+ if n:
224
+ return n * sys.getsizeof(self[0])
225
+ return 0
226
+
227
+ def isna(self):
228
+ return np.array([x.is_nan() for x in self._data], dtype=bool)
229
+
230
+ @property
231
+ def _na_value(self):
232
+ return decimal.Decimal("NaN")
233
+
234
+ def _formatter(self, boxed=False):
235
+ if boxed:
236
+ return "Decimal: {}".format
237
+ return repr
238
+
239
+ @classmethod
240
+ def _concat_same_type(cls, to_concat):
241
+ return cls(np.concatenate([x._data for x in to_concat]))
242
+
243
+ def _reduce(
244
+ self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
245
+ ):
246
+ if skipna and self.isna().any():
247
+ # If we don't have any NAs, we can ignore skipna
248
+ other = self[~self.isna()]
249
+ result = other._reduce(name, **kwargs)
250
+ elif name == "sum" and len(self) == 0:
251
+ # GH#29630 avoid returning int 0 or np.bool_(False) on old numpy
252
+ result = decimal.Decimal(0)
253
+ else:
254
+ try:
255
+ op = getattr(self.data, name)
256
+ except AttributeError as err:
257
+ raise NotImplementedError(
258
+ f"decimal does not support the {name} operation"
259
+ ) from err
260
+ result = op(axis=0)
261
+
262
+ if keepdims:
263
+ return type(self)([result])
264
+ else:
265
+ return result
266
+
267
+ def _cmp_method(self, other, op):
268
+ # For use with OpsMixin
269
+ def convert_values(param):
270
+ if isinstance(param, ExtensionArray) or is_list_like(param):
271
+ ovalues = param
272
+ else:
273
+ # Assume it's an object
274
+ ovalues = [param] * len(self)
275
+ return ovalues
276
+
277
+ lvalues = self
278
+ rvalues = convert_values(other)
279
+
280
+ # If the operator is not defined for the underlying objects,
281
+ # a TypeError should be raised
282
+ res = [op(a, b) for (a, b) in zip(lvalues, rvalues)]
283
+
284
+ return np.asarray(res, dtype=bool)
285
+
286
+ def value_counts(self, dropna: bool = True):
287
+ return value_counts(self.to_numpy(), dropna=dropna)
288
+
289
+ # We override fillna here to simulate a 3rd party EA that has done so. This
290
+ # lets us test the deprecation telling authors to implement _pad_or_backfill
291
+ # Simulate a 3rd-party EA that has not yet updated to include a "copy"
292
+ # keyword in its fillna method.
293
+ # error: Signature of "fillna" incompatible with supertype "ExtensionArray"
294
+ def fillna( # type: ignore[override]
295
+ self,
296
+ value=None,
297
+ method=None,
298
+ limit: int | None = None,
299
+ ):
300
+ return super().fillna(value=value, method=method, limit=limit, copy=True)
301
+
302
+
303
+ def to_decimal(values, context=None):
304
+ return DecimalArray([decimal.Decimal(x) for x in values], context=context)
305
+
306
+
307
+ def make_data():
308
+ return [decimal.Decimal(val) for val in np.random.default_rng(2).random(100)]
309
+
310
+
311
+ DecimalArray._add_arithmetic_ops()
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/decimal/test_decimal.py ADDED
@@ -0,0 +1,567 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import decimal
4
+ import operator
5
+
6
+ import numpy as np
7
+ import pytest
8
+
9
+ import pandas as pd
10
+ import pandas._testing as tm
11
+ from pandas.tests.extension import base
12
+ from pandas.tests.extension.decimal.array import (
13
+ DecimalArray,
14
+ DecimalDtype,
15
+ make_data,
16
+ to_decimal,
17
+ )
18
+
19
+
20
+ @pytest.fixture
21
+ def dtype():
22
+ return DecimalDtype()
23
+
24
+
25
+ @pytest.fixture
26
+ def data():
27
+ return DecimalArray(make_data())
28
+
29
+
30
+ @pytest.fixture
31
+ def data_for_twos():
32
+ return DecimalArray([decimal.Decimal(2) for _ in range(100)])
33
+
34
+
35
+ @pytest.fixture
36
+ def data_missing():
37
+ return DecimalArray([decimal.Decimal("NaN"), decimal.Decimal(1)])
38
+
39
+
40
+ @pytest.fixture
41
+ def data_for_sorting():
42
+ return DecimalArray(
43
+ [decimal.Decimal("1"), decimal.Decimal("2"), decimal.Decimal("0")]
44
+ )
45
+
46
+
47
+ @pytest.fixture
48
+ def data_missing_for_sorting():
49
+ return DecimalArray(
50
+ [decimal.Decimal("1"), decimal.Decimal("NaN"), decimal.Decimal("0")]
51
+ )
52
+
53
+
54
+ @pytest.fixture
55
+ def na_cmp():
56
+ return lambda x, y: x.is_nan() and y.is_nan()
57
+
58
+
59
+ @pytest.fixture
60
+ def data_for_grouping():
61
+ b = decimal.Decimal("1.0")
62
+ a = decimal.Decimal("0.0")
63
+ c = decimal.Decimal("2.0")
64
+ na = decimal.Decimal("NaN")
65
+ return DecimalArray([b, b, na, na, a, a, b, c])
66
+
67
+
68
class TestDecimalArray(base.ExtensionTests):
    """Run the shared extension-array test suite against DecimalArray.

    Overrides fall into three groups: reductions are validated against the
    same reduction on the materialized object ndarray, arithmetic tests
    temporarily silence Decimal traps, and the ``fillna``-family tests wrap
    the base tests to assert the deprecation warnings DecimalArray's shims
    emit.
    """

    def _get_expected_exception(
        self, op_name: str, obj, other
    ) -> type[Exception] | None:
        # No op is expected to raise for Decimal in these tests.
        return None

    def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:
        return True

    def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):
        if op_name == "count":
            return super().check_reduce(ser, op_name, skipna)
        # Compare against the same reduction on an object ndarray.
        result = getattr(ser, op_name)(skipna=skipna)
        expected = getattr(np.asarray(ser), op_name)()
        tm.assert_almost_equal(result, expected)

    def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request):
        # These reductions are not implemented for Decimal values.
        if all_numeric_reductions in ("kurt", "skew", "sem", "median"):
            mark = pytest.mark.xfail(raises=NotImplementedError)
            request.applymarker(mark)
        super().test_reduce_series_numeric(data, all_numeric_reductions, skipna)

    def test_reduce_frame(self, data, all_numeric_reductions, skipna, request):
        op_name = all_numeric_reductions
        if op_name in ("skew", "median"):
            mark = pytest.mark.xfail(raises=NotImplementedError)
            request.applymarker(mark)

        return super().test_reduce_frame(data, all_numeric_reductions, skipna)

    def test_compare_scalar(self, data, comparison_op):
        ser = pd.Series(data)
        self._compare_other(ser, data, comparison_op, 0.5)

    def test_compare_array(self, data, comparison_op):
        ser = pd.Series(data)

        alter = np.random.default_rng(2).choice([-1, 0, 1], len(data))
        # Randomly double, halve or keep same value
        other = pd.Series(data) * [decimal.Decimal(pow(2.0, i)) for i in alter]
        self._compare_other(ser, data, comparison_op, other)

    def test_arith_series_with_array(self, data, all_arithmetic_operators):
        op_name = all_arithmetic_operators
        ser = pd.Series(data)

        # Silence traps so division by zero / invalid ops produce values
        # rather than raising; restored to the saved state at the end.
        context = decimal.getcontext()
        divbyzerotrap = context.traps[decimal.DivisionByZero]
        invalidoptrap = context.traps[decimal.InvalidOperation]
        context.traps[decimal.DivisionByZero] = 0
        context.traps[decimal.InvalidOperation] = 0

        # Decimal supports ops with int, but not float
        other = pd.Series([int(d * 100) for d in data])
        self.check_opname(ser, op_name, other)

        if "mod" not in op_name:
            self.check_opname(ser, op_name, ser * 2)

        self.check_opname(ser, op_name, 0)
        self.check_opname(ser, op_name, 5)
        context.traps[decimal.DivisionByZero] = divbyzerotrap
        context.traps[decimal.InvalidOperation] = invalidoptrap

    def test_fillna_frame(self, data_missing):
        msg = "ExtensionArray.fillna added a 'copy' keyword"
        with tm.assert_produces_warning(
            DeprecationWarning, match=msg, check_stacklevel=False
        ):
            super().test_fillna_frame(data_missing)

    def test_fillna_limit_pad(self, data_missing):
        msg = "ExtensionArray.fillna 'method' keyword is deprecated"
        with tm.assert_produces_warning(
            DeprecationWarning,
            match=msg,
            check_stacklevel=False,
            raise_on_extra_warnings=False,
        ):
            super().test_fillna_limit_pad(data_missing)

        msg = "The 'method' keyword in DecimalArray.fillna is deprecated"
        with tm.assert_produces_warning(
            FutureWarning,
            match=msg,
            check_stacklevel=False,
            raise_on_extra_warnings=False,
        ):
            super().test_fillna_limit_pad(data_missing)

    @pytest.mark.parametrize(
        "limit_area, input_ilocs, expected_ilocs",
        [
            ("outside", [1, 0, 0, 0, 1], [1, 0, 0, 0, 1]),
            ("outside", [1, 0, 1, 0, 1], [1, 0, 1, 0, 1]),
            ("outside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 1]),
            ("outside", [0, 1, 0, 1, 0], [0, 1, 0, 1, 1]),
            ("inside", [1, 0, 0, 0, 1], [1, 1, 1, 1, 1]),
            ("inside", [1, 0, 1, 0, 1], [1, 1, 1, 1, 1]),
            ("inside", [0, 1, 1, 1, 0], [0, 1, 1, 1, 0]),
            ("inside", [0, 1, 0, 1, 0], [0, 1, 1, 1, 0]),
        ],
    )
    def test_ffill_limit_area(
        self, data_missing, limit_area, input_ilocs, expected_ilocs
    ):
        # GH#56616
        msg = "ExtensionArray.fillna 'method' keyword is deprecated"
        with tm.assert_produces_warning(
            DeprecationWarning,
            match=msg,
            check_stacklevel=False,
            raise_on_extra_warnings=False,
        ):
            msg = "DecimalArray does not implement limit_area"
            with pytest.raises(NotImplementedError, match=msg):
                super().test_ffill_limit_area(
                    data_missing, limit_area, input_ilocs, expected_ilocs
                )

    def test_fillna_limit_backfill(self, data_missing):
        # Three warning paths are exercised, one per deprecated entry point.
        msg = "Series.fillna with 'method' is deprecated"
        with tm.assert_produces_warning(
            FutureWarning,
            match=msg,
            check_stacklevel=False,
            raise_on_extra_warnings=False,
        ):
            super().test_fillna_limit_backfill(data_missing)

        msg = "ExtensionArray.fillna 'method' keyword is deprecated"
        with tm.assert_produces_warning(
            DeprecationWarning,
            match=msg,
            check_stacklevel=False,
            raise_on_extra_warnings=False,
        ):
            super().test_fillna_limit_backfill(data_missing)

        msg = "The 'method' keyword in DecimalArray.fillna is deprecated"
        with tm.assert_produces_warning(
            FutureWarning,
            match=msg,
            check_stacklevel=False,
            raise_on_extra_warnings=False,
        ):
            super().test_fillna_limit_backfill(data_missing)

    def test_fillna_no_op_returns_copy(self, data):
        msg = "|".join(
            [
                "ExtensionArray.fillna 'method' keyword is deprecated",
                "The 'method' keyword in DecimalArray.fillna is deprecated",
            ]
        )
        with tm.assert_produces_warning(
            (FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False
        ):
            super().test_fillna_no_op_returns_copy(data)

    def test_fillna_series(self, data_missing):
        msg = "ExtensionArray.fillna added a 'copy' keyword"
        with tm.assert_produces_warning(
            DeprecationWarning, match=msg, check_stacklevel=False
        ):
            super().test_fillna_series(data_missing)

    def test_fillna_series_method(self, data_missing, fillna_method):
        msg = "|".join(
            [
                "ExtensionArray.fillna 'method' keyword is deprecated",
                "The 'method' keyword in DecimalArray.fillna is deprecated",
            ]
        )
        with tm.assert_produces_warning(
            (FutureWarning, DeprecationWarning), match=msg, check_stacklevel=False
        ):
            super().test_fillna_series_method(data_missing, fillna_method)

    def test_fillna_copy_frame(self, data_missing, using_copy_on_write):
        # Under copy-on-write the deprecated path is not hit.
        warn = DeprecationWarning if not using_copy_on_write else None
        msg = "ExtensionArray.fillna added a 'copy' keyword"
        with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
            super().test_fillna_copy_frame(data_missing)

    def test_fillna_copy_series(self, data_missing, using_copy_on_write):
        warn = DeprecationWarning if not using_copy_on_write else None
        msg = "ExtensionArray.fillna added a 'copy' keyword"
        with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):
            super().test_fillna_copy_series(data_missing)

    @pytest.mark.parametrize("dropna", [True, False])
    def test_value_counts(self, all_data, dropna, request):
        all_data = all_data[:10]
        if dropna:
            other = np.array(all_data[~all_data.isna()])
        else:
            other = all_data

        vcs = pd.Series(all_data).value_counts(dropna=dropna)
        vcs_ex = pd.Series(other).value_counts(dropna=dropna)

        with decimal.localcontext() as ctx:
            # avoid raising when comparing Decimal("NAN") < Decimal(2)
            ctx.traps[decimal.InvalidOperation] = False

            result = vcs.sort_index()
            expected = vcs_ex.sort_index()

        tm.assert_series_equal(result, expected)

    def test_series_repr(self, data):
        # Overriding this base test to explicitly test that
        # the custom _formatter is used
        ser = pd.Series(data)
        assert data.dtype.name in repr(ser)
        assert "Decimal: " in repr(ser)

    @pytest.mark.xfail(reason="Inconsistent array-vs-scalar behavior")
    @pytest.mark.parametrize("ufunc", [np.positive, np.negative, np.abs])
    def test_unary_ufunc_dunder_equivalence(self, data, ufunc):
        super().test_unary_ufunc_dunder_equivalence(data, ufunc)
291
+
292
+
293
def test_take_na_value_other_decimal():
    # A caller-supplied Decimal fill value should be inserted at -1 positions.
    arr = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("2.0")])
    filled = arr.take([0, -1], allow_fill=True, fill_value=decimal.Decimal("-1.0"))
    expected = DecimalArray([decimal.Decimal("1.0"), decimal.Decimal("-1.0")])
    tm.assert_extension_array_equal(filled, expected)
298
+
299
+
300
def test_series_constructor_coerce_data_to_extension_dtype():
    # Plain ints passed with an extension dtype should be coerced to Decimal.
    dtype = DecimalDtype()
    result = pd.Series([0, 1, 2], dtype=dtype)

    expected = pd.Series(
        DecimalArray(
            [decimal.Decimal(0), decimal.Decimal(1), decimal.Decimal(2)],
            dtype=dtype,
        )
    )
    tm.assert_series_equal(result, expected)
310
+
311
+
312
def test_series_constructor_with_dtype():
    arr = DecimalArray([decimal.Decimal("10.0")])

    # Passing the matching extension dtype is equivalent to passing no dtype.
    result = pd.Series(arr, dtype=DecimalDtype())
    tm.assert_series_equal(result, pd.Series(arr))

    # A numpy dtype triggers an astype of the extension values.
    result = pd.Series(arr, dtype="int64")
    tm.assert_series_equal(result, pd.Series([10]))
321
+
322
+
323
def test_dataframe_constructor_with_dtype():
    arr = DecimalArray([decimal.Decimal("10.0")])

    # Matching extension dtype: same frame as without an explicit dtype.
    result = pd.DataFrame({"A": arr}, dtype=DecimalDtype())
    tm.assert_frame_equal(result, pd.DataFrame({"A": arr}))

    # A numpy dtype converts the extension column.
    arr = DecimalArray([decimal.Decimal("10.0")])
    result = pd.DataFrame({"A": arr}, dtype="int64")
    tm.assert_frame_equal(result, pd.DataFrame({"A": [10]}))
334
+
335
+
336
@pytest.mark.parametrize("frame", [True, False])
def test_astype_dispatches(frame):
    # Dtype-specific check that Series[decimal].astype reaches all the way
    # through to ExtensionArray.astype; a generic smoke test that works for
    # arbitrary data types is difficult to design.
    data = pd.Series(DecimalArray([decimal.Decimal(2)]), name="a")
    ctx = decimal.Context()
    ctx.prec = 5

    if frame:
        data = data.to_frame()

    result = data.astype(DecimalDtype(ctx))

    if frame:
        result = result["a"]

    # The target dtype's context (and thus its precision) must be preserved.
    assert result.dtype.context.prec == ctx.prec
355
+
356
+
357
class DecimalArrayWithoutFromSequence(DecimalArray):
    """Helper class for testing error handling in _from_sequence."""

    @classmethod
    def _from_sequence(cls, scalars, *, dtype=None, copy=False):
        # Deliberately fail so tests can observe how callers recover.
        raise KeyError("For the test")
363
+
364
+
365
class DecimalArrayWithoutCoercion(DecimalArrayWithoutFromSequence):
    """Variant whose arithmetic results are not coerced back to Decimal dtype."""

    @classmethod
    def _create_arithmetic_method(cls, op):
        return cls._create_method(op, coerce_to_dtype=False)


# Install the arithmetic dunders using the non-coercing factory above.
DecimalArrayWithoutCoercion._add_arithmetic_ops()
372
+
373
+
374
def test_combine_from_sequence_raises(monkeypatch):
    # https://github.com/pandas-dev/pandas/issues/22850
    cls = DecimalArrayWithoutFromSequence

    @classmethod
    def construct_array_type(cls):
        return DecimalArrayWithoutFromSequence

    monkeypatch.setattr(DecimalDtype, "construct_array_type", construct_array_type)

    ser = pd.Series(cls([decimal.Decimal("1.0"), decimal.Decimal("2.0")]))
    result = ser.combine(ser, operator.add)

    # note: object dtype — combine falls back when _from_sequence raises
    expected = pd.Series(
        [decimal.Decimal("2.0"), decimal.Decimal("4.0")], dtype="object"
    )
    tm.assert_series_equal(result, expected)
393
+
394
+
395
@pytest.mark.parametrize(
    "class_", [DecimalArrayWithoutFromSequence, DecimalArrayWithoutCoercion]
)
def test_scalar_ops_from_sequence_raises(class_):
    # op(EA, EA) should return an EA, or an ndarray if it's not possible
    # to return an EA with the return values.
    arr = class_([decimal.Decimal("1.0"), decimal.Decimal("2.0")])
    result = arr + arr
    expected = np.array(
        [decimal.Decimal("2.0"), decimal.Decimal("4.0")], dtype="object"
    )
    tm.assert_numpy_array_equal(result, expected)
407
+
408
+
409
@pytest.mark.parametrize(
    "reverse, expected_div, expected_mod",
    [(False, [0, 1, 1, 2], [1, 0, 1, 0]), (True, [2, 1, 0, 0], [0, 0, 2, 2])],
)
def test_divmod_array(reverse, expected_div, expected_mod):
    # https://github.com/pandas-dev/pandas/issues/22930
    arr = to_decimal([1, 2, 3, 4])
    # Exercise both divmod(arr, scalar) and the reflected divmod(scalar, arr).
    if reverse:
        quotient, remainder = divmod(2, arr)
    else:
        quotient, remainder = divmod(arr, 2)

    tm.assert_extension_array_equal(quotient, to_decimal(expected_div))
    tm.assert_extension_array_equal(remainder, to_decimal(expected_mod))
425
+
426
+
427
def test_ufunc_fallback(data):
    # A ufunc on a Series should apply to the underlying array and keep index.
    subset = data[:5]
    ser = pd.Series(subset, index=range(3, 8))
    result = np.abs(ser)
    expected = pd.Series(np.abs(subset), index=range(3, 8))
    tm.assert_series_equal(result, expected)
433
+
434
+
435
def test_array_ufunc():
    # Ufuncs on the bare array should round-trip through the backing ndarray.
    arr = to_decimal([1, 2, 3])
    result = np.exp(arr)
    expected = to_decimal(np.exp(arr._data))
    tm.assert_extension_array_equal(result, expected)
440
+
441
+
442
def test_array_ufunc_series():
    # Same as test_array_ufunc, but applied through a Series wrapper.
    arr = to_decimal([1, 2, 3])
    ser = pd.Series(arr)
    result = np.exp(ser)
    expected = pd.Series(to_decimal(np.exp(arr._data)))
    tm.assert_series_equal(result, expected)
448
+
449
+
450
def test_array_ufunc_series_scalar_other():
    # check _HANDLED_TYPES: a Decimal scalar operand must be accepted.
    arr = to_decimal([1, 2, 3])
    ser = pd.Series(arr)
    result = np.add(ser, decimal.Decimal(1))
    expected = pd.Series(np.add(arr, decimal.Decimal(1)))
    tm.assert_series_equal(result, expected)
457
+
458
+
459
def test_array_ufunc_series_defer():
    # The array should defer to Series regardless of operand order.
    arr = to_decimal([1, 2, 3])
    ser = pd.Series(arr)

    expected = pd.Series(to_decimal([2, 4, 6]))
    series_first = np.add(ser, arr)
    array_first = np.add(arr, ser)

    tm.assert_series_equal(series_first, expected)
    tm.assert_series_equal(array_first, expected)
469
+
470
+
471
def test_groupby_agg():
    # Ensure that the result of agg is inferred to be decimal dtype
    # https://github.com/pandas-dev/pandas/issues/29141

    data = make_data()[:5]
    df = pd.DataFrame(
        {"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)}
    )
    take_first = lambda grp: grp.iloc[0]

    # single key, selected column
    expected = pd.Series(to_decimal([data[0], data[3]]))
    result = df.groupby("id1")["decimals"].agg(take_first)
    tm.assert_series_equal(result, expected, check_names=False)
    result = df["decimals"].groupby(df["id1"]).agg(take_first)
    tm.assert_series_equal(result, expected, check_names=False)

    # multiple keys, selected column
    expected = pd.Series(
        to_decimal([data[0], data[1], data[3]]),
        index=pd.MultiIndex.from_tuples([(0, 0), (0, 1), (1, 1)]),
    )
    result = df.groupby(["id1", "id2"])["decimals"].agg(take_first)
    tm.assert_series_equal(result, expected, check_names=False)
    result = df["decimals"].groupby([df["id1"], df["id2"]]).agg(take_first)
    tm.assert_series_equal(result, expected, check_names=False)

    # multiple columns
    expected = pd.DataFrame({"id2": [0, 1], "decimals": to_decimal([data[0], data[3]])})
    result = df.groupby("id1").agg(take_first)
    tm.assert_frame_equal(result, expected, check_names=False)
501
+
502
+
503
def test_groupby_agg_ea_method(monkeypatch):
    # Ensure that the result of agg is inferred to be decimal dtype
    # https://github.com/pandas-dev/pandas/issues/29141

    def DecimalArray__my_sum(self):
        return np.sum(np.array(self))

    # Patch a custom reduction onto the array class so agg can call it.
    monkeypatch.setattr(DecimalArray, "my_sum", DecimalArray__my_sum, raising=False)

    data = make_data()[:5]
    expected = pd.Series(to_decimal([data[0] + data[1] + data[2], data[3] + data[4]]))

    df = pd.DataFrame({"id": [0, 0, 0, 1, 1], "decimals": DecimalArray(data)})
    result = df.groupby("id")["decimals"].agg(lambda x: x.values.my_sum())
    tm.assert_series_equal(result, expected, check_names=False)

    ser = pd.Series(DecimalArray(data))
    grouper = np.array([0, 0, 0, 1, 1], dtype=np.int64)
    result = ser.groupby(grouper).agg(lambda x: x.values.my_sum())
    tm.assert_series_equal(result, expected, check_names=False)
522
+
523
+
524
def test_indexing_no_materialize(monkeypatch):
    # See https://github.com/pandas-dev/pandas/issues/29708
    # Ensure that indexing operations do not materialize (convert to a numpy
    # array) the ExtensionArray unnecessary

    def DecimalArray__array__(self, dtype=None):
        raise Exception("tried to convert a DecimalArray to a numpy array")

    # Any accidental materialization will now raise loudly.
    monkeypatch.setattr(DecimalArray, "__array__", DecimalArray__array__, raising=False)

    data = make_data()
    ser = pd.Series(DecimalArray(data))
    df = pd.DataFrame({"a": ser, "b": range(len(ser))})

    # ensure the following operations do not raise an error
    ser[ser > 0.5]
    df[ser > 0.5]
    ser.at[0]
    df.at[0, "a"]
543
+
544
+
545
def test_to_numpy_keyword():
    # test the extra keyword: ``decimals`` is forwarded to the array's
    # to_numpy implementation and rounds each element.
    values = [decimal.Decimal("1.1111"), decimal.Decimal("2.2222")]
    expected = np.array(
        [decimal.Decimal("1.11"), decimal.Decimal("2.22")], dtype="object"
    )
    arr = pd.array(values, dtype="decimal")

    result = arr.to_numpy(decimals=2)
    tm.assert_numpy_array_equal(result, expected)

    result = pd.Series(arr).to_numpy(decimals=2)
    tm.assert_numpy_array_equal(result, expected)
557
+
558
+
559
def test_array_copy_on_write(using_copy_on_write):
    # Mutating the source frame after astype must not leak into the copy
    # when copy-on-write is enabled.
    df = pd.DataFrame({"a": [decimal.Decimal(2), decimal.Decimal(3)]}, dtype="object")
    df2 = df.astype(DecimalDtype())
    df.iloc[0, 0] = 0
    if using_copy_on_write:
        expected = pd.DataFrame(
            {"a": [decimal.Decimal(2), decimal.Decimal(3)]}, dtype=DecimalDtype()
        )
        tm.assert_equal(df2.values, expected.values)
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/json/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ from pandas.tests.extension.json.array import (
2
+ JSONArray,
3
+ JSONDtype,
4
+ make_data,
5
+ )
6
+
7
+ __all__ = ["JSONArray", "JSONDtype", "make_data"]
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/json/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (326 Bytes). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/json/__pycache__/array.cpython-310.pyc ADDED
Binary file (9.15 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/json/__pycache__/test_json.cpython-310.pyc ADDED
Binary file (17.1 kB). View file
 
llmeval-env/lib/python3.10/site-packages/pandas/tests/extension/json/array.py ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Test extension array for storing nested data in a pandas container.
3
+
4
+ The JSONArray stores lists of dictionaries. The storage mechanism is a list,
5
+ not an ndarray.
6
+
7
+ Note
8
+ ----
9
+ We currently store lists of UserDicts. Pandas has a few places
10
+ internally that specifically check for dicts, and does non-scalar things
11
+ in that case. We *want* the dictionaries to be treated as scalars, so we
12
+ hack around pandas by using UserDicts.
13
+ """
14
+ from __future__ import annotations
15
+
16
+ from collections import (
17
+ UserDict,
18
+ abc,
19
+ )
20
+ import itertools
21
+ import numbers
22
+ import string
23
+ import sys
24
+ from typing import (
25
+ TYPE_CHECKING,
26
+ Any,
27
+ )
28
+
29
+ import numpy as np
30
+
31
+ from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
32
+ from pandas.core.dtypes.common import (
33
+ is_bool_dtype,
34
+ is_list_like,
35
+ pandas_dtype,
36
+ )
37
+
38
+ import pandas as pd
39
+ from pandas.api.extensions import (
40
+ ExtensionArray,
41
+ ExtensionDtype,
42
+ )
43
+ from pandas.core.indexers import unpack_tuple_and_ellipses
44
+
45
+ if TYPE_CHECKING:
46
+ from collections.abc import Mapping
47
+
48
+ from pandas._typing import type_t
49
+
50
+
51
class JSONDtype(ExtensionDtype):
    """Extension dtype whose scalars are mappings (stored as UserDicts)."""

    type = abc.Mapping
    name = "json"
    # An empty UserDict is the sentinel for missing values.
    na_value: Mapping[str, Any] = UserDict()

    @classmethod
    def construct_array_type(cls) -> type_t[JSONArray]:
        """
        Return the array type associated with this dtype.

        Returns
        -------
        type
        """
        return JSONArray
66
+
67
+
68
class JSONArray(ExtensionArray):
    """Extension array storing mapping scalars in a plain Python list."""

    dtype = JSONDtype()
    __array_priority__ = 1000

    def __init__(self, values, dtype=None, copy=False) -> None:
        # Every element must be a mapping; reject anything else eagerly.
        for val in values:
            if not isinstance(val, self.dtype.type):
                raise TypeError("All values must be of type " + str(self.dtype.type))
        self.data = values

        # Some aliases for common attribute names to ensure pandas supports
        # these
        self._items = self._data = self.data
        # those aliases are currently not working due to assumptions
        # in internal code (GH-20735)
        # self._values = self.values = self.data

    @classmethod
    def _from_sequence(cls, scalars, *, dtype=None, copy=False):
        return cls(scalars)

    @classmethod
    def _from_factorized(cls, values, original):
        # Empty tuples are the factorized form of NA; drop them.
        return cls([UserDict(x) for x in values if x != ()])

    def __getitem__(self, item):
        if isinstance(item, tuple):
            item = unpack_tuple_and_ellipses(item)

        if isinstance(item, numbers.Integral):
            return self.data[item]
        elif isinstance(item, slice) and item == slice(None):
            # Make sure we get a view
            return type(self)(self.data)
        elif isinstance(item, slice):
            # slice
            return type(self)(self.data[item])
        elif not is_list_like(item):
            # e.g. "foo" or 2.5
            # exception message copied from numpy
            raise IndexError(
                r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis "
                r"(`None`) and integer or boolean arrays are valid indices"
            )
        else:
            item = pd.api.indexers.check_array_indexer(self, item)
            if is_bool_dtype(item.dtype):
                return type(self)._from_sequence(
                    [x for x, m in zip(self, item) if m], dtype=self.dtype
                )
            # integer
            return type(self)([self.data[i] for i in item])

    def __setitem__(self, key, value) -> None:
        if isinstance(key, numbers.Integral):
            self.data[key] = value
        else:
            if not isinstance(value, (type(self), abc.Sequence)):
                # broadcast value
                value = itertools.cycle([value])

            if isinstance(key, np.ndarray) and key.dtype == "bool":
                # masking
                for i, (k, v) in enumerate(zip(key, value)):
                    if k:
                        assert isinstance(v, self.dtype.type)
                        self.data[i] = v
            else:
                for k, v in zip(key, value):
                    assert isinstance(v, self.dtype.type)
                    self.data[k] = v

    def __len__(self) -> int:
        return len(self.data)

    def __eq__(self, other):
        return NotImplemented

    def __ne__(self, other):
        return NotImplemented

    def __array__(self, dtype=None, copy=None):
        if dtype is None:
            dtype = object
        if dtype == object:
            # on py38 builds it looks like numpy is inferring to a non-1D array
            return construct_1d_object_array_from_listlike(list(self))
        return np.asarray(self.data, dtype=dtype)

    @property
    def nbytes(self) -> int:
        # NOTE: shallow size of the backing list only, not the dicts inside.
        return sys.getsizeof(self.data)

    def isna(self):
        return np.array([x == self.dtype.na_value for x in self.data], dtype=bool)

    def take(self, indexer, allow_fill=False, fill_value=None):
        # re-implement here, since NumPy has trouble setting
        # sized objects like UserDicts into scalar slots of
        # an ndarary.
        indexer = np.asarray(indexer)
        msg = (
            "Index is out of bounds or cannot do a "
            "non-empty take from an empty array."
        )

        if allow_fill:
            if fill_value is None:
                fill_value = self.dtype.na_value
            # bounds check
            if (indexer < -1).any():
                raise ValueError
            try:
                output = [
                    self.data[loc] if loc != -1 else fill_value for loc in indexer
                ]
            except IndexError as err:
                raise IndexError(msg) from err
        else:
            try:
                output = [self.data[loc] for loc in indexer]
            except IndexError as err:
                raise IndexError(msg) from err

        return type(self)._from_sequence(output, dtype=self.dtype)

    def copy(self):
        return type(self)(self.data[:])

    def astype(self, dtype, copy=True):
        # NumPy has issues when all the dicts are the same length.
        # np.array([UserDict(...), UserDict(...)]) fails,
        # but np.array([{...}, {...}]) works, so cast.
        from pandas.core.arrays.string_ import StringDtype

        dtype = pandas_dtype(dtype)
        # needed to add this check for the Series constructor
        if isinstance(dtype, type(self.dtype)) and dtype == self.dtype:
            if copy:
                return self.copy()
            return self
        elif isinstance(dtype, StringDtype):
            value = self.astype(str)  # numpy doesn't like nested dicts
            arr_cls = dtype.construct_array_type()
            return arr_cls._from_sequence(value, dtype=dtype, copy=False)
        elif not copy:
            return np.asarray([dict(x) for x in self], dtype=dtype)
        else:
            return np.array([dict(x) for x in self], dtype=dtype, copy=copy)

    def unique(self):
        # Parent method doesn't work since np.array will try to infer
        # a 2-dim object.
        return type(self)([dict(x) for x in {tuple(d.items()) for d in self.data}])

    @classmethod
    def _concat_same_type(cls, to_concat):
        data = list(itertools.chain.from_iterable(x.data for x in to_concat))
        return cls(data)

    def _values_for_factorize(self):
        frozen = self._values_for_argsort()
        if len(frozen) == 0:
            # factorize_array expects 1-d array, this is a len-0 2-d array.
            frozen = frozen.ravel()
        return frozen, ()

    def _values_for_argsort(self):
        # Bypass NumPy's shape inference to get a (N,) array of tuples.
        frozen = [tuple(x.items()) for x in self]
        return construct_1d_object_array_from_listlike(frozen)

    def _pad_or_backfill(self, *, method, limit=None, copy=True):
        # GH#56616 - test EA method without limit_area argument
        return super()._pad_or_backfill(method=method, limit=limit, copy=copy)
243
+
244
+
245
def make_data():
    """Build 100 random UserDict records for the JSON extension tests.

    Each record maps 0-9 random ASCII letters to random integers in
    ``[0, 100)``; the generator is seeded, so the output is deterministic.
    """
    # TODO: Use a regular dict. See _NDFrameIndexer._setitem_with_indexer
    rng = np.random.default_rng(2)
    records = []
    for _ in range(100):
        # Draw the record length first to keep the RNG call order stable.
        n_entries = rng.integers(0, 10)
        records.append(
            UserDict(
                [
                    (rng.choice(list(string.ascii_letters)), rng.integers(0, 100))
                    for _ in range(n_entries)
                ]
            )
        )
    return records