applied-ai-018 committed
Commit 43a2cb6 · verified · 1 Parent(s): 06c6650

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff
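For context, the commit message refers to the `upload-large-folder` utility shipped with `huggingface_hub`. A minimal sketch of how such an upload might be invoked is below; the repo ID and local path are placeholders, not values taken from this commit, and this assumes a recent `huggingface_hub` (>= 0.25) where `HfApi.upload_large_folder` is available.

```python
# Sketch: resumable, multi-commit upload of a large local folder to the Hub.
# repo_id and folder_path are hypothetical; adjust to your own repo.
from huggingface_hub import HfApi

api = HfApi()
api.upload_large_folder(
    repo_id="applied-ai-018/example-repo",  # hypothetical target repo
    repo_type="model",
    folder_path="./local_checkpoint_dir",   # hypothetical local path
)
```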
Files changed (50)
  1. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_125_mp_rank_01_optim_states.pt +3 -0
  2. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_200_mp_rank_03_optim_states.pt +3 -0
  3. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_34_mp_rank_00_optim_states.pt +3 -0
  4. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_3_mp_rank_01_optim_states.pt +3 -0
  5. ckpts/llama-3b/global_step100/layer_14-model_01-model_states.pt +3 -0
  6. ckpts/llama-3b/global_step100/layer_21-model_01-model_states.pt +3 -0
  7. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/__init__.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_arrow_string_mixins.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_mixins.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_ranges.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_utils.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/base.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/boolean.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/categorical.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimelike.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimes.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/floating.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/integer.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/interval.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/masked.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numeric.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numpy_.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/period.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_arrow.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/timedeltas.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/__init__.py +7 -0
  28. venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/__init__.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/_arrow_utils.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/accessors.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/array.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/extension_types.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/_arrow_utils.py +66 -0
  34. venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/accessors.py +473 -0
  35. venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/array.py +0 -0
  36. venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/extension_types.py +174 -0
  37. venv/lib/python3.10/site-packages/pandas/core/arrays/boolean.py +407 -0
  38. venv/lib/python3.10/site-packages/pandas/core/arrays/integer.py +272 -0
  39. venv/lib/python3.10/site-packages/pandas/core/arrays/masked.py +1650 -0
  40. venv/lib/python3.10/site-packages/pandas/core/arrays/numpy_.py +563 -0
  41. venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/__init__.py +19 -0
  42. venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/__init__.cpython-310.pyc +0 -0
  43. venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/accessor.cpython-310.pyc +0 -0
  44. venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/array.cpython-310.pyc +0 -0
  45. venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/scipy_sparse.cpython-310.pyc +0 -0
  46. venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/accessor.py +414 -0
  47. venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/array.py +1929 -0
  48. venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/scipy_sparse.py +207 -0
  49. venv/lib/python3.10/site-packages/pandas/core/indexes/__init__.py +0 -0
  50. venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/base.cpython-310.pyc +0 -0
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_125_mp_rank_01_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1fe74a0bf3eaa6619b0e88374dfaf1986a67d547039483528c4baef5a9d03bfe
+ size 41830212
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_200_mp_rank_03_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75086d31d499a1357955bac8eaf2a6408d1d6868ed932b1b9960c1ebe1b57fd5
+ size 41830404
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_34_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81ec562d5d937ded4e66f0bf369942662738263cbdf8480749d3252640c7d2c3
+ size 41830202
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_3_mp_rank_01_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3af788ef6d207d9605d6bb50a14d55ffa09ab37ae871a0cf4bf316754ee781ea
+ size 41830128
ckpts/llama-3b/global_step100/layer_14-model_01-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e9d48654bed9180d0818d0a5e254521d0f6a1725130b7a6375e8061c56af372b
+ size 116407086
ckpts/llama-3b/global_step100/layer_21-model_01-model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f559f6bc09117df3dade889d51b3d86f5b1c80adc8556584b95e17001fbb10e3
+ size 116407086
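Each checkpoint entry above is stored as a Git LFS pointer rather than the binary payload itself: a three-line text stub recording the spec version, the SHA-256 of the real file, and its size in bytes. A minimal sketch of reading such a pointer (illustrative only, using the first stub above as input):

```python
# Sketch: parse a Git LFS pointer file into a dict of its key/value fields.
def parse_lfs_pointer(text: str) -> dict[str, str]:
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")  # "size 41830212" -> ("size", "41830212")
        fields[key] = value
    return fields

pointer = """version https://git-lfs.github.com/spec/v1
oid sha256:1fe74a0bf3eaa6619b0e88374dfaf1986a67d547039483528c4baef5a9d03bfe
size 41830212"""
info = parse_lfs_pointer(pointer)
assert info["size"] == "41830212"
assert info["oid"].startswith("sha256:")
```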
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.36 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_arrow_string_mixins.cpython-310.pyc ADDED
Binary file (3.18 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_mixins.cpython-310.pyc ADDED
Binary file (14.5 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_ranges.cpython-310.pyc ADDED
Binary file (4.84 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (1.41 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/base.cpython-310.pyc ADDED
Binary file (74.7 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/boolean.cpython-310.pyc ADDED
Binary file (10.5 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/categorical.cpython-310.pyc ADDED
Binary file (81.2 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimelike.cpython-310.pyc ADDED
Binary file (63.4 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/datetimes.cpython-310.pyc ADDED
Binary file (70.6 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/floating.cpython-310.pyc ADDED
Binary file (4.69 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/integer.cpython-310.pyc ADDED
Binary file (6.98 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/interval.cpython-310.pyc ADDED
Binary file (47.6 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/masked.cpython-310.pyc ADDED
Binary file (41.4 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numeric.cpython-310.pyc ADDED
Binary file (7.4 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/numpy_.cpython-310.pyc ADDED
Binary file (12.5 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/period.cpython-310.pyc ADDED
Binary file (32.7 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_.cpython-310.pyc ADDED
Binary file (17 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/string_arrow.cpython-310.pyc ADDED
Binary file (20.5 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/__pycache__/timedeltas.cpython-310.pyc ADDED
Binary file (30.3 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/__init__.py ADDED
@@ -0,0 +1,7 @@
+ from pandas.core.arrays.arrow.accessors import (
+     ListAccessor,
+     StructAccessor,
+ )
+ from pandas.core.arrays.arrow.array import ArrowExtensionArray
+
+ __all__ = ["ArrowExtensionArray", "StructAccessor", "ListAccessor"]
venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (398 Bytes)
venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/_arrow_utils.cpython-310.pyc ADDED
Binary file (2.07 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/accessors.cpython-310.pyc ADDED
Binary file (13.3 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/array.cpython-310.pyc ADDED
Binary file (82.6 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/__pycache__/extension_types.cpython-310.pyc ADDED
Binary file (6.3 kB)
venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/_arrow_utils.py ADDED
@@ -0,0 +1,66 @@
+ from __future__ import annotations
+
+ import warnings
+
+ import numpy as np
+ import pyarrow
+
+ from pandas.errors import PerformanceWarning
+ from pandas.util._exceptions import find_stack_level
+
+
+ def fallback_performancewarning(version: str | None = None) -> None:
+     """
+     Raise a PerformanceWarning for falling back to ExtensionArray's
+     non-pyarrow method
+     """
+     msg = "Falling back on a non-pyarrow code path which may decrease performance."
+     if version is not None:
+         msg += f" Upgrade to pyarrow >={version} to possibly suppress this warning."
+     warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())
+
+
+ def pyarrow_array_to_numpy_and_mask(
+     arr, dtype: np.dtype
+ ) -> tuple[np.ndarray, np.ndarray]:
+     """
+     Convert a primitive pyarrow.Array to a numpy array and boolean mask based
+     on the buffers of the Array.
+
+     At the moment pyarrow.BooleanArray is not supported.
+
+     Parameters
+     ----------
+     arr : pyarrow.Array
+     dtype : numpy.dtype
+
+     Returns
+     -------
+     (data, mask)
+         Tuple of two numpy arrays with the raw data (with specified dtype) and
+         a boolean mask (validity mask, so False means missing)
+     """
+     dtype = np.dtype(dtype)
+
+     if pyarrow.types.is_null(arr.type):
+         # No initialization of data is needed since everything is null
+         data = np.empty(len(arr), dtype=dtype)
+         mask = np.zeros(len(arr), dtype=bool)
+         return data, mask
+     buflist = arr.buffers()
+     # Since Arrow buffers might contain padding and the data might be offset,
+     # the buffer gets sliced here before handing it to numpy.
+     # See also https://github.com/pandas-dev/pandas/issues/40896
+     offset = arr.offset * dtype.itemsize
+     length = len(arr) * dtype.itemsize
+     data_buf = buflist[1][offset : offset + length]
+     data = np.frombuffer(data_buf, dtype=dtype)
+     bitmask = buflist[0]
+     if bitmask is not None:
+         mask = pyarrow.BooleanArray.from_buffers(
+             pyarrow.bool_(), len(arr), [None, bitmask], offset=arr.offset
+         )
+         mask = np.asarray(mask)
+     else:
+         mask = np.ones(len(arr), dtype=bool)
+     return data, mask
venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/accessors.py ADDED
@@ -0,0 +1,473 @@
+ """Accessors for arrow-backed data."""
+
+ from __future__ import annotations
+
+ from abc import (
+     ABCMeta,
+     abstractmethod,
+ )
+ from typing import (
+     TYPE_CHECKING,
+     cast,
+ )
+
+ from pandas.compat import (
+     pa_version_under10p1,
+     pa_version_under11p0,
+ )
+
+ from pandas.core.dtypes.common import is_list_like
+
+ if not pa_version_under10p1:
+     import pyarrow as pa
+     import pyarrow.compute as pc
+
+     from pandas.core.dtypes.dtypes import ArrowDtype
+
+ if TYPE_CHECKING:
+     from collections.abc import Iterator
+
+     from pandas import (
+         DataFrame,
+         Series,
+     )
+
+
+ class ArrowAccessor(metaclass=ABCMeta):
+     @abstractmethod
+     def __init__(self, data, validation_msg: str) -> None:
+         self._data = data
+         self._validation_msg = validation_msg
+         self._validate(data)
+
+     @abstractmethod
+     def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
+         pass
+
+     def _validate(self, data):
+         dtype = data.dtype
+         if not isinstance(dtype, ArrowDtype):
+             # Raise AttributeError so that inspect can handle non-struct Series.
+             raise AttributeError(self._validation_msg.format(dtype=dtype))
+
+         if not self._is_valid_pyarrow_dtype(dtype.pyarrow_dtype):
+             # Raise AttributeError so that inspect can handle invalid Series.
+             raise AttributeError(self._validation_msg.format(dtype=dtype))
+
+     @property
+     def _pa_array(self):
+         return self._data.array._pa_array
+
+
+ class ListAccessor(ArrowAccessor):
+     """
+     Accessor object for list data properties of the Series values.
+
+     Parameters
+     ----------
+     data : Series
+         Series containing Arrow list data.
+     """
+
+     def __init__(self, data=None) -> None:
+         super().__init__(
+             data,
+             validation_msg="Can only use the '.list' accessor with "
+             "'list[pyarrow]' dtype, not {dtype}.",
+         )
+
+     def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
+         return (
+             pa.types.is_list(pyarrow_dtype)
+             or pa.types.is_fixed_size_list(pyarrow_dtype)
+             or pa.types.is_large_list(pyarrow_dtype)
+         )
+
+     def len(self) -> Series:
+         """
+         Return the length of each list in the Series.
+
+         Returns
+         -------
+         pandas.Series
+             The length of each list.
+
+         Examples
+         --------
+         >>> import pyarrow as pa
+         >>> s = pd.Series(
+         ...     [
+         ...         [1, 2, 3],
+         ...         [3],
+         ...     ],
+         ...     dtype=pd.ArrowDtype(pa.list_(
+         ...         pa.int64()
+         ...     ))
+         ... )
+         >>> s.list.len()
+         0    3
+         1    1
+         dtype: int32[pyarrow]
+         """
+         from pandas import Series
+
+         value_lengths = pc.list_value_length(self._pa_array)
+         return Series(value_lengths, dtype=ArrowDtype(value_lengths.type))
+
+     def __getitem__(self, key: int | slice) -> Series:
+         """
+         Index or slice lists in the Series.
+
+         Parameters
+         ----------
+         key : int | slice
+             Index or slice of indices to access from each list.
+
+         Returns
+         -------
+         pandas.Series
+             The list at requested index.
+
+         Examples
+         --------
+         >>> import pyarrow as pa
+         >>> s = pd.Series(
+         ...     [
+         ...         [1, 2, 3],
+         ...         [3],
+         ...     ],
+         ...     dtype=pd.ArrowDtype(pa.list_(
+         ...         pa.int64()
+         ...     ))
+         ... )
+         >>> s.list[0]
+         0    1
+         1    3
+         dtype: int64[pyarrow]
+         """
+         from pandas import Series
+
+         if isinstance(key, int):
+             # TODO: Support negative key but pyarrow does not allow
+             # element index to be an array.
+             # if key < 0:
+             #     key = pc.add(key, pc.list_value_length(self._pa_array))
+             element = pc.list_element(self._pa_array, key)
+             return Series(element, dtype=ArrowDtype(element.type))
+         elif isinstance(key, slice):
+             if pa_version_under11p0:
+                 raise NotImplementedError(
+                     f"List slice not supported by pyarrow {pa.__version__}."
+                 )
+
+             # TODO: Support negative start/stop/step, ideally this would be added
+             # upstream in pyarrow.
+             start, stop, step = key.start, key.stop, key.step
+             if start is None:
+                 # TODO: When adding negative step support
+                 # this should be set to the last element of the array
+                 # when step is negative.
+                 start = 0
+             if step is None:
+                 step = 1
+             sliced = pc.list_slice(self._pa_array, start, stop, step)
+             return Series(sliced, dtype=ArrowDtype(sliced.type))
+         else:
+             raise ValueError(f"key must be an int or slice, got {type(key).__name__}")
+
+     def __iter__(self) -> Iterator:
+         raise TypeError(f"'{type(self).__name__}' object is not iterable")
+
+     def flatten(self) -> Series:
+         """
+         Flatten list values.
+
+         Returns
+         -------
+         pandas.Series
+             The data from all lists in the series flattened.
+
+         Examples
+         --------
+         >>> import pyarrow as pa
+         >>> s = pd.Series(
+         ...     [
+         ...         [1, 2, 3],
+         ...         [3],
+         ...     ],
+         ...     dtype=pd.ArrowDtype(pa.list_(
+         ...         pa.int64()
+         ...     ))
+         ... )
+         >>> s.list.flatten()
+         0    1
+         1    2
+         2    3
+         3    3
+         dtype: int64[pyarrow]
+         """
+         from pandas import Series
+
+         flattened = pc.list_flatten(self._pa_array)
+         return Series(flattened, dtype=ArrowDtype(flattened.type))
+
+
+ class StructAccessor(ArrowAccessor):
+     """
+     Accessor object for structured data properties of the Series values.
+
+     Parameters
+     ----------
+     data : Series
+         Series containing Arrow struct data.
+     """
+
+     def __init__(self, data=None) -> None:
+         super().__init__(
+             data,
+             validation_msg=(
+                 "Can only use the '.struct' accessor with 'struct[pyarrow]' "
+                 "dtype, not {dtype}."
+             ),
+         )
+
+     def _is_valid_pyarrow_dtype(self, pyarrow_dtype) -> bool:
+         return pa.types.is_struct(pyarrow_dtype)
+
+     @property
+     def dtypes(self) -> Series:
+         """
+         Return the dtype object of each child field of the struct.
+
+         Returns
+         -------
+         pandas.Series
+             The data type of each child field.
+
+         Examples
+         --------
+         >>> import pyarrow as pa
+         >>> s = pd.Series(
+         ...     [
+         ...         {"version": 1, "project": "pandas"},
+         ...         {"version": 2, "project": "pandas"},
+         ...         {"version": 1, "project": "numpy"},
+         ...     ],
+         ...     dtype=pd.ArrowDtype(pa.struct(
+         ...         [("version", pa.int64()), ("project", pa.string())]
+         ...     ))
+         ... )
+         >>> s.struct.dtypes
+         version     int64[pyarrow]
+         project    string[pyarrow]
+         dtype: object
+         """
+         from pandas import (
+             Index,
+             Series,
+         )
+
+         pa_type = self._data.dtype.pyarrow_dtype
+         types = [ArrowDtype(struct.type) for struct in pa_type]
+         names = [struct.name for struct in pa_type]
+         return Series(types, index=Index(names))
+
+     def field(
+         self,
+         name_or_index: list[str]
+         | list[bytes]
+         | list[int]
+         | pc.Expression
+         | bytes
+         | str
+         | int,
+     ) -> Series:
+         """
+         Extract a child field of a struct as a Series.
+
+         Parameters
+         ----------
+         name_or_index : str | bytes | int | expression | list
+             Name or index of the child field to extract.
+
+             For list-like inputs, this will index into a nested
+             struct.
+
+         Returns
+         -------
+         pandas.Series
+             The data corresponding to the selected child field.
+
+         See Also
+         --------
+         Series.struct.explode : Return all child fields as a DataFrame.
+
+         Notes
+         -----
+         The name of the resulting Series will be set using the following
+         rules:
+
+         - For string, bytes, or integer `name_or_index` (or a list of these, for
+           a nested selection), the Series name is set to the selected
+           field's name.
+         - For a :class:`pyarrow.compute.Expression`, this is set to
+           the string form of the expression.
+         - For list-like `name_or_index`, the name will be set to the
+           name of the final field selected.
+
+         Examples
+         --------
+         >>> import pyarrow as pa
+         >>> s = pd.Series(
+         ...     [
+         ...         {"version": 1, "project": "pandas"},
+         ...         {"version": 2, "project": "pandas"},
+         ...         {"version": 1, "project": "numpy"},
+         ...     ],
+         ...     dtype=pd.ArrowDtype(pa.struct(
+         ...         [("version", pa.int64()), ("project", pa.string())]
+         ...     ))
+         ... )
+
+         Extract by field name.
+
+         >>> s.struct.field("project")
+         0    pandas
+         1    pandas
+         2     numpy
+         Name: project, dtype: string[pyarrow]
+
+         Extract by field index.
+
+         >>> s.struct.field(0)
+         0    1
+         1    2
+         2    1
+         Name: version, dtype: int64[pyarrow]
+
+         Or an expression
+
+         >>> import pyarrow.compute as pc
+         >>> s.struct.field(pc.field("project"))
+         0    pandas
+         1    pandas
+         2     numpy
+         Name: project, dtype: string[pyarrow]
+
+         For nested struct types, you can pass a list of values to index
+         multiple levels:
+
+         >>> version_type = pa.struct([
+         ...     ("major", pa.int64()),
+         ...     ("minor", pa.int64()),
+         ... ])
+         >>> s = pd.Series(
+         ...     [
+         ...         {"version": {"major": 1, "minor": 5}, "project": "pandas"},
+         ...         {"version": {"major": 2, "minor": 1}, "project": "pandas"},
+         ...         {"version": {"major": 1, "minor": 26}, "project": "numpy"},
+         ...     ],
+         ...     dtype=pd.ArrowDtype(pa.struct(
+         ...         [("version", version_type), ("project", pa.string())]
+         ...     ))
+         ... )
+         >>> s.struct.field(["version", "minor"])
+         0     5
+         1     1
+         2    26
+         Name: minor, dtype: int64[pyarrow]
+         >>> s.struct.field([0, 0])
+         0    1
+         1    2
+         2    1
+         Name: major, dtype: int64[pyarrow]
+         """
+         from pandas import Series
+
+         def get_name(
+             level_name_or_index: list[str]
+             | list[bytes]
+             | list[int]
+             | pc.Expression
+             | bytes
+             | str
+             | int,
+             data: pa.ChunkedArray,
+         ):
+             if isinstance(level_name_or_index, int):
+                 name = data.type.field(level_name_or_index).name
+             elif isinstance(level_name_or_index, (str, bytes)):
+                 name = level_name_or_index
+             elif isinstance(level_name_or_index, pc.Expression):
+                 name = str(level_name_or_index)
+             elif is_list_like(level_name_or_index):
+                 # For nested input like [2, 1, 2]
+                 # iteratively get the struct and field name. The last
+                 # one is used for the name of the index.
+                 level_name_or_index = list(reversed(level_name_or_index))
+                 selected = data
+                 while level_name_or_index:
+                     # we need the cast, otherwise mypy complains about
+                     # getting ints, bytes, or str here, which isn't possible.
+                     level_name_or_index = cast(list, level_name_or_index)
+                     name_or_index = level_name_or_index.pop()
+                     name = get_name(name_or_index, selected)
+                     selected = selected.type.field(selected.type.get_field_index(name))
+                     name = selected.name
+             else:
+                 raise ValueError(
+                     "name_or_index must be an int, str, bytes, "
+                     "pyarrow.compute.Expression, or list of those"
+                 )
+             return name
+
+         pa_arr = self._data.array._pa_array
+         name = get_name(name_or_index, pa_arr)
+         field_arr = pc.struct_field(pa_arr, name_or_index)
+
+         return Series(
+             field_arr,
+             dtype=ArrowDtype(field_arr.type),
+             index=self._data.index,
+             name=name,
+         )
+
+     def explode(self) -> DataFrame:
+         """
+         Extract all child fields of a struct as a DataFrame.
+
+         Returns
+         -------
+         pandas.DataFrame
+             The data corresponding to all child fields.
+
+         See Also
+         --------
+         Series.struct.field : Return a single child field as a Series.
+
+         Examples
+         --------
+         >>> import pyarrow as pa
+         >>> s = pd.Series(
+         ...     [
+         ...         {"version": 1, "project": "pandas"},
+         ...         {"version": 2, "project": "pandas"},
+         ...         {"version": 1, "project": "numpy"},
+         ...     ],
+         ...     dtype=pd.ArrowDtype(pa.struct(
+         ...         [("version", pa.int64()), ("project", pa.string())]
+         ...     ))
+         ... )
+
+         >>> s.struct.explode()
+            version project
+         0        1  pandas
+         1        2  pandas
+         2        1   numpy
+         """
+         from pandas import concat
+
+         pa_type = self._pa_array.type
+         return concat(
+             [self.field(i) for i in range(pa_type.num_fields)], axis="columns"
+         )
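One design detail worth noting in `ArrowAccessor._validate` above: raising `AttributeError` (rather than `TypeError`) means `hasattr` and `inspect` treat a Series of the wrong dtype as simply not having the accessor. A quick illustration (not part of the diff; assumes a pandas version that ships these accessors):

```python
# Sketch: the .struct accessor "disappears" on non-struct-dtype Series
# because _validate raises AttributeError, which hasattr() swallows.
import pandas as pd

s = pd.Series([1, 2, 3])     # plain int64, not an ArrowDtype struct
print(hasattr(s, "struct"))  # False: AttributeError raised during validation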
venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/array.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/pandas/core/arrays/arrow/extension_types.py ADDED
@@ -0,0 +1,174 @@
+ from __future__ import annotations
+
+ import json
+ from typing import TYPE_CHECKING
+
+ import pyarrow
+
+ from pandas.compat import pa_version_under14p1
+
+ from pandas.core.dtypes.dtypes import (
+     IntervalDtype,
+     PeriodDtype,
+ )
+
+ from pandas.core.arrays.interval import VALID_CLOSED
+
+ if TYPE_CHECKING:
+     from pandas._typing import IntervalClosedType
+
+
+ class ArrowPeriodType(pyarrow.ExtensionType):
+     def __init__(self, freq) -> None:
+         # attributes need to be set first before calling
+         # super init (as that calls serialize)
+         self._freq = freq
+         pyarrow.ExtensionType.__init__(self, pyarrow.int64(), "pandas.period")
+
+     @property
+     def freq(self):
+         return self._freq
+
+     def __arrow_ext_serialize__(self) -> bytes:
+         metadata = {"freq": self.freq}
+         return json.dumps(metadata).encode()
+
+     @classmethod
+     def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowPeriodType:
+         metadata = json.loads(serialized.decode())
+         return ArrowPeriodType(metadata["freq"])
+
+     def __eq__(self, other):
+         if isinstance(other, pyarrow.BaseExtensionType):
+             return type(self) == type(other) and self.freq == other.freq
+         else:
+             return NotImplemented
+
+     def __ne__(self, other) -> bool:
+         return not self == other
+
+     def __hash__(self) -> int:
+         return hash((str(self), self.freq))
+
+     def to_pandas_dtype(self) -> PeriodDtype:
+         return PeriodDtype(freq=self.freq)
+
+
+ # register the type with a dummy instance
+ _period_type = ArrowPeriodType("D")
+ pyarrow.register_extension_type(_period_type)
+
+
+ class ArrowIntervalType(pyarrow.ExtensionType):
+     def __init__(self, subtype, closed: IntervalClosedType) -> None:
+         # attributes need to be set first before calling
+         # super init (as that calls serialize)
+         assert closed in VALID_CLOSED
+         self._closed: IntervalClosedType = closed
+         if not isinstance(subtype, pyarrow.DataType):
+             subtype = pyarrow.type_for_alias(str(subtype))
+         self._subtype = subtype
+
+         storage_type = pyarrow.struct([("left", subtype), ("right", subtype)])
+         pyarrow.ExtensionType.__init__(self, storage_type, "pandas.interval")
+
+     @property
+     def subtype(self):
+         return self._subtype
+
+     @property
+     def closed(self) -> IntervalClosedType:
+         return self._closed
+
+     def __arrow_ext_serialize__(self) -> bytes:
+         metadata = {"subtype": str(self.subtype), "closed": self.closed}
+         return json.dumps(metadata).encode()
+
+     @classmethod
+     def __arrow_ext_deserialize__(cls, storage_type, serialized) -> ArrowIntervalType:
+         metadata = json.loads(serialized.decode())
+         subtype = pyarrow.type_for_alias(metadata["subtype"])
+         closed = metadata["closed"]
+         return ArrowIntervalType(subtype, closed)
+
+     def __eq__(self, other):
+         if isinstance(other, pyarrow.BaseExtensionType):
+             return (
+                 type(self) == type(other)
+                 and self.subtype == other.subtype
+                 and self.closed == other.closed
+             )
+         else:
+             return NotImplemented
+
+     def __ne__(self, other) -> bool:
+         return not self == other
+
+     def __hash__(self) -> int:
+         return hash((str(self), str(self.subtype), self.closed))
+
+     def to_pandas_dtype(self) -> IntervalDtype:
+         return IntervalDtype(self.subtype.to_pandas_dtype(), self.closed)
+
+
+ # register the type with a dummy instance
+ _interval_type = ArrowIntervalType(pyarrow.int64(), "left")
+ pyarrow.register_extension_type(_interval_type)
+
+
+ _ERROR_MSG = """\
+ Disallowed deserialization of 'arrow.py_extension_type':
+ storage_type = {storage_type}
+ serialized = {serialized}
+ pickle disassembly:\n{pickle_disassembly}
+
+ Reading of untrusted Parquet or Feather files with a PyExtensionType column
+ allows arbitrary code execution.
+ If you trust this file, you can enable reading the extension type by one of:
+
+ - upgrading to pyarrow >= 14.0.1, and call `pa.PyExtensionType.set_auto_load(True)`
+ - install pyarrow-hotfix (`pip install pyarrow-hotfix`) and disable it by running
+   `import pyarrow_hotfix; pyarrow_hotfix.uninstall()`
+
+ We strongly recommend updating your Parquet/Feather files to use extension types
+ derived from `pyarrow.ExtensionType` instead, and register this type explicitly.
+ """
+
+
+ def patch_pyarrow():
+     # starting from pyarrow 14.0.1, it has its own mechanism
+     if not pa_version_under14p1:
+         return
+
+     # if https://github.com/pitrou/pyarrow-hotfix was installed and enabled
+     if getattr(pyarrow, "_hotfix_installed", False):
+         return
+
+     class ForbiddenExtensionType(pyarrow.ExtensionType):
+         def __arrow_ext_serialize__(self):
+             return b""
+
+         @classmethod
+         def __arrow_ext_deserialize__(cls, storage_type, serialized):
+             import io
+             import pickletools
+
+             out = io.StringIO()
+             pickletools.dis(serialized, out)
+             raise RuntimeError(
+                 _ERROR_MSG.format(
+                     storage_type=storage_type,
+                     serialized=serialized,
+                     pickle_disassembly=out.getvalue(),
+                 )
+             )
+
+     pyarrow.unregister_extension_type("arrow.py_extension_type")
+     pyarrow.register_extension_type(
+         ForbiddenExtensionType(pyarrow.null(), "arrow.py_extension_type")
+     )
+
+     pyarrow._hotfix_installed = True
+
+
+ patch_pyarrow()
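As a quick illustration of the serialize/deserialize contract implemented above (illustrative only, not part of the diff): the extension types round-trip their parameters through plain JSON metadata bytes, which is what makes them safe to deserialize, unlike the pickle-based `py_extension_type` that `patch_pyarrow` blocks.

```python
# Sketch: ArrowPeriodType round-trips its freq through JSON bytes.
import pyarrow as pa

from pandas.core.arrays.arrow.extension_types import ArrowPeriodType

t = ArrowPeriodType("D")
payload = t.__arrow_ext_serialize__()  # b'{"freq": "D"}' -- inert JSON, no pickle
t2 = ArrowPeriodType.__arrow_ext_deserialize__(pa.int64(), payload)
assert t == t2 and t2.freq == "D"
```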
venv/lib/python3.10/site-packages/pandas/core/arrays/boolean.py ADDED
@@ -0,0 +1,407 @@
+ from __future__ import annotations
+
+ import numbers
+ from typing import (
+     TYPE_CHECKING,
+     ClassVar,
+     cast,
+ )
+
+ import numpy as np
+
+ from pandas._libs import (
+     lib,
+     missing as libmissing,
+ )
+
+ from pandas.core.dtypes.common import is_list_like
+ from pandas.core.dtypes.dtypes import register_extension_dtype
+ from pandas.core.dtypes.missing import isna
+
+ from pandas.core import ops
+ from pandas.core.array_algos import masked_accumulations
+ from pandas.core.arrays.masked import (
+     BaseMaskedArray,
+     BaseMaskedDtype,
+ )
+
+ if TYPE_CHECKING:
+     import pyarrow
+
+     from pandas._typing import (
+         Dtype,
+         DtypeObj,
+         Self,
+         npt,
+         type_t,
+     )
+
+
+ @register_extension_dtype
+ class BooleanDtype(BaseMaskedDtype):
+     """
+     Extension dtype for boolean data.
+
+     .. warning::
+
+        BooleanDtype is considered experimental. The implementation and
+        parts of the API may change without warning.
+
+     Attributes
+     ----------
+     None
+
+     Methods
+     -------
+     None
+
+     Examples
+     --------
+     >>> pd.BooleanDtype()
+     BooleanDtype
+     """
+
+     name: ClassVar[str] = "boolean"
+
+     # https://github.com/python/mypy/issues/4125
+     # error: Signature of "type" incompatible with supertype "BaseMaskedDtype"
+     @property
+     def type(self) -> type:  # type: ignore[override]
+         return np.bool_
+
+     @property
+     def kind(self) -> str:
+         return "b"
+
+     @property
+     def numpy_dtype(self) -> np.dtype:
+         return np.dtype("bool")
+
+     @classmethod
+     def construct_array_type(cls) -> type_t[BooleanArray]:
+         """
+         Return the array type associated with this dtype.
+
+         Returns
+         -------
+         type
+         """
+         return BooleanArray
+
+     def __repr__(self) -> str:
+         return "BooleanDtype"
+
+     @property
+     def _is_boolean(self) -> bool:
+         return True
+
+     @property
+     def _is_numeric(self) -> bool:
+         return True
+
+     def __from_arrow__(
+         self, array: pyarrow.Array | pyarrow.ChunkedArray
+     ) -> BooleanArray:
+         """
+         Construct BooleanArray from pyarrow Array/ChunkedArray.
+         """
+         import pyarrow
+
+         if array.type != pyarrow.bool_() and not pyarrow.types.is_null(array.type):
+             raise TypeError(f"Expected array of boolean type, got {array.type} instead")
+
+         if isinstance(array, pyarrow.Array):
+             chunks = [array]
+             length = len(array)
+         else:
+             # pyarrow.ChunkedArray
+             chunks = array.chunks
+             length = array.length()
+
+         if pyarrow.types.is_null(array.type):
+             mask = np.ones(length, dtype=bool)
+             # No need to init data, since all null
+             data = np.empty(length, dtype=bool)
+             return BooleanArray(data, mask)
+
+         results = []
+         for arr in chunks:
+             buflist = arr.buffers()
+             data = pyarrow.BooleanArray.from_buffers(
+                 arr.type, len(arr), [None, buflist[1]], offset=arr.offset
+             ).to_numpy(zero_copy_only=False)
+             if arr.null_count != 0:
+                 mask = pyarrow.BooleanArray.from_buffers(
+                     arr.type, len(arr), [None, buflist[0]], offset=arr.offset
+                 ).to_numpy(zero_copy_only=False)
+                 mask = ~mask
+             else:
+                 mask = np.zeros(len(arr), dtype=bool)
+
+             bool_arr = BooleanArray(data, mask)
+             results.append(bool_arr)
+
+         if not results:
+             return BooleanArray(
+                 np.array([], dtype=np.bool_), np.array([], dtype=np.bool_)
+             )
+         else:
+             return BooleanArray._concat_same_type(results)
+
+
+ def coerce_to_array(
+     values, mask=None, copy: bool = False
+ ) -> tuple[np.ndarray, np.ndarray]:
+     """
+     Coerce the input values array to numpy arrays with a mask.
+
+     Parameters
+     ----------
+     values : 1D list-like
+     mask : bool 1D array, optional
+     copy : bool, default False
+         if True, copy the input
+
+     Returns
+     -------
+     tuple of (values, mask)
+     """
+     if isinstance(values, BooleanArray):
+         if mask is not None:
+             raise ValueError("cannot pass mask for BooleanArray input")
+         values, mask = values._data, values._mask
+         if copy:
+             values = values.copy()
+             mask = mask.copy()
+         return values, mask
+
+     mask_values = None
+     if isinstance(values, np.ndarray) and values.dtype == np.bool_:
+         if copy:
+             values = values.copy()
+     elif isinstance(values, np.ndarray) and values.dtype.kind in "iufcb":
+         mask_values = isna(values)
+
+         values_bool = np.zeros(len(values), dtype=bool)
+         values_bool[~mask_values] = values[~mask_values].astype(bool)
+
+         if not np.all(
+             values_bool[~mask_values].astype(values.dtype) == values[~mask_values]
+         ):
+             raise TypeError("Need to pass bool-like values")
+
+         values = values_bool
+     else:
+         values_object = np.asarray(values, dtype=object)
+
+         inferred_dtype = lib.infer_dtype(values_object, skipna=True)
+         integer_like = ("floating", "integer", "mixed-integer-float")
+         if inferred_dtype not in ("boolean", "empty") + integer_like:
+             raise TypeError("Need to pass bool-like values")
+
+         # mypy does not narrow the type of mask_values to npt.NDArray[np.bool_]
+         # within this branch, it assumes it can also be None
+         mask_values = cast("npt.NDArray[np.bool_]", isna(values_object))
+         values = np.zeros(len(values), dtype=bool)
+         values[~mask_values] = values_object[~mask_values].astype(bool)
+
+         # if the values were integer-like, validate they were actually 0/1's
+         if (inferred_dtype in integer_like) and not (
+             np.all(
+                 values[~mask_values].astype(float)
+                 == values_object[~mask_values].astype(float)
+             )
+         ):
+             raise TypeError("Need to pass bool-like values")
+
+     if mask is None and mask_values is None:
+         mask = np.zeros(values.shape, dtype=bool)
+     elif mask is None:
+         mask = mask_values
+     else:
+         if isinstance(mask, np.ndarray) and mask.dtype == np.bool_:
+             if mask_values is not None:
+                 mask = mask | mask_values
+             else:
+                 if copy:
+                     mask = mask.copy()
+         else:
+             mask = np.array(mask, dtype=bool)
+             if mask_values is not None:
+                 mask = mask | mask_values
+
+     if values.shape != mask.shape:
+         raise ValueError("values.shape and mask.shape must match")
+
+     return values, mask
+
+
+ class BooleanArray(BaseMaskedArray):
+     """
+     Array of boolean (True/False) data with missing values.
+
+     This is a pandas Extension array for boolean data, under the hood
+     represented by 2 numpy arrays: a boolean array with the data and
+     a boolean array with the mask (True indicating missing).
+
+     BooleanArray implements Kleene logic (sometimes called three-value
+     logic) for logical operations. See :ref:`boolean.kleene` for more.
+
+     To construct a BooleanArray from generic array-like input, use
+     :func:`pandas.array` specifying ``dtype="boolean"`` (see examples
+     below).
+
+     .. warning::
+
+        BooleanArray is considered experimental. The implementation and
+        parts of the API may change without warning.
+
+     Parameters
+     ----------
+     values : numpy.ndarray
+         A 1-d boolean-dtype array with the data.
+     mask : numpy.ndarray
+         A 1-d boolean-dtype array indicating missing values (True
+         indicates missing).
+     copy : bool, default False
+         Whether to copy the `values` and `mask` arrays.
+
+     Attributes
+     ----------
+     None
+
+     Methods
+     -------
+     None
+
+     Returns
+     -------
+     BooleanArray
+
+     Examples
+     --------
+     Create a BooleanArray with :func:`pandas.array`:
+
+     >>> pd.array([True, False, None], dtype="boolean")
+     <BooleanArray>
+     [True, False, <NA>]
+     Length: 3, dtype: boolean
+     """
+
+     # The value used to fill '_data' to avoid upcasting
+     _internal_fill_value = False
+     # Fill values used for any/all
+     # Incompatible types in assignment (expression has type "bool", base class
+     # "BaseMaskedArray" defined the type as "<typing special form>")
+     _truthy_value = True  # type: ignore[assignment]
+     _falsey_value = False  # type: ignore[assignment]
+     _TRUE_VALUES = {"True", "TRUE", "true", "1", "1.0"}
+     _FALSE_VALUES = {"False", "FALSE", "false", "0", "0.0"}
+
+     @classmethod
+     def _simple_new(cls, values: np.ndarray, mask: npt.NDArray[np.bool_]) -> Self:
+         result = super()._simple_new(values, mask)
+         result._dtype = BooleanDtype()
+         return result
+
+     def __init__(
+         self, values: np.ndarray, mask: np.ndarray, copy: bool = False
+     ) -> None:
+         if not (isinstance(values, np.ndarray) and values.dtype == np.bool_):
+             raise TypeError(
+                 "values should be boolean numpy array. Use "
+                 "the 'pd.array' function instead"
+             )
+         self._dtype = BooleanDtype()
+         super().__init__(values, mask, copy=copy)
+
+     @property
+     def dtype(self) -> BooleanDtype:
+         return self._dtype
+
+     @classmethod
+     def _from_sequence_of_strings(
+         cls,
+         strings: list[str],
+         *,
+         dtype: Dtype | None = None,
+         copy: bool = False,
+         true_values: list[str] | None = None,
+         false_values: list[str] | None = None,
+     ) -> BooleanArray:
+         true_values_union = cls._TRUE_VALUES.union(true_values or [])
+         false_values_union = cls._FALSE_VALUES.union(false_values or [])
+
+         def map_string(s) -> bool:
+             if s in true_values_union:
+                 return True
+             elif s in false_values_union:
+                 return False
+             else:
+                 raise ValueError(f"{s} cannot be cast to bool")
+
+         scalars = np.array(strings, dtype=object)
+         mask = isna(scalars)
+         scalars[~mask] = list(map(map_string, scalars[~mask]))
+         return cls._from_sequence(scalars, dtype=dtype, copy=copy)
+
+     _HANDLED_TYPES = (np.ndarray, numbers.Number, bool, np.bool_)
+
+     @classmethod
+     def _coerce_to_array(
+         cls, value, *, dtype: DtypeObj, copy: bool = False
+     ) -> tuple[np.ndarray, np.ndarray]:
+         if dtype:
+             assert dtype == "boolean"
+         return coerce_to_array(value, copy=copy)
+
+     def _logical_method(self, other, op):
+         assert op.__name__ in {"or_", "ror_", "and_", "rand_", "xor", "rxor"}
+         other_is_scalar = lib.is_scalar(other)
+         mask = None
+
+         if isinstance(other, BooleanArray):
+             other, mask = other._data, other._mask
+         elif is_list_like(other):
+             other = np.asarray(other, dtype="bool")
+             if other.ndim > 1:
+                 raise NotImplementedError("can only perform ops with 1-d structures")
+             other, mask = coerce_to_array(other, copy=False)
+         elif isinstance(other, np.bool_):
+             other = other.item()
+
+         if other_is_scalar and other is not libmissing.NA and not lib.is_bool(other):
+             raise TypeError(
+                 "'other' should be pandas.NA or a bool. "
+                 f"Got {type(other).__name__} instead."
+             )
+
+         if not other_is_scalar and len(self) != len(other):
+             raise ValueError("Lengths must match")
+
+         if op.__name__ in {"or_", "ror_"}:
+             result, mask = ops.kleene_or(self._data, other, self._mask, mask)
+         elif op.__name__ in {"and_", "rand_"}:
+             result, mask = ops.kleene_and(self._data, other, self._mask, mask)
+         else:
+             # i.e. xor, rxor
+             result, mask = ops.kleene_xor(self._data, other, self._mask, mask)
+
+         # i.e. BooleanArray
+         return self._maybe_mask_result(result, mask)
+
+     def _accumulate(
+         self, name: str, *, skipna: bool = True, **kwargs
+     ) -> BaseMaskedArray:
+         data = self._data
+         mask = self._mask
+         if name in ("cummin", "cummax"):
+             op = getattr(masked_accumulations, name)
+             data, mask = op(data, mask, skipna=skipna, **kwargs)
+             return self._simple_new(data, mask)
+         else:
+             from pandas.core.arrays import IntegerArray
+
+             return IntegerArray(data.astype(int), mask)._accumulate(
+                 name, skipna=skipna, **kwargs
+             )
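The Kleene (three-valued) logic that `_logical_method` dispatches to can be seen directly at the array level; a brief sketch (not part of the diff):

```python
# Sketch: Kleene logic on BooleanArray. NA | True is True, because the
# result no longer depends on the missing operand; NA & True stays NA.
import pandas as pd

a = pd.array([True, False, None], dtype="boolean")
print(a | True)   # [True, True, True]
print(a & True)   # [True, False, <NA>]
```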
venv/lib/python3.10/site-packages/pandas/core/arrays/integer.py ADDED
@@ -0,0 +1,272 @@
+ from __future__ import annotations
+
+ from typing import ClassVar
+
+ import numpy as np
+
+ from pandas.core.dtypes.base import register_extension_dtype
+ from pandas.core.dtypes.common import is_integer_dtype
+
+ from pandas.core.arrays.numeric import (
+     NumericArray,
+     NumericDtype,
+ )
+
+
+ class IntegerDtype(NumericDtype):
+     """
+     An ExtensionDtype to hold a single size & kind of integer dtype.
+
+     These specific implementations are subclasses of the non-public
+     IntegerDtype. For example, we have Int8Dtype to represent signed int 8s.
+
+     The attributes name & type are set when these subclasses are created.
+     """
+
+     _default_np_dtype = np.dtype(np.int64)
+     _checker = is_integer_dtype
+
+     @classmethod
+     def construct_array_type(cls) -> type[IntegerArray]:
+         """
+         Return the array type associated with this dtype.
+
+         Returns
+         -------
+         type
+         """
+         return IntegerArray
+
+     @classmethod
+     def _get_dtype_mapping(cls) -> dict[np.dtype, IntegerDtype]:
+         return NUMPY_INT_TO_DTYPE
+
+     @classmethod
+     def _safe_cast(cls, values: np.ndarray, dtype: np.dtype, copy: bool) -> np.ndarray:
+         """
+         Safely cast the values to the given dtype.
+
+         "safe" in this context means the casting is lossless. e.g. if 'values'
+         has a floating dtype, each value must be an integer.
+         """
+         try:
+             return values.astype(dtype, casting="safe", copy=copy)
+         except TypeError as err:
+             casted = values.astype(dtype, copy=copy)
+             if (casted == values).all():
+                 return casted
+
+             raise TypeError(
+                 f"cannot safely cast non-equivalent {values.dtype} to {np.dtype(dtype)}"
+             ) from err
+
+
+ class IntegerArray(NumericArray):
+     """
+     Array of integer (optional missing) values.
+
+     Uses :attr:`pandas.NA` as the missing value.
+
+     .. warning::
+
+        IntegerArray is currently experimental, and its API or internal
+        implementation may change without warning.
+
+     We represent an IntegerArray with 2 numpy arrays:
+
+     - data: contains a numpy integer array of the appropriate dtype
+     - mask: a boolean array holding a mask on the data, True is missing
+
+     To construct an IntegerArray from generic array-like input, use
+     :func:`pandas.array` with one of the integer dtypes (see examples).
+
+     See :ref:`integer_na` for more.
+
+     Parameters
+     ----------
+     values : numpy.ndarray
+         A 1-d integer-dtype array.
+     mask : numpy.ndarray
+         A 1-d boolean-dtype array indicating missing values.
+     copy : bool, default False
+         Whether to copy the `values` and `mask`.
+
+     Attributes
+     ----------
+     None
+
+     Methods
+     -------
+     None
+
+     Returns
+     -------
+     IntegerArray
+
+     Examples
+     --------
+     Create an IntegerArray with :func:`pandas.array`.
+
+     >>> int_array = pd.array([1, None, 3], dtype=pd.Int32Dtype())
+     >>> int_array
+     <IntegerArray>
+     [1, <NA>, 3]
+     Length: 3, dtype: Int32
+
+     String aliases for the dtypes are also available. They are capitalized.
+
+     >>> pd.array([1, None, 3], dtype='Int32')
+     <IntegerArray>
+     [1, <NA>, 3]
+     Length: 3, dtype: Int32
+
+     >>> pd.array([1, None, 3], dtype='UInt16')
+     <IntegerArray>
+     [1, <NA>, 3]
+     Length: 3, dtype: UInt16
+     """
+
+     _dtype_cls = IntegerDtype
+
+     # The value used to fill '_data' to avoid upcasting
+     _internal_fill_value = 1
+     # Fill values used for any/all
+     # Incompatible types in assignment (expression has type "int", base class
+     # "BaseMaskedArray" defined the type as "<typing special form>")
+     _truthy_value = 1  # type: ignore[assignment]
+     _falsey_value = 0  # type: ignore[assignment]
+
+
+ _dtype_docstring = """
+ An ExtensionDtype for {dtype} integer data.
+
+ Uses :attr:`pandas.NA` as its missing value, rather than :attr:`numpy.nan`.
+
+ Attributes
+ ----------
+ None
+
+ Methods
+ -------
+ None
+
+ Examples
+ --------
+ For Int8Dtype:
+
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.Int8Dtype())
+ >>> ser.dtype
+ Int8Dtype()
+
+ For Int16Dtype:
+
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.Int16Dtype())
+ >>> ser.dtype
+ Int16Dtype()
+
+ For Int32Dtype:
+
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.Int32Dtype())
+ >>> ser.dtype
+ Int32Dtype()
+
+ For Int64Dtype:
+
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.Int64Dtype())
+ >>> ser.dtype
+ Int64Dtype()
+
+ For UInt8Dtype:
+
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt8Dtype())
+ >>> ser.dtype
+ UInt8Dtype()
+
+ For UInt16Dtype:
+
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt16Dtype())
+ >>> ser.dtype
+ UInt16Dtype()
+
+ For UInt32Dtype:
+
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt32Dtype())
+ >>> ser.dtype
+ UInt32Dtype()
+
+ For UInt64Dtype:
+
+ >>> ser = pd.Series([2, pd.NA], dtype=pd.UInt64Dtype())
+ >>> ser.dtype
+ UInt64Dtype()
+ """
+
+ # create the Dtype
+
+
+ @register_extension_dtype
+ class Int8Dtype(IntegerDtype):
+     type = np.int8
+     name: ClassVar[str] = "Int8"
+     __doc__ = _dtype_docstring.format(dtype="int8")
+
+
+ @register_extension_dtype
+ class Int16Dtype(IntegerDtype):
+     type = np.int16
+     name: ClassVar[str] = "Int16"
+     __doc__ = _dtype_docstring.format(dtype="int16")
+
+
+ @register_extension_dtype
+ class Int32Dtype(IntegerDtype):
+     type = np.int32
+     name: ClassVar[str] = "Int32"
+     __doc__ = _dtype_docstring.format(dtype="int32")
+
+
+ @register_extension_dtype
+ class Int64Dtype(IntegerDtype):
+     type = np.int64
+     name: ClassVar[str] = "Int64"
+     __doc__ = _dtype_docstring.format(dtype="int64")
+
+
+ @register_extension_dtype
+ class UInt8Dtype(IntegerDtype):
+     type = np.uint8
+     name: ClassVar[str] = "UInt8"
+     __doc__ = _dtype_docstring.format(dtype="uint8")
+
+
+ @register_extension_dtype
+ class UInt16Dtype(IntegerDtype):
+     type = np.uint16
+     name: ClassVar[str] = "UInt16"
+     __doc__ = _dtype_docstring.format(dtype="uint16")
+
+
+ @register_extension_dtype
+ class UInt32Dtype(IntegerDtype):
+     type = np.uint32
+     name: ClassVar[str] = "UInt32"
+     __doc__ = _dtype_docstring.format(dtype="uint32")
+
+
+ @register_extension_dtype
+ class UInt64Dtype(IntegerDtype):
+     type = np.uint64
+     name: ClassVar[str] = "UInt64"
+     __doc__ = _dtype_docstring.format(dtype="uint64")
+
+
+ NUMPY_INT_TO_DTYPE: dict[np.dtype, IntegerDtype] = {
+     np.dtype(np.int8): Int8Dtype(),
+     np.dtype(np.int16): Int16Dtype(),
+     np.dtype(np.int32): Int32Dtype(),
+     np.dtype(np.int64): Int64Dtype(),
+     np.dtype(np.uint8): UInt8Dtype(),
+     np.dtype(np.uint16): UInt16Dtype(),
+     np.dtype(np.uint32): UInt32Dtype(),
+     np.dtype(np.uint64): UInt64Dtype(),
+ }
@@ -0,0 +1,1650 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ Any,
6
+ Callable,
7
+ Literal,
8
+ overload,
9
+ )
10
+ import warnings
11
+
12
+ import numpy as np
13
+
14
+ from pandas._libs import (
15
+ lib,
16
+ missing as libmissing,
17
+ )
18
+ from pandas._libs.tslibs import is_supported_dtype
19
+ from pandas._typing import (
20
+ ArrayLike,
21
+ AstypeArg,
22
+ AxisInt,
23
+ DtypeObj,
24
+ FillnaOptions,
25
+ InterpolateOptions,
26
+ NpDtype,
27
+ PositionalIndexer,
28
+ Scalar,
29
+ ScalarIndexer,
30
+ Self,
31
+ SequenceIndexer,
32
+ Shape,
33
+ npt,
34
+ )
35
+ from pandas.compat import (
36
+ IS64,
37
+ is_platform_windows,
38
+ )
39
+ from pandas.errors import AbstractMethodError
40
+ from pandas.util._decorators import doc
41
+ from pandas.util._validators import validate_fillna_kwargs
42
+
43
+ from pandas.core.dtypes.base import ExtensionDtype
44
+ from pandas.core.dtypes.common import (
45
+ is_bool,
46
+ is_integer_dtype,
47
+ is_list_like,
48
+ is_scalar,
49
+ is_string_dtype,
50
+ pandas_dtype,
51
+ )
52
+ from pandas.core.dtypes.dtypes import BaseMaskedDtype
53
+ from pandas.core.dtypes.missing import (
54
+ array_equivalent,
55
+ is_valid_na_for_dtype,
56
+ isna,
57
+ notna,
58
+ )
59
+
60
+ from pandas.core import (
61
+ algorithms as algos,
62
+ arraylike,
63
+ missing,
64
+ nanops,
65
+ ops,
66
+ )
67
+ from pandas.core.algorithms import (
68
+ factorize_array,
69
+ isin,
70
+ map_array,
71
+ mode,
72
+ take,
73
+ )
74
+ from pandas.core.array_algos import (
75
+ masked_accumulations,
76
+ masked_reductions,
77
+ )
78
+ from pandas.core.array_algos.quantile import quantile_with_mask
79
+ from pandas.core.arraylike import OpsMixin
80
+ from pandas.core.arrays._utils import to_numpy_dtype_inference
81
+ from pandas.core.arrays.base import ExtensionArray
82
+ from pandas.core.construction import (
83
+ array as pd_array,
84
+ ensure_wrapped_if_datetimelike,
85
+ extract_array,
86
+ )
87
+ from pandas.core.indexers import check_array_indexer
88
+ from pandas.core.ops import invalid_comparison
89
+ from pandas.core.util.hashing import hash_array
90
+
91
+ if TYPE_CHECKING:
92
+ from collections.abc import (
93
+ Iterator,
94
+ Sequence,
95
+ )
96
+ from pandas import Series
97
+ from pandas.core.arrays import BooleanArray
98
+ from pandas._typing import (
99
+ NumpySorter,
100
+ NumpyValueArrayLike,
101
+ )
102
+ from pandas.core.arrays import FloatingArray
103
+
104
+ from pandas.compat.numpy import function as nv
105
+
106
+
107
+ class BaseMaskedArray(OpsMixin, ExtensionArray):
108
+ """
109
+ Base class for masked arrays (which use _data and _mask to store the data).
110
+
111
+ numpy-based
112
+ """
113
+
114
+ # The value used to fill '_data' to avoid upcasting
115
+ _internal_fill_value: Scalar
116
+ # our underlying data and mask are each ndarrays
117
+ _data: np.ndarray
118
+ _mask: npt.NDArray[np.bool_]
119
+
120
+ # Fill values used for any/all
121
+ _truthy_value = Scalar # bool(_truthy_value) = True
122
+ _falsey_value = Scalar # bool(_falsey_value) = False
123
+
124
+ @classmethod
125
+ def _simple_new(cls, values: np.ndarray, mask: npt.NDArray[np.bool_]) -> Self:
126
+ result = BaseMaskedArray.__new__(cls)
127
+ result._data = values
128
+ result._mask = mask
129
+ return result
130
+
131
+ def __init__(
132
+ self, values: np.ndarray, mask: npt.NDArray[np.bool_], copy: bool = False
133
+ ) -> None:
134
+ # values is supposed to already be validated in the subclass
135
+ if not (isinstance(mask, np.ndarray) and mask.dtype == np.bool_):
136
+ raise TypeError(
137
+ "mask should be boolean numpy array. Use "
138
+ "the 'pd.array' function instead"
139
+ )
140
+ if values.shape != mask.shape:
141
+ raise ValueError("values.shape must match mask.shape")
142
+
143
+ if copy:
144
+ values = values.copy()
145
+ mask = mask.copy()
146
+
147
+ self._data = values
148
+ self._mask = mask
149
+
150
+ @classmethod
151
+ def _from_sequence(cls, scalars, *, dtype=None, copy: bool = False) -> Self:
152
+ values, mask = cls._coerce_to_array(scalars, dtype=dtype, copy=copy)
153
+ return cls(values, mask)
154
+
155
+ @classmethod
156
+ @doc(ExtensionArray._empty)
157
+ def _empty(cls, shape: Shape, dtype: ExtensionDtype):
158
+ values = np.empty(shape, dtype=dtype.type)
159
+ values.fill(cls._internal_fill_value)
160
+ mask = np.ones(shape, dtype=bool)
161
+ result = cls(values, mask)
162
+ if not isinstance(result, cls) or dtype != result.dtype:
163
+ raise NotImplementedError(
164
+ f"Default 'empty' implementation is invalid for dtype='{dtype}'"
165
+ )
166
+ return result
167
+
168
+ def _formatter(self, boxed: bool = False) -> Callable[[Any], str | None]:
169
+ # NEP 51: https://github.com/numpy/numpy/pull/22449
170
+ return str
171
+
172
+ @property
173
+ def dtype(self) -> BaseMaskedDtype:
174
+ raise AbstractMethodError(self)
175
+
176
+ @overload
177
+ def __getitem__(self, item: ScalarIndexer) -> Any:
178
+ ...
179
+
180
+ @overload
181
+ def __getitem__(self, item: SequenceIndexer) -> Self:
182
+ ...
183
+
184
+ def __getitem__(self, item: PositionalIndexer) -> Self | Any:
185
+ item = check_array_indexer(self, item)
186
+
187
+ newmask = self._mask[item]
188
+ if is_bool(newmask):
189
+ # This is a scalar indexing
190
+ if newmask:
191
+ return self.dtype.na_value
192
+ return self._data[item]
193
+
194
+ return self._simple_new(self._data[item], newmask)
195
+
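
A small sketch of the behavior `__getitem__` above implements: masked positions return the dtype's NA scalar, and sequence indexers slice data and mask together.

    import pandas as pd

    a = pd.array([1, None, 3], dtype="Int64")
    a[1]    # pd.NA: the mask is True at this position
    a[0]    # 1: unmasked positions return the stored value
    a[1:]   # IntegerArray [<NA>, 3]: data and mask are sliced in lockstep
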
196
+ def _pad_or_backfill(
197
+ self,
198
+ *,
199
+ method: FillnaOptions,
200
+ limit: int | None = None,
201
+ limit_area: Literal["inside", "outside"] | None = None,
202
+ copy: bool = True,
203
+ ) -> Self:
204
+ mask = self._mask
205
+
206
+ if mask.any():
207
+ func = missing.get_fill_func(method, ndim=self.ndim)
208
+
209
+ npvalues = self._data.T
210
+ new_mask = mask.T
211
+ if copy:
212
+ npvalues = npvalues.copy()
213
+ new_mask = new_mask.copy()
214
+ elif limit_area is not None:
215
+ mask = mask.copy()
216
+ func(npvalues, limit=limit, mask=new_mask)
217
+
218
+ if limit_area is not None and not mask.all():
219
+ mask = mask.T
220
+ neg_mask = ~mask
221
+ first = neg_mask.argmax()
222
+ last = len(neg_mask) - neg_mask[::-1].argmax() - 1
223
+ if limit_area == "inside":
224
+ new_mask[:first] |= mask[:first]
225
+ new_mask[last + 1 :] |= mask[last + 1 :]
226
+ elif limit_area == "outside":
227
+ new_mask[first + 1 : last] |= mask[first + 1 : last]
228
+
229
+ if copy:
230
+ return self._simple_new(npvalues.T, new_mask.T)
231
+ else:
232
+ return self
233
+ else:
234
+ if copy:
235
+ new_values = self.copy()
236
+ else:
237
+ new_values = self
238
+ return new_values
239
+
240
+ @doc(ExtensionArray.fillna)
241
+ def fillna(
242
+ self, value=None, method=None, limit: int | None = None, copy: bool = True
243
+ ) -> Self:
244
+ value, method = validate_fillna_kwargs(value, method)
245
+
246
+ mask = self._mask
247
+
248
+ value = missing.check_value_size(value, mask, len(self))
249
+
250
+ if mask.any():
251
+ if method is not None:
252
+ func = missing.get_fill_func(method, ndim=self.ndim)
253
+ npvalues = self._data.T
254
+ new_mask = mask.T
255
+ if copy:
256
+ npvalues = npvalues.copy()
257
+ new_mask = new_mask.copy()
258
+ func(npvalues, limit=limit, mask=new_mask)
259
+ return self._simple_new(npvalues.T, new_mask.T)
260
+ else:
261
+ # fill with value
262
+ if copy:
263
+ new_values = self.copy()
264
+ else:
265
+ new_values = self[:]
266
+ new_values[mask] = value
267
+ else:
268
+ if copy:
269
+ new_values = self.copy()
270
+ else:
271
+ new_values = self[:]
272
+ return new_values
273
+
274
+ @classmethod
275
+ def _coerce_to_array(
276
+ cls, values, *, dtype: DtypeObj, copy: bool = False
277
+ ) -> tuple[np.ndarray, np.ndarray]:
278
+ raise AbstractMethodError(cls)
279
+
280
+ def _validate_setitem_value(self, value):
281
+ """
282
+ Check if we have a scalar that we can cast losslessly.
283
+
284
+ Raises
285
+ ------
286
+ TypeError
287
+ """
288
+ kind = self.dtype.kind
289
+ # TODO: get this all from np_can_hold_element?
290
+ if kind == "b":
291
+ if lib.is_bool(value):
292
+ return value
293
+
294
+ elif kind == "f":
295
+ if lib.is_integer(value) or lib.is_float(value):
296
+ return value
297
+
298
+ else:
299
+ if lib.is_integer(value) or (lib.is_float(value) and value.is_integer()):
300
+ return value
301
+ # TODO: unsigned checks
302
+
303
+ # Note: without the "str" here, the f-string rendering raises in
304
+ # py38 builds.
305
+ raise TypeError(f"Invalid value '{str(value)}' for dtype {self.dtype}")
306
+
307
+ def __setitem__(self, key, value) -> None:
308
+ key = check_array_indexer(self, key)
309
+
310
+ if is_scalar(value):
311
+ if is_valid_na_for_dtype(value, self.dtype):
312
+ self._mask[key] = True
313
+ else:
314
+ value = self._validate_setitem_value(value)
315
+ self._data[key] = value
316
+ self._mask[key] = False
317
+ return
318
+
319
+ value, mask = self._coerce_to_array(value, dtype=self.dtype)
320
+
321
+ self._data[key] = value
322
+ self._mask[key] = mask
323
+
324
+ def __contains__(self, key) -> bool:
325
+ if isna(key) and key is not self.dtype.na_value:
326
+ # GH#52840
327
+ if self._data.dtype.kind == "f" and lib.is_float(key):
328
+ return bool((np.isnan(self._data) & ~self._mask).any())
329
+
330
+ return bool(super().__contains__(key))
331
+
332
+ def __iter__(self) -> Iterator:
333
+ if self.ndim == 1:
334
+ if not self._hasna:
335
+ for val in self._data:
336
+ yield val
337
+ else:
338
+ na_value = self.dtype.na_value
339
+ for isna_, val in zip(self._mask, self._data):
340
+ if isna_:
341
+ yield na_value
342
+ else:
343
+ yield val
344
+ else:
345
+ for i in range(len(self)):
346
+ yield self[i]
347
+
348
+ def __len__(self) -> int:
349
+ return len(self._data)
350
+
351
+ @property
352
+ def shape(self) -> Shape:
353
+ return self._data.shape
354
+
355
+ @property
356
+ def ndim(self) -> int:
357
+ return self._data.ndim
358
+
359
+ def swapaxes(self, axis1, axis2) -> Self:
360
+ data = self._data.swapaxes(axis1, axis2)
361
+ mask = self._mask.swapaxes(axis1, axis2)
362
+ return self._simple_new(data, mask)
363
+
364
+ def delete(self, loc, axis: AxisInt = 0) -> Self:
365
+ data = np.delete(self._data, loc, axis=axis)
366
+ mask = np.delete(self._mask, loc, axis=axis)
367
+ return self._simple_new(data, mask)
368
+
369
+ def reshape(self, *args, **kwargs) -> Self:
370
+ data = self._data.reshape(*args, **kwargs)
371
+ mask = self._mask.reshape(*args, **kwargs)
372
+ return self._simple_new(data, mask)
373
+
374
+ def ravel(self, *args, **kwargs) -> Self:
375
+ # TODO: need to make sure we have the same order for data/mask
376
+ data = self._data.ravel(*args, **kwargs)
377
+ mask = self._mask.ravel(*args, **kwargs)
378
+ return type(self)(data, mask)
379
+
380
+ @property
381
+ def T(self) -> Self:
382
+ return self._simple_new(self._data.T, self._mask.T)
383
+
384
+ def round(self, decimals: int = 0, *args, **kwargs):
385
+ """
386
+ Round each value in the array to the given number of decimals.
387
+
388
+ Parameters
389
+ ----------
390
+ decimals : int, default 0
391
+ Number of decimal places to round to. If decimals is negative,
392
+ it specifies the number of positions to the left of the decimal point.
393
+ *args, **kwargs
394
+ Additional arguments and keywords have no effect but might be
395
+ accepted for compatibility with NumPy.
396
+
397
+ Returns
398
+ -------
399
+ NumericArray
400
+ Rounded values of the NumericArray.
401
+
402
+ See Also
403
+ --------
404
+ numpy.around : Round values of an np.array.
405
+ DataFrame.round : Round values of a DataFrame.
406
+ Series.round : Round values of a Series.
407
+ """
408
+ if self.dtype.kind == "b":
409
+ return self
410
+ nv.validate_round(args, kwargs)
411
+ values = np.round(self._data, decimals=decimals, **kwargs)
412
+
413
+ # Usually we'll get same type as self, but ndarray[bool] casts to float
414
+ return self._maybe_mask_result(values, self._mask.copy())
415
+
416
+ # ------------------------------------------------------------------
417
+ # Unary Methods
418
+
419
+ def __invert__(self) -> Self:
420
+ return self._simple_new(~self._data, self._mask.copy())
421
+
422
+ def __neg__(self) -> Self:
423
+ return self._simple_new(-self._data, self._mask.copy())
424
+
425
+ def __pos__(self) -> Self:
426
+ return self.copy()
427
+
428
+ def __abs__(self) -> Self:
429
+ return self._simple_new(abs(self._data), self._mask.copy())
430
+
431
+ # ------------------------------------------------------------------
432
+
433
+ def _values_for_json(self) -> np.ndarray:
434
+ return np.asarray(self, dtype=object)
435
+
436
+ def to_numpy(
437
+ self,
438
+ dtype: npt.DTypeLike | None = None,
439
+ copy: bool = False,
440
+ na_value: object = lib.no_default,
441
+ ) -> np.ndarray:
442
+ """
443
+ Convert to a NumPy Array.
444
+
445
+ By default converts to an object-dtype NumPy array. Specify the `dtype` and
446
+ `na_value` keywords to customize the conversion.
447
+
448
+ Parameters
449
+ ----------
450
+ dtype : dtype, default object
451
+ The numpy dtype to convert to.
452
+ copy : bool, default False
453
+ Whether to ensure that the returned value is not a view on
454
+ the array. Note that ``copy=False`` does not *ensure* that
455
+ ``to_numpy()`` is no-copy. Rather, ``copy=True`` ensures that
456
+ a copy is made, even if not strictly necessary. This is typically
457
+ only possible when no missing values are present and `dtype`
458
+ is the equivalent numpy dtype.
459
+ na_value : scalar, optional
460
+ Scalar missing value indicator to use in numpy array. Defaults
461
+ to the native missing value indicator of this array (pd.NA).
462
+
463
+ Returns
464
+ -------
465
+ numpy.ndarray
466
+
467
+ Examples
468
+ --------
469
+ An object-dtype is the default result
470
+
471
+ >>> a = pd.array([True, False, pd.NA], dtype="boolean")
472
+ >>> a.to_numpy()
473
+ array([True, False, <NA>], dtype=object)
474
+
475
+ When no missing values are present, an equivalent dtype can be used.
476
+
477
+ >>> pd.array([True, False], dtype="boolean").to_numpy(dtype="bool")
478
+ array([ True, False])
479
+ >>> pd.array([1, 2], dtype="Int64").to_numpy("int64")
480
+ array([1, 2])
481
+
482
+ However, requesting such a dtype will raise a ValueError if
483
+ missing values are present and the default missing value :attr:`NA`
484
+ is used.
485
+
486
+ >>> a = pd.array([True, False, pd.NA], dtype="boolean")
487
+ >>> a
488
+ <BooleanArray>
489
+ [True, False, <NA>]
490
+ Length: 3, dtype: boolean
491
+
492
+ >>> a.to_numpy(dtype="bool")
493
+ Traceback (most recent call last):
494
+ ...
495
+ ValueError: cannot convert to bool numpy array in presence of missing values
496
+
497
+ Specify a valid `na_value` instead
498
+
499
+ >>> a.to_numpy(dtype="bool", na_value=False)
500
+ array([ True, False, False])
501
+ """
502
+ hasna = self._hasna
503
+ dtype, na_value = to_numpy_dtype_inference(self, dtype, na_value, hasna)
504
+ if dtype is None:
505
+ dtype = object
506
+
507
+ if hasna:
508
+ if (
509
+ dtype != object
510
+ and not is_string_dtype(dtype)
511
+ and na_value is libmissing.NA
512
+ ):
513
+ raise ValueError(
514
+ f"cannot convert to '{dtype}'-dtype NumPy array "
515
+ "with missing values. Specify an appropriate 'na_value' "
516
+ "for this dtype."
517
+ )
518
+ # don't pass copy to astype -> always need a copy since we are mutating
519
+ with warnings.catch_warnings():
520
+ warnings.filterwarnings("ignore", category=RuntimeWarning)
521
+ data = self._data.astype(dtype)
522
+ data[self._mask] = na_value
523
+ else:
524
+ with warnings.catch_warnings():
525
+ warnings.filterwarnings("ignore", category=RuntimeWarning)
526
+ data = self._data.astype(dtype, copy=copy)
527
+ return data
528
+
529
+ @doc(ExtensionArray.tolist)
530
+ def tolist(self):
531
+ if self.ndim > 1:
532
+ return [x.tolist() for x in self]
533
+ dtype = None if self._hasna else self._data.dtype
534
+ return self.to_numpy(dtype=dtype, na_value=libmissing.NA).tolist()
535
+
536
+ @overload
537
+ def astype(self, dtype: npt.DTypeLike, copy: bool = ...) -> np.ndarray:
538
+ ...
539
+
540
+ @overload
541
+ def astype(self, dtype: ExtensionDtype, copy: bool = ...) -> ExtensionArray:
542
+ ...
543
+
544
+ @overload
545
+ def astype(self, dtype: AstypeArg, copy: bool = ...) -> ArrayLike:
546
+ ...
547
+
548
+ def astype(self, dtype: AstypeArg, copy: bool = True) -> ArrayLike:
549
+ dtype = pandas_dtype(dtype)
550
+
551
+ if dtype == self.dtype:
552
+ if copy:
553
+ return self.copy()
554
+ return self
555
+
556
+ # if we are astyping to another nullable masked dtype, we can fastpath
557
+ if isinstance(dtype, BaseMaskedDtype):
558
+ # TODO deal with NaNs for FloatingArray case
559
+ with warnings.catch_warnings():
560
+ warnings.filterwarnings("ignore", category=RuntimeWarning)
561
+ # TODO: Is rounding what we want long term?
562
+ data = self._data.astype(dtype.numpy_dtype, copy=copy)
563
+ # mask is copied depending on whether the data was copied, and
564
+ # not directly depending on the `copy` keyword
565
+ mask = self._mask if data is self._data else self._mask.copy()
566
+ cls = dtype.construct_array_type()
567
+ return cls(data, mask, copy=False)
568
+
569
+ if isinstance(dtype, ExtensionDtype):
570
+ eacls = dtype.construct_array_type()
571
+ return eacls._from_sequence(self, dtype=dtype, copy=copy)
572
+
573
+ na_value: float | np.datetime64 | lib.NoDefault
574
+
575
+ # coerce
576
+ if dtype.kind == "f":
577
+ # In astype, we consider dtype=float to also mean na_value=np.nan
578
+ na_value = np.nan
579
+ elif dtype.kind == "M":
580
+ na_value = np.datetime64("NaT")
581
+ else:
582
+ na_value = lib.no_default
583
+
584
+ # to_numpy will also raise, but we get somewhat nicer exception messages here
585
+ if dtype.kind in "iu" and self._hasna:
586
+ raise ValueError("cannot convert NA to integer")
587
+ if dtype.kind == "b" and self._hasna:
588
+ # careful: astype_nansafe converts np.nan to True
589
+ raise ValueError("cannot convert float NaN to bool")
590
+
591
+ data = self.to_numpy(dtype=dtype, na_value=na_value, copy=copy)
592
+ return data
593
+
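
The `astype` above takes a fast path between masked dtypes (reusing the mask), falls back to `to_numpy` for plain numpy targets, and raises early for conversions that cannot represent NA. A hedged sketch of the resulting behavior:

    import pandas as pd

    a = pd.array([1, None], dtype="Int64")
    a.astype("Float64")   # masked-to-masked fast path: [1.0, <NA>]
    a.astype("float64")   # numpy target: NA becomes np.nan
    # a.astype("int64") raises ValueError: cannot convert NA to integer
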
594
+ __array_priority__ = 1000 # higher than ndarray so ops dispatch to us
595
+
596
+ def __array__(
597
+ self, dtype: NpDtype | None = None, copy: bool | None = None
598
+ ) -> np.ndarray:
599
+ """
600
+ The array interface; return my values.
601
+ We return an object array here to preserve our scalar values.
602
+ """
603
+ return self.to_numpy(dtype=dtype)
604
+
605
+ _HANDLED_TYPES: tuple[type, ...]
606
+
607
+ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
608
+ # For MaskedArray inputs, we apply the ufunc to ._data
609
+ # and mask the result.
610
+
611
+ out = kwargs.get("out", ())
612
+
613
+ for x in inputs + out:
614
+ if not isinstance(x, self._HANDLED_TYPES + (BaseMaskedArray,)):
615
+ return NotImplemented
616
+
617
+ # for binary ops, use our custom dunder methods
618
+ result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
619
+ self, ufunc, method, *inputs, **kwargs
620
+ )
621
+ if result is not NotImplemented:
622
+ return result
623
+
624
+ if "out" in kwargs:
625
+ # e.g. test_ufunc_with_out
626
+ return arraylike.dispatch_ufunc_with_out(
627
+ self, ufunc, method, *inputs, **kwargs
628
+ )
629
+
630
+ if method == "reduce":
631
+ result = arraylike.dispatch_reduction_ufunc(
632
+ self, ufunc, method, *inputs, **kwargs
633
+ )
634
+ if result is not NotImplemented:
635
+ return result
636
+
637
+ mask = np.zeros(len(self), dtype=bool)
638
+ inputs2 = []
639
+ for x in inputs:
640
+ if isinstance(x, BaseMaskedArray):
641
+ mask |= x._mask
642
+ inputs2.append(x._data)
643
+ else:
644
+ inputs2.append(x)
645
+
646
+ def reconstruct(x: np.ndarray):
647
+ # we don't worry about scalar `x` here, since we
648
+ # raise for reduce up above.
649
+ from pandas.core.arrays import (
650
+ BooleanArray,
651
+ FloatingArray,
652
+ IntegerArray,
653
+ )
654
+
655
+ if x.dtype.kind == "b":
656
+ m = mask.copy()
657
+ return BooleanArray(x, m)
658
+ elif x.dtype.kind in "iu":
659
+ m = mask.copy()
660
+ return IntegerArray(x, m)
661
+ elif x.dtype.kind == "f":
662
+ m = mask.copy()
663
+ if x.dtype == np.float16:
664
+ # reached in e.g. np.sqrt on BooleanArray
665
+ # we don't support float16
666
+ x = x.astype(np.float32)
667
+ return FloatingArray(x, m)
668
+ else:
669
+ x[mask] = np.nan
670
+ return x
671
+
672
+ result = getattr(ufunc, method)(*inputs2, **kwargs)
673
+ if ufunc.nout > 1:
674
+ # e.g. np.divmod
675
+ return tuple(reconstruct(x) for x in result)
676
+ elif method == "reduce":
677
+ # e.g. np.add.reduce; test_ufunc_reduce_raises
678
+ if self._mask.any():
679
+ return self._na_value
680
+ return result
681
+ else:
682
+ return reconstruct(result)
683
+
684
+ def __arrow_array__(self, type=None):
685
+ """
686
+ Convert myself into a pyarrow Array.
687
+ """
688
+ import pyarrow as pa
689
+
690
+ return pa.array(self._data, mask=self._mask, type=type)
691
+
692
+ @property
693
+ def _hasna(self) -> bool:
694
+ # Note: this is expensive right now! The hope is that we can
695
+ # make this faster by having an optional mask, but not have to change
696
+ # source code using it.
697
+
698
+ # error: Incompatible return value type (got "bool_", expected "bool")
699
+ return self._mask.any() # type: ignore[return-value]
700
+
701
+ def _propagate_mask(
702
+ self, mask: npt.NDArray[np.bool_] | None, other
703
+ ) -> npt.NDArray[np.bool_]:
704
+ if mask is None:
705
+ mask = self._mask.copy() # TODO: need test for BooleanArray needing a copy
706
+ if other is libmissing.NA:
707
+ # GH#45421 don't alter inplace
708
+ mask = mask | True
709
+ elif is_list_like(other) and len(other) == len(mask):
710
+ mask = mask | isna(other)
711
+ else:
712
+ mask = self._mask | mask
713
+ # Incompatible return value type (got "Optional[ndarray[Any, dtype[bool_]]]",
714
+ # expected "ndarray[Any, dtype[bool_]]")
715
+ return mask # type: ignore[return-value]
716
+
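
`_propagate_mask` above ORs the operands' masks so that NA positions in either input stay NA in the result. For example:

    import pandas as pd

    a = pd.array([1, None, 3], dtype="Int64")
    b = pd.array([None, 2, 3], dtype="Int64")
    a + b  # [<NA>, <NA>, 6]: the result mask is the union of both masks
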
717
+ def _arith_method(self, other, op):
718
+ op_name = op.__name__
719
+ omask = None
720
+
721
+ if (
722
+ not hasattr(other, "dtype")
723
+ and is_list_like(other)
724
+ and len(other) == len(self)
725
+ ):
726
+ # Try inferring masked dtype instead of casting to object
727
+ other = pd_array(other)
728
+ other = extract_array(other, extract_numpy=True)
729
+
730
+ if isinstance(other, BaseMaskedArray):
731
+ other, omask = other._data, other._mask
732
+
733
+ elif is_list_like(other):
734
+ if not isinstance(other, ExtensionArray):
735
+ other = np.asarray(other)
736
+ if other.ndim > 1:
737
+ raise NotImplementedError("can only perform ops with 1-d structures")
738
+
739
+ # We wrap the non-masked arithmetic logic used for numpy dtypes
740
+ # in Series/Index arithmetic ops.
741
+ other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
742
+ pd_op = ops.get_array_op(op)
743
+ other = ensure_wrapped_if_datetimelike(other)
744
+
745
+ if op_name in {"pow", "rpow"} and isinstance(other, np.bool_):
746
+ # Avoid DeprecationWarning: In future, it will be an error
747
+ # for 'np.bool_' scalars to be interpreted as an index
748
+ # e.g. test_array_scalar_like_equivalence
749
+ other = bool(other)
750
+
751
+ mask = self._propagate_mask(omask, other)
752
+
753
+ if other is libmissing.NA:
754
+ result = np.ones_like(self._data)
755
+ if self.dtype.kind == "b":
756
+ if op_name in {
757
+ "floordiv",
758
+ "rfloordiv",
759
+ "pow",
760
+ "rpow",
761
+ "truediv",
762
+ "rtruediv",
763
+ }:
764
+ # GH#41165 Try to match non-masked Series behavior
765
+ # This is still imperfect GH#46043
766
+ raise NotImplementedError(
767
+ f"operator '{op_name}' not implemented for bool dtypes"
768
+ )
769
+ if op_name in {"mod", "rmod"}:
770
+ dtype = "int8"
771
+ else:
772
+ dtype = "bool"
773
+ result = result.astype(dtype)
774
+ elif "truediv" in op_name and self.dtype.kind != "f":
775
+ # The actual data here doesn't matter since the mask
776
+ # will be all-True, but since this is division, we want
777
+ # to end up with floating dtype.
778
+ result = result.astype(np.float64)
779
+ else:
780
+ # Make sure we do this before the "pow" mask checks
781
+ # to get an expected exception message on shape mismatch.
782
+ if self.dtype.kind in "iu" and op_name in ["floordiv", "mod"]:
783
+ # TODO(GH#30188) ATM we don't match the behavior of non-masked
784
+ # types with respect to floordiv-by-zero
785
+ pd_op = op
786
+
787
+ with np.errstate(all="ignore"):
788
+ result = pd_op(self._data, other)
789
+
790
+ if op_name == "pow":
791
+ # 1 ** x is 1.
792
+ mask = np.where((self._data == 1) & ~self._mask, False, mask)
793
+ # x ** 0 is 1.
794
+ if omask is not None:
795
+ mask = np.where((other == 0) & ~omask, False, mask)
796
+ elif other is not libmissing.NA:
797
+ mask = np.where(other == 0, False, mask)
798
+
799
+ elif op_name == "rpow":
800
+ # 1 ** x is 1.
801
+ if omask is not None:
802
+ mask = np.where((other == 1) & ~omask, False, mask)
803
+ elif other is not libmissing.NA:
804
+ mask = np.where(other == 1, False, mask)
805
+ # x ** 0 is 1.
806
+ mask = np.where((self._data == 0) & ~self._mask, False, mask)
807
+
808
+ return self._maybe_mask_result(result, mask)
809
+
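
The pow/rpow special cases above un-mask results that are mathematically defined regardless of the NA operand. A quick sketch:

    import pandas as pd

    x = pd.array([1, 2, 0], dtype="Int64")
    x ** pd.NA   # [1, <NA>, <NA>]: 1 ** anything is 1
    pd.NA ** x   # [<NA>, <NA>, 1]: anything ** 0 is 1
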
810
+ _logical_method = _arith_method
811
+
812
+ def _cmp_method(self, other, op) -> BooleanArray:
813
+ from pandas.core.arrays import BooleanArray
814
+
815
+ mask = None
816
+
817
+ if isinstance(other, BaseMaskedArray):
818
+ other, mask = other._data, other._mask
819
+
820
+ elif is_list_like(other):
821
+ other = np.asarray(other)
822
+ if other.ndim > 1:
823
+ raise NotImplementedError("can only perform ops with 1-d structures")
824
+ if len(self) != len(other):
825
+ raise ValueError("Lengths must match to compare")
826
+
827
+ if other is libmissing.NA:
828
+ # numpy does not handle pd.NA well as "other" scalar (it returns
829
+ # a scalar False instead of an array)
830
+ # This may be fixed by NA.__array_ufunc__. Revisit this check
831
+ # once that's implemented.
832
+ result = np.zeros(self._data.shape, dtype="bool")
833
+ mask = np.ones(self._data.shape, dtype="bool")
834
+ else:
835
+ with warnings.catch_warnings():
836
+ # numpy may show a FutureWarning or DeprecationWarning:
837
+ # elementwise comparison failed; returning scalar instead,
838
+ # but in the future will perform elementwise comparison
839
+ # before returning NotImplemented. We fall back to the correct
840
+ # behavior today, so that should be fine to ignore.
841
+ warnings.filterwarnings("ignore", "elementwise", FutureWarning)
842
+ warnings.filterwarnings("ignore", "elementwise", DeprecationWarning)
843
+ method = getattr(self._data, f"__{op.__name__}__")
844
+ result = method(other)
845
+
846
+ if result is NotImplemented:
847
+ result = invalid_comparison(self._data, other, op)
848
+
849
+ mask = self._propagate_mask(mask, other)
850
+ return BooleanArray(result, mask, copy=False)
851
+
852
+ def _maybe_mask_result(
853
+ self, result: np.ndarray | tuple[np.ndarray, np.ndarray], mask: np.ndarray
854
+ ):
855
+ """
856
+ Parameters
857
+ ----------
858
+ result : array-like or tuple[array-like]
859
+ mask : array-like bool
860
+ """
861
+ if isinstance(result, tuple):
862
+ # i.e. divmod
863
+ div, mod = result
864
+ return (
865
+ self._maybe_mask_result(div, mask),
866
+ self._maybe_mask_result(mod, mask),
867
+ )
868
+
869
+ if result.dtype.kind == "f":
870
+ from pandas.core.arrays import FloatingArray
871
+
872
+ return FloatingArray(result, mask, copy=False)
873
+
874
+ elif result.dtype.kind == "b":
875
+ from pandas.core.arrays import BooleanArray
876
+
877
+ return BooleanArray(result, mask, copy=False)
878
+
879
+ elif lib.is_np_dtype(result.dtype, "m") and is_supported_dtype(result.dtype):
880
+ # e.g. test_numeric_arr_mul_tdscalar_numexpr_path
881
+ from pandas.core.arrays import TimedeltaArray
882
+
883
+ result[mask] = result.dtype.type("NaT")
884
+
885
+ if not isinstance(result, TimedeltaArray):
886
+ return TimedeltaArray._simple_new(result, dtype=result.dtype)
887
+
888
+ return result
889
+
890
+ elif result.dtype.kind in "iu":
891
+ from pandas.core.arrays import IntegerArray
892
+
893
+ return IntegerArray(result, mask, copy=False)
894
+
895
+ else:
896
+ result[mask] = np.nan
897
+ return result
898
+
899
+ def isna(self) -> np.ndarray:
900
+ return self._mask.copy()
901
+
902
+ @property
903
+ def _na_value(self):
904
+ return self.dtype.na_value
905
+
906
+ @property
907
+ def nbytes(self) -> int:
908
+ return self._data.nbytes + self._mask.nbytes
909
+
910
+ @classmethod
911
+ def _concat_same_type(
912
+ cls,
913
+ to_concat: Sequence[Self],
914
+ axis: AxisInt = 0,
915
+ ) -> Self:
916
+ data = np.concatenate([x._data for x in to_concat], axis=axis)
917
+ mask = np.concatenate([x._mask for x in to_concat], axis=axis)
918
+ return cls(data, mask)
919
+
920
+ def _hash_pandas_object(
921
+ self, *, encoding: str, hash_key: str, categorize: bool
922
+ ) -> npt.NDArray[np.uint64]:
923
+ hashed_array = hash_array(
924
+ self._data, encoding=encoding, hash_key=hash_key, categorize=categorize
925
+ )
926
+ hashed_array[self.isna()] = hash(self.dtype.na_value)
927
+ return hashed_array
928
+
929
+ def take(
930
+ self,
931
+ indexer,
932
+ *,
933
+ allow_fill: bool = False,
934
+ fill_value: Scalar | None = None,
935
+ axis: AxisInt = 0,
936
+ ) -> Self:
937
+ # we always fill with 1 internally
938
+ # to avoid upcasting
939
+ data_fill_value = self._internal_fill_value if isna(fill_value) else fill_value
940
+ result = take(
941
+ self._data,
942
+ indexer,
943
+ fill_value=data_fill_value,
944
+ allow_fill=allow_fill,
945
+ axis=axis,
946
+ )
947
+
948
+ mask = take(
949
+ self._mask, indexer, fill_value=True, allow_fill=allow_fill, axis=axis
950
+ )
951
+
952
+ # if we are filling
953
+ # we only fill where the indexer is null
954
+ # not existing missing values
955
+ # TODO(jreback) what if we have a non-na float as a fill value?
956
+ if allow_fill and notna(fill_value):
957
+ fill_mask = np.asarray(indexer) == -1
958
+ result[fill_mask] = fill_value
959
+ mask = mask ^ fill_mask
960
+
961
+ return self._simple_new(result, mask)
962
+
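
`take` above fills with the cheap internal fill value first and then patches in the user's `fill_value`, so only positions selected with -1 end up filled. A sketch:

    import pandas as pd

    a = pd.array([10, 20, 30], dtype="Int64")
    a.take([0, -1], allow_fill=True)                 # [10, <NA>]
    a.take([0, -1], allow_fill=True, fill_value=0)   # [10, 0]
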
963
+ # error: Return type "BooleanArray" of "isin" incompatible with return type
964
+ # "ndarray" in supertype "ExtensionArray"
965
+ def isin(self, values: ArrayLike) -> BooleanArray: # type: ignore[override]
966
+ from pandas.core.arrays import BooleanArray
967
+
968
+ # algorithms.isin will eventually convert values to an ndarray, so no extra
969
+ # cost to doing it here first
970
+ values_arr = np.asarray(values)
971
+ result = isin(self._data, values_arr)
972
+
973
+ if self._hasna:
974
+ values_have_NA = values_arr.dtype == object and any(
975
+ val is self.dtype.na_value for val in values_arr
976
+ )
977
+
978
+ # For now, NA does not propagate so set result according to presence of NA,
979
+ # see https://github.com/pandas-dev/pandas/pull/38379 for some discussion
980
+ result[self._mask] = values_have_NA
981
+
982
+ mask = np.zeros(self._data.shape, dtype=bool)
983
+ return BooleanArray(result, mask, copy=False)
984
+
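
Per the GH#38379 behavior above, NA does not propagate through `isin`: a masked entry matches only when `values` explicitly contains the NA scalar.

    import pandas as pd

    a = pd.array([1, None], dtype="Int64")
    a.isin([1])          # [True, False]
    a.isin([1, pd.NA])   # [True, True]
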
985
+ def copy(self) -> Self:
986
+ data = self._data.copy()
987
+ mask = self._mask.copy()
988
+ return self._simple_new(data, mask)
989
+
990
+ @doc(ExtensionArray.duplicated)
991
+ def duplicated(
992
+ self, keep: Literal["first", "last", False] = "first"
993
+ ) -> npt.NDArray[np.bool_]:
994
+ values = self._data
995
+ mask = self._mask
996
+ return algos.duplicated(values, keep=keep, mask=mask)
997
+
998
+ def unique(self) -> Self:
999
+ """
1000
+ Compute the BaseMaskedArray of unique values.
1001
+
1002
+ Returns
1003
+ -------
1004
+ uniques : BaseMaskedArray
1005
+ """
1006
+ uniques, mask = algos.unique_with_mask(self._data, self._mask)
1007
+ return self._simple_new(uniques, mask)
1008
+
1009
+ @doc(ExtensionArray.searchsorted)
1010
+ def searchsorted(
1011
+ self,
1012
+ value: NumpyValueArrayLike | ExtensionArray,
1013
+ side: Literal["left", "right"] = "left",
1014
+ sorter: NumpySorter | None = None,
1015
+ ) -> npt.NDArray[np.intp] | np.intp:
1016
+ if self._hasna:
1017
+ raise ValueError(
1018
+ "searchsorted requires array to be sorted, which is impossible "
1019
+ "with NAs present."
1020
+ )
1021
+ if isinstance(value, ExtensionArray):
1022
+ value = value.astype(object)
1023
+ # Base class searchsorted would cast to object, which is *much* slower.
1024
+ return self._data.searchsorted(value, side=side, sorter=sorter)
1025
+
1026
+ @doc(ExtensionArray.factorize)
1027
+ def factorize(
1028
+ self,
1029
+ use_na_sentinel: bool = True,
1030
+ ) -> tuple[np.ndarray, ExtensionArray]:
1031
+ arr = self._data
1032
+ mask = self._mask
1033
+
1034
+ # Use a sentinel for na; recode and add NA to uniques if necessary below
1035
+ codes, uniques = factorize_array(arr, use_na_sentinel=True, mask=mask)
1036
+
1037
+ # check that factorize_array correctly preserves dtype.
1038
+ assert uniques.dtype == self.dtype.numpy_dtype, (uniques.dtype, self.dtype)
1039
+
1040
+ has_na = mask.any()
1041
+ if use_na_sentinel or not has_na:
1042
+ size = len(uniques)
1043
+ else:
1044
+ # Make room for an NA value
1045
+ size = len(uniques) + 1
1046
+ uniques_mask = np.zeros(size, dtype=bool)
1047
+ if not use_na_sentinel and has_na:
1048
+ na_index = mask.argmax()
1049
+ # Insert na with the proper code
1050
+ if na_index == 0:
1051
+ na_code = np.intp(0)
1052
+ else:
1053
+ na_code = codes[:na_index].max() + 1
1054
+ codes[codes >= na_code] += 1
1055
+ codes[codes == -1] = na_code
1056
+ # dummy value for uniques; not used since uniques_mask will be True
1057
+ uniques = np.insert(uniques, na_code, 0)
1058
+ uniques_mask[na_code] = True
1059
+ uniques_ea = self._simple_new(uniques, uniques_mask)
1060
+
1061
+ return codes, uniques_ea
1062
+
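
`factorize` above always factorizes with a sentinel internally and only recodes NA into its own unique when ``use_na_sentinel=False``. For example:

    import pandas as pd

    a = pd.array([1, None, 1, 2], dtype="Int64")
    codes, uniques = a.factorize()
    # codes: [0, -1, 0, 1]; uniques: [1, 2]
    codes, uniques = a.factorize(use_na_sentinel=False)
    # codes: [0, 1, 0, 2]; uniques: [1, <NA>, 2]
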
1063
+ @doc(ExtensionArray._values_for_argsort)
1064
+ def _values_for_argsort(self) -> np.ndarray:
1065
+ return self._data
1066
+
1067
+ def value_counts(self, dropna: bool = True) -> Series:
1068
+ """
1069
+ Returns a Series containing counts of each unique value.
1070
+
1071
+ Parameters
1072
+ ----------
1073
+ dropna : bool, default True
1074
+ Don't include counts of missing values.
1075
+
1076
+ Returns
1077
+ -------
1078
+ counts : Series
1079
+
1080
+ See Also
1081
+ --------
1082
+ Series.value_counts
1083
+ """
1084
+ from pandas import (
1085
+ Index,
1086
+ Series,
1087
+ )
1088
+ from pandas.arrays import IntegerArray
1089
+
1090
+ keys, value_counts, na_counter = algos.value_counts_arraylike(
1091
+ self._data, dropna=dropna, mask=self._mask
1092
+ )
1093
+ mask_index = np.zeros((len(value_counts),), dtype=np.bool_)
1094
+ mask = mask_index.copy()
1095
+
1096
+ if na_counter > 0:
1097
+ mask_index[-1] = True
1098
+
1099
+ arr = IntegerArray(value_counts, mask)
1100
+ index = Index(
1101
+ self.dtype.construct_array_type()(
1102
+ keys, mask_index # type: ignore[arg-type]
1103
+ )
1104
+ )
1105
+ return Series(arr, index=index, name="count", copy=False)
1106
+
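
`value_counts` above appends the NA count last (when ``dropna=False``) and returns nullable-integer counts indexed by a masked-array Index:

    import pandas as pd

    a = pd.array([1, 1, None], dtype="Int64")
    a.value_counts()              # 1 -> 2 (NA dropped)
    a.value_counts(dropna=False)  # 1 -> 2, <NA> -> 1; counts have Int64 dtype
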
1107
+ def _mode(self, dropna: bool = True) -> Self:
1108
+ if dropna:
1109
+ result = mode(self._data, dropna=dropna, mask=self._mask)
1110
+ res_mask = np.zeros(result.shape, dtype=np.bool_)
1111
+ else:
1112
+ result, res_mask = mode(self._data, dropna=dropna, mask=self._mask)
1113
+ result = type(self)(result, res_mask) # type: ignore[arg-type]
1114
+ return result[result.argsort()]
1115
+
1116
+ @doc(ExtensionArray.equals)
1117
+ def equals(self, other) -> bool:
1118
+ if type(self) != type(other):
1119
+ return False
1120
+ if other.dtype != self.dtype:
1121
+ return False
1122
+
1123
+ # GH#44382 if e.g. self[1] is np.nan and other[1] is pd.NA, we are NOT
1124
+ # equal.
1125
+ if not np.array_equal(self._mask, other._mask):
1126
+ return False
1127
+
1128
+ left = self._data[~self._mask]
1129
+ right = other._data[~other._mask]
1130
+ return array_equivalent(left, right, strict_nan=True, dtype_equal=True)
1131
+
1132
+ def _quantile(
1133
+ self, qs: npt.NDArray[np.float64], interpolation: str
1134
+ ) -> BaseMaskedArray:
1135
+ """
1136
+ Dispatch to quantile_with_mask, needed because we do not have
1137
+ _from_factorized.
1138
+
1139
+ Notes
1140
+ -----
1141
+ We assume that all impacted cases are 1D-only.
1142
+ """
1143
+ res = quantile_with_mask(
1144
+ self._data,
1145
+ mask=self._mask,
1146
+ # TODO(GH#40932): na_value_for_dtype(self.dtype.numpy_dtype)
1147
+ # instead of np.nan
1148
+ fill_value=np.nan,
1149
+ qs=qs,
1150
+ interpolation=interpolation,
1151
+ )
1152
+
1153
+ if self._hasna:
1154
+ # Our result mask is all-False unless we are all-NA, in which
1155
+ # case it is all-True.
1156
+ if self.ndim == 2:
1157
+ # I think this should be out_mask=self.isna().all(axis=1)
1158
+ # but am holding off until we have tests
1159
+ raise NotImplementedError
1160
+ if self.isna().all():
1161
+ out_mask = np.ones(res.shape, dtype=bool)
1162
+
1163
+ if is_integer_dtype(self.dtype):
1164
+ # We try to maintain int dtype if possible for not all-na case
1165
+ # as well
1166
+ res = np.zeros(res.shape, dtype=self.dtype.numpy_dtype)
1167
+ else:
1168
+ out_mask = np.zeros(res.shape, dtype=bool)
1169
+ else:
1170
+ out_mask = np.zeros(res.shape, dtype=bool)
1171
+ return self._maybe_mask_result(res, mask=out_mask)
1172
+
1173
+ # ------------------------------------------------------------------
1174
+ # Reductions
1175
+
1176
+ def _reduce(
1177
+ self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
1178
+ ):
1179
+ if name in {"any", "all", "min", "max", "sum", "prod", "mean", "var", "std"}:
1180
+ result = getattr(self, name)(skipna=skipna, **kwargs)
1181
+ else:
1182
+ # median, skew, kurt, sem
1183
+ data = self._data
1184
+ mask = self._mask
1185
+ op = getattr(nanops, f"nan{name}")
1186
+ axis = kwargs.pop("axis", None)
1187
+ result = op(data, axis=axis, skipna=skipna, mask=mask, **kwargs)
1188
+
1189
+ if keepdims:
1190
+ if isna(result):
1191
+ return self._wrap_na_result(name=name, axis=0, mask_size=(1,))
1192
+ else:
1193
+ result = result.reshape(1)
1194
+ mask = np.zeros(1, dtype=bool)
1195
+ return self._maybe_mask_result(result, mask)
1196
+
1197
+ if isna(result):
1198
+ return libmissing.NA
1199
+ else:
1200
+ return result
1201
+
1202
+ def _wrap_reduction_result(self, name: str, result, *, skipna, axis):
1203
+ if isinstance(result, np.ndarray):
1204
+ if skipna:
1205
+ # we only retain mask for all-NA rows/columns
1206
+ mask = self._mask.all(axis=axis)
1207
+ else:
1208
+ mask = self._mask.any(axis=axis)
1209
+
1210
+ return self._maybe_mask_result(result, mask)
1211
+ return result
1212
+
1213
+ def _wrap_na_result(self, *, name, axis, mask_size):
1214
+ mask = np.ones(mask_size, dtype=bool)
1215
+
1216
+ float_dtyp = "float32" if self.dtype == "Float32" else "float64"
1217
+ if name in ["mean", "median", "var", "std", "skew", "kurt"]:
1218
+ np_dtype = float_dtyp
1219
+ elif name in ["min", "max"] or self.dtype.itemsize == 8:
1220
+ np_dtype = self.dtype.numpy_dtype.name
1221
+ else:
1222
+ is_windows_or_32bit = is_platform_windows() or not IS64
1223
+ int_dtyp = "int32" if is_windows_or_32bit else "int64"
1224
+ uint_dtyp = "uint32" if is_windows_or_32bit else "uint64"
1225
+ np_dtype = {"b": int_dtyp, "i": int_dtyp, "u": uint_dtyp, "f": float_dtyp}[
1226
+ self.dtype.kind
1227
+ ]
1228
+
1229
+ value = np.array([1], dtype=np_dtype)
1230
+ return self._maybe_mask_result(value, mask=mask)
1231
+
1232
+ def _wrap_min_count_reduction_result(
1233
+ self, name: str, result, *, skipna, min_count, axis
1234
+ ):
1235
+ if min_count == 0 and isinstance(result, np.ndarray):
1236
+ return self._maybe_mask_result(result, np.zeros(result.shape, dtype=bool))
1237
+ return self._wrap_reduction_result(name, result, skipna=skipna, axis=axis)
1238
+
1239
+ def sum(
1240
+ self,
1241
+ *,
1242
+ skipna: bool = True,
1243
+ min_count: int = 0,
1244
+ axis: AxisInt | None = 0,
1245
+ **kwargs,
1246
+ ):
1247
+ nv.validate_sum((), kwargs)
1248
+
1249
+ result = masked_reductions.sum(
1250
+ self._data,
1251
+ self._mask,
1252
+ skipna=skipna,
1253
+ min_count=min_count,
1254
+ axis=axis,
1255
+ )
1256
+ return self._wrap_min_count_reduction_result(
1257
+ "sum", result, skipna=skipna, min_count=min_count, axis=axis
1258
+ )
1259
+
1260
+ def prod(
1261
+ self,
1262
+ *,
1263
+ skipna: bool = True,
1264
+ min_count: int = 0,
1265
+ axis: AxisInt | None = 0,
1266
+ **kwargs,
1267
+ ):
1268
+ nv.validate_prod((), kwargs)
1269
+
1270
+ result = masked_reductions.prod(
1271
+ self._data,
1272
+ self._mask,
1273
+ skipna=skipna,
1274
+ min_count=min_count,
1275
+ axis=axis,
1276
+ )
1277
+ return self._wrap_min_count_reduction_result(
1278
+ "prod", result, skipna=skipna, min_count=min_count, axis=axis
1279
+ )
1280
+
1281
+ def mean(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
1282
+ nv.validate_mean((), kwargs)
1283
+ result = masked_reductions.mean(
1284
+ self._data,
1285
+ self._mask,
1286
+ skipna=skipna,
1287
+ axis=axis,
1288
+ )
1289
+ return self._wrap_reduction_result("mean", result, skipna=skipna, axis=axis)
1290
+
1291
+ def var(
1292
+ self, *, skipna: bool = True, axis: AxisInt | None = 0, ddof: int = 1, **kwargs
1293
+ ):
1294
+ nv.validate_stat_ddof_func((), kwargs, fname="var")
1295
+ result = masked_reductions.var(
1296
+ self._data,
1297
+ self._mask,
1298
+ skipna=skipna,
1299
+ axis=axis,
1300
+ ddof=ddof,
1301
+ )
1302
+ return self._wrap_reduction_result("var", result, skipna=skipna, axis=axis)
1303
+
1304
+ def std(
1305
+ self, *, skipna: bool = True, axis: AxisInt | None = 0, ddof: int = 1, **kwargs
1306
+ ):
1307
+ nv.validate_stat_ddof_func((), kwargs, fname="std")
1308
+ result = masked_reductions.std(
1309
+ self._data,
1310
+ self._mask,
1311
+ skipna=skipna,
1312
+ axis=axis,
1313
+ ddof=ddof,
1314
+ )
1315
+ return self._wrap_reduction_result("std", result, skipna=skipna, axis=axis)
1316
+
1317
+ def min(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
1318
+ nv.validate_min((), kwargs)
1319
+ result = masked_reductions.min(
1320
+ self._data,
1321
+ self._mask,
1322
+ skipna=skipna,
1323
+ axis=axis,
1324
+ )
1325
+ return self._wrap_reduction_result("min", result, skipna=skipna, axis=axis)
1326
+
1327
+ def max(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
1328
+ nv.validate_max((), kwargs)
1329
+ result = masked_reductions.max(
1330
+ self._data,
1331
+ self._mask,
1332
+ skipna=skipna,
1333
+ axis=axis,
1334
+ )
1335
+ return self._wrap_reduction_result("max", result, skipna=skipna, axis=axis)
1336
+
1337
+ def map(self, mapper, na_action=None):
1338
+ return map_array(self.to_numpy(), mapper, na_action=na_action)
1339
+
1340
+ def any(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
1341
+ """
1342
+ Return whether any element is truthy.
1343
+
1344
+ Returns False unless there is at least one element that is truthy.
1345
+ By default, NAs are skipped. If ``skipna=False`` is specified and
1346
+ missing values are present, the same :ref:`Kleene logic <boolean.kleene>`
1347
+ as for logical operations is used.
1348
+
1349
+ .. versionchanged:: 1.4.0
1350
+
1351
+ Parameters
1352
+ ----------
1353
+ skipna : bool, default True
1354
+ Exclude NA values. If the entire array is NA and `skipna` is
1355
+ True, then the result will be False, as for an empty array.
1356
+ If `skipna` is False, the result will still be True if there is
1357
+ at least one element that is truthy, otherwise NA will be returned
1358
+ if there are NA's present.
1359
+ axis : int, optional, default 0
1360
+ **kwargs : any, default None
1361
+ Additional keywords have no effect but might be accepted for
1362
+ compatibility with NumPy.
1363
+
1364
+ Returns
1365
+ -------
1366
+ bool or :attr:`pandas.NA`
1367
+
1368
+ See Also
1369
+ --------
1370
+ numpy.any : Numpy version of this method.
1371
+ BaseMaskedArray.all : Return whether all elements are truthy.
1372
+
1373
+ Examples
1374
+ --------
1375
+ The result indicates whether any element is truthy (and by default
1376
+ skips NAs):
1377
+
1378
+ >>> pd.array([True, False, True]).any()
1379
+ True
1380
+ >>> pd.array([True, False, pd.NA]).any()
1381
+ True
1382
+ >>> pd.array([False, False, pd.NA]).any()
1383
+ False
1384
+ >>> pd.array([], dtype="boolean").any()
1385
+ False
1386
+ >>> pd.array([pd.NA], dtype="boolean").any()
1387
+ False
1388
+ >>> pd.array([pd.NA], dtype="Float64").any()
1389
+ False
1390
+
1391
+ With ``skipna=False``, the result can be NA if this is logically
1392
+ required (whether ``pd.NA`` is True or False influences the result):
1393
+
1394
+ >>> pd.array([True, False, pd.NA]).any(skipna=False)
1395
+ True
1396
+ >>> pd.array([1, 0, pd.NA]).any(skipna=False)
1397
+ True
1398
+ >>> pd.array([False, False, pd.NA]).any(skipna=False)
1399
+ <NA>
1400
+ >>> pd.array([0, 0, pd.NA]).any(skipna=False)
1401
+ <NA>
1402
+ """
1403
+ nv.validate_any((), kwargs)
1404
+
1405
+ values = self._data.copy()
1406
+ # error: Argument 3 to "putmask" has incompatible type "object";
1407
+ # expected "Union[_SupportsArray[dtype[Any]],
1408
+ # _NestedSequence[_SupportsArray[dtype[Any]]],
1409
+ # bool, int, float, complex, str, bytes,
1410
+ # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
1411
+ np.putmask(values, self._mask, self._falsey_value) # type: ignore[arg-type]
1412
+ result = values.any()
1413
+ if skipna:
1414
+ return result
1415
+ else:
1416
+ if result or len(self) == 0 or not self._mask.any():
1417
+ return result
1418
+ else:
1419
+ return self.dtype.na_value
1420
+
1421
+ def all(self, *, skipna: bool = True, axis: AxisInt | None = 0, **kwargs):
1422
+ """
1423
+ Return whether all elements are truthy.
1424
+
1425
+ Returns True unless there is at least one element that is falsey.
1426
+ By default, NAs are skipped. If ``skipna=False`` is specified and
1427
+ missing values are present, the same :ref:`Kleene logic <boolean.kleene>`
1428
+ as for logical operations is used.
1429
+
1430
+ .. versionchanged:: 1.4.0
1431
+
1432
+ Parameters
1433
+ ----------
1434
+ skipna : bool, default True
1435
+ Exclude NA values. If the entire array is NA and `skipna` is
1436
+ True, then the result will be True, as for an empty array.
1437
+ If `skipna` is False, the result will still be False if there is
1438
+ at least one element that is falsey, otherwise NA will be returned
1439
+ if there are NA's present.
1440
+ axis : int, optional, default 0
1441
+ **kwargs : any, default None
1442
+ Additional keywords have no effect but might be accepted for
1443
+ compatibility with NumPy.
1444
+
1445
+ Returns
1446
+ -------
1447
+ bool or :attr:`pandas.NA`
1448
+
1449
+ See Also
1450
+ --------
1451
+ numpy.all : Numpy version of this method.
1452
+ BooleanArray.any : Return whether any element is truthy.
1453
+
1454
+ Examples
1455
+ --------
1456
+ The result indicates whether all elements are truthy (and by default
1457
+ skips NAs):
1458
+
1459
+ >>> pd.array([True, True, pd.NA]).all()
1460
+ True
1461
+ >>> pd.array([1, 1, pd.NA]).all()
1462
+ True
1463
+ >>> pd.array([True, False, pd.NA]).all()
1464
+ False
1465
+ >>> pd.array([], dtype="boolean").all()
1466
+ True
1467
+ >>> pd.array([pd.NA], dtype="boolean").all()
1468
+ True
1469
+ >>> pd.array([pd.NA], dtype="Float64").all()
1470
+ True
1471
+
1472
+ With ``skipna=False``, the result can be NA if this is logically
1473
+ required (whether ``pd.NA`` is True or False influences the result):
1474
+
1475
+ >>> pd.array([True, True, pd.NA]).all(skipna=False)
1476
+ <NA>
1477
+ >>> pd.array([1, 1, pd.NA]).all(skipna=False)
1478
+ <NA>
1479
+ >>> pd.array([True, False, pd.NA]).all(skipna=False)
1480
+ False
1481
+ >>> pd.array([1, 0, pd.NA]).all(skipna=False)
1482
+ False
1483
+ """
1484
+ nv.validate_all((), kwargs)
1485
+
1486
+ values = self._data.copy()
1487
+ # error: Argument 3 to "putmask" has incompatible type "object";
1488
+ # expected "Union[_SupportsArray[dtype[Any]],
1489
+ # _NestedSequence[_SupportsArray[dtype[Any]]],
1490
+ # bool, int, float, complex, str, bytes,
1491
+ # _NestedSequence[Union[bool, int, float, complex, str, bytes]]]"
1492
+ np.putmask(values, self._mask, self._truthy_value) # type: ignore[arg-type]
1493
+ result = values.all(axis=axis)
1494
+
1495
+ if skipna:
1496
+ return result
1497
+ else:
1498
+ if not result or len(self) == 0 or not self._mask.any():
1499
+ return result
1500
+ else:
1501
+ return self.dtype.na_value
1502
+
1503
+ def interpolate(
1504
+ self,
1505
+ *,
1506
+ method: InterpolateOptions,
1507
+ axis: int,
1508
+ index,
1509
+ limit,
1510
+ limit_direction,
1511
+ limit_area,
1512
+ copy: bool,
1513
+ **kwargs,
1514
+ ) -> FloatingArray:
1515
+ """
1516
+ See NDFrame.interpolate.__doc__.
1517
+ """
1518
+ # NB: we return type(self) even if copy=False
1519
+ if self.dtype.kind == "f":
1520
+ if copy:
1521
+ data = self._data.copy()
1522
+ mask = self._mask.copy()
1523
+ else:
1524
+ data = self._data
1525
+ mask = self._mask
1526
+ elif self.dtype.kind in "iu":
1527
+ copy = True
1528
+ data = self._data.astype("f8")
1529
+ mask = self._mask.copy()
1530
+ else:
1531
+ raise NotImplementedError(
1532
+ f"interpolate is not implemented for dtype={self.dtype}"
1533
+ )
1534
+
1535
+ missing.interpolate_2d_inplace(
1536
+ data,
1537
+ method=method,
1538
+ axis=0,
1539
+ index=index,
1540
+ limit=limit,
1541
+ limit_direction=limit_direction,
1542
+ limit_area=limit_area,
1543
+ mask=mask,
1544
+ **kwargs,
1545
+ )
1546
+ if not copy:
1547
+ return self # type: ignore[return-value]
1548
+ if self.dtype.kind == "f":
1549
+ return type(self)._simple_new(data, mask) # type: ignore[return-value]
1550
+ else:
1551
+ from pandas.core.arrays import FloatingArray
1552
+
1553
+ return FloatingArray._simple_new(data, mask)
1554
+
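
`interpolate` above only works in float space: integer masked arrays are copied to float64 first and come back as a FloatingArray. A hedged sketch via the public Series API:

    import pandas as pd

    s = pd.Series([1, None, 3], dtype="Int64")
    s.interpolate()  # dtype Float64, values [1.0, 2.0, 3.0]
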
1555
+ def _accumulate(
1556
+ self, name: str, *, skipna: bool = True, **kwargs
1557
+ ) -> BaseMaskedArray:
1558
+ data = self._data
1559
+ mask = self._mask
1560
+
1561
+ op = getattr(masked_accumulations, name)
1562
+ data, mask = op(data, mask, skipna=skipna, **kwargs)
1563
+
1564
+ return self._simple_new(data, mask)
1565
+
1566
+ # ------------------------------------------------------------------
1567
+ # GroupBy Methods
1568
+
1569
+ def _groupby_op(
1570
+ self,
1571
+ *,
1572
+ how: str,
1573
+ has_dropped_na: bool,
1574
+ min_count: int,
1575
+ ngroups: int,
1576
+ ids: npt.NDArray[np.intp],
1577
+ **kwargs,
1578
+ ):
1579
+ from pandas.core.groupby.ops import WrappedCythonOp
1580
+
1581
+ kind = WrappedCythonOp.get_kind_from_how(how)
1582
+ op = WrappedCythonOp(how=how, kind=kind, has_dropped_na=has_dropped_na)
1583
+
1584
+ # libgroupby functions are responsible for NOT altering mask
1585
+ mask = self._mask
1586
+ if op.kind != "aggregate":
1587
+ result_mask = mask.copy()
1588
+ else:
1589
+ result_mask = np.zeros(ngroups, dtype=bool)
1590
+
1591
+ if how == "rank" and kwargs.get("na_option") in ["top", "bottom"]:
1592
+ result_mask[:] = False
1593
+
1594
+ res_values = op._cython_op_ndim_compat(
1595
+ self._data,
1596
+ min_count=min_count,
1597
+ ngroups=ngroups,
1598
+ comp_ids=ids,
1599
+ mask=mask,
1600
+ result_mask=result_mask,
1601
+ **kwargs,
1602
+ )
1603
+
1604
+ if op.how == "ohlc":
1605
+ arity = op._cython_arity.get(op.how, 1)
1606
+ result_mask = np.tile(result_mask, (arity, 1)).T
1607
+
1608
+ if op.how in ["idxmin", "idxmax"]:
1609
+ # Result values are indexes to take, keep as ndarray
1610
+ return res_values
1611
+ else:
1612
+ # res_values should already have the correct dtype, we just need to
1613
+ # wrap in a MaskedArray
1614
+ return self._maybe_mask_result(res_values, result_mask)
1615
+
1616
+
1617
+ def transpose_homogeneous_masked_arrays(
1618
+ masked_arrays: Sequence[BaseMaskedArray],
1619
+ ) -> list[BaseMaskedArray]:
1620
+ """Transpose masked arrays in a list, but faster.
1621
+
1622
+ Input should be a list of 1-dim masked arrays of equal length that all have the
1623
+ same dtype. The caller is responsible for ensuring validity of input data.
1624
+ """
1625
+ masked_arrays = list(masked_arrays)
1626
+ dtype = masked_arrays[0].dtype
1627
+
1628
+ values = [arr._data.reshape(1, -1) for arr in masked_arrays]
1629
+ transposed_values = np.concatenate(
1630
+ values,
1631
+ axis=0,
1632
+ out=np.empty(
1633
+ (len(masked_arrays), len(masked_arrays[0])),
1634
+ order="F",
1635
+ dtype=dtype.numpy_dtype,
1636
+ ),
1637
+ )
1638
+
1639
+ masks = [arr._mask.reshape(1, -1) for arr in masked_arrays]
1640
+ transposed_masks = np.concatenate(
1641
+ masks, axis=0, out=np.empty_like(transposed_values, dtype=bool)
1642
+ )
1643
+
1644
+ arr_type = dtype.construct_array_type()
1645
+ transposed_arrays: list[BaseMaskedArray] = []
1646
+ for i in range(transposed_values.shape[1]):
1647
+ transposed_arr = arr_type(transposed_values[:, i], mask=transposed_masks[:, i])
1648
+ transposed_arrays.append(transposed_arr)
1649
+
1650
+ return transposed_arrays
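
`transpose_homogeneous_masked_arrays` is an internal helper, but its contract is easy to illustrate: row ``i`` of the output collects element ``i`` of every input column. A sketch, assuming the function is imported from ``pandas.core.arrays.masked``:

    import pandas as pd
    from pandas.core.arrays.masked import transpose_homogeneous_masked_arrays

    cols = [pd.array([1, 2], dtype="Int64"), pd.array([3, None], dtype="Int64")]
    rows = transpose_homogeneous_masked_arrays(cols)
    # rows[0]: [1, 3]; rows[1]: [2, <NA>]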
venv/lib/python3.10/site-packages/pandas/core/arrays/numpy_.py ADDED
@@ -0,0 +1,563 @@
from __future__ import annotations

from typing import (
    TYPE_CHECKING,
    Literal,
)

import numpy as np

from pandas._libs import lib
from pandas._libs.tslibs import is_supported_dtype
from pandas.compat.numpy import function as nv

from pandas.core.dtypes.astype import astype_array
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
from pandas.core.dtypes.common import pandas_dtype
from pandas.core.dtypes.dtypes import NumpyEADtype
from pandas.core.dtypes.missing import isna

from pandas.core import (
    arraylike,
    missing,
    nanops,
    ops,
)
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.construction import ensure_wrapped_if_datetimelike
from pandas.core.strings.object_array import ObjectStringArrayMixin

if TYPE_CHECKING:
    from pandas._typing import (
        AxisInt,
        Dtype,
        FillnaOptions,
        InterpolateOptions,
        NpDtype,
        Scalar,
        Self,
        npt,
    )

    from pandas import Index


# error: Definition of "_concat_same_type" in base class "NDArrayBacked" is
# incompatible with definition in base class "ExtensionArray"
class NumpyExtensionArray(  # type: ignore[misc]
    OpsMixin,
    NDArrayBackedExtensionArray,
    ObjectStringArrayMixin,
):
    """
    A pandas ExtensionArray for NumPy data.

    This is mostly for internal compatibility, and is not especially
    useful on its own.

    Parameters
    ----------
    values : ndarray
        The NumPy ndarray to wrap. Must be 1-dimensional.
    copy : bool, default False
        Whether to copy `values`.

    Attributes
    ----------
    None

    Methods
    -------
    None

    Examples
    --------
    >>> pd.arrays.NumpyExtensionArray(np.array([0, 1, 2, 3]))
    <NumpyExtensionArray>
    [0, 1, 2, 3]
    Length: 4, dtype: int64
    """

    # If you're wondering why pd.Series(cls) doesn't put the array in an
    # ExtensionBlock, search for `ABCNumpyExtensionArray`. We check for
    # that _typ to ensure that users don't unnecessarily use EAs inside
    # pandas internals, which turns off things like block consolidation.
    _typ = "npy_extension"
    __array_priority__ = 1000
    _ndarray: np.ndarray
    _dtype: NumpyEADtype
    _internal_fill_value = np.nan

    # ------------------------------------------------------------------------
    # Constructors

    def __init__(
        self, values: np.ndarray | NumpyExtensionArray, copy: bool = False
    ) -> None:
        if isinstance(values, type(self)):
            values = values._ndarray
        if not isinstance(values, np.ndarray):
            raise ValueError(
                f"'values' must be a NumPy array, not {type(values).__name__}"
            )

        if values.ndim == 0:
            # Technically we support 2, but do not advertise that fact.
            raise ValueError("NumpyExtensionArray must be 1-dimensional.")

        if copy:
            values = values.copy()

        dtype = NumpyEADtype(values.dtype)
        super().__init__(values, dtype)

    @classmethod
    def _from_sequence(
        cls, scalars, *, dtype: Dtype | None = None, copy: bool = False
    ) -> NumpyExtensionArray:
        if isinstance(dtype, NumpyEADtype):
            dtype = dtype._dtype

        # error: Argument "dtype" to "asarray" has incompatible type
        # "Union[ExtensionDtype, str, dtype[Any], dtype[floating[_64Bit]], Type[object],
        # None]"; expected "Union[dtype[Any], None, type, _SupportsDType, str,
        # Union[Tuple[Any, int], Tuple[Any, Union[int, Sequence[int]]], List[Any],
        # _DTypeDict, Tuple[Any, Any]]]"
        result = np.asarray(scalars, dtype=dtype)  # type: ignore[arg-type]
        if (
            result.ndim > 1
            and not hasattr(scalars, "dtype")
            and (dtype is None or dtype == object)
        ):
            # e.g. list-of-tuples
            result = construct_1d_object_array_from_listlike(scalars)

        if copy and result is scalars:
            result = result.copy()
        return cls(result)

    def _from_backing_data(self, arr: np.ndarray) -> NumpyExtensionArray:
        return type(self)(arr)

    # ------------------------------------------------------------------------
    # Data

    @property
    def dtype(self) -> NumpyEADtype:
        return self._dtype

    # ------------------------------------------------------------------------
    # NumPy Array Interface

    def __array__(
        self, dtype: NpDtype | None = None, copy: bool | None = None
    ) -> np.ndarray:
        return np.asarray(self._ndarray, dtype=dtype)

    def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
        # Lightly modified version of
        # https://numpy.org/doc/stable/reference/generated/numpy.lib.mixins.NDArrayOperatorsMixin.html
        # The primary modification is not boxing scalar return values
        # in NumpyExtensionArray, since pandas' ExtensionArrays are 1-d.
        out = kwargs.get("out", ())

        result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
            self, ufunc, method, *inputs, **kwargs
        )
        if result is not NotImplemented:
            return result

        if "out" in kwargs:
            # e.g. test_ufunc_unary
            return arraylike.dispatch_ufunc_with_out(
                self, ufunc, method, *inputs, **kwargs
            )

        if method == "reduce":
            result = arraylike.dispatch_reduction_ufunc(
                self, ufunc, method, *inputs, **kwargs
            )
            if result is not NotImplemented:
                # e.g. tests.series.test_ufunc.TestNumpyReductions
                return result

        # Defer to the implementation of the ufunc on unwrapped values.
        inputs = tuple(
            x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in inputs
        )
        if out:
            kwargs["out"] = tuple(
                x._ndarray if isinstance(x, NumpyExtensionArray) else x for x in out
            )
        result = getattr(ufunc, method)(*inputs, **kwargs)

        if ufunc.nout > 1:
            # multiple return values; re-box array-like results
            return tuple(type(self)(x) for x in result)
        elif method == "at":
            # no return value
            return None
        elif method == "reduce":
            if isinstance(result, np.ndarray):
                # e.g. test_np_reduce_2d
                return type(self)(result)

            # e.g. test_np_max_nested_tuples
            return result
        else:
            # one return value; re-box array-like results
            return type(self)(result)

    # ------------------------------------------------------------------------
    # Pandas ExtensionArray Interface

    def astype(self, dtype, copy: bool = True):
        dtype = pandas_dtype(dtype)

        if dtype == self.dtype:
            if copy:
                return self.copy()
            return self

        result = astype_array(self._ndarray, dtype=dtype, copy=copy)
        return result

    def isna(self) -> np.ndarray:
        return isna(self._ndarray)

    def _validate_scalar(self, fill_value):
        if fill_value is None:
            # Primarily for subclasses
            fill_value = self.dtype.na_value
        return fill_value

    def _values_for_factorize(self) -> tuple[np.ndarray, float | None]:
        if self.dtype.kind in "iub":
            fv = None
        else:
            fv = np.nan
        return self._ndarray, fv

    # Base EA class (and all other EA classes) don't have limit_area keyword
    # This can be removed here as well when the interpolate ffill/bfill method
    # deprecation is enforced
    def _pad_or_backfill(
        self,
        *,
        method: FillnaOptions,
        limit: int | None = None,
        limit_area: Literal["inside", "outside"] | None = None,
        copy: bool = True,
    ) -> Self:
        """
        ffill or bfill along axis=0.
        """
        if copy:
            out_data = self._ndarray.copy()
        else:
            out_data = self._ndarray

        meth = missing.clean_fill_method(method)
        missing.pad_or_backfill_inplace(
            out_data.T,
            method=meth,
            axis=0,
            limit=limit,
            limit_area=limit_area,
        )

        if not copy:
            return self
        return type(self)._simple_new(out_data, dtype=self.dtype)

    def interpolate(
        self,
        *,
        method: InterpolateOptions,
        axis: int,
        index: Index,
        limit,
        limit_direction,
        limit_area,
        copy: bool,
        **kwargs,
    ) -> Self:
        """
        See NDFrame.interpolate.__doc__.
        """
        # NB: we return type(self) even if copy=False
        if not copy:
            out_data = self._ndarray
        else:
            out_data = self._ndarray.copy()

        # TODO: assert we have floating dtype?
        missing.interpolate_2d_inplace(
            out_data,
            method=method,
            axis=axis,
            index=index,
            limit=limit,
            limit_direction=limit_direction,
            limit_area=limit_area,
            **kwargs,
        )
        if not copy:
            return self
        return type(self)._simple_new(out_data, dtype=self.dtype)

    # ------------------------------------------------------------------------
    # Reductions

    def any(
        self,
        *,
        axis: AxisInt | None = None,
        out=None,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_any((), {"out": out, "keepdims": keepdims})
        result = nanops.nanany(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    def all(
        self,
        *,
        axis: AxisInt | None = None,
        out=None,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_all((), {"out": out, "keepdims": keepdims})
        result = nanops.nanall(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    def min(
        self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs
    ) -> Scalar:
        nv.validate_min((), kwargs)
        result = nanops.nanmin(
            values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
        )
        return self._wrap_reduction_result(axis, result)

    def max(
        self, *, axis: AxisInt | None = None, skipna: bool = True, **kwargs
    ) -> Scalar:
        nv.validate_max((), kwargs)
        result = nanops.nanmax(
            values=self._ndarray, axis=axis, mask=self.isna(), skipna=skipna
        )
        return self._wrap_reduction_result(axis, result)

    def sum(
        self,
        *,
        axis: AxisInt | None = None,
        skipna: bool = True,
        min_count: int = 0,
        **kwargs,
    ) -> Scalar:
        nv.validate_sum((), kwargs)
        result = nanops.nansum(
            self._ndarray, axis=axis, skipna=skipna, min_count=min_count
        )
        return self._wrap_reduction_result(axis, result)

    def prod(
        self,
        *,
        axis: AxisInt | None = None,
        skipna: bool = True,
        min_count: int = 0,
        **kwargs,
    ) -> Scalar:
        nv.validate_prod((), kwargs)
        result = nanops.nanprod(
            self._ndarray, axis=axis, skipna=skipna, min_count=min_count
        )
        return self._wrap_reduction_result(axis, result)

    def mean(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_mean((), {"dtype": dtype, "out": out, "keepdims": keepdims})
        result = nanops.nanmean(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    def median(
        self,
        *,
        axis: AxisInt | None = None,
        out=None,
        overwrite_input: bool = False,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_median(
            (), {"out": out, "overwrite_input": overwrite_input, "keepdims": keepdims}
        )
        result = nanops.nanmedian(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    def std(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        ddof: int = 1,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_stat_ddof_func(
            (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="std"
        )
        result = nanops.nanstd(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
        return self._wrap_reduction_result(axis, result)

    def var(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        ddof: int = 1,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_stat_ddof_func(
            (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="var"
        )
        result = nanops.nanvar(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
        return self._wrap_reduction_result(axis, result)

    def sem(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        ddof: int = 1,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_stat_ddof_func(
            (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="sem"
        )
        result = nanops.nansem(self._ndarray, axis=axis, skipna=skipna, ddof=ddof)
        return self._wrap_reduction_result(axis, result)

    def kurt(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_stat_ddof_func(
            (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="kurt"
        )
        result = nanops.nankurt(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    def skew(
        self,
        *,
        axis: AxisInt | None = None,
        dtype: NpDtype | None = None,
        out=None,
        keepdims: bool = False,
        skipna: bool = True,
    ):
        nv.validate_stat_ddof_func(
            (), {"dtype": dtype, "out": out, "keepdims": keepdims}, fname="skew"
        )
        result = nanops.nanskew(self._ndarray, axis=axis, skipna=skipna)
        return self._wrap_reduction_result(axis, result)

    # ------------------------------------------------------------------------
    # Additional Methods

    def to_numpy(
        self,
        dtype: npt.DTypeLike | None = None,
        copy: bool = False,
        na_value: object = lib.no_default,
    ) -> np.ndarray:
        mask = self.isna()
        if na_value is not lib.no_default and mask.any():
            result = self._ndarray.copy()
            result[mask] = na_value
        else:
            result = self._ndarray

        result = np.asarray(result, dtype=dtype)

        if copy and result is self._ndarray:
            result = result.copy()

        return result

    # ------------------------------------------------------------------------
    # Ops

    def __invert__(self) -> NumpyExtensionArray:
        return type(self)(~self._ndarray)

    def __neg__(self) -> NumpyExtensionArray:
        return type(self)(-self._ndarray)

    def __pos__(self) -> NumpyExtensionArray:
        return type(self)(+self._ndarray)

    def __abs__(self) -> NumpyExtensionArray:
        return type(self)(abs(self._ndarray))

    def _cmp_method(self, other, op):
        if isinstance(other, NumpyExtensionArray):
            other = other._ndarray

        other = ops.maybe_prepare_scalar_for_op(other, (len(self),))
        pd_op = ops.get_array_op(op)
        other = ensure_wrapped_if_datetimelike(other)
        result = pd_op(self._ndarray, other)

        if op is divmod or op is ops.rdivmod:
            a, b = result
            if isinstance(a, np.ndarray):
                # for e.g. op vs TimedeltaArray, we may already
                # have an ExtensionArray, in which case we do not wrap
                return self._wrap_ndarray_result(a), self._wrap_ndarray_result(b)
            return a, b

        if isinstance(result, np.ndarray):
            # for e.g. multiplication vs TimedeltaArray, we may already
            # have an ExtensionArray, in which case we do not wrap
            return self._wrap_ndarray_result(result)
        return result

    _arith_method = _cmp_method

    def _wrap_ndarray_result(self, result: np.ndarray):
        # If we have timedelta64[ns] result, return a TimedeltaArray instead
        # of a NumpyExtensionArray
        if result.dtype.kind == "m" and is_supported_dtype(result.dtype):
            from pandas.core.arrays import TimedeltaArray

            return TimedeltaArray._simple_new(result, dtype=result.dtype)
        return type(self)(result)

    # ------------------------------------------------------------------------
    # String methods interface
    _str_na_value = np.nan
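Editor's note: the file above is mostly interface plumbing. A minimal usage sketch (illustrative, not part of the vendored file; assumes a pandas build matching this source) exercising the `__array_ufunc__` and `to_numpy` paths defined above:

    import numpy as np
    import pandas as pd

    # Wrap a plain ndarray (stored as-is; no copy by default).
    arr = pd.arrays.NumpyExtensionArray(np.array([1.0, np.nan, 3.0]))

    # Ufuncs defer to the unwrapped ndarray and re-box array results,
    # so np.exp returns another NumpyExtensionArray.
    boxed = np.exp(arr)

    # Reductions go through nanops, so NaN is skipped by default.
    total = arr.sum()                   # 4.0

    # to_numpy can swap NaN for an explicit fill value.
    dense = arr.to_numpy(na_value=0.0)  # array([1., 0., 3.])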
venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/__init__.py ADDED
@@ -0,0 +1,19 @@
from pandas.core.arrays.sparse.accessor import (
    SparseAccessor,
    SparseFrameAccessor,
)
from pandas.core.arrays.sparse.array import (
    BlockIndex,
    IntIndex,
    SparseArray,
    make_sparse_index,
)

__all__ = [
    "BlockIndex",
    "IntIndex",
    "make_sparse_index",
    "SparseAccessor",
    "SparseArray",
    "SparseFrameAccessor",
]
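Editor's note: a small sketch (illustrative, not part of the package) of what these re-exported names provide, using the SparseArray API from the sibling array.py below:

    from pandas.core.arrays.sparse import BlockIndex, IntIndex, SparseArray

    # kind="integer" stores one position per non-fill element (IntIndex);
    # kind="block" stores (start, length) runs instead (BlockIndex).
    arr_int = SparseArray([0, 0, 1, 2], kind="integer")
    arr_blk = SparseArray([0, 0, 1, 2], kind="block")

    assert isinstance(arr_int.sp_index, IntIndex)
    assert isinstance(arr_blk.sp_index, BlockIndex)
    assert arr_int.npoints == 2  # only the two non-zero values are stored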
venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (482 Bytes).
venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/accessor.cpython-310.pyc ADDED
Binary file (13.2 kB).
venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/array.cpython-310.pyc ADDED
Binary file (44.5 kB).
venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/__pycache__/scipy_sparse.cpython-310.pyc ADDED
Binary file (6.43 kB).
venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/accessor.py ADDED
@@ -0,0 +1,414 @@
"""Sparse accessor"""
from __future__ import annotations

from typing import TYPE_CHECKING

import numpy as np

from pandas.compat._optional import import_optional_dependency

from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.dtypes import SparseDtype

from pandas.core.accessor import (
    PandasDelegate,
    delegate_names,
)
from pandas.core.arrays.sparse.array import SparseArray

if TYPE_CHECKING:
    from pandas import (
        DataFrame,
        Series,
    )


class BaseAccessor:
    _validation_msg = "Can only use the '.sparse' accessor with Sparse data."

    def __init__(self, data=None) -> None:
        self._parent = data
        self._validate(data)

    def _validate(self, data):
        raise NotImplementedError


@delegate_names(
    SparseArray, ["npoints", "density", "fill_value", "sp_values"], typ="property"
)
class SparseAccessor(BaseAccessor, PandasDelegate):
    """
    Accessor for sparse data stored in a Series.

    Examples
    --------
    >>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]")
    >>> ser.sparse.density
    0.6
    >>> ser.sparse.sp_values
    array([2, 2, 2])
    """

    def _validate(self, data):
        if not isinstance(data.dtype, SparseDtype):
            raise AttributeError(self._validation_msg)

    def _delegate_property_get(self, name: str, *args, **kwargs):
        return getattr(self._parent.array, name)

    def _delegate_method(self, name: str, *args, **kwargs):
        if name == "from_coo":
            return self.from_coo(*args, **kwargs)
        elif name == "to_coo":
            return self.to_coo(*args, **kwargs)
        else:
            raise ValueError

    @classmethod
    def from_coo(cls, A, dense_index: bool = False) -> Series:
        """
        Create a Series with sparse values from a scipy.sparse.coo_matrix.

        Parameters
        ----------
        A : scipy.sparse.coo_matrix
        dense_index : bool, default False
            If False (default), the index consists of only the
            coords of the non-null entries of the original coo_matrix.
            If True, the index consists of the full sorted
            (row, col) coordinates of the coo_matrix.

        Returns
        -------
        s : Series
            A Series with sparse values.

        Examples
        --------
        >>> from scipy import sparse

        >>> A = sparse.coo_matrix(
        ...     ([3.0, 1.0, 2.0], ([1, 0, 0], [0, 2, 3])), shape=(3, 4)
        ... )
        >>> A
        <3x4 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in COOrdinate format>

        >>> A.todense()
        matrix([[0., 0., 1., 2.],
                [3., 0., 0., 0.],
                [0., 0., 0., 0.]])

        >>> ss = pd.Series.sparse.from_coo(A)
        >>> ss
        0  2    1.0
           3    2.0
        1  0    3.0
        dtype: Sparse[float64, nan]
        """
        from pandas import Series
        from pandas.core.arrays.sparse.scipy_sparse import coo_to_sparse_series

        result = coo_to_sparse_series(A, dense_index=dense_index)
        result = Series(result.array, index=result.index, copy=False)

        return result

    def to_coo(self, row_levels=(0,), column_levels=(1,), sort_labels: bool = False):
        """
        Create a scipy.sparse.coo_matrix from a Series with MultiIndex.

        Use row_levels and column_levels to determine the row and column
        coordinates respectively. row_levels and column_levels are the names
        (labels) or numbers of the levels. {row_levels, column_levels} must be
        a partition of the MultiIndex level names (or numbers).

        Parameters
        ----------
        row_levels : tuple/list
        column_levels : tuple/list
        sort_labels : bool, default False
            Sort the row and column labels before forming the sparse matrix.
            When `row_levels` and/or `column_levels` refer to a single level,
            set to `True` for a faster execution.

        Returns
        -------
        y : scipy.sparse.coo_matrix
        rows : list (row labels)
        columns : list (column labels)

        Examples
        --------
        >>> s = pd.Series([3.0, np.nan, 1.0, 3.0, np.nan, np.nan])
        >>> s.index = pd.MultiIndex.from_tuples(
        ...     [
        ...         (1, 2, "a", 0),
        ...         (1, 2, "a", 1),
        ...         (1, 1, "b", 0),
        ...         (1, 1, "b", 1),
        ...         (2, 1, "b", 0),
        ...         (2, 1, "b", 1)
        ...     ],
        ...     names=["A", "B", "C", "D"],
        ... )
        >>> s
        A  B  C  D
        1  2  a  0    3.0
                 1    NaN
           1  b  0    1.0
                 1    3.0
        2  1  b  0    NaN
                 1    NaN
        dtype: float64

        >>> ss = s.astype("Sparse")
        >>> ss
        A  B  C  D
        1  2  a  0    3.0
                 1    NaN
           1  b  0    1.0
                 1    3.0
        2  1  b  0    NaN
                 1    NaN
        dtype: Sparse[float64, nan]

        >>> A, rows, columns = ss.sparse.to_coo(
        ...     row_levels=["A", "B"], column_levels=["C", "D"], sort_labels=True
        ... )
        >>> A
        <3x4 sparse matrix of type '<class 'numpy.float64'>'
        with 3 stored elements in COOrdinate format>
        >>> A.todense()
        matrix([[0., 0., 1., 3.],
                [3., 0., 0., 0.],
                [0., 0., 0., 0.]])

        >>> rows
        [(1, 1), (1, 2), (2, 1)]
        >>> columns
        [('a', 0), ('a', 1), ('b', 0), ('b', 1)]
        """
        from pandas.core.arrays.sparse.scipy_sparse import sparse_series_to_coo

        A, rows, columns = sparse_series_to_coo(
            self._parent, row_levels, column_levels, sort_labels=sort_labels
        )
        return A, rows, columns

    def to_dense(self) -> Series:
        """
        Convert a Series from sparse values to dense.

        Returns
        -------
        Series:
            A Series with the same values, stored as a dense array.

        Examples
        --------
        >>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
        >>> series
        0    0
        1    1
        2    0
        dtype: Sparse[int64, 0]

        >>> series.sparse.to_dense()
        0    0
        1    1
        2    0
        dtype: int64
        """
        from pandas import Series

        return Series(
            self._parent.array.to_dense(),
            index=self._parent.index,
            name=self._parent.name,
            copy=False,
        )


class SparseFrameAccessor(BaseAccessor, PandasDelegate):
    """
    DataFrame accessor for sparse data.

    Examples
    --------
    >>> df = pd.DataFrame({"a": [1, 2, 0, 0],
    ...                    "b": [3, 0, 0, 4]}, dtype="Sparse[int]")
    >>> df.sparse.density
    0.5
    """

    def _validate(self, data):
        dtypes = data.dtypes
        if not all(isinstance(t, SparseDtype) for t in dtypes):
            raise AttributeError(self._validation_msg)

    @classmethod
    def from_spmatrix(cls, data, index=None, columns=None) -> DataFrame:
        """
        Create a new DataFrame from a scipy sparse matrix.

        Parameters
        ----------
        data : scipy.sparse.spmatrix
            Must be convertible to csc format.
        index, columns : Index, optional
            Row and column labels to use for the resulting DataFrame.
            Defaults to a RangeIndex.

        Returns
        -------
        DataFrame
            Each column of the DataFrame is stored as a
            :class:`arrays.SparseArray`.

        Examples
        --------
        >>> import scipy.sparse
        >>> mat = scipy.sparse.eye(3, dtype=float)
        >>> pd.DataFrame.sparse.from_spmatrix(mat)
             0    1    2
        0  1.0    0    0
        1    0  1.0    0
        2    0    0  1.0
        """
        from pandas._libs.sparse import IntIndex

        from pandas import DataFrame

        data = data.tocsc()
        index, columns = cls._prep_index(data, index, columns)
        n_rows, n_columns = data.shape
        # We need to make sure indices are sorted, as we create
        # IntIndex with no input validation (i.e. check_integrity=False).
        # Indices may already be sorted in scipy, in which case this adds
        # a small overhead.
        data.sort_indices()
        indices = data.indices
        indptr = data.indptr
        array_data = data.data
        dtype = SparseDtype(array_data.dtype, 0)
        arrays = []
        for i in range(n_columns):
            sl = slice(indptr[i], indptr[i + 1])
            idx = IntIndex(n_rows, indices[sl], check_integrity=False)
            arr = SparseArray._simple_new(array_data[sl], idx, dtype)
            arrays.append(arr)
        return DataFrame._from_arrays(
            arrays, columns=columns, index=index, verify_integrity=False
        )

    def to_dense(self) -> DataFrame:
        """
        Convert a DataFrame with sparse values to dense.

        Returns
        -------
        DataFrame
            A DataFrame with the same values stored as dense arrays.

        Examples
        --------
        >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0])})
        >>> df.sparse.to_dense()
           A
        0  0
        1  1
        2  0
        """
        from pandas import DataFrame

        data = {k: v.array.to_dense() for k, v in self._parent.items()}
        return DataFrame(data, index=self._parent.index, columns=self._parent.columns)

    def to_coo(self):
        """
        Return the contents of the frame as a sparse SciPy COO matrix.

        Returns
        -------
        scipy.sparse.spmatrix
            If the caller is heterogeneous and contains booleans or objects,
            the result will be of dtype=object. See Notes.

        Notes
        -----
        The dtype will be the lowest-common-denominator type (implicit
        upcasting); that is to say if the dtypes (even of numeric types)
        are mixed, the one that accommodates all will be chosen.

        e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. By numpy.find_common_type convention, mixing int64 and
        uint64 will result in a float64 dtype.

        Examples
        --------
        >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])})
        >>> df.sparse.to_coo()
        <4x1 sparse matrix of type '<class 'numpy.int64'>'
        with 2 stored elements in COOrdinate format>
        """
        import_optional_dependency("scipy")
        from scipy.sparse import coo_matrix

        dtype = find_common_type(self._parent.dtypes.to_list())
        if isinstance(dtype, SparseDtype):
            dtype = dtype.subtype

        cols, rows, data = [], [], []
        for col, (_, ser) in enumerate(self._parent.items()):
            sp_arr = ser.array
            if sp_arr.fill_value != 0:
                raise ValueError("fill value must be 0 when converting to COO matrix")

            row = sp_arr.sp_index.indices
            cols.append(np.repeat(col, len(row)))
            rows.append(row)
            data.append(sp_arr.sp_values.astype(dtype, copy=False))

        cols = np.concatenate(cols)
        rows = np.concatenate(rows)
        data = np.concatenate(data)
        return coo_matrix((data, (rows, cols)), shape=self._parent.shape)

    @property
    def density(self) -> float:
        """
        Ratio of non-sparse points to total (dense) data points.

        Examples
        --------
        >>> df = pd.DataFrame({"A": pd.arrays.SparseArray([0, 1, 0, 1])})
        >>> df.sparse.density
        0.5
        """
        tmp = np.mean([column.array.density for _, column in self._parent.items()])
        return tmp

    @staticmethod
    def _prep_index(data, index, columns):
        from pandas.core.indexes.api import (
            default_index,
            ensure_index,
        )

        N, K = data.shape
        if index is None:
            index = default_index(N)
        else:
            index = ensure_index(index)
        if columns is None:
            columns = default_index(K)
        else:
            columns = ensure_index(columns)

        if len(columns) != K:
            raise ValueError(f"Column length mismatch: {len(columns)} vs. {K}")
        if len(index) != N:
            raise ValueError(f"Index length mismatch: {len(index)} vs. {N}")
        return index, columns
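Editor's note: together the two accessor classes support a SciPy round-trip. A minimal sketch (illustrative, not part of the file; assumes scipy is installed):

    import pandas as pd
    import scipy.sparse

    # DataFrame side: spmatrix -> sparse-backed DataFrame -> back to COO.
    mat = scipy.sparse.eye(3, dtype=float)
    df = pd.DataFrame.sparse.from_spmatrix(mat)
    df.sparse.density         # 1/3: one stored point per 3-row column
    coo = df.sparse.to_coo()  # allowed because the fill value is 0
    dense = df.sparse.to_dense()

    # Series side: a COO matrix becomes a Series indexed by (row, col).
    A = scipy.sparse.coo_matrix(([3.0, 1.0], ([1, 0], [0, 2])), shape=(3, 4))
    ser = pd.Series.sparse.from_coo(A)
    ser.sparse.sp_values      # array([1., 3.]) -- only the stored points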
venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/array.py ADDED
@@ -0,0 +1,1929 @@
"""
SparseArray data structure
"""
from __future__ import annotations

from collections import abc
import numbers
import operator
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Literal,
    cast,
    overload,
)
import warnings

import numpy as np

from pandas._libs import lib
import pandas._libs.sparse as splib
from pandas._libs.sparse import (
    BlockIndex,
    IntIndex,
    SparseIndex,
)
from pandas._libs.tslibs import NaT
from pandas.compat.numpy import function as nv
from pandas.errors import PerformanceWarning
from pandas.util._decorators import doc
from pandas.util._exceptions import find_stack_level
from pandas.util._validators import (
    validate_bool_kwarg,
    validate_insert_loc,
)

from pandas.core.dtypes.astype import astype_array
from pandas.core.dtypes.cast import (
    construct_1d_arraylike_from_scalar,
    find_common_type,
    maybe_box_datetimelike,
)
from pandas.core.dtypes.common import (
    is_bool_dtype,
    is_integer,
    is_list_like,
    is_object_dtype,
    is_scalar,
    is_string_dtype,
    pandas_dtype,
)
from pandas.core.dtypes.dtypes import (
    DatetimeTZDtype,
    SparseDtype,
)
from pandas.core.dtypes.generic import (
    ABCIndex,
    ABCSeries,
)
from pandas.core.dtypes.missing import (
    isna,
    na_value_for_dtype,
    notna,
)

from pandas.core import arraylike
import pandas.core.algorithms as algos
from pandas.core.arraylike import OpsMixin
from pandas.core.arrays import ExtensionArray
from pandas.core.base import PandasObject
import pandas.core.common as com
from pandas.core.construction import (
    ensure_wrapped_if_datetimelike,
    extract_array,
    sanitize_array,
)
from pandas.core.indexers import (
    check_array_indexer,
    unpack_tuple_and_ellipses,
)
from pandas.core.nanops import check_below_min_count

from pandas.io.formats import printing

# See https://github.com/python/typing/issues/684
if TYPE_CHECKING:
    from collections.abc import Sequence
    from enum import Enum

    class ellipsis(Enum):
        Ellipsis = "..."

    Ellipsis = ellipsis.Ellipsis

    from scipy.sparse import spmatrix

    from pandas._typing import (
        FillnaOptions,
        NumpySorter,
    )

    SparseIndexKind = Literal["integer", "block"]

    from pandas._typing import (
        ArrayLike,
        AstypeArg,
        Axis,
        AxisInt,
        Dtype,
        NpDtype,
        PositionalIndexer,
        Scalar,
        ScalarIndexer,
        Self,
        SequenceIndexer,
        npt,
    )

    from pandas import Series

else:
    ellipsis = type(Ellipsis)


# ----------------------------------------------------------------------------
# Array

_sparray_doc_kwargs = {"klass": "SparseArray"}


def _get_fill(arr: SparseArray) -> np.ndarray:
    """
    Create a 0-dim ndarray containing the fill value

    Parameters
    ----------
    arr : SparseArray

    Returns
    -------
    fill_value : ndarray
        0-dim ndarray with just the fill value.

    Notes
    -----
    coerce fill_value to arr dtype if possible
    int64 SparseArray can have NaN as fill_value if there is no missing
    """
    try:
        return np.asarray(arr.fill_value, dtype=arr.dtype.subtype)
    except ValueError:
        return np.asarray(arr.fill_value)


def _sparse_array_op(
    left: SparseArray, right: SparseArray, op: Callable, name: str
) -> SparseArray:
    """
    Perform a binary operation between two arrays.

    Parameters
    ----------
    left : Union[SparseArray, ndarray]
    right : Union[SparseArray, ndarray]
    op : Callable
        The binary operation to perform
    name : str
        Name of the callable.

    Returns
    -------
    SparseArray
    """
    if name.startswith("__"):
        # For lookups in _libs.sparse we need non-dunder op name
        name = name[2:-2]

    # dtype used to find corresponding sparse method
    ltype = left.dtype.subtype
    rtype = right.dtype.subtype

    if ltype != rtype:
        subtype = find_common_type([ltype, rtype])
        ltype = SparseDtype(subtype, left.fill_value)
        rtype = SparseDtype(subtype, right.fill_value)

        left = left.astype(ltype, copy=False)
        right = right.astype(rtype, copy=False)
        dtype = ltype.subtype
    else:
        dtype = ltype

    # dtype the result must have
    result_dtype = None

    if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
        with np.errstate(all="ignore"):
            result = op(left.to_dense(), right.to_dense())
            fill = op(_get_fill(left), _get_fill(right))

        if left.sp_index.ngaps == 0:
            index = left.sp_index
        else:
            index = right.sp_index
    elif left.sp_index.equals(right.sp_index):
        with np.errstate(all="ignore"):
            result = op(left.sp_values, right.sp_values)
            fill = op(_get_fill(left), _get_fill(right))
        index = left.sp_index
    else:
        if name[0] == "r":
            left, right = right, left
            name = name[1:]

        if name in ("and", "or", "xor") and dtype == "bool":
            opname = f"sparse_{name}_uint8"
            # to make template simple, cast here
            left_sp_values = left.sp_values.view(np.uint8)
            right_sp_values = right.sp_values.view(np.uint8)
            result_dtype = bool
        else:
            opname = f"sparse_{name}_{dtype}"
            left_sp_values = left.sp_values
            right_sp_values = right.sp_values

        if (
            name in ["floordiv", "mod"]
            and (right == 0).any()
            and left.dtype.kind in "iu"
        ):
            # Match the non-Sparse Series behavior
            opname = f"sparse_{name}_float64"
            left_sp_values = left_sp_values.astype("float64")
            right_sp_values = right_sp_values.astype("float64")

        sparse_op = getattr(splib, opname)

        with np.errstate(all="ignore"):
            result, index, fill = sparse_op(
                left_sp_values,
                left.sp_index,
                left.fill_value,
                right_sp_values,
                right.sp_index,
                right.fill_value,
            )

    if name == "divmod":
        # result is a 2-tuple
        # error: Incompatible return value type (got "Tuple[SparseArray,
        # SparseArray]", expected "SparseArray")
        return (  # type: ignore[return-value]
            _wrap_result(name, result[0], index, fill[0], dtype=result_dtype),
            _wrap_result(name, result[1], index, fill[1], dtype=result_dtype),
        )

    if result_dtype is None:
        result_dtype = result.dtype

    return _wrap_result(name, result, index, fill, dtype=result_dtype)


def _wrap_result(
    name: str, data, sparse_index, fill_value, dtype: Dtype | None = None
) -> SparseArray:
    """
    wrap op result to have correct dtype
    """
    if name.startswith("__"):
        # e.g. __eq__ --> eq
        name = name[2:-2]

    if name in ("eq", "ne", "lt", "gt", "le", "ge"):
        dtype = bool

    fill_value = lib.item_from_zerodim(fill_value)

    if is_bool_dtype(dtype):
        # fill_value may be np.bool_
        fill_value = bool(fill_value)
    return SparseArray(
        data, sparse_index=sparse_index, fill_value=fill_value, dtype=dtype
    )

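# Editor's note (illustrative, not part of pandas): the three branches of
# _sparse_array_op above correspond to how much index alignment is needed.
# With matching indices the op runs only on the stored values, e.g.:
#
#     left = SparseArray([0, 1, 0, 2])   # sp_index at positions [1, 3]
#     right = SparseArray([0, 3, 0, 4])  # same positions -> "equals" branch
#     _sparse_array_op(left, right, operator.add, "add")
#     # -> SparseArray([0, 4, 0, 6]); the fill values are op'd separately
#
# With mismatched indices, the cython kernels in pandas._libs.sparse
# (looked up by name, e.g. sparse_add_int64) merge the two indices instead.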
class SparseArray(OpsMixin, PandasObject, ExtensionArray):
    """
    An ExtensionArray for storing sparse data.

    Parameters
    ----------
    data : array-like or scalar
        A dense array of values to store in the SparseArray. This may contain
        `fill_value`.
    sparse_index : SparseIndex, optional
    fill_value : scalar, optional
        Elements in data that are ``fill_value`` are not stored in the
        SparseArray. For memory savings, this should be the most common value
        in `data`. By default, `fill_value` depends on the dtype of `data`:

        =========== ==========
        data.dtype  na_value
        =========== ==========
        float       ``np.nan``
        int         ``0``
        bool        False
        datetime64  ``pd.NaT``
        timedelta64 ``pd.NaT``
        =========== ==========

        The fill value is potentially specified in three ways. In order of
        precedence, these are

        1. The `fill_value` argument
        2. ``dtype.fill_value`` if `fill_value` is None and `dtype` is
           a ``SparseDtype``
        3. ``data.dtype.fill_value`` if `fill_value` is None and `dtype`
           is not a ``SparseDtype`` and `data` is a ``SparseArray``.

    kind : str
        Can be 'integer' or 'block', default is 'integer'.
        The type of storage for sparse locations.

        * 'block': Stores a `block` and `block_length` for each
          contiguous *span* of sparse values. This is best when
          sparse data tends to be clumped together, with large
          regions of ``fill-value`` values between sparse values.
        * 'integer': uses an integer to store the location of
          each sparse value.

    dtype : np.dtype or SparseDtype, optional
        The dtype to use for the SparseArray. For numpy dtypes, this
        determines the dtype of ``self.sp_values``. For SparseDtype,
        this determines ``self.sp_values`` and ``self.fill_value``.
    copy : bool, default False
        Whether to explicitly copy the incoming `data` array.

    Attributes
    ----------
    None

    Methods
    -------
    None

    Examples
    --------
    >>> from pandas.arrays import SparseArray
    >>> arr = SparseArray([0, 0, 1, 2])
    >>> arr
    [0, 0, 1, 2]
    Fill: 0
    IntIndex
    Indices: array([2, 3], dtype=int32)
    """

    _subtyp = "sparse_array"  # register ABCSparseArray
    _hidden_attrs = PandasObject._hidden_attrs | frozenset([])
    _sparse_index: SparseIndex
    _sparse_values: np.ndarray
    _dtype: SparseDtype

    def __init__(
        self,
        data,
        sparse_index=None,
        fill_value=None,
        kind: SparseIndexKind = "integer",
        dtype: Dtype | None = None,
        copy: bool = False,
    ) -> None:
        if fill_value is None and isinstance(dtype, SparseDtype):
            fill_value = dtype.fill_value

        if isinstance(data, type(self)):
            # disable normal inference on dtype, sparse_index, & fill_value
            if sparse_index is None:
                sparse_index = data.sp_index
            if fill_value is None:
                fill_value = data.fill_value
            if dtype is None:
                dtype = data.dtype
            # TODO: make kind=None, and use data.kind?
            data = data.sp_values

        # Handle user-provided dtype
        if isinstance(dtype, str):
            # Two options: dtype='int', regular numpy dtype
            # or dtype='Sparse[int]', a sparse dtype
            try:
                dtype = SparseDtype.construct_from_string(dtype)
            except TypeError:
                dtype = pandas_dtype(dtype)

        if isinstance(dtype, SparseDtype):
            if fill_value is None:
                fill_value = dtype.fill_value
            dtype = dtype.subtype

        if is_scalar(data):
            warnings.warn(
                f"Constructing {type(self).__name__} with scalar data is deprecated "
                "and will raise in a future version. Pass a sequence instead.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
            if sparse_index is None:
                npoints = 1
            else:
                npoints = sparse_index.length

            data = construct_1d_arraylike_from_scalar(data, npoints, dtype=None)
            dtype = data.dtype

        if dtype is not None:
            dtype = pandas_dtype(dtype)

        # TODO: disentangle the fill_value dtype inference from
        # dtype inference
        if data is None:
            # TODO: What should the empty dtype be? Object or float?

            # error: Argument "dtype" to "array" has incompatible type
            # "Union[ExtensionDtype, dtype[Any], None]"; expected "Union[dtype[Any],
            # None, type, _SupportsDType, str, Union[Tuple[Any, int], Tuple[Any,
            # Union[int, Sequence[int]]], List[Any], _DTypeDict, Tuple[Any, Any]]]"
            data = np.array([], dtype=dtype)  # type: ignore[arg-type]

        try:
            data = sanitize_array(data, index=None)
        except ValueError:
            # NumPy may raise a ValueError on data like [1, []]
            # we retry with object dtype here.
            if dtype is None:
                dtype = np.dtype(object)
                data = np.atleast_1d(np.asarray(data, dtype=dtype))
            else:
                raise

        if copy:
            # TODO: avoid double copy when dtype forces cast.
            data = data.copy()

        if fill_value is None:
            fill_value_dtype = data.dtype if dtype is None else dtype
            if fill_value_dtype is None:
                fill_value = np.nan
            else:
                fill_value = na_value_for_dtype(fill_value_dtype)

        if isinstance(data, type(self)) and sparse_index is None:
            sparse_index = data._sparse_index
            # error: Argument "dtype" to "asarray" has incompatible type
            # "Union[ExtensionDtype, dtype[Any], None]"; expected "None"
            sparse_values = np.asarray(
                data.sp_values, dtype=dtype  # type: ignore[arg-type]
            )
        elif sparse_index is None:
            data = extract_array(data, extract_numpy=True)
            if not isinstance(data, np.ndarray):
                # EA
                if isinstance(data.dtype, DatetimeTZDtype):
                    warnings.warn(
                        f"Creating SparseArray from {data.dtype} data "
                        "loses timezone information. Cast to object before "
                        "sparse to retain timezone information.",
                        UserWarning,
                        stacklevel=find_stack_level(),
                    )
                    data = np.asarray(data, dtype="datetime64[ns]")
                    if fill_value is NaT:
                        fill_value = np.datetime64("NaT", "ns")
                data = np.asarray(data)
            sparse_values, sparse_index, fill_value = _make_sparse(
                # error: Argument "dtype" to "_make_sparse" has incompatible type
                # "Union[ExtensionDtype, dtype[Any], None]"; expected
                # "Optional[dtype[Any]]"
                data,
                kind=kind,
                fill_value=fill_value,
                dtype=dtype,  # type: ignore[arg-type]
            )
        else:
            # error: Argument "dtype" to "asarray" has incompatible type
            # "Union[ExtensionDtype, dtype[Any], None]"; expected "None"
            sparse_values = np.asarray(data, dtype=dtype)  # type: ignore[arg-type]
            if len(sparse_values) != sparse_index.npoints:
                raise AssertionError(
                    f"Non array-like type {type(sparse_values)} must "
                    "have the same length as the index"
                )
        self._sparse_index = sparse_index
        self._sparse_values = sparse_values
        self._dtype = SparseDtype(sparse_values.dtype, fill_value)

    @classmethod
    def _simple_new(
        cls,
        sparse_array: np.ndarray,
        sparse_index: SparseIndex,
        dtype: SparseDtype,
    ) -> Self:
        new = object.__new__(cls)
        new._sparse_index = sparse_index
        new._sparse_values = sparse_array
        new._dtype = dtype
        return new

    @classmethod
    def from_spmatrix(cls, data: spmatrix) -> Self:
        """
        Create a SparseArray from a scipy.sparse matrix.

        Parameters
        ----------
        data : scipy.sparse.spmatrix
            This should be a SciPy sparse matrix where the size
            of the second dimension is 1. In other words, a
            sparse matrix with a single column.

        Returns
        -------
        SparseArray

        Examples
        --------
        >>> import scipy.sparse
        >>> mat = scipy.sparse.coo_matrix((4, 1))
        >>> pd.arrays.SparseArray.from_spmatrix(mat)
        [0.0, 0.0, 0.0, 0.0]
        Fill: 0.0
        IntIndex
        Indices: array([], dtype=int32)
        """
        length, ncol = data.shape

        if ncol != 1:
            raise ValueError(f"'data' must have a single column, not '{ncol}'")

        # our sparse index classes require that the positions be strictly
        # increasing. So we need to sort loc, and arr accordingly.
        data = data.tocsc()
        data.sort_indices()
        arr = data.data
        idx = data.indices

        zero = np.array(0, dtype=arr.dtype).item()
        dtype = SparseDtype(arr.dtype, zero)
        index = IntIndex(length, idx)

        return cls._simple_new(arr, index, dtype)

    def __array__(
        self, dtype: NpDtype | None = None, copy: bool | None = None
    ) -> np.ndarray:
        fill_value = self.fill_value

        if self.sp_index.ngaps == 0:
            # Compat for na dtype and int values.
            return self.sp_values
        if dtype is None:
            # Can NumPy represent this type?
            # If not, `np.result_type` will raise. We catch that
            # and return object.
            if self.sp_values.dtype.kind == "M":
                # However, we *do* special-case the common case of
                # a datetime64 with pandas NaT.
                if fill_value is NaT:
                    # Can't put pd.NaT in a datetime64[ns]
                    fill_value = np.datetime64("NaT")
            try:
                dtype = np.result_type(self.sp_values.dtype, type(fill_value))
            except TypeError:
                dtype = object

        out = np.full(self.shape, fill_value, dtype=dtype)
        out[self.sp_index.indices] = self.sp_values
        return out

    def __setitem__(self, key, value) -> None:
        # I suppose we could allow setting of non-fill_value elements.
        # TODO(SparseArray.__setitem__): remove special cases in
        # ExtensionBlock.where
        msg = "SparseArray does not support item assignment via setitem"
        raise TypeError(msg)

    @classmethod
    def _from_sequence(cls, scalars, *, dtype: Dtype | None = None, copy: bool = False):
        return cls(scalars, dtype=dtype)

    @classmethod
    def _from_factorized(cls, values, original):
        return cls(values, dtype=original.dtype)

    # ------------------------------------------------------------------------
    # Data
    # ------------------------------------------------------------------------
    @property
    def sp_index(self) -> SparseIndex:
        """
        The SparseIndex containing the location of non- ``fill_value`` points.
        """
        return self._sparse_index

    @property
    def sp_values(self) -> np.ndarray:
        """
        An ndarray containing the non- ``fill_value`` values.

        Examples
        --------
        >>> from pandas.arrays import SparseArray
        >>> s = SparseArray([0, 0, 1, 0, 2], fill_value=0)
        >>> s.sp_values
        array([1, 2])
        """
        return self._sparse_values

    @property
    def dtype(self) -> SparseDtype:
        return self._dtype

    @property
    def fill_value(self):
        """
        Elements in `data` that are `fill_value` are not stored.

        For memory savings, this should be the most common value in the array.

        Examples
        --------
        >>> ser = pd.Series([0, 0, 2, 2, 2], dtype="Sparse[int]")
        >>> ser.sparse.fill_value
        0
        >>> spa_dtype = pd.SparseDtype(dtype=np.int32, fill_value=2)
        >>> ser = pd.Series([0, 0, 2, 2, 2], dtype=spa_dtype)
        >>> ser.sparse.fill_value
        2
        """
        return self.dtype.fill_value

    @fill_value.setter
    def fill_value(self, value) -> None:
        self._dtype = SparseDtype(self.dtype.subtype, value)

    @property
    def kind(self) -> SparseIndexKind:
        """
        The kind of sparse index for this array. One of {'integer', 'block'}.
        """
        if isinstance(self.sp_index, IntIndex):
            return "integer"
        else:
            return "block"

    @property
    def _valid_sp_values(self) -> np.ndarray:
        sp_vals = self.sp_values
        mask = notna(sp_vals)
        return sp_vals[mask]

    def __len__(self) -> int:
        return self.sp_index.length

    @property
    def _null_fill_value(self) -> bool:
        return self._dtype._is_na_fill_value

    def _fill_value_matches(self, fill_value) -> bool:
        if self._null_fill_value:
            return isna(fill_value)
        else:
            return self.fill_value == fill_value

    @property
    def nbytes(self) -> int:
        return self.sp_values.nbytes + self.sp_index.nbytes

    @property
    def density(self) -> float:
        """
        The percent of non- ``fill_value`` points, as decimal.

        Examples
        --------
        >>> from pandas.arrays import SparseArray
        >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
        >>> s.density
        0.6
        """
        return self.sp_index.npoints / self.sp_index.length

    @property
    def npoints(self) -> int:
        """
        The number of non- ``fill_value`` points.

        Examples
        --------
        >>> from pandas.arrays import SparseArray
        >>> s = SparseArray([0, 0, 1, 1, 1], fill_value=0)
        >>> s.npoints
        3
        """
        return self.sp_index.npoints

    # error: Return type "SparseArray" of "isna" incompatible with return type
    # "ndarray[Any, Any] | ExtensionArraySupportsAnyAll" in supertype "ExtensionArray"
    def isna(self) -> Self:  # type: ignore[override]
        # If null fill value, we want SparseDtype[bool, true]
        # to preserve the same memory usage.
        dtype = SparseDtype(bool, self._null_fill_value)
        if self._null_fill_value:
            return type(self)._simple_new(isna(self.sp_values), self.sp_index, dtype)
        mask = np.full(len(self), False, dtype=np.bool_)
        mask[self.sp_index.indices] = isna(self.sp_values)
        return type(self)(mask, fill_value=False, dtype=dtype)

    def _pad_or_backfill(  # pylint: disable=useless-parent-delegation
        self,
        *,
        method: FillnaOptions,
        limit: int | None = None,
        limit_area: Literal["inside", "outside"] | None = None,
        copy: bool = True,
    ) -> Self:
        # TODO(3.0): We can remove this method once deprecation for fillna method
        # keyword is enforced.
        return super()._pad_or_backfill(
            method=method, limit=limit, limit_area=limit_area, copy=copy
        )

    def fillna(
        self,
        value=None,
        method: FillnaOptions | None = None,
        limit: int | None = None,
        copy: bool = True,
    ) -> Self:
        """
        Fill missing values with `value`.

        Parameters
        ----------
        value : scalar, optional
        method : str, optional

            .. warning::

               Using 'method' will result in high memory use,
               as all `fill_value` methods will be converted to
               an in-memory ndarray

        limit : int, optional

        copy: bool, default True
            Ignored for SparseArray.

        Returns
        -------
        SparseArray

        Notes
        -----
        When `value` is specified, the result's ``fill_value`` depends on
        ``self.fill_value``. The goal is to maintain low-memory use.

        If ``self.fill_value`` is NA, the result dtype will be
        ``SparseDtype(self.dtype, fill_value=value)``. This preserves the
        amount of memory used before and after filling.

        When ``self.fill_value`` is not NA, the result dtype will be
        ``self.dtype``. Again, this preserves the amount of memory used.
        """
        if (method is None and value is None) or (
            method is not None and value is not None
        ):
            raise ValueError("Must specify one of 'method' or 'value'.")

        if method is not None:
            return super().fillna(method=method, limit=limit)

        else:
            new_values = np.where(isna(self.sp_values), value, self.sp_values)

            if self._null_fill_value:
                # This is essentially just updating the dtype.
                new_dtype = SparseDtype(self.dtype.subtype, fill_value=value)
            else:
                new_dtype = self.dtype

        return self._simple_new(new_values, self._sparse_index, new_dtype)

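    # Editor's note (illustrative, not part of pandas): the Notes above mean
    # fillna never densifies when given a value. For example:
    #
    #     arr = SparseArray([1.0, np.nan])  # fill_value is nan (null)
    #     arr.fillna(0.0)
    #     # -> dense equivalent [1.0, 0.0], dtype Sparse[float64, 0.0]:
    #     #    only the dtype's fill_value changed; sp_values still holds a
    #     #    single stored point, so memory use is unchanged.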
795
+ def shift(self, periods: int = 1, fill_value=None) -> Self:
796
+ if not len(self) or periods == 0:
797
+ return self.copy()
798
+
799
+ if isna(fill_value):
800
+ fill_value = self.dtype.na_value
801
+
802
+ subtype = np.result_type(fill_value, self.dtype.subtype)
803
+
804
+ if subtype != self.dtype.subtype:
805
+ # just coerce up front
806
+ arr = self.astype(SparseDtype(subtype, self.fill_value))
807
+ else:
808
+ arr = self
809
+
810
+ empty = self._from_sequence(
811
+ [fill_value] * min(abs(periods), len(self)), dtype=arr.dtype
812
+ )
813
+
814
+ if periods > 0:
815
+ a = empty
816
+ b = arr[:-periods]
817
+ else:
818
+ a = arr[abs(periods) :]
819
+ b = empty
820
+ return arr._concat_same_type([a, b])
821
+
822
+ def _first_fill_value_loc(self):
823
+ """
824
+ Get the location of the first fill value.
825
+
826
+ Returns
827
+ -------
828
+ int
829
+ """
830
+ if len(self) == 0 or self.sp_index.npoints == len(self):
831
+ return -1
832
+
833
+ indices = self.sp_index.indices
834
+ if not len(indices) or indices[0] > 0:
835
+ return 0
836
+
837
+ # append a number larger than 1 at the end so that
838
+ # the case where the fill value appears only in the
839
+ # tail of the array is still detected
840
+ diff = np.r_[np.diff(indices), 2]
841
+ return indices[(diff > 1).argmax()] + 1
842
+
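A worked example of the sentinel trick above (hypothetical indices):

    import numpy as np

    indices = np.array([0, 1, 3, 4, 5])      # stored positions, length-6 array
    diff = np.r_[np.diff(indices), 2]        # [1, 2, 1, 1, 2]
    loc = indices[(diff > 1).argmax()] + 1   # 2: first position holding fill_value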
843
+ @doc(ExtensionArray.duplicated)
844
+ def duplicated(
845
+ self, keep: Literal["first", "last", False] = "first"
846
+ ) -> npt.NDArray[np.bool_]:
847
+ values = np.asarray(self)
848
+ mask = np.asarray(self.isna())
849
+ return algos.duplicated(values, keep=keep, mask=mask)
850
+
851
+ def unique(self) -> Self:
852
+ uniques = algos.unique(self.sp_values)
853
+ if len(self.sp_values) != len(self):
854
+ fill_loc = self._first_fill_value_loc()
855
+ # To match the behavior of pd.unique and
856
+ # pd.Series.unique, we keep the original
857
+ # order; we use unique again to find the
858
+ # insertion place for the fill value. Since
859
+ # sp_values is not large, the minor performance
860
+ # hit is worth the correctness.
861
+ insert_loc = len(algos.unique(self.sp_values[:fill_loc]))
862
+ uniques = np.insert(uniques, insert_loc, self.fill_value)
863
+ return type(self)._from_sequence(uniques, dtype=self.dtype)
864
+
865
+ def _values_for_factorize(self):
866
+ # Still override this for hash_pandas_object
867
+ return np.asarray(self), self.fill_value
868
+
869
+ def factorize(
870
+ self,
871
+ use_na_sentinel: bool = True,
872
+ ) -> tuple[np.ndarray, SparseArray]:
873
+ # Currently, ExtensionArray.factorize -> Tuple[ndarray, EA]
874
+ # The sparsity on this is backwards from what Sparse would want. Want
875
+ # ExtensionArray.factorize -> Tuple[EA, EA]
876
+ # Given that we have to return a dense array of codes, why bother
877
+ # implementing an efficient factorize?
878
+ codes, uniques = algos.factorize(
879
+ np.asarray(self), use_na_sentinel=use_na_sentinel
880
+ )
881
+ uniques_sp = SparseArray(uniques, dtype=self.dtype)
882
+ return codes, uniques_sp
883
+
884
+ def value_counts(self, dropna: bool = True) -> Series:
885
+ """
886
+ Returns a Series containing counts of unique values.
887
+
888
+ Parameters
889
+ ----------
890
+ dropna : bool, default True
891
+ Don't include counts of NaN, even if NaN is in sp_values.
892
+
893
+ Returns
894
+ -------
895
+ counts : Series
896
+ """
897
+ from pandas import (
898
+ Index,
899
+ Series,
900
+ )
901
+
902
+ keys, counts, _ = algos.value_counts_arraylike(self.sp_values, dropna=dropna)
903
+ fcounts = self.sp_index.ngaps
904
+ if fcounts > 0 and (not self._null_fill_value or not dropna):
905
+ mask = isna(keys) if self._null_fill_value else keys == self.fill_value
906
+ if mask.any():
907
+ counts[mask] += fcounts
908
+ else:
909
+ # error: Argument 1 to "insert" has incompatible type "Union[
910
+ # ExtensionArray,ndarray[Any, Any]]"; expected "Union[
911
+ # _SupportsArray[dtype[Any]], Sequence[_SupportsArray[dtype
912
+ # [Any]]], Sequence[Sequence[_SupportsArray[dtype[Any]]]],
913
+ # Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]], Sequence
914
+ # [Sequence[Sequence[Sequence[_SupportsArray[dtype[Any]]]]]]]"
915
+ keys = np.insert(keys, 0, self.fill_value) # type: ignore[arg-type]
916
+ counts = np.insert(counts, 0, fcounts)
917
+
918
+ if not isinstance(keys, ABCIndex):
919
+ index = Index(keys)
920
+ else:
921
+ index = keys
922
+ return Series(counts, index=index, copy=False)
923
+
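For instance, the implicit fill values (`ngaps` of them) are folded into the counts rather than materialized:

    arr = pd.arrays.SparseArray([0, 0, 1, 2], fill_value=0)
    arr.value_counts()
    # 0    2   <- the two gaps counted as fill_value
    # 1    1
    # 2    1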
924
+ # --------
925
+ # Indexing
926
+ # --------
927
+ @overload
928
+ def __getitem__(self, key: ScalarIndexer) -> Any:
929
+ ...
930
+
931
+ @overload
932
+ def __getitem__(
933
+ self,
934
+ key: SequenceIndexer | tuple[int | ellipsis, ...],
935
+ ) -> Self:
936
+ ...
937
+
938
+ def __getitem__(
939
+ self,
940
+ key: PositionalIndexer | tuple[int | ellipsis, ...],
941
+ ) -> Self | Any:
942
+ if isinstance(key, tuple):
943
+ key = unpack_tuple_and_ellipses(key)
944
+ if key is Ellipsis:
945
+ raise ValueError("Cannot slice with Ellipsis")
946
+
947
+ if is_integer(key):
948
+ return self._get_val_at(key)
949
+ elif isinstance(key, tuple):
950
+ # error: Invalid index type "Tuple[Union[int, ellipsis], ...]"
951
+ # for "ndarray[Any, Any]"; expected type
952
+ # "Union[SupportsIndex, _SupportsArray[dtype[Union[bool_,
953
+ # integer[Any]]]], _NestedSequence[_SupportsArray[dtype[
954
+ # Union[bool_, integer[Any]]]]], _NestedSequence[Union[
955
+ # bool, int]], Tuple[Union[SupportsIndex, _SupportsArray[
956
+ # dtype[Union[bool_, integer[Any]]]], _NestedSequence[
957
+ # _SupportsArray[dtype[Union[bool_, integer[Any]]]]],
958
+ # _NestedSequence[Union[bool, int]]], ...]]"
959
+ data_slice = self.to_dense()[key] # type: ignore[index]
960
+ elif isinstance(key, slice):
961
+ # Avoid densifying when handling contiguous slices
962
+ if key.step is None or key.step == 1:
963
+ start = 0 if key.start is None else key.start
964
+ if start < 0:
965
+ start += len(self)
966
+
967
+ end = len(self) if key.stop is None else key.stop
968
+ if end < 0:
969
+ end += len(self)
970
+
971
+ indices = self.sp_index.indices
972
+ keep_inds = np.flatnonzero((indices >= start) & (indices < end))
973
+ sp_vals = self.sp_values[keep_inds]
974
+
975
+ sp_index = indices[keep_inds].copy()
976
+
977
+ # If we've sliced to not include the start of the array, all our indices
978
+ # should be shifted. NB: here we are careful to also not shift by a
979
+ # negative value for a case like [0, 1][-100:] where the start index
980
+ # should be treated like 0
981
+ if start > 0:
982
+ sp_index -= start
983
+
984
+ # Length of our result should match applying this slice to a range
985
+ # of the length of our original array
986
+ new_len = len(range(len(self))[key])
987
+ new_sp_index = make_sparse_index(new_len, sp_index, self.kind)
988
+ return type(self)._simple_new(sp_vals, new_sp_index, self.dtype)
989
+ else:
990
+ indices = np.arange(len(self), dtype=np.int32)[key]
991
+ return self.take(indices)
992
+
993
+ elif not is_list_like(key):
994
+ # e.g. "foo" or 2.5
995
+ # exception message copied from numpy
996
+ raise IndexError(
997
+ r"only integers, slices (`:`), ellipsis (`...`), numpy.newaxis "
998
+ r"(`None`) and integer or boolean arrays are valid indices"
999
+ )
1000
+
1001
+ else:
1002
+ if isinstance(key, SparseArray):
1003
+ # NOTE: If we guarantee that SparseDtype(bool)
1004
+ # has only fill_value - True, False or nan
1005
+ # (see GH PR 44955)
1006
+ # we can apply the mask very fast:
1007
+ if is_bool_dtype(key):
1008
+ if isna(key.fill_value):
1009
+ return self.take(key.sp_index.indices[key.sp_values])
1010
+ if not key.fill_value:
1011
+ return self.take(key.sp_index.indices)
1012
+ n = len(self)
1013
+ mask = np.full(n, True, dtype=np.bool_)
1014
+ mask[key.sp_index.indices] = False
1015
+ return self.take(np.arange(n)[mask])
1016
+ else:
1017
+ key = np.asarray(key)
1018
+
1019
+ key = check_array_indexer(self, key)
1020
+
1021
+ if com.is_bool_indexer(key):
1022
+ # mypy doesn't know we have an array here
1023
+ key = cast(np.ndarray, key)
1024
+ return self.take(np.arange(len(key), dtype=np.int32)[key])
1025
+ elif hasattr(key, "__len__"):
1026
+ return self.take(key)
1027
+ else:
1028
+ raise ValueError(f"Cannot slice with '{key}'")
1029
+
1030
+ return type(self)(data_slice, kind=self.kind)
1031
+
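A few illustrative lookups for the branches above (a sketch):

    arr = pd.arrays.SparseArray([0, 0, 1, 0, 2], fill_value=0)
    arr[2]        # scalar lookup via sp_index
    arr[1:4]      # contiguous slice: filter sp_index, shift by start, stay sparse
    arr[::2]      # stepped slice: falls back to take()
    arr[arr > 0]  # boolean SparseArray mask: fast path via key.sp_index.indices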
1032
+ def _get_val_at(self, loc):
1033
+ loc = validate_insert_loc(loc, len(self))
1034
+
1035
+ sp_loc = self.sp_index.lookup(loc)
1036
+ if sp_loc == -1:
1037
+ return self.fill_value
1038
+ else:
1039
+ val = self.sp_values[sp_loc]
1040
+ val = maybe_box_datetimelike(val, self.sp_values.dtype)
1041
+ return val
1042
+
1043
+ def take(self, indices, *, allow_fill: bool = False, fill_value=None) -> Self:
1044
+ if is_scalar(indices):
1045
+ raise ValueError(f"'indices' must be an array, not a scalar '{indices}'.")
1046
+ indices = np.asarray(indices, dtype=np.int32)
1047
+
1048
+ dtype = None
1049
+ if indices.size == 0:
1050
+ result = np.array([], dtype="object")
1051
+ dtype = self.dtype
1052
+ elif allow_fill:
1053
+ result = self._take_with_fill(indices, fill_value=fill_value)
1054
+ else:
1055
+ return self._take_without_fill(indices)
1056
+
1057
+ return type(self)(
1058
+ result, fill_value=self.fill_value, kind=self.kind, dtype=dtype
1059
+ )
1060
+
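For example (the nan appears because `dtype.na_value` is the default fill for `allow_fill=True`):

    arr = pd.arrays.SparseArray([0, 1, 0, 2], fill_value=0)
    arr.take(np.array([0, 3]))                    # positional: [0, 2]
    arr.take(np.array([0, -1]), allow_fill=True)  # -1 -> dtype.na_value: [0.0, nan]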
1061
+ def _take_with_fill(self, indices, fill_value=None) -> np.ndarray:
1062
+ if fill_value is None:
1063
+ fill_value = self.dtype.na_value
1064
+
1065
+ if indices.min() < -1:
1066
+ raise ValueError(
1067
+ "Invalid value in 'indices'. Must be between -1 "
1068
+ "and the length of the array."
1069
+ )
1070
+
1071
+ if indices.max() >= len(self):
1072
+ raise IndexError("out of bounds value in 'indices'.")
1073
+
1074
+ if len(self) == 0:
1075
+ # Empty... Allow taking only if all empty
1076
+ if (indices == -1).all():
1077
+ dtype = np.result_type(self.sp_values, type(fill_value))
1078
+ taken = np.empty_like(indices, dtype=dtype)
1079
+ taken.fill(fill_value)
1080
+ return taken
1081
+ else:
1082
+ raise IndexError("cannot do a non-empty take from an empty axes.")
1083
+
1084
+ # sp_indexer may be -1 for two reasons
1085
+ # 1.) we took for an index of -1 (new)
1086
+ # 2.) we took a value that was self.fill_value (old)
1087
+ sp_indexer = self.sp_index.lookup_array(indices)
1088
+ new_fill_indices = indices == -1
1089
+ old_fill_indices = (sp_indexer == -1) & ~new_fill_indices
1090
+
1091
+ if self.sp_index.npoints == 0 and old_fill_indices.all():
1092
+ # We've looked up all valid points on an all-sparse array.
1093
+ taken = np.full(
1094
+ sp_indexer.shape, fill_value=self.fill_value, dtype=self.dtype.subtype
1095
+ )
1096
+
1097
+ elif self.sp_index.npoints == 0:
1098
+ # Use the old fill_value unless we took for an index of -1
1099
+ _dtype = np.result_type(self.dtype.subtype, type(fill_value))
1100
+ taken = np.full(sp_indexer.shape, fill_value=fill_value, dtype=_dtype)
1101
+ taken[old_fill_indices] = self.fill_value
1102
+ else:
1103
+ taken = self.sp_values.take(sp_indexer)
1104
+
1105
+ # Fill in two steps.
1106
+ # Old fill values
1107
+ # New fill values
1108
+ # potentially coercing to a new dtype at each stage.
1109
+
1110
+ m0 = sp_indexer[old_fill_indices] < 0
1111
+ m1 = sp_indexer[new_fill_indices] < 0
1112
+
1113
+ result_type = taken.dtype
1114
+
1115
+ if m0.any():
1116
+ result_type = np.result_type(result_type, type(self.fill_value))
1117
+ taken = taken.astype(result_type)
1118
+ taken[old_fill_indices] = self.fill_value
1119
+
1120
+ if m1.any():
1121
+ result_type = np.result_type(result_type, type(fill_value))
1122
+ taken = taken.astype(result_type)
1123
+ taken[new_fill_indices] = fill_value
1124
+
1125
+ return taken
1126
+
1127
+ def _take_without_fill(self, indices) -> Self:
1128
+ to_shift = indices < 0
1129
+
1130
+ n = len(self)
1131
+
1132
+ if (indices.max() >= n) or (indices.min() < -n):
1133
+ if n == 0:
1134
+ raise IndexError("cannot do a non-empty take from an empty axes.")
1135
+ raise IndexError("out of bounds value in 'indices'.")
1136
+
1137
+ if to_shift.any():
1138
+ indices = indices.copy()
1139
+ indices[to_shift] += n
1140
+
1141
+ sp_indexer = self.sp_index.lookup_array(indices)
1142
+ value_mask = sp_indexer != -1
1143
+ new_sp_values = self.sp_values[sp_indexer[value_mask]]
1144
+
1145
+ value_indices = np.flatnonzero(value_mask).astype(np.int32, copy=False)
1146
+
1147
+ new_sp_index = make_sparse_index(len(indices), value_indices, kind=self.kind)
1148
+ return type(self)._simple_new(new_sp_values, new_sp_index, dtype=self.dtype)
1149
+
1150
+ def searchsorted(
1151
+ self,
1152
+ v: ArrayLike | object,
1153
+ side: Literal["left", "right"] = "left",
1154
+ sorter: NumpySorter | None = None,
1155
+ ) -> npt.NDArray[np.intp] | np.intp:
1156
+ msg = "searchsorted requires high memory usage."
1157
+ warnings.warn(msg, PerformanceWarning, stacklevel=find_stack_level())
1158
+ v = np.asarray(v)
1159
+ return np.asarray(self, dtype=self.dtype.subtype).searchsorted(v, side, sorter)
1160
+
1161
+ def copy(self) -> Self:
1162
+ values = self.sp_values.copy()
1163
+ return self._simple_new(values, self.sp_index, self.dtype)
1164
+
1165
+ @classmethod
1166
+ def _concat_same_type(cls, to_concat: Sequence[Self]) -> Self:
1167
+ fill_value = to_concat[0].fill_value
1168
+
1169
+ values = []
1170
+ length = 0
1171
+
1172
+ if to_concat:
1173
+ sp_kind = to_concat[0].kind
1174
+ else:
1175
+ sp_kind = "integer"
1176
+
1177
+ sp_index: SparseIndex
1178
+ if sp_kind == "integer":
1179
+ indices = []
1180
+
1181
+ for arr in to_concat:
1182
+ int_idx = arr.sp_index.indices.copy()
1183
+ int_idx += length # TODO: wraparound
1184
+ length += arr.sp_index.length
1185
+
1186
+ values.append(arr.sp_values)
1187
+ indices.append(int_idx)
1188
+
1189
+ data = np.concatenate(values)
1190
+ indices_arr = np.concatenate(indices)
1191
+ # error: Argument 2 to "IntIndex" has incompatible type
1192
+ # "ndarray[Any, dtype[signedinteger[_32Bit]]]";
1193
+ # expected "Sequence[int]"
1194
+ sp_index = IntIndex(length, indices_arr) # type: ignore[arg-type]
1195
+
1196
+ else:
1197
+ # when concatenating block indices, we don't claim that you'll
1198
+ # get an identical index as concatenating the values and then
1199
+ # creating a new index. We don't want to spend the time trying
1200
+ # to merge blocks across arrays in `to_concat`, so the resulting
1201
+ # BlockIndex may have more blocks.
1202
+ blengths = []
1203
+ blocs = []
1204
+
1205
+ for arr in to_concat:
1206
+ block_idx = arr.sp_index.to_block_index()
1207
+
1208
+ values.append(arr.sp_values)
1209
+ blocs.append(block_idx.blocs.copy() + length)
1210
+ blengths.append(block_idx.blengths)
1211
+ length += arr.sp_index.length
1212
+
1213
+ data = np.concatenate(values)
1214
+ blocs_arr = np.concatenate(blocs)
1215
+ blengths_arr = np.concatenate(blengths)
1216
+
1217
+ sp_index = BlockIndex(length, blocs_arr, blengths_arr)
1218
+
1219
+ return cls(data, sparse_index=sp_index, fill_value=fill_value)
1220
+
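`_concat_same_type` is internal (concatenation normally flows through `pd.concat` on sparse Series), but its fill-value rule is easy to see:

    a = pd.arrays.SparseArray([0, 1], fill_value=0)
    b = pd.arrays.SparseArray([2, 0], fill_value=0)
    pd.arrays.SparseArray._concat_same_type([a, b])
    # [0, 1, 2, 0], Fill: 0 -- fill_value comes from the first array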
1221
+ def astype(self, dtype: AstypeArg | None = None, copy: bool = True):
1222
+ """
1223
+ Change the dtype of a SparseArray.
1224
+
1225
+ The output will always be a SparseArray. To convert to a dense
1226
+ ndarray with a certain dtype, use :meth:`numpy.asarray`.
1227
+
1228
+ Parameters
1229
+ ----------
1230
+ dtype : np.dtype or ExtensionDtype
1231
+ For SparseDtype, this changes the dtype of
1232
+ ``self.sp_values`` and the ``self.fill_value``.
1233
+
1234
+ For other dtypes, this only changes the dtype of
1235
+ ``self.sp_values``.
1236
+
1237
+ copy : bool, default True
1238
+ Whether to ensure a copy is made, even if not necessary.
1239
+
1240
+ Returns
1241
+ -------
1242
+ SparseArray
1243
+
1244
+ Examples
1245
+ --------
1246
+ >>> arr = pd.arrays.SparseArray([0, 0, 1, 2])
1247
+ >>> arr
1248
+ [0, 0, 1, 2]
1249
+ Fill: 0
1250
+ IntIndex
1251
+ Indices: array([2, 3], dtype=int32)
1252
+
1253
+ >>> arr.astype(SparseDtype(np.dtype('int32')))
1254
+ [0, 0, 1, 2]
1255
+ Fill: 0
1256
+ IntIndex
1257
+ Indices: array([2, 3], dtype=int32)
1258
+
1259
+ Using a NumPy dtype with a different kind (e.g. float) will coerce
1260
+ just ``self.sp_values``.
1261
+
1262
+ >>> arr.astype(SparseDtype(np.dtype('float64')))
1263
+ ... # doctest: +NORMALIZE_WHITESPACE
1264
+ [nan, nan, 1.0, 2.0]
1265
+ Fill: nan
1266
+ IntIndex
1267
+ Indices: array([2, 3], dtype=int32)
1268
+
1269
+ Using a SparseDtype, you can also change the fill value as well.
1270
+
1271
+ >>> arr.astype(SparseDtype("float64", fill_value=0.0))
1272
+ ... # doctest: +NORMALIZE_WHITESPACE
1273
+ [0.0, 0.0, 1.0, 2.0]
1274
+ Fill: 0.0
1275
+ IntIndex
1276
+ Indices: array([2, 3], dtype=int32)
1277
+ """
1278
+ if dtype == self._dtype:
1279
+ if not copy:
1280
+ return self
1281
+ else:
1282
+ return self.copy()
1283
+
1284
+ future_dtype = pandas_dtype(dtype)
1285
+ if not isinstance(future_dtype, SparseDtype):
1286
+ # GH#34457
1287
+ values = np.asarray(self)
1288
+ values = ensure_wrapped_if_datetimelike(values)
1289
+ return astype_array(values, dtype=future_dtype, copy=False)
1290
+
1291
+ dtype = self.dtype.update_dtype(dtype)
1292
+ subtype = pandas_dtype(dtype._subtype_with_str)
1293
+ subtype = cast(np.dtype, subtype) # ensured by update_dtype
1294
+ values = ensure_wrapped_if_datetimelike(self.sp_values)
1295
+ sp_values = astype_array(values, subtype, copy=copy)
1296
+ sp_values = np.asarray(sp_values)
1297
+
1298
+ return self._simple_new(sp_values, self.sp_index, dtype)
1299
+
1300
+ def map(self, mapper, na_action=None) -> Self:
1301
+ """
1302
+ Map values using an input mapping or function.
1303
+
1304
+ Parameters
1305
+ ----------
1306
+ mapper : dict, Series, callable
1307
+ The correspondence from old values to new.
1308
+ na_action : {None, 'ignore'}, default None
1309
+ If 'ignore', propagate NA values, without passing them to the
1310
+ mapping correspondence.
1311
+
1312
+ Returns
1313
+ -------
1314
+ SparseArray
1315
+ The output array will have the same density as the input.
1316
+ The output fill value will be the result of applying the
1317
+ mapping to ``self.fill_value``.
1318
+
1319
+ Examples
1320
+ --------
1321
+ >>> arr = pd.arrays.SparseArray([0, 1, 2])
1322
+ >>> arr.map(lambda x: x + 10)
1323
+ [10, 11, 12]
1324
+ Fill: 10
1325
+ IntIndex
1326
+ Indices: array([1, 2], dtype=int32)
1327
+
1328
+ >>> arr.map({0: 10, 1: 11, 2: 12})
1329
+ [10, 11, 12]
1330
+ Fill: 10
1331
+ IntIndex
1332
+ Indices: array([1, 2], dtype=int32)
1333
+
1334
+ >>> arr.map(pd.Series([10, 11, 12], index=[0, 1, 2]))
1335
+ [10, 11, 12]
1336
+ Fill: 10
1337
+ IntIndex
1338
+ Indices: array([1, 2], dtype=int32)
1339
+ """
1340
+ is_map = isinstance(mapper, (abc.Mapping, ABCSeries))
1341
+
1342
+ fill_val = self.fill_value
1343
+
1344
+ if na_action is None or notna(fill_val):
1345
+ fill_val = mapper.get(fill_val, fill_val) if is_map else mapper(fill_val)
1346
+
1347
+ def func(sp_val):
1348
+ new_sp_val = mapper.get(sp_val, None) if is_map else mapper(sp_val)
1349
+ # check identity and equality because nans are not equal to each other
1350
+ if new_sp_val is fill_val or new_sp_val == fill_val:
1351
+ msg = "fill value in the sparse values not supported"
1352
+ raise ValueError(msg)
1353
+ return new_sp_val
1354
+
1355
+ sp_values = [func(x) for x in self.sp_values]
1356
+
1357
+ return type(self)(sp_values, sparse_index=self.sp_index, fill_value=fill_val)
1358
+
1359
+ def to_dense(self) -> np.ndarray:
1360
+ """
1361
+ Convert SparseArray to a NumPy array.
1362
+
1363
+ Returns
1364
+ -------
1365
+ arr : NumPy array
1366
+ """
1367
+ return np.asarray(self, dtype=self.sp_values.dtype)
1368
+
1369
+ def _where(self, mask, value):
1370
+ # NB: may not preserve dtype, e.g. result may be Sparse[float64]
1371
+ # while self is Sparse[int64]
1372
+ naive_implementation = np.where(mask, self, value)
1373
+ dtype = SparseDtype(naive_implementation.dtype, fill_value=self.fill_value)
1374
+ result = type(self)._from_sequence(naive_implementation, dtype=dtype)
1375
+ return result
1376
+
1377
+ # ------------------------------------------------------------------------
1378
+ # IO
1379
+ # ------------------------------------------------------------------------
1380
+ def __setstate__(self, state) -> None:
1381
+ """Necessary for making this object picklable"""
1382
+ if isinstance(state, tuple):
1383
+ # Compat for pandas < 0.24.0
1384
+ nd_state, (fill_value, sp_index) = state
1385
+ sparse_values = np.array([])
1386
+ sparse_values.__setstate__(nd_state)
1387
+
1388
+ self._sparse_values = sparse_values
1389
+ self._sparse_index = sp_index
1390
+ self._dtype = SparseDtype(sparse_values.dtype, fill_value)
1391
+ else:
1392
+ self.__dict__.update(state)
1393
+
1394
+ def nonzero(self) -> tuple[npt.NDArray[np.int32]]:
1395
+ if self.fill_value == 0:
1396
+ return (self.sp_index.indices,)
1397
+ else:
1398
+ return (self.sp_index.indices[self.sp_values != 0],)
1399
+
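For example:

    arr = pd.arrays.SparseArray([0, 1, 0, 2], fill_value=0)
    arr.nonzero()  # (array([1, 3], dtype=int32),) read straight off sp_index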
1400
+ # ------------------------------------------------------------------------
1401
+ # Reductions
1402
+ # ------------------------------------------------------------------------
1403
+
1404
+ def _reduce(
1405
+ self, name: str, *, skipna: bool = True, keepdims: bool = False, **kwargs
1406
+ ):
1407
+ method = getattr(self, name, None)
1408
+
1409
+ if method is None:
1410
+ raise TypeError(f"cannot perform {name} with type {self.dtype}")
1411
+
1412
+ if skipna:
1413
+ arr = self
1414
+ else:
1415
+ arr = self.dropna()
1416
+
1417
+ result = getattr(arr, name)(**kwargs)
1418
+
1419
+ if keepdims:
1420
+ return type(self)([result], dtype=self.dtype)
1421
+ else:
1422
+ return result
1423
+
1424
+ def all(self, axis=None, *args, **kwargs):
1425
+ """
1426
+ Tests whether all elements evaluate to True
1427
+
1428
+ Returns
1429
+ -------
1430
+ all : bool
1431
+
1432
+ See Also
1433
+ --------
1434
+ numpy.all
1435
+ """
1436
+ nv.validate_all(args, kwargs)
1437
+
1438
+ values = self.sp_values
1439
+
1440
+ if len(values) != len(self) and not np.all(self.fill_value):
1441
+ return False
1442
+
1443
+ return values.all()
1444
+
1445
+ def any(self, axis: AxisInt = 0, *args, **kwargs) -> bool:
1446
+ """
1447
+ Tests whether at least one element evaluates to True
1448
+
1449
+ Returns
1450
+ -------
1451
+ any : bool
1452
+
1453
+ See Also
1454
+ --------
1455
+ numpy.any
1456
+ """
1457
+ nv.validate_any(args, kwargs)
1458
+
1459
+ values = self.sp_values
1460
+
1461
+ if len(values) != len(self) and np.any(self.fill_value):
1462
+ return True
1463
+
1464
+ return values.any().item()
1465
+
1466
+ def sum(
1467
+ self,
1468
+ axis: AxisInt = 0,
1469
+ min_count: int = 0,
1470
+ skipna: bool = True,
1471
+ *args,
1472
+ **kwargs,
1473
+ ) -> Scalar:
1474
+ """
1475
+ Sum of non-NA/null values
1476
+
1477
+ Parameters
1478
+ ----------
1479
+ axis : int, default 0
1480
+ Not Used. NumPy compatibility.
1481
+ min_count : int, default 0
1482
+ The required number of valid values to perform the summation. If fewer
1483
+ than ``min_count`` valid values are present, the result will be the missing
1484
+ value indicator for the subarray type.
1485
+ *args, **kwargs
1486
+ Not Used. NumPy compatibility.
1487
+
1488
+ Returns
1489
+ -------
1490
+ scalar
1491
+ """
1492
+ nv.validate_sum(args, kwargs)
1493
+ valid_vals = self._valid_sp_values
1494
+ sp_sum = valid_vals.sum()
1495
+ has_na = self.sp_index.ngaps > 0 and not self._null_fill_value
1496
+
1497
+ if has_na and not skipna:
1498
+ return na_value_for_dtype(self.dtype.subtype, compat=False)
1499
+
1500
+ if self._null_fill_value:
1501
+ if check_below_min_count(valid_vals.shape, None, min_count):
1502
+ return na_value_for_dtype(self.dtype.subtype, compat=False)
1503
+ return sp_sum
1504
+ else:
1505
+ nsparse = self.sp_index.ngaps
1506
+ if check_below_min_count(valid_vals.shape, None, min_count - nsparse):
1507
+ return na_value_for_dtype(self.dtype.subtype, compat=False)
1508
+ return sp_sum + self.fill_value * nsparse
1509
+
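A short sketch of the `min_count` interaction:

    arr = pd.arrays.SparseArray([1.0, np.nan, 2.0])  # fill_value is nan
    arr.sum()              # 3.0: the NA gap is skipped
    arr.sum(min_count=3)   # nan: only 2 valid values are present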
1510
+ def cumsum(self, axis: AxisInt = 0, *args, **kwargs) -> SparseArray:
1511
+ """
1512
+ Cumulative sum of non-NA/null values.
1513
+
1514
+ When performing the cumulative summation, any NA/null values will
1515
+ be skipped. The resulting SparseArray will preserve the locations of
1516
+ NaN values, but the fill value will be `np.nan` regardless.
1517
+
1518
+ Parameters
1519
+ ----------
1520
+ axis : int or None
1521
+ Axis over which to perform the cumulative summation. If None,
1522
+ perform cumulative summation over flattened array.
1523
+
1524
+ Returns
1525
+ -------
1526
+ cumsum : SparseArray
1527
+ """
1528
+ nv.validate_cumsum(args, kwargs)
1529
+
1530
+ if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour.
1531
+ raise ValueError(f"axis(={axis}) out of bounds")
1532
+
1533
+ if not self._null_fill_value:
1534
+ return SparseArray(self.to_dense()).cumsum()
1535
+
1536
+ return SparseArray(
1537
+ self.sp_values.cumsum(),
1538
+ sparse_index=self.sp_index,
1539
+ fill_value=self.fill_value,
1540
+ )
1541
+
1542
+ def mean(self, axis: Axis = 0, *args, **kwargs):
1543
+ """
1544
+ Mean of non-NA/null values
1545
+
1546
+ Returns
1547
+ -------
1548
+ mean : float
1549
+ """
1550
+ nv.validate_mean(args, kwargs)
1551
+ valid_vals = self._valid_sp_values
1552
+ sp_sum = valid_vals.sum()
1553
+ ct = len(valid_vals)
1554
+
1555
+ if self._null_fill_value:
1556
+ return sp_sum / ct
1557
+ else:
1558
+ nsparse = self.sp_index.ngaps
1559
+ return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)
1560
+
1561
+ def max(self, *, axis: AxisInt | None = None, skipna: bool = True):
1562
+ """
1563
+ Max of array values, ignoring NA values if specified.
1564
+
1565
+ Parameters
1566
+ ----------
1567
+ axis : int, default None
1568
+ Not Used. NumPy compatibility.
1569
+ skipna : bool, default True
1570
+ Whether to ignore NA values.
1571
+
1572
+ Returns
1573
+ -------
1574
+ scalar
1575
+ """
1576
+ nv.validate_minmax_axis(axis, self.ndim)
1577
+ return self._min_max("max", skipna=skipna)
1578
+
1579
+ def min(self, *, axis: AxisInt | None = None, skipna: bool = True):
1580
+ """
1581
+ Min of array values, ignoring NA values if specified.
1582
+
1583
+ Parameters
1584
+ ----------
1585
+ axis : int, default None
1586
+ Not Used. NumPy compatibility.
1587
+ skipna : bool, default True
1588
+ Whether to ignore NA values.
1589
+
1590
+ Returns
1591
+ -------
1592
+ scalar
1593
+ """
1594
+ nv.validate_minmax_axis(axis, self.ndim)
1595
+ return self._min_max("min", skipna=skipna)
1596
+
1597
+ def _min_max(self, kind: Literal["min", "max"], skipna: bool) -> Scalar:
1598
+ """
1599
+ Min/max of non-NA/null values
1600
+
1601
+ Parameters
1602
+ ----------
1603
+ kind : {"min", "max"}
1604
+ skipna : bool
1605
+
1606
+ Returns
1607
+ -------
1608
+ scalar
1609
+ """
1610
+ valid_vals = self._valid_sp_values
1611
+ has_nonnull_fill_vals = not self._null_fill_value and self.sp_index.ngaps > 0
1612
+
1613
+ if len(valid_vals) > 0:
1614
+ sp_min_max = getattr(valid_vals, kind)()
1615
+
1616
+ # If a non-null fill value is currently present, it might be the min/max
1617
+ if has_nonnull_fill_vals:
1618
+ func = max if kind == "max" else min
1619
+ return func(sp_min_max, self.fill_value)
1620
+ elif skipna:
1621
+ return sp_min_max
1622
+ elif self.sp_index.ngaps == 0:
1623
+ # No NAs present
1624
+ return sp_min_max
1625
+ else:
1626
+ return na_value_for_dtype(self.dtype.subtype, compat=False)
1627
+ elif has_nonnull_fill_vals:
1628
+ return self.fill_value
1629
+ else:
1630
+ return na_value_for_dtype(self.dtype.subtype, compat=False)
1631
+
1632
+ def _argmin_argmax(self, kind: Literal["argmin", "argmax"]) -> int:
1633
+ values = self._sparse_values
1634
+ index = self._sparse_index.indices
1635
+ mask = np.asarray(isna(values))
1636
+ func = np.argmax if kind == "argmax" else np.argmin
1637
+
1638
+ idx = np.arange(values.shape[0])
1639
+ non_nans = values[~mask]
1640
+ non_nan_idx = idx[~mask]
1641
+
1642
+ _candidate = non_nan_idx[func(non_nans)]
1643
+ candidate = index[_candidate]
1644
+
1645
+ if isna(self.fill_value):
1646
+ return candidate
1647
+ if kind == "argmin" and self[candidate] < self.fill_value:
1648
+ return candidate
1649
+ if kind == "argmax" and self[candidate] > self.fill_value:
1650
+ return candidate
1651
+ _loc = self._first_fill_value_loc()
1652
+ if _loc == -1:
1653
+ # fill_value doesn't exist
1654
+ return candidate
1655
+ else:
1656
+ return _loc
1657
+
1658
+ def argmax(self, skipna: bool = True) -> int:
1659
+ validate_bool_kwarg(skipna, "skipna")
1660
+ if not skipna and self._hasna:
1661
+ raise NotImplementedError
1662
+ return self._argmin_argmax("argmax")
1663
+
1664
+ def argmin(self, skipna: bool = True) -> int:
1665
+ validate_bool_kwarg(skipna, "skipna")
1666
+ if not skipna and self._hasna:
1667
+ raise NotImplementedError
1668
+ return self._argmin_argmax("argmin")
1669
+
1670
+ # ------------------------------------------------------------------------
1671
+ # Ufuncs
1672
+ # ------------------------------------------------------------------------
1673
+
1674
+ _HANDLED_TYPES = (np.ndarray, numbers.Number)
1675
+
1676
+ def __array_ufunc__(self, ufunc: np.ufunc, method: str, *inputs, **kwargs):
1677
+ out = kwargs.get("out", ())
1678
+
1679
+ for x in inputs + out:
1680
+ if not isinstance(x, self._HANDLED_TYPES + (SparseArray,)):
1681
+ return NotImplemented
1682
+
1683
+ # for binary ops, use our custom dunder methods
1684
+ result = arraylike.maybe_dispatch_ufunc_to_dunder_op(
1685
+ self, ufunc, method, *inputs, **kwargs
1686
+ )
1687
+ if result is not NotImplemented:
1688
+ return result
1689
+
1690
+ if "out" in kwargs:
1691
+ # e.g. tests.arrays.sparse.test_arithmetics.test_ndarray_inplace
1692
+ res = arraylike.dispatch_ufunc_with_out(
1693
+ self, ufunc, method, *inputs, **kwargs
1694
+ )
1695
+ return res
1696
+
1697
+ if method == "reduce":
1698
+ result = arraylike.dispatch_reduction_ufunc(
1699
+ self, ufunc, method, *inputs, **kwargs
1700
+ )
1701
+ if result is not NotImplemented:
1702
+ # e.g. tests.series.test_ufunc.TestNumpyReductions
1703
+ return result
1704
+
1705
+ if len(inputs) == 1:
1706
+ # No alignment necessary.
1707
+ sp_values = getattr(ufunc, method)(self.sp_values, **kwargs)
1708
+ fill_value = getattr(ufunc, method)(self.fill_value, **kwargs)
1709
+
1710
+ if ufunc.nout > 1:
1711
+ # multiple outputs. e.g. modf
1712
+ arrays = tuple(
1713
+ self._simple_new(
1714
+ sp_value, self.sp_index, SparseDtype(sp_value.dtype, fv)
1715
+ )
1716
+ for sp_value, fv in zip(sp_values, fill_value)
1717
+ )
1718
+ return arrays
1719
+ elif method == "reduce":
1720
+ # e.g. reductions
1721
+ return sp_values
1722
+
1723
+ return self._simple_new(
1724
+ sp_values, self.sp_index, SparseDtype(sp_values.dtype, fill_value)
1725
+ )
1726
+
1727
+ new_inputs = tuple(np.asarray(x) for x in inputs)
1728
+ result = getattr(ufunc, method)(*new_inputs, **kwargs)
1729
+ if out:
1730
+ if len(out) == 1:
1731
+ out = out[0]
1732
+ return out
1733
+
1734
+ if ufunc.nout > 1:
1735
+ return tuple(type(self)(x) for x in result)
1736
+ elif method == "at":
1737
+ # no return value
1738
+ return None
1739
+ else:
1740
+ return type(self)(result)
1741
+
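For instance, a unary ufunc keeps the result sparse by acting on `sp_values` and `fill_value` separately, and a multi-output ufunc returns a tuple:

    arr = pd.arrays.SparseArray([-1, 0, 2], fill_value=0)
    np.abs(arr)                                   # still sparse, Fill: 0
    frac, whole = np.modf(pd.arrays.SparseArray([1.5, 0.0], fill_value=0.0))
    # nout == 2 -> a tuple of two SparseArrays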
1742
+ # ------------------------------------------------------------------------
1743
+ # Ops
1744
+ # ------------------------------------------------------------------------
1745
+
1746
+ def _arith_method(self, other, op):
1747
+ op_name = op.__name__
1748
+
1749
+ if isinstance(other, SparseArray):
1750
+ return _sparse_array_op(self, other, op, op_name)
1751
+
1752
+ elif is_scalar(other):
1753
+ with np.errstate(all="ignore"):
1754
+ fill = op(_get_fill(self), np.asarray(other))
1755
+ result = op(self.sp_values, other)
1756
+
1757
+ if op_name == "divmod":
1758
+ left, right = result
1759
+ lfill, rfill = fill
1760
+ return (
1761
+ _wrap_result(op_name, left, self.sp_index, lfill),
1762
+ _wrap_result(op_name, right, self.sp_index, rfill),
1763
+ )
1764
+
1765
+ return _wrap_result(op_name, result, self.sp_index, fill)
1766
+
1767
+ else:
1768
+ other = np.asarray(other)
1769
+ with np.errstate(all="ignore"):
1770
+ if len(self) != len(other):
1771
+ raise AssertionError(
1772
+ f"length mismatch: {len(self)} vs. {len(other)}"
1773
+ )
1774
+ if not isinstance(other, SparseArray):
1775
+ dtype = getattr(other, "dtype", None)
1776
+ other = SparseArray(other, fill_value=self.fill_value, dtype=dtype)
1777
+ return _sparse_array_op(self, other, op, op_name)
1778
+
1779
+ def _cmp_method(self, other, op) -> SparseArray:
1780
+ if not is_scalar(other) and not isinstance(other, type(self)):
1781
+ # convert list-like to ndarray
1782
+ other = np.asarray(other)
1783
+
1784
+ if isinstance(other, np.ndarray):
1785
+ # TODO: make this more flexible than just ndarray...
1786
+ other = SparseArray(other, fill_value=self.fill_value)
1787
+
1788
+ if isinstance(other, SparseArray):
1789
+ if len(self) != len(other):
1790
+ raise ValueError(
1791
+ f"operands have mismatched length {len(self)} and {len(other)}"
1792
+ )
1793
+
1794
+ op_name = op.__name__.strip("_")
1795
+ return _sparse_array_op(self, other, op, op_name)
1796
+ else:
1797
+ # scalar
1798
+ fill_value = op(self.fill_value, other)
1799
+ result = np.full(len(self), fill_value, dtype=np.bool_)
1800
+ result[self.sp_index.indices] = op(self.sp_values, other)
1801
+
1802
+ return type(self)(
1803
+ result,
1804
+ fill_value=fill_value,
1805
+ dtype=np.bool_,
1806
+ )
1807
+
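A scalar comparison illustrating the fill-value arithmetic in the branch above:

    arr = pd.arrays.SparseArray([0, 1, 2], fill_value=0)
    arr > 1
    # [False, False, True] as Sparse[bool] with fill_value = (0 > 1) = False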
1808
+ _logical_method = _cmp_method
1809
+
1810
+ def _unary_method(self, op) -> SparseArray:
1811
+ fill_value = op(np.array(self.fill_value)).item()
1812
+ dtype = SparseDtype(self.dtype.subtype, fill_value)
1813
+ # NOTE: if fill_value doesn't change
1814
+ # we just have to apply op to sp_values
1815
+ if isna(self.fill_value) or fill_value == self.fill_value:
1816
+ values = op(self.sp_values)
1817
+ return type(self)._simple_new(values, self.sp_index, self.dtype)
1818
+ # In the other case we have to recalc indexes
1819
+ return type(self)(op(self.to_dense()), dtype=dtype)
1820
+
1821
+ def __pos__(self) -> SparseArray:
1822
+ return self._unary_method(operator.pos)
1823
+
1824
+ def __neg__(self) -> SparseArray:
1825
+ return self._unary_method(operator.neg)
1826
+
1827
+ def __invert__(self) -> SparseArray:
1828
+ return self._unary_method(operator.invert)
1829
+
1830
+ def __abs__(self) -> SparseArray:
1831
+ return self._unary_method(operator.abs)
1832
+
1833
+ # -----------
1834
+ # Formatting
1835
+ # -----------
1836
+ def __repr__(self) -> str:
1837
+ pp_str = printing.pprint_thing(self)
1838
+ pp_fill = printing.pprint_thing(self.fill_value)
1839
+ pp_index = printing.pprint_thing(self.sp_index)
1840
+ return f"{pp_str}\nFill: {pp_fill}\n{pp_index}"
1841
+
1842
+ def _formatter(self, boxed: bool = False):
1843
+ # Defer to the formatter from the GenericArrayFormatter calling us.
1844
+ # This will infer the correct formatter from the dtype of the values.
1845
+ return None
1846
+
1847
+
1848
+ def _make_sparse(
1849
+ arr: np.ndarray,
1850
+ kind: SparseIndexKind = "block",
1851
+ fill_value=None,
1852
+ dtype: np.dtype | None = None,
1853
+ ):
1854
+ """
1855
+ Convert ndarray to sparse format
1856
+
1857
+ Parameters
1858
+ ----------
1859
+ arr : ndarray
1860
+ kind : {'block', 'integer'}
1861
+ fill_value : NaN or another value
1862
+ dtype : np.dtype, optional
1864
+
1865
+ Returns
1866
+ -------
1867
+ (sparse_values, index, fill_value) : (ndarray, SparseIndex, Scalar)
1868
+ """
1869
+ assert isinstance(arr, np.ndarray)
1870
+
1871
+ if arr.ndim > 1:
1872
+ raise TypeError("expected dimension <= 1 data")
1873
+
1874
+ if fill_value is None:
1875
+ fill_value = na_value_for_dtype(arr.dtype)
1876
+
1877
+ if isna(fill_value):
1878
+ mask = notna(arr)
1879
+ else:
1880
+ # cast to object so the comparison is safe
1881
+ if is_string_dtype(arr.dtype):
1882
+ arr = arr.astype(object)
1883
+
1884
+ if is_object_dtype(arr.dtype):
1885
+ # numpy's element-wise equality check does not distinguish
886
+ # element types, e.g. 0, 0.0, and False are treated as the
887
+ # same value, so we have to check both type and value.
1888
+ mask = splib.make_mask_object_ndarray(arr, fill_value)
1889
+ else:
1890
+ mask = arr != fill_value
1891
+
1892
+ length = len(arr)
1893
+ if length != len(mask):
1894
+ # the arr is a SparseArray
1895
+ indices = mask.sp_index.indices
1896
+ else:
1897
+ indices = mask.nonzero()[0].astype(np.int32)
1898
+
1899
+ index = make_sparse_index(length, indices, kind)
1900
+ sparsified_values = arr[mask]
1901
+ if dtype is not None:
1902
+ sparsified_values = ensure_wrapped_if_datetimelike(sparsified_values)
1903
+ sparsified_values = astype_array(sparsified_values, dtype=dtype)
1904
+ sparsified_values = np.asarray(sparsified_values)
1905
+
1906
+ # TODO: copy
1907
+ return sparsified_values, index, fill_value
1908
+
1909
+
1910
+ @overload
1911
+ def make_sparse_index(length: int, indices, kind: Literal["block"]) -> BlockIndex:
1912
+ ...
1913
+
1914
+
1915
+ @overload
1916
+ def make_sparse_index(length: int, indices, kind: Literal["integer"]) -> IntIndex:
1917
+ ...
1918
+
1919
+
1920
+ def make_sparse_index(length: int, indices, kind: SparseIndexKind) -> SparseIndex:
1921
+ index: SparseIndex
1922
+ if kind == "block":
1923
+ locs, lens = splib.get_blocks(indices)
1924
+ index = BlockIndex(length, locs, lens)
1925
+ elif kind == "integer":
1926
+ index = IntIndex(length, indices)
1927
+ else: # pragma: no cover
1928
+ raise ValueError("must be block or integer type")
1929
+ return index
venv/lib/python3.10/site-packages/pandas/core/arrays/sparse/scipy_sparse.py ADDED
@@ -0,0 +1,207 @@
1
+ """
2
+ Interaction with scipy.sparse matrices.
3
+
4
+ Currently only includes to_coo helpers.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ from typing import TYPE_CHECKING
9
+
10
+ from pandas._libs import lib
11
+
12
+ from pandas.core.dtypes.missing import notna
13
+
14
+ from pandas.core.algorithms import factorize
15
+ from pandas.core.indexes.api import MultiIndex
16
+ from pandas.core.series import Series
17
+
18
+ if TYPE_CHECKING:
19
+ from collections.abc import Iterable
20
+
21
+ import numpy as np
22
+ import scipy.sparse
23
+
24
+ from pandas._typing import (
25
+ IndexLabel,
26
+ npt,
27
+ )
28
+
29
+
30
+ def _check_is_partition(parts: Iterable, whole: Iterable):
31
+ whole = set(whole)
32
+ parts = [set(x) for x in parts]
33
+ if set.intersection(*parts) != set():
34
+ raise ValueError("Is not a partition because intersection is not null.")
35
+ if set.union(*parts) != whole:
36
+ raise ValueError("Is not a partition because union is not the whole.")
37
+
38
+
39
+ def _levels_to_axis(
40
+ ss,
41
+ levels: tuple[int] | list[int],
42
+ valid_ilocs: npt.NDArray[np.intp],
43
+ sort_labels: bool = False,
44
+ ) -> tuple[npt.NDArray[np.intp], list[IndexLabel]]:
45
+ """
46
+ For a MultiIndexed sparse Series `ss`, return `ax_coords` and `ax_labels`,
47
+ where `ax_coords` are the coordinates along one of the two axes of the
48
+ destination sparse matrix, and `ax_labels` are the labels from the Index of `ss`
49
+ which correspond to these coordinates.
50
+
51
+ Parameters
52
+ ----------
53
+ ss : Series
54
+ levels : tuple/list
55
+ valid_ilocs : numpy.ndarray
56
+ Array of integer positions of valid values for the sparse matrix in ss.
57
+ sort_labels : bool, default False
58
+ Sort the axis labels before forming the sparse matrix. When `levels`
59
+ refers to a single level, set to True for a faster execution.
60
+
61
+ Returns
62
+ -------
63
+ ax_coords : numpy.ndarray (axis coordinates)
64
+ ax_labels : list (axis labels)
65
+ """
66
+ # Since the labels are sorted in `Index.levels`, when we wish to sort and
67
+ # there is only one level of the MultiIndex for this axis, the desired
68
+ # output can be obtained in the following simpler, more efficient way.
69
+ if sort_labels and len(levels) == 1:
70
+ ax_coords = ss.index.codes[levels[0]][valid_ilocs]
71
+ ax_labels = ss.index.levels[levels[0]]
72
+
73
+ else:
74
+ levels_values = lib.fast_zip(
75
+ [ss.index.get_level_values(lvl).to_numpy() for lvl in levels]
76
+ )
77
+ codes, ax_labels = factorize(levels_values, sort=sort_labels)
78
+ ax_coords = codes[valid_ilocs]
79
+
80
+ ax_labels = ax_labels.tolist()
81
+ return ax_coords, ax_labels
82
+
83
+
84
+ def _to_ijv(
85
+ ss,
86
+ row_levels: tuple[int] | list[int] = (0,),
87
+ column_levels: tuple[int] | list[int] = (1,),
88
+ sort_labels: bool = False,
89
+ ) -> tuple[
90
+ np.ndarray,
91
+ npt.NDArray[np.intp],
92
+ npt.NDArray[np.intp],
93
+ list[IndexLabel],
94
+ list[IndexLabel],
95
+ ]:
96
+ """
97
+ For an arbitrary MultiIndexed sparse Series return (v, i, j, ilabels,
98
+ jlabels) where (v, (i, j)) is suitable for passing to scipy.sparse.coo
99
+ constructor, and ilabels and jlabels are the row and column labels
100
+ respectively.
101
+
102
+ Parameters
103
+ ----------
104
+ ss : Series
105
+ row_levels : tuple/list
106
+ column_levels : tuple/list
107
+ sort_labels : bool, default False
108
+ Sort the row and column labels before forming the sparse matrix.
109
+ When `row_levels` and/or `column_levels` refer to a single level,
110
+ set to `True` for a faster execution.
111
+
112
+ Returns
113
+ -------
114
+ values : numpy.ndarray
115
+ Valid values to populate a sparse matrix, extracted from
116
+ ss.
117
+ i_coords : numpy.ndarray (row coordinates of the values)
118
+ j_coords : numpy.ndarray (column coordinates of the values)
119
+ i_labels : list (row labels)
120
+ j_labels : list (column labels)
121
+ """
122
+ # index and column levels must be a partition of the index
123
+ _check_is_partition([row_levels, column_levels], range(ss.index.nlevels))
124
+ # From the sparse Series, get the integer indices and data for valid sparse
125
+ # entries.
126
+ sp_vals = ss.array.sp_values
127
+ na_mask = notna(sp_vals)
128
+ values = sp_vals[na_mask]
129
+ valid_ilocs = ss.array.sp_index.indices[na_mask]
130
+
131
+ i_coords, i_labels = _levels_to_axis(
132
+ ss, row_levels, valid_ilocs, sort_labels=sort_labels
133
+ )
134
+
135
+ j_coords, j_labels = _levels_to_axis(
136
+ ss, column_levels, valid_ilocs, sort_labels=sort_labels
137
+ )
138
+
139
+ return values, i_coords, j_coords, i_labels, j_labels
140
+
141
+
142
+ def sparse_series_to_coo(
143
+ ss: Series,
144
+ row_levels: Iterable[int] = (0,),
145
+ column_levels: Iterable[int] = (1,),
146
+ sort_labels: bool = False,
147
+ ) -> tuple[scipy.sparse.coo_matrix, list[IndexLabel], list[IndexLabel]]:
148
+ """
149
+ Convert a sparse Series to a scipy.sparse.coo_matrix using index
150
+ levels row_levels, column_levels as the row and column
151
+ labels respectively. Returns the sparse_matrix, row and column labels.
152
+ """
153
+ import scipy.sparse
154
+
155
+ if ss.index.nlevels < 2:
156
+ raise ValueError("to_coo requires MultiIndex with nlevels >= 2.")
157
+ if not ss.index.is_unique:
158
+ raise ValueError(
159
+ "Duplicate index entries are not allowed in to_coo transformation."
160
+ )
161
+
162
+ # to keep things simple, only rely on integer indexing (not labels)
163
+ row_levels = [ss.index._get_level_number(x) for x in row_levels]
164
+ column_levels = [ss.index._get_level_number(x) for x in column_levels]
165
+
166
+ v, i, j, rows, columns = _to_ijv(
167
+ ss, row_levels=row_levels, column_levels=column_levels, sort_labels=sort_labels
168
+ )
169
+ sparse_matrix = scipy.sparse.coo_matrix(
170
+ (v, (i, j)), shape=(len(rows), len(columns))
171
+ )
172
+ return sparse_matrix, rows, columns
173
+
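The public entry point for this helper is the `Series.sparse.to_coo` accessor; a small sketch:

    import pandas as pd

    s = pd.Series(
        [3.0, 1.0, 2.0],
        index=pd.MultiIndex.from_tuples([(0, 0), (1, 1), (2, 0)]),
        dtype="Sparse[float64]",
    )
    A, rows, cols = s.sparse.to_coo(row_levels=[0], column_levels=[1])
    # A: 3x2 scipy.sparse.coo_matrix; rows/cols: the level labels used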
174
+
175
+ def coo_to_sparse_series(
176
+ A: scipy.sparse.coo_matrix, dense_index: bool = False
177
+ ) -> Series:
178
+ """
179
+ Convert a scipy.sparse.coo_matrix to a Series with type sparse.
180
+
181
+ Parameters
182
+ ----------
183
+ A : scipy.sparse.coo_matrix
184
+ dense_index : bool, default False
185
+
186
+ Returns
187
+ -------
188
+ Series
189
+
190
+ Raises
191
+ ------
192
+ TypeError if A is not a coo_matrix
193
+ """
194
+ from pandas import SparseDtype
195
+
196
+ try:
197
+ ser = Series(A.data, MultiIndex.from_arrays((A.row, A.col)), copy=False)
198
+ except AttributeError as err:
199
+ raise TypeError(
200
+ f"Expected coo_matrix. Got {type(A).__name__} instead."
201
+ ) from err
202
+ ser = ser.sort_index()
203
+ ser = ser.astype(SparseDtype(ser.dtype))
204
+ if dense_index:
205
+ ind = MultiIndex.from_product([A.row, A.col])
206
+ ser = ser.reindex(ind)
207
+ return ser
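The inverse direction is exposed as `pd.Series.sparse.from_coo`, which wraps this helper:

    import scipy.sparse

    A = scipy.sparse.coo_matrix(([3.0, 2.0], ([0, 1], [1, 0])), shape=(2, 2))
    ser = pd.Series.sparse.from_coo(A)
    # sparse Series indexed by a (row, col) MultiIndex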
venv/lib/python3.10/site-packages/pandas/core/indexes/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/base.cpython-310.pyc ADDED
Binary file (196 kB). View file