applied-ai-018 committed on
Commit e069948 · verified · 1 Parent(s): 43a2cb6

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_138_mp_rank_02_optim_states.pt +3 -0
  2. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_186_mp_rank_00_optim_states.pt +3 -0
  3. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_18_mp_rank_02_optim_states.pt +3 -0
  4. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_253_mp_rank_03_optim_states.pt +3 -0
  5. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_4_mp_rank_02_optim_states.pt +3 -0
  6. ckpts/llama-3b/global_step100/bf16_zero_pp_rank_96_mp_rank_03_optim_states.pt +3 -0
  7. ckpts/llama-3b/global_step100/layer_08-model_00-model_states.pt +3 -0
  8. venv/lib/python3.10/site-packages/pandas/core/array_algos/__init__.py +9 -0
  9. venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/__init__.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/datetimelike_accumulations.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_accumulations.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_reductions.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/putmask.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/quantile.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/replace.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/take.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/transforms.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/pandas/core/array_algos/datetimelike_accumulations.py +67 -0
  19. venv/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py +90 -0
  20. venv/lib/python3.10/site-packages/pandas/core/array_algos/masked_reductions.py +197 -0
  21. venv/lib/python3.10/site-packages/pandas/core/array_algos/putmask.py +149 -0
  22. venv/lib/python3.10/site-packages/pandas/core/array_algos/quantile.py +226 -0
  23. venv/lib/python3.10/site-packages/pandas/core/array_algos/replace.py +152 -0
  24. venv/lib/python3.10/site-packages/pandas/core/array_algos/take.py +594 -0
  25. venv/lib/python3.10/site-packages/pandas/core/array_algos/transforms.py +50 -0
  26. venv/lib/python3.10/site-packages/pandas/core/dtypes/cast.py +1973 -0
  27. venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/__init__.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/accessors.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/api.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/multi.cpython-310.pyc +0 -0
  31. venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc +0 -0
  32. venv/lib/python3.10/site-packages/pandas/core/internals/__init__.py +85 -0
  33. venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/__init__.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/api.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/base.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/construction.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/managers.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/ops.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/pandas/core/internals/api.py +156 -0
  40. venv/lib/python3.10/site-packages/pandas/core/internals/array_manager.py +1340 -0
  41. venv/lib/python3.10/site-packages/pandas/core/internals/base.py +407 -0
  42. venv/lib/python3.10/site-packages/pandas/core/internals/blocks.py +0 -0
  43. venv/lib/python3.10/site-packages/pandas/core/internals/concat.py +598 -0
  44. venv/lib/python3.10/site-packages/pandas/core/internals/construction.py +1072 -0
  45. venv/lib/python3.10/site-packages/pandas/core/internals/managers.py +2375 -0
  46. venv/lib/python3.10/site-packages/pandas/core/internals/ops.py +154 -0
  47. venv/lib/python3.10/site-packages/pandas/core/ops/__init__.py +93 -0
  48. venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/__init__.cpython-310.pyc +0 -0
  49. venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc +0 -0
  50. venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc +0 -0
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_138_mp_rank_02_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ff76c17d334de5c86343d0dffcfc231327ebb290f99caffa78d01c910a492481
+size 41830340
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_186_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:516bdd279e1eaf12b086ae0250fe787a6d24252660058e79839c4e9d454dafc3
+size 41830148
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_18_mp_rank_02_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60991694a610facdca259d6d3a14425b34cf10b109817e6b9ed21cdfc3c888aa
+size 41830394
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_253_mp_rank_03_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1136be4dc3a40f81e8ed1a5bce0c10118014b655260171b65f3667e6e92354b9
+size 41830340
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_4_mp_rank_02_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf8df5af07690b36f38efee63b0276fe30f320bb483b8dc2f73871868cf62bf2
+size 41830320
ckpts/llama-3b/global_step100/bf16_zero_pp_rank_96_mp_rank_03_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:930c0e049e732d909198b8bbd4950953337f97c72218ed4632cfa51ebb05c094
+size 41830330
ckpts/llama-3b/global_step100/layer_08-model_00-model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e448f50f7295acee88d602a64b8a4d486909d1fda052f4645476058f5021de57
+size 116407086
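
Each checkpoint diff above adds only a three-line Git LFS pointer (spec version, SHA-256 oid, byte size); the multi-gigabyte optimizer and model states themselves live in LFS storage. A minimal sketch, not part of the commit, of verifying a fetched blob against its pointer, using the oid and size recorded in the first diff (the local path is hypothetical):

import hashlib
import os

path = "bf16_zero_pp_rank_138_mp_rank_02_optim_states.pt"  # hypothetical local copy

# The pointer pins both the content hash and the exact byte count.
digest = hashlib.sha256(open(path, "rb").read()).hexdigest()
assert digest == "ff76c17d334de5c86343d0dffcfc231327ebb290f99caffa78d01c910a492481"
assert os.path.getsize(path) == 41830340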
venv/lib/python3.10/site-packages/pandas/core/array_algos/__init__.py ADDED
@@ -0,0 +1,9 @@
+"""
+core.array_algos is for algorithms that operate on ndarray and ExtensionArray.
+These should:
+
+- Assume that any Index, Series, or DataFrame objects have already been unwrapped.
+- Assume that any list arguments have already been cast to ndarray/EA.
+- Not depend on Index, Series, or DataFrame, nor import any of these.
+- May dispatch to ExtensionArray methods, but should not import from core.arrays.
+"""
venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (607 Bytes).
venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/datetimelike_accumulations.cpython-310.pyc ADDED
Binary file (1.99 kB).
venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_accumulations.cpython-310.pyc ADDED
Binary file (2.36 kB).
venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/masked_reductions.cpython-310.pyc ADDED
Binary file (3.97 kB).
venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/putmask.cpython-310.pyc ADDED
Binary file (3.44 kB).
venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/quantile.cpython-310.pyc ADDED
Binary file (4.62 kB).
venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/replace.cpython-310.pyc ADDED
Binary file (3.83 kB).
venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/take.cpython-310.pyc ADDED
Binary file (13.4 kB).
venv/lib/python3.10/site-packages/pandas/core/array_algos/__pycache__/transforms.cpython-310.pyc ADDED
Binary file (1.01 kB).
venv/lib/python3.10/site-packages/pandas/core/array_algos/datetimelike_accumulations.py ADDED
@@ -0,0 +1,67 @@
+"""
+datetimelike_accumulations.py is for accumulations of datetimelike extension arrays
+"""
+
+from __future__ import annotations
+
+from typing import Callable
+
+import numpy as np
+
+from pandas._libs import iNaT
+
+from pandas.core.dtypes.missing import isna
+
+
+def _cum_func(
+    func: Callable,
+    values: np.ndarray,
+    *,
+    skipna: bool = True,
+):
+    """
+    Accumulations for 1D datetimelike arrays.
+
+    Parameters
+    ----------
+    func : np.cumsum, np.maximum.accumulate, np.minimum.accumulate
+    values : np.ndarray
+        Numpy array with the values (can be of any dtype that supports the
+        operation). Values is modified in place.
+    skipna : bool, default True
+        Whether to skip NA.
+    """
+    try:
+        fill_value = {
+            np.maximum.accumulate: np.iinfo(np.int64).min,
+            np.cumsum: 0,
+            np.minimum.accumulate: np.iinfo(np.int64).max,
+        }[func]
+    except KeyError:
+        raise ValueError(f"No accumulation for {func} implemented on BaseMaskedArray")
+
+    mask = isna(values)
+    y = values.view("i8")
+    y[mask] = fill_value
+
+    if not skipna:
+        mask = np.maximum.accumulate(mask)
+
+    result = func(y)
+    result[mask] = iNaT
+
+    if values.dtype.kind in "mM":
+        return result.view(values.dtype.base)
+    return result
+
+
+def cumsum(values: np.ndarray, *, skipna: bool = True) -> np.ndarray:
+    return _cum_func(np.cumsum, values, skipna=skipna)
+
+
+def cummin(values: np.ndarray, *, skipna: bool = True):
+    return _cum_func(np.minimum.accumulate, values, skipna=skipna)
+
+
+def cummax(values: np.ndarray, *, skipna: bool = True):
+    return _cum_func(np.maximum.accumulate, values, skipna=skipna)
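
To make the contract of the module above concrete (NA positions are filled, accumulated over, then restored as iNaT), here is a minimal sketch, not part of the commit; the import path is only valid against the vendored pandas internals added here:

import numpy as np
from pandas.core.array_algos.datetimelike_accumulations import cummax

values = np.array(["2021-01-02", "NaT", "2021-01-01"], dtype="M8[ns]")

# skipna=True: the NaT slot stays NaT in the output while the running
# maximum continues past it. The helper writes through an i8 view of its
# argument, so pass a copy if the original must survive.
out = cummax(values.copy(), skipna=True)
# out -> ['2021-01-02', 'NaT', '2021-01-02'] as datetime64[ns]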
venv/lib/python3.10/site-packages/pandas/core/array_algos/masked_accumulations.py ADDED
@@ -0,0 +1,90 @@
+"""
+masked_accumulations.py is for accumulation algorithms using a mask-based approach
+for missing values.
+"""
+
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+)
+
+import numpy as np
+
+if TYPE_CHECKING:
+    from pandas._typing import npt
+
+
+def _cum_func(
+    func: Callable,
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+):
+    """
+    Accumulations for 1D masked array.
+
+    We will modify values in place to replace NAs with the appropriate fill value.
+
+    Parameters
+    ----------
+    func : np.cumsum, np.cumprod, np.maximum.accumulate, np.minimum.accumulate
+    values : np.ndarray
+        Numpy array with the values (can be of any dtype that supports the
+        operation).
+    mask : np.ndarray
+        Boolean numpy array (True values indicate missing values).
+    skipna : bool, default True
+        Whether to skip NA.
+    """
+    dtype_info: np.iinfo | np.finfo
+    if values.dtype.kind == "f":
+        dtype_info = np.finfo(values.dtype.type)
+    elif values.dtype.kind in "iu":
+        dtype_info = np.iinfo(values.dtype.type)
+    elif values.dtype.kind == "b":
+        # Max value of bool is 1, but since we are setting into a boolean
+        # array, 255 is fine as well. Min value has to be 0 when setting
+        # into the boolean array.
+        dtype_info = np.iinfo(np.uint8)
+    else:
+        raise NotImplementedError(
+            f"No masked accumulation defined for dtype {values.dtype.type}"
+        )
+    try:
+        fill_value = {
+            np.cumprod: 1,
+            np.maximum.accumulate: dtype_info.min,
+            np.cumsum: 0,
+            np.minimum.accumulate: dtype_info.max,
+        }[func]
+    except KeyError:
+        raise NotImplementedError(
+            f"No accumulation for {func} implemented on BaseMaskedArray"
+        )
+
+    values[mask] = fill_value
+
+    if not skipna:
+        mask = np.maximum.accumulate(mask)
+
+    values = func(values)
+    return values, mask
+
+
+def cumsum(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
+    return _cum_func(np.cumsum, values, mask, skipna=skipna)
+
+
+def cumprod(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
+    return _cum_func(np.cumprod, values, mask, skipna=skipna)
+
+
+def cummin(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
+    return _cum_func(np.minimum.accumulate, values, mask, skipna=skipna)
+
+
+def cummax(values: np.ndarray, mask: npt.NDArray[np.bool_], *, skipna: bool = True):
+    return _cum_func(np.maximum.accumulate, values, mask, skipna=skipna)
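
A minimal usage sketch for the masked variant above (not part of the commit): the mask, not a sentinel, marks the missing slot, and the function returns the accumulated values together with the (possibly propagated) mask.

import numpy as np
from pandas.core.array_algos.masked_accumulations import cumsum

vals = np.array([1, 2, 3], dtype=np.int64)
mask = np.array([False, True, False])

# Missing slots are filled in place with the op's identity (0 for cumsum)
# before accumulating, so vals itself ends up as [1, 0, 3]; with
# skipna=True the mask comes back unchanged.
out, out_mask = cumsum(vals, mask, skipna=True)
# out -> [1, 1, 4]; out_mask -> [False, True, False]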
venv/lib/python3.10/site-packages/pandas/core/array_algos/masked_reductions.py ADDED
@@ -0,0 +1,197 @@
+"""
+masked_reductions.py is for reduction algorithms using a mask-based approach
+for missing values.
+"""
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+)
+import warnings
+
+import numpy as np
+
+from pandas._libs import missing as libmissing
+
+from pandas.core.nanops import check_below_min_count
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        AxisInt,
+        npt,
+    )
+
+
+def _reductions(
+    func: Callable,
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    min_count: int = 0,
+    axis: AxisInt | None = None,
+    **kwargs,
+):
+    """
+    Sum, mean or product for 1D masked array.
+
+    Parameters
+    ----------
+    func : np.sum or np.prod
+    values : np.ndarray
+        Numpy array with the values (can be of any dtype that supports the
+        operation).
+    mask : np.ndarray[bool]
+        Boolean numpy array (True values indicate missing values).
+    skipna : bool, default True
+        Whether to skip NA.
+    min_count : int, default 0
+        The required number of valid values to perform the operation. If fewer than
+        ``min_count`` non-NA values are present the result will be NA.
+    axis : int, optional, default None
+    """
+    if not skipna:
+        if mask.any() or check_below_min_count(values.shape, None, min_count):
+            return libmissing.NA
+        else:
+            return func(values, axis=axis, **kwargs)
+    else:
+        if check_below_min_count(values.shape, mask, min_count) and (
+            axis is None or values.ndim == 1
+        ):
+            return libmissing.NA
+
+        return func(values, where=~mask, axis=axis, **kwargs)
+
+
+def sum(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    min_count: int = 0,
+    axis: AxisInt | None = None,
+):
+    return _reductions(
+        np.sum, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis
+    )
+
+
+def prod(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    min_count: int = 0,
+    axis: AxisInt | None = None,
+):
+    return _reductions(
+        np.prod, values=values, mask=mask, skipna=skipna, min_count=min_count, axis=axis
+    )
+
+
+def _minmax(
+    func: Callable,
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    axis: AxisInt | None = None,
+):
+    """
+    Reduction for 1D masked array.
+
+    Parameters
+    ----------
+    func : np.min or np.max
+    values : np.ndarray
+        Numpy array with the values (can be of any dtype that supports the
+        operation).
+    mask : np.ndarray[bool]
+        Boolean numpy array (True values indicate missing values).
+    skipna : bool, default True
+        Whether to skip NA.
+    axis : int, optional, default None
+    """
+    if not skipna:
+        if mask.any() or not values.size:
+            # min/max with empty array raise in numpy, pandas returns NA
+            return libmissing.NA
+        else:
+            return func(values, axis=axis)
+    else:
+        subset = values[~mask]
+        if subset.size:
+            return func(subset, axis=axis)
+        else:
+            # min/max with empty array raise in numpy, pandas returns NA
+            return libmissing.NA
+
+
+def min(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    axis: AxisInt | None = None,
+):
+    return _minmax(np.min, values=values, mask=mask, skipna=skipna, axis=axis)
+
+
+def max(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    axis: AxisInt | None = None,
+):
+    return _minmax(np.max, values=values, mask=mask, skipna=skipna, axis=axis)
+
+
+def mean(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    axis: AxisInt | None = None,
+):
+    if not values.size or mask.all():
+        return libmissing.NA
+    return _reductions(np.mean, values=values, mask=mask, skipna=skipna, axis=axis)
+
+
+def var(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    axis: AxisInt | None = None,
+    ddof: int = 1,
+):
+    if not values.size or mask.all():
+        return libmissing.NA
+
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore", RuntimeWarning)
+        return _reductions(
+            np.var, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof
+        )
+
+
+def std(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    *,
+    skipna: bool = True,
+    axis: AxisInt | None = None,
+    ddof: int = 1,
+):
+    if not values.size or mask.all():
+        return libmissing.NA
+
+    with warnings.catch_warnings():
+        warnings.simplefilter("ignore", RuntimeWarning)
+        return _reductions(
+            np.std, values=values, mask=mask, skipna=skipna, axis=axis, ddof=ddof
+        )
venv/lib/python3.10/site-packages/pandas/core/array_algos/putmask.py ADDED
@@ -0,0 +1,149 @@
+"""
+EA-compatible analogue to np.putmask
+"""
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    Any,
+)
+
+import numpy as np
+
+from pandas._libs import lib
+
+from pandas.core.dtypes.cast import infer_dtype_from
+from pandas.core.dtypes.common import is_list_like
+
+from pandas.core.arrays import ExtensionArray
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        ArrayLike,
+        npt,
+    )
+
+    from pandas import MultiIndex
+
+
+def putmask_inplace(values: ArrayLike, mask: npt.NDArray[np.bool_], value: Any) -> None:
+    """
+    ExtensionArray-compatible implementation of np.putmask. The main
+    difference is we do not handle repeating or truncating like numpy.
+
+    Parameters
+    ----------
+    values: np.ndarray or ExtensionArray
+    mask : np.ndarray[bool]
+        We assume extract_bool_array has already been called.
+    value : Any
+    """
+
+    if (
+        not isinstance(values, np.ndarray)
+        or (values.dtype == object and not lib.is_scalar(value))
+        # GH#43424: np.putmask raises TypeError if we cannot cast between types with
+        # rule = "safe", a stricter guarantee we may not have here
+        or (
+            isinstance(value, np.ndarray) and not np.can_cast(value.dtype, values.dtype)
+        )
+    ):
+        # GH#19266 using np.putmask gives unexpected results with listlike value
+        # along with object dtype
+        if is_list_like(value) and len(value) == len(values):
+            values[mask] = value[mask]
+        else:
+            values[mask] = value
+    else:
+        # GH#37833 np.putmask is more performant than __setitem__
+        np.putmask(values, mask, value)
+
+
+def putmask_without_repeat(
+    values: np.ndarray, mask: npt.NDArray[np.bool_], new: Any
+) -> None:
+    """
+    np.putmask will truncate or repeat if `new` is a listlike with
+    len(new) != len(values). We require an exact match.
+
+    Parameters
+    ----------
+    values : np.ndarray
+    mask : np.ndarray[bool]
+    new : Any
+    """
+    if getattr(new, "ndim", 0) >= 1:
+        new = new.astype(values.dtype, copy=False)
+
+    # TODO: this prob needs some better checking for 2D cases
+    nlocs = mask.sum()
+    if nlocs > 0 and is_list_like(new) and getattr(new, "ndim", 1) == 1:
+        shape = np.shape(new)
+        # np.shape compat for if setitem_datetimelike_compat
+        # changed arraylike to list e.g. test_where_dt64_2d
+        if nlocs == shape[-1]:
+            # GH#30567
+            # If the length of ``new`` is less than the length of ``values``,
+            # `np.putmask` would first repeat the ``new`` array and then
+            # assign the masked values, hence producing an incorrect result.
+            # `np.place` on the other hand uses the ``new`` values as-is
+            # to place in the masked locations of ``values``
+            np.place(values, mask, new)
+            # i.e. values[mask] = new
+        elif mask.shape[-1] == shape[-1] or shape[-1] == 1:
+            np.putmask(values, mask, new)
+        else:
+            raise ValueError("cannot assign mismatch length to masked array")
+    else:
+        np.putmask(values, mask, new)
+
+
+def validate_putmask(
+    values: ArrayLike | MultiIndex, mask: np.ndarray
+) -> tuple[npt.NDArray[np.bool_], bool]:
+    """
+    Validate mask and check if this putmask operation is a no-op.
+    """
+    mask = extract_bool_array(mask)
+    if mask.shape != values.shape:
+        raise ValueError("putmask: mask and data must be the same size")
+
+    noop = not mask.any()
+    return mask, noop
+
+
+def extract_bool_array(mask: ArrayLike) -> npt.NDArray[np.bool_]:
+    """
+    If we have a SparseArray or BooleanArray, convert it to ndarray[bool].
+    """
+    if isinstance(mask, ExtensionArray):
+        # We could have BooleanArray, Sparse[bool], ...
+        # Except for BooleanArray, this is equivalent to just
+        # np.asarray(mask, dtype=bool)
+        mask = mask.to_numpy(dtype=bool, na_value=False)
+
+    mask = np.asarray(mask, dtype=bool)
+    return mask
+
+
+def setitem_datetimelike_compat(values: np.ndarray, num_set: int, other):
+    """
+    Parameters
+    ----------
+    values : np.ndarray
+    num_set : int
+        For putmask, this is mask.sum()
+    other : Any
+    """
+    if values.dtype == object:
+        dtype, _ = infer_dtype_from(other)
+
+        if lib.is_np_dtype(dtype, "mM"):
+            # https://github.com/numpy/numpy/issues/12550
+            # timedelta64 will incorrectly cast to int
+            if not is_list_like(other):
+                other = [other] * num_set
+            else:
+                other = list(other)
+
+    return other
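
To illustrate the exact-length contract that `putmask_without_repeat` enforces, a minimal sketch (not part of the commit):

import numpy as np
from pandas.core.array_algos.putmask import putmask_without_repeat, validate_putmask

values = np.array([1, 2, 3, 4], dtype=np.int64)
mask, noop = validate_putmask(values, np.array([False, True, False, True]))

# len(new) equals mask.sum(), so np.place assigns positionally with no
# repeating or truncating; a 3-element `new` would raise ValueError here.
putmask_without_repeat(values, mask, np.array([20, 40]))
# values -> [1, 20, 3, 40]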
venv/lib/python3.10/site-packages/pandas/core/array_algos/quantile.py ADDED
@@ -0,0 +1,226 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+import numpy as np
+
+from pandas.core.dtypes.missing import (
+    isna,
+    na_value_for_dtype,
+)
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        ArrayLike,
+        Scalar,
+        npt,
+    )
+
+
+def quantile_compat(
+    values: ArrayLike, qs: npt.NDArray[np.float64], interpolation: str
+) -> ArrayLike:
+    """
+    Compute the quantiles of the given values for each quantile in `qs`.
+
+    Parameters
+    ----------
+    values : np.ndarray or ExtensionArray
+    qs : np.ndarray[float64]
+    interpolation : str
+
+    Returns
+    -------
+    np.ndarray or ExtensionArray
+    """
+    if isinstance(values, np.ndarray):
+        fill_value = na_value_for_dtype(values.dtype, compat=False)
+        mask = isna(values)
+        return quantile_with_mask(values, mask, fill_value, qs, interpolation)
+    else:
+        return values._quantile(qs, interpolation)
+
+
+def quantile_with_mask(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    fill_value,
+    qs: npt.NDArray[np.float64],
+    interpolation: str,
+) -> np.ndarray:
+    """
+    Compute the quantiles of the given values for each quantile in `qs`.
+
+    Parameters
+    ----------
+    values : np.ndarray
+        For ExtensionArray, this is _values_for_factorize()[0]
+    mask : np.ndarray[bool]
+        mask = isna(values)
+        For ExtensionArray, this is computed before calling _values_for_factorize
+    fill_value : Scalar
+        The value to fill NA entries with
+        For ExtensionArray, this is _values_for_factorize()[1]
+    qs : np.ndarray[float64]
+    interpolation : str
+        Type of interpolation
+
+    Returns
+    -------
+    np.ndarray
+
+    Notes
+    -----
+    Assumes values is already 2D. For ExtensionArray this means np.atleast_2d
+    has been called on _values_for_factorize()[0]
+
+    Quantile is computed along axis=1.
+    """
+    assert values.shape == mask.shape
+    if values.ndim == 1:
+        # unsqueeze, operate, re-squeeze
+        values = np.atleast_2d(values)
+        mask = np.atleast_2d(mask)
+        res_values = quantile_with_mask(values, mask, fill_value, qs, interpolation)
+        return res_values[0]
+
+    assert values.ndim == 2
+
+    is_empty = values.shape[1] == 0
+
+    if is_empty:
+        # create the array of na_values
+        # 2d len(values) * len(qs)
+        flat = np.array([fill_value] * len(qs))
+        result = np.repeat(flat, len(values)).reshape(len(values), len(qs))
+    else:
+        result = _nanpercentile(
+            values,
+            qs * 100.0,
+            na_value=fill_value,
+            mask=mask,
+            interpolation=interpolation,
+        )
+
+        result = np.asarray(result)
+        result = result.T
+
+    return result
+
+
+def _nanpercentile_1d(
+    values: np.ndarray,
+    mask: npt.NDArray[np.bool_],
+    qs: npt.NDArray[np.float64],
+    na_value: Scalar,
+    interpolation: str,
+) -> Scalar | np.ndarray:
+    """
+    Wrapper for np.percentile that skips missing values, specialized to
+    1-dimensional case.
+
+    Parameters
+    ----------
+    values : array over which to find quantiles
+    mask : ndarray[bool]
+        locations in values that should be considered missing
+    qs : np.ndarray[float64] of quantile indices to find
+    na_value : scalar
+        value to return for empty or all-null values
+    interpolation : str
+
+    Returns
+    -------
+    quantiles : scalar or array
+    """
+    # mask is Union[ExtensionArray, ndarray]
+    values = values[~mask]
+
+    if len(values) == 0:
+        # Can't pass dtype=values.dtype here bc we might have na_value=np.nan
+        # with values.dtype=int64 see test_quantile_empty
+        # equiv: 'np.array([na_value] * len(qs))' but much faster
+        return np.full(len(qs), na_value)
+
+    return np.percentile(
+        values,
+        qs,
+        # error: No overload variant of "percentile" matches argument
+        # types "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]"
+        # , "Dict[str, str]" [call-overload]
+        method=interpolation,  # type: ignore[call-overload]
+    )
+
+
+def _nanpercentile(
+    values: np.ndarray,
+    qs: npt.NDArray[np.float64],
+    *,
+    na_value,
+    mask: npt.NDArray[np.bool_],
+    interpolation: str,
+):
+    """
+    Wrapper for np.percentile that skips missing values.
+
+    Parameters
+    ----------
+    values : np.ndarray[ndim=2] over which to find quantiles
+    qs : np.ndarray[float64] of quantile indices to find
+    na_value : scalar
+        value to return for empty or all-null values
+    mask : np.ndarray[bool]
+        locations in values that should be considered missing
+    interpolation : str
+
+    Returns
+    -------
+    quantiles : scalar or array
+    """
+
+    if values.dtype.kind in "mM":
+        # need to cast to integer to avoid rounding errors in numpy
+        result = _nanpercentile(
+            values.view("i8"),
+            qs=qs,
+            na_value=na_value.view("i8"),
+            mask=mask,
+            interpolation=interpolation,
+        )
+
+        # Note: we have to do `astype` and not view because in general we
+        # have float result at this point, not i8
+        return result.astype(values.dtype)
+
+    if mask.any():
+        # Caller is responsible for ensuring mask shape match
+        assert mask.shape == values.shape
+        result = [
+            _nanpercentile_1d(val, m, qs, na_value, interpolation=interpolation)
+            for (val, m) in zip(list(values), list(mask))
+        ]
+        if values.dtype.kind == "f":
+            # preserve itemsize
+            result = np.asarray(result, dtype=values.dtype).T
+        else:
+            result = np.asarray(result).T
+            if (
+                result.dtype != values.dtype
+                and not mask.all()
+                and (result == result.astype(values.dtype, copy=False)).all()
+            ):
+                # mask.all() will never get cast back to int
+                # e.g. values is integer dtype and result is floating dtype,
+                # only cast back to integer dtype if result values are all-integer.
+                result = result.astype(values.dtype, copy=False)
+        return result
+    else:
+        return np.percentile(
+            values,
+            qs,
+            axis=1,
+            # error: No overload variant of "percentile" matches argument types
+            # "ndarray[Any, Any]", "ndarray[Any, dtype[floating[_64Bit]]]",
+            # "int", "Dict[str, str]" [call-overload]
+            method=interpolation,  # type: ignore[call-overload]
+        )
venv/lib/python3.10/site-packages/pandas/core/array_algos/replace.py ADDED
@@ -0,0 +1,152 @@
+"""
+Methods used by Block.replace and related methods.
+"""
+from __future__ import annotations
+
+import operator
+import re
+from re import Pattern
+from typing import (
+    TYPE_CHECKING,
+    Any,
+)
+
+import numpy as np
+
+from pandas.core.dtypes.common import (
+    is_bool,
+    is_re,
+    is_re_compilable,
+)
+from pandas.core.dtypes.missing import isna
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        ArrayLike,
+        Scalar,
+        npt,
+    )
+
+
+def should_use_regex(regex: bool, to_replace: Any) -> bool:
+    """
+    Decide whether to treat `to_replace` as a regular expression.
+    """
+    if is_re(to_replace):
+        regex = True
+
+    regex = regex and is_re_compilable(to_replace)
+
+    # Don't use regex if the pattern is empty.
+    regex = regex and re.compile(to_replace).pattern != ""
+    return regex
+
+
+def compare_or_regex_search(
+    a: ArrayLike, b: Scalar | Pattern, regex: bool, mask: npt.NDArray[np.bool_]
+) -> ArrayLike:
+    """
+    Compare two array-like inputs of the same shape or two scalar values
+
+    Calls operator.eq or re.search, depending on regex argument. If regex is
+    True, perform an element-wise regex matching.
+
+    Parameters
+    ----------
+    a : array-like
+    b : scalar or regex pattern
+    regex : bool
+    mask : np.ndarray[bool]
+
+    Returns
+    -------
+    mask : array-like of bool
+    """
+    if isna(b):
+        return ~mask
+
+    def _check_comparison_types(
+        result: ArrayLike | bool, a: ArrayLike, b: Scalar | Pattern
+    ):
+        """
+        Raises an error if the two arrays (a,b) cannot be compared.
+        Otherwise, returns the comparison result as expected.
+        """
+        if is_bool(result) and isinstance(a, np.ndarray):
+            type_names = [type(a).__name__, type(b).__name__]
+
+            type_names[0] = f"ndarray(dtype={a.dtype})"
+
+            raise TypeError(
+                f"Cannot compare types {repr(type_names[0])} and {repr(type_names[1])}"
+            )
+
+    if not regex or not should_use_regex(regex, b):
+        # TODO: should use missing.mask_missing?
+        op = lambda x: operator.eq(x, b)
+    else:
+        op = np.vectorize(
+            lambda x: bool(re.search(b, x))
+            if isinstance(x, str) and isinstance(b, (str, Pattern))
+            else False
+        )
+
+    # GH#32621 use mask to avoid comparing to NAs
+    if isinstance(a, np.ndarray):
+        a = a[mask]
+
+    result = op(a)
+
+    if isinstance(result, np.ndarray) and mask is not None:
+        # The shape of the mask can differ from that of the result
+        # since we may compare only a subset of a's or b's elements
+        tmp = np.zeros(mask.shape, dtype=np.bool_)
+        np.place(tmp, mask, result)
+        result = tmp
+
+    _check_comparison_types(result, a, b)
+    return result
+
+
+def replace_regex(
+    values: ArrayLike, rx: re.Pattern, value, mask: npt.NDArray[np.bool_] | None
+) -> None:
+    """
+    Parameters
+    ----------
+    values : ArrayLike
+        Object dtype.
+    rx : re.Pattern
+    value : Any
+    mask : np.ndarray[bool], optional
+
+    Notes
+    -----
+    Alters values in-place.
+    """
+
+    # deal with replacing values with objects (strings) that match but
+    # whose replacement is not a string (numeric, nan, object)
+    if isna(value) or not isinstance(value, str):
+
+        def re_replacer(s):
+            if is_re(rx) and isinstance(s, str):
+                return value if rx.search(s) is not None else s
+            else:
+                return s
+
+    else:
+        # value is guaranteed to be a string here; s can be either a string
+        # or null -- if it's null it gets returned
+        def re_replacer(s):
+            if is_re(rx) and isinstance(s, str):
+                return rx.sub(value, s)
+            else:
+                return s
+
+    f = np.vectorize(re_replacer, otypes=[np.object_])
+
+    if mask is None:
+        values[:] = f(values)
+    else:
+        values[mask] = f(values[mask])
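
A minimal sketch of `replace_regex` on an object array (not part of the commit). Non-string entries pass through untouched; matches are substituted in place:

import re

import numpy as np

from pandas.core.array_algos.replace import replace_regex

vals = np.array(["foo", "bar", None], dtype=object)
replace_regex(vals, re.compile(r"^ba"), "qux-", mask=None)
# vals -> ['foo', 'qux-r', None]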
venv/lib/python3.10/site-packages/pandas/core/array_algos/take.py ADDED
@@ -0,0 +1,594 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import functools
4
+ from typing import (
5
+ TYPE_CHECKING,
6
+ cast,
7
+ overload,
8
+ )
9
+
10
+ import numpy as np
11
+
12
+ from pandas._libs import (
13
+ algos as libalgos,
14
+ lib,
15
+ )
16
+
17
+ from pandas.core.dtypes.cast import maybe_promote
18
+ from pandas.core.dtypes.common import (
19
+ ensure_platform_int,
20
+ is_1d_only_ea_dtype,
21
+ )
22
+ from pandas.core.dtypes.missing import na_value_for_dtype
23
+
24
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
25
+
26
+ if TYPE_CHECKING:
27
+ from pandas._typing import (
28
+ ArrayLike,
29
+ AxisInt,
30
+ npt,
31
+ )
32
+
33
+ from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
34
+ from pandas.core.arrays.base import ExtensionArray
35
+
36
+
37
+ @overload
38
+ def take_nd(
39
+ arr: np.ndarray,
40
+ indexer,
41
+ axis: AxisInt = ...,
42
+ fill_value=...,
43
+ allow_fill: bool = ...,
44
+ ) -> np.ndarray:
45
+ ...
46
+
47
+
48
+ @overload
49
+ def take_nd(
50
+ arr: ExtensionArray,
51
+ indexer,
52
+ axis: AxisInt = ...,
53
+ fill_value=...,
54
+ allow_fill: bool = ...,
55
+ ) -> ArrayLike:
56
+ ...
57
+
58
+
59
+ def take_nd(
60
+ arr: ArrayLike,
61
+ indexer,
62
+ axis: AxisInt = 0,
63
+ fill_value=lib.no_default,
64
+ allow_fill: bool = True,
65
+ ) -> ArrayLike:
66
+ """
67
+ Specialized Cython take which sets NaN values in one pass
68
+
69
+ This dispatches to ``take`` defined on ExtensionArrays.
70
+
71
+ Note: this function assumes that the indexer is a valid(ated) indexer with
72
+ no out of bound indices.
73
+
74
+ Parameters
75
+ ----------
76
+ arr : np.ndarray or ExtensionArray
77
+ Input array.
78
+ indexer : ndarray
79
+ 1-D array of indices to take, subarrays corresponding to -1 value
80
+ indices are filed with fill_value
81
+ axis : int, default 0
82
+ Axis to take from
83
+ fill_value : any, default np.nan
84
+ Fill value to replace -1 values with
85
+ allow_fill : bool, default True
86
+ If False, indexer is assumed to contain no -1 values so no filling
87
+ will be done. This short-circuits computation of a mask. Result is
88
+ undefined if allow_fill == False and -1 is present in indexer.
89
+
90
+ Returns
91
+ -------
92
+ subarray : np.ndarray or ExtensionArray
93
+ May be the same type as the input, or cast to an ndarray.
94
+ """
95
+ if fill_value is lib.no_default:
96
+ fill_value = na_value_for_dtype(arr.dtype, compat=False)
97
+ elif lib.is_np_dtype(arr.dtype, "mM"):
98
+ dtype, fill_value = maybe_promote(arr.dtype, fill_value)
99
+ if arr.dtype != dtype:
100
+ # EA.take is strict about returning a new object of the same type
101
+ # so for that case cast upfront
102
+ arr = arr.astype(dtype)
103
+
104
+ if not isinstance(arr, np.ndarray):
105
+ # i.e. ExtensionArray,
106
+ # includes for EA to catch DatetimeArray, TimedeltaArray
107
+ if not is_1d_only_ea_dtype(arr.dtype):
108
+ # i.e. DatetimeArray, TimedeltaArray
109
+ arr = cast("NDArrayBackedExtensionArray", arr)
110
+ return arr.take(
111
+ indexer, fill_value=fill_value, allow_fill=allow_fill, axis=axis
112
+ )
113
+
114
+ return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
115
+
116
+ arr = np.asarray(arr)
117
+ return _take_nd_ndarray(arr, indexer, axis, fill_value, allow_fill)
118
+
119
+
120
+ def _take_nd_ndarray(
121
+ arr: np.ndarray,
122
+ indexer: npt.NDArray[np.intp] | None,
123
+ axis: AxisInt,
124
+ fill_value,
125
+ allow_fill: bool,
126
+ ) -> np.ndarray:
127
+ if indexer is None:
128
+ indexer = np.arange(arr.shape[axis], dtype=np.intp)
129
+ dtype, fill_value = arr.dtype, arr.dtype.type()
130
+ else:
131
+ indexer = ensure_platform_int(indexer)
132
+
133
+ dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
134
+ arr, indexer, fill_value, allow_fill
135
+ )
136
+
137
+ flip_order = False
138
+ if arr.ndim == 2 and arr.flags.f_contiguous:
139
+ flip_order = True
140
+
141
+ if flip_order:
142
+ arr = arr.T
143
+ axis = arr.ndim - axis - 1
144
+
145
+ # at this point, it's guaranteed that dtype can hold both the arr values
146
+ # and the fill_value
147
+ out_shape_ = list(arr.shape)
148
+ out_shape_[axis] = len(indexer)
149
+ out_shape = tuple(out_shape_)
150
+ if arr.flags.f_contiguous and axis == arr.ndim - 1:
151
+ # minor tweak that can make an order-of-magnitude difference
152
+ # for dataframes initialized directly from 2-d ndarrays
153
+ # (s.t. df.values is c-contiguous and df._mgr.blocks[0] is its
154
+ # f-contiguous transpose)
155
+ out = np.empty(out_shape, dtype=dtype, order="F")
156
+ else:
157
+ out = np.empty(out_shape, dtype=dtype)
158
+
159
+ func = _get_take_nd_function(
160
+ arr.ndim, arr.dtype, out.dtype, axis=axis, mask_info=mask_info
161
+ )
162
+ func(arr, indexer, out, fill_value)
163
+
164
+ if flip_order:
165
+ out = out.T
166
+ return out
167
+
168
+
169
+ def take_1d(
170
+ arr: ArrayLike,
171
+ indexer: npt.NDArray[np.intp],
172
+ fill_value=None,
173
+ allow_fill: bool = True,
174
+ mask: npt.NDArray[np.bool_] | None = None,
175
+ ) -> ArrayLike:
176
+ """
177
+ Specialized version for 1D arrays. Differences compared to `take_nd`:
178
+
179
+ - Assumes input array has already been converted to numpy array / EA
180
+ - Assumes indexer is already guaranteed to be intp dtype ndarray
181
+ - Only works for 1D arrays
182
+
183
+ To ensure the lowest possible overhead.
184
+
185
+ Note: similarly to `take_nd`, this function assumes that the indexer is
186
+ a valid(ated) indexer with no out of bound indices.
187
+
188
+ Parameters
189
+ ----------
190
+ arr : np.ndarray or ExtensionArray
191
+ Input array.
192
+ indexer : ndarray
193
+ 1-D array of indices to take (validated indices, intp dtype).
194
+ fill_value : any, default np.nan
195
+ Fill value to replace -1 values with
196
+ allow_fill : bool, default True
197
+ If False, indexer is assumed to contain no -1 values so no filling
198
+ will be done. This short-circuits computation of a mask. Result is
199
+ undefined if allow_fill == False and -1 is present in indexer.
200
+ mask : np.ndarray, optional, default None
201
+ If `allow_fill` is True, and the mask (where indexer == -1) is already
202
+ known, it can be passed to avoid recomputation.
203
+ """
204
+ if not isinstance(arr, np.ndarray):
205
+ # ExtensionArray -> dispatch to their method
206
+ return arr.take(indexer, fill_value=fill_value, allow_fill=allow_fill)
207
+
208
+ if not allow_fill:
209
+ return arr.take(indexer)
210
+
211
+ dtype, fill_value, mask_info = _take_preprocess_indexer_and_fill_value(
212
+ arr, indexer, fill_value, True, mask
213
+ )
214
+
215
+ # at this point, it's guaranteed that dtype can hold both the arr values
216
+ # and the fill_value
217
+ out = np.empty(indexer.shape, dtype=dtype)
218
+
219
+ func = _get_take_nd_function(
220
+ arr.ndim, arr.dtype, out.dtype, axis=0, mask_info=mask_info
221
+ )
222
+ func(arr, indexer, out, fill_value)
223
+
224
+ return out
225
+
226
+
227
+ def take_2d_multi(
228
+ arr: np.ndarray,
229
+ indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
230
+ fill_value=np.nan,
231
+ ) -> np.ndarray:
232
+ """
233
+ Specialized Cython take which sets NaN values in one pass.
234
+ """
235
+ # This is only called from one place in DataFrame._reindex_multi,
236
+ # so we know indexer is well-behaved.
237
+ assert indexer is not None
238
+ assert indexer[0] is not None
239
+ assert indexer[1] is not None
240
+
241
+ row_idx, col_idx = indexer
242
+
243
+ row_idx = ensure_platform_int(row_idx)
244
+ col_idx = ensure_platform_int(col_idx)
245
+ indexer = row_idx, col_idx
246
+ mask_info = None
247
+
248
+ # check for promotion based on types only (do this first because
249
+ # it's faster than computing a mask)
250
+ dtype, fill_value = maybe_promote(arr.dtype, fill_value)
251
+ if dtype != arr.dtype:
252
+ # check if promotion is actually required based on indexer
253
+ row_mask = row_idx == -1
254
+ col_mask = col_idx == -1
255
+ row_needs = row_mask.any()
256
+ col_needs = col_mask.any()
257
+ mask_info = (row_mask, col_mask), (row_needs, col_needs)
258
+
259
+ if not (row_needs or col_needs):
260
+ # if not, then depromote, set fill_value to dummy
261
+ # (it won't be used but we don't want the cython code
262
+ # to crash when trying to cast it to dtype)
263
+ dtype, fill_value = arr.dtype, arr.dtype.type()
264
+
265
+ # at this point, it's guaranteed that dtype can hold both the arr values
266
+ # and the fill_value
267
+ out_shape = len(row_idx), len(col_idx)
268
+ out = np.empty(out_shape, dtype=dtype)
269
+
270
+ func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
271
+ if func is None and arr.dtype != out.dtype:
272
+ func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
273
+ if func is not None:
274
+ func = _convert_wrapper(func, out.dtype)
275
+
276
+ if func is not None:
277
+ func(arr, indexer, out=out, fill_value=fill_value)
278
+ else:
279
+ # test_reindex_multi
280
+ _take_2d_multi_object(
281
+ arr, indexer, out, fill_value=fill_value, mask_info=mask_info
282
+ )
283
+
284
+ return out
285
+
286
+
287
+ @functools.lru_cache
288
+ def _get_take_nd_function_cached(
289
+ ndim: int, arr_dtype: np.dtype, out_dtype: np.dtype, axis: AxisInt
290
+ ):
291
+ """
292
+ Part of _get_take_nd_function below that doesn't need `mask_info` and thus
293
+ can be cached (mask_info potentially contains a numpy ndarray which is not
294
+ hashable and thus cannot be used as argument for cached function).
295
+ """
296
+ tup = (arr_dtype.name, out_dtype.name)
297
+ if ndim == 1:
298
+ func = _take_1d_dict.get(tup, None)
299
+ elif ndim == 2:
300
+ if axis == 0:
301
+ func = _take_2d_axis0_dict.get(tup, None)
302
+ else:
303
+ func = _take_2d_axis1_dict.get(tup, None)
304
+ if func is not None:
305
+ return func
306
+
307
+ # We get here with string, uint, float16, and complex dtypes that could
308
+ # potentially be handled in algos_take_helper.
309
+ # Also a couple with (M8[ns], object) and (m8[ns], object)
310
+ tup = (out_dtype.name, out_dtype.name)
311
+ if ndim == 1:
312
+ func = _take_1d_dict.get(tup, None)
313
+ elif ndim == 2:
314
+ if axis == 0:
315
+ func = _take_2d_axis0_dict.get(tup, None)
316
+ else:
317
+ func = _take_2d_axis1_dict.get(tup, None)
318
+ if func is not None:
319
+ func = _convert_wrapper(func, out_dtype)
320
+ return func
321
+
322
+ return None
323
+
324
+
325
+ def _get_take_nd_function(
326
+ ndim: int,
327
+ arr_dtype: np.dtype,
328
+ out_dtype: np.dtype,
329
+ axis: AxisInt = 0,
330
+ mask_info=None,
331
+ ):
332
+ """
333
+ Get the appropriate "take" implementation for the given dimension, axis
334
+ and dtypes.
335
+ """
336
+ func = None
337
+ if ndim <= 2:
338
+ # for this part we don't need `mask_info` -> use the cached algo lookup
339
+ func = _get_take_nd_function_cached(ndim, arr_dtype, out_dtype, axis)
340
+
341
+ if func is None:
342
+
343
+ def func(arr, indexer, out, fill_value=np.nan) -> None:
344
+ indexer = ensure_platform_int(indexer)
345
+ _take_nd_object(
346
+ arr, indexer, out, axis=axis, fill_value=fill_value, mask_info=mask_info
347
+ )
348
+
349
+ return func
350
+
351
+
352
+ def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
353
+ def wrapper(
354
+ arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
355
+ ) -> None:
356
+ if arr_dtype is not None:
357
+ arr = arr.view(arr_dtype)
358
+ if out_dtype is not None:
359
+ out = out.view(out_dtype)
360
+ if fill_wrap is not None:
361
+ # FIXME: if we get here with dt64/td64 we need to be sure we have
362
+ # matching resos
363
+ if fill_value.dtype.kind == "m":
364
+ fill_value = fill_value.astype("m8[ns]")
365
+ else:
366
+ fill_value = fill_value.astype("M8[ns]")
367
+ fill_value = fill_wrap(fill_value)
368
+
369
+ f(arr, indexer, out, fill_value=fill_value)
370
+
371
+ return wrapper
372
+
373
+
374
+ def _convert_wrapper(f, conv_dtype):
375
+ def wrapper(
376
+ arr: np.ndarray, indexer: np.ndarray, out: np.ndarray, fill_value=np.nan
377
+ ) -> None:
378
+ if conv_dtype == object:
379
+ # GH#39755 avoid casting dt64/td64 to integers
380
+ arr = ensure_wrapped_if_datetimelike(arr)
381
+ arr = arr.astype(conv_dtype)
382
+ f(arr, indexer, out, fill_value=fill_value)
383
+
384
+ return wrapper
385
+
386
+
387
+ _take_1d_dict = {
388
+ ("int8", "int8"): libalgos.take_1d_int8_int8,
389
+ ("int8", "int32"): libalgos.take_1d_int8_int32,
390
+ ("int8", "int64"): libalgos.take_1d_int8_int64,
391
+ ("int8", "float64"): libalgos.take_1d_int8_float64,
392
+ ("int16", "int16"): libalgos.take_1d_int16_int16,
393
+ ("int16", "int32"): libalgos.take_1d_int16_int32,
394
+ ("int16", "int64"): libalgos.take_1d_int16_int64,
395
+ ("int16", "float64"): libalgos.take_1d_int16_float64,
396
+ ("int32", "int32"): libalgos.take_1d_int32_int32,
397
+ ("int32", "int64"): libalgos.take_1d_int32_int64,
398
+ ("int32", "float64"): libalgos.take_1d_int32_float64,
399
+ ("int64", "int64"): libalgos.take_1d_int64_int64,
400
+ ("int64", "float64"): libalgos.take_1d_int64_float64,
401
+ ("float32", "float32"): libalgos.take_1d_float32_float32,
402
+ ("float32", "float64"): libalgos.take_1d_float32_float64,
403
+ ("float64", "float64"): libalgos.take_1d_float64_float64,
404
+ ("object", "object"): libalgos.take_1d_object_object,
405
+ ("bool", "bool"): _view_wrapper(libalgos.take_1d_bool_bool, np.uint8, np.uint8),
406
+ ("bool", "object"): _view_wrapper(libalgos.take_1d_bool_object, np.uint8, None),
407
+ ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
408
+ libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64
409
+ ),
410
+ ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
411
+ libalgos.take_1d_int64_int64, np.int64, np.int64, np.int64
412
+ ),
413
+ }
414
+
415
+ _take_2d_axis0_dict = {
416
+ ("int8", "int8"): libalgos.take_2d_axis0_int8_int8,
417
+ ("int8", "int32"): libalgos.take_2d_axis0_int8_int32,
418
+ ("int8", "int64"): libalgos.take_2d_axis0_int8_int64,
419
+ ("int8", "float64"): libalgos.take_2d_axis0_int8_float64,
420
+ ("int16", "int16"): libalgos.take_2d_axis0_int16_int16,
421
+ ("int16", "int32"): libalgos.take_2d_axis0_int16_int32,
422
+ ("int16", "int64"): libalgos.take_2d_axis0_int16_int64,
423
+ ("int16", "float64"): libalgos.take_2d_axis0_int16_float64,
424
+ ("int32", "int32"): libalgos.take_2d_axis0_int32_int32,
425
+ ("int32", "int64"): libalgos.take_2d_axis0_int32_int64,
426
+ ("int32", "float64"): libalgos.take_2d_axis0_int32_float64,
427
+ ("int64", "int64"): libalgos.take_2d_axis0_int64_int64,
428
+ ("int64", "float64"): libalgos.take_2d_axis0_int64_float64,
429
+ ("float32", "float32"): libalgos.take_2d_axis0_float32_float32,
430
+ ("float32", "float64"): libalgos.take_2d_axis0_float32_float64,
431
+ ("float64", "float64"): libalgos.take_2d_axis0_float64_float64,
432
+ ("object", "object"): libalgos.take_2d_axis0_object_object,
433
+ ("bool", "bool"): _view_wrapper(
434
+ libalgos.take_2d_axis0_bool_bool, np.uint8, np.uint8
435
+ ),
436
+ ("bool", "object"): _view_wrapper(
437
+ libalgos.take_2d_axis0_bool_object, np.uint8, None
438
+ ),
439
+ ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
440
+ libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
441
+ ),
442
+ ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
443
+ libalgos.take_2d_axis0_int64_int64, np.int64, np.int64, fill_wrap=np.int64
444
+ ),
445
+ }
446
+
447
+ _take_2d_axis1_dict = {
448
+ ("int8", "int8"): libalgos.take_2d_axis1_int8_int8,
449
+ ("int8", "int32"): libalgos.take_2d_axis1_int8_int32,
450
+ ("int8", "int64"): libalgos.take_2d_axis1_int8_int64,
451
+ ("int8", "float64"): libalgos.take_2d_axis1_int8_float64,
452
+ ("int16", "int16"): libalgos.take_2d_axis1_int16_int16,
453
+ ("int16", "int32"): libalgos.take_2d_axis1_int16_int32,
454
+ ("int16", "int64"): libalgos.take_2d_axis1_int16_int64,
455
+ ("int16", "float64"): libalgos.take_2d_axis1_int16_float64,
456
+ ("int32", "int32"): libalgos.take_2d_axis1_int32_int32,
457
+ ("int32", "int64"): libalgos.take_2d_axis1_int32_int64,
458
+ ("int32", "float64"): libalgos.take_2d_axis1_int32_float64,
459
+ ("int64", "int64"): libalgos.take_2d_axis1_int64_int64,
460
+ ("int64", "float64"): libalgos.take_2d_axis1_int64_float64,
461
+ ("float32", "float32"): libalgos.take_2d_axis1_float32_float32,
462
+ ("float32", "float64"): libalgos.take_2d_axis1_float32_float64,
463
+ ("float64", "float64"): libalgos.take_2d_axis1_float64_float64,
464
+ ("object", "object"): libalgos.take_2d_axis1_object_object,
465
+ ("bool", "bool"): _view_wrapper(
466
+ libalgos.take_2d_axis1_bool_bool, np.uint8, np.uint8
467
+ ),
468
+ ("bool", "object"): _view_wrapper(
469
+ libalgos.take_2d_axis1_bool_object, np.uint8, None
470
+ ),
471
+ ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
472
+ libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
473
+ ),
474
+ ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
475
+ libalgos.take_2d_axis1_int64_int64, np.int64, np.int64, fill_wrap=np.int64
476
+ ),
477
+ }
478
+
479
+ _take_2d_multi_dict = {
480
+ ("int8", "int8"): libalgos.take_2d_multi_int8_int8,
481
+ ("int8", "int32"): libalgos.take_2d_multi_int8_int32,
482
+ ("int8", "int64"): libalgos.take_2d_multi_int8_int64,
483
+ ("int8", "float64"): libalgos.take_2d_multi_int8_float64,
484
+ ("int16", "int16"): libalgos.take_2d_multi_int16_int16,
485
+ ("int16", "int32"): libalgos.take_2d_multi_int16_int32,
486
+ ("int16", "int64"): libalgos.take_2d_multi_int16_int64,
487
+ ("int16", "float64"): libalgos.take_2d_multi_int16_float64,
488
+ ("int32", "int32"): libalgos.take_2d_multi_int32_int32,
489
+ ("int32", "int64"): libalgos.take_2d_multi_int32_int64,
490
+ ("int32", "float64"): libalgos.take_2d_multi_int32_float64,
491
+ ("int64", "int64"): libalgos.take_2d_multi_int64_int64,
492
+ ("int64", "float64"): libalgos.take_2d_multi_int64_float64,
493
+ ("float32", "float32"): libalgos.take_2d_multi_float32_float32,
494
+ ("float32", "float64"): libalgos.take_2d_multi_float32_float64,
495
+ ("float64", "float64"): libalgos.take_2d_multi_float64_float64,
496
+ ("object", "object"): libalgos.take_2d_multi_object_object,
497
+ ("bool", "bool"): _view_wrapper(
498
+ libalgos.take_2d_multi_bool_bool, np.uint8, np.uint8
499
+ ),
500
+ ("bool", "object"): _view_wrapper(
501
+ libalgos.take_2d_multi_bool_object, np.uint8, None
502
+ ),
503
+ ("datetime64[ns]", "datetime64[ns]"): _view_wrapper(
504
+ libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
505
+ ),
506
+ ("timedelta64[ns]", "timedelta64[ns]"): _view_wrapper(
507
+ libalgos.take_2d_multi_int64_int64, np.int64, np.int64, fill_wrap=np.int64
508
+ ),
509
+ }
510
+
511
+
512
+ def _take_nd_object(
513
+ arr: np.ndarray,
514
+ indexer: npt.NDArray[np.intp],
515
+ out: np.ndarray,
516
+ axis: AxisInt,
517
+ fill_value,
518
+ mask_info,
519
+ ) -> None:
520
+ if mask_info is not None:
521
+ mask, needs_masking = mask_info
522
+ else:
523
+ mask = indexer == -1
524
+ needs_masking = mask.any()
525
+ if arr.dtype != out.dtype:
526
+ arr = arr.astype(out.dtype)
527
+ if arr.shape[axis] > 0:
528
+ arr.take(indexer, axis=axis, out=out)
529
+ if needs_masking:
530
+ outindexer = [slice(None)] * arr.ndim
531
+ outindexer[axis] = mask
532
+ out[tuple(outindexer)] = fill_value
533
+
534
+
535
+ def _take_2d_multi_object(
+     arr: np.ndarray,
+     indexer: tuple[npt.NDArray[np.intp], npt.NDArray[np.intp]],
+     out: np.ndarray,
+     fill_value,
+     mask_info,
+ ) -> None:
+     # this is not ideal, performance-wise, but it's better than raising
+     # an exception (best to optimize in Cython to avoid getting here)
+     row_idx, col_idx = indexer  # both np.intp
+     if mask_info is not None:
+         (row_mask, col_mask), (row_needs, col_needs) = mask_info
+     else:
+         row_mask = row_idx == -1
+         col_mask = col_idx == -1
+         row_needs = row_mask.any()
+         col_needs = col_mask.any()
+     if fill_value is not None:
+         if row_needs:
+             out[row_mask, :] = fill_value
+         if col_needs:
+             out[:, col_mask] = fill_value
+     for i, u_ in enumerate(row_idx):
+         if u_ != -1:
+             for j, v in enumerate(col_idx):
+                 if v != -1:
+                     out[i, j] = arr[u_, v]
+
+
+ def _take_preprocess_indexer_and_fill_value(
+     arr: np.ndarray,
+     indexer: npt.NDArray[np.intp],
+     fill_value,
+     allow_fill: bool,
+     mask: npt.NDArray[np.bool_] | None = None,
+ ):
+     mask_info: tuple[np.ndarray | None, bool] | None = None
+
+     if not allow_fill:
+         dtype, fill_value = arr.dtype, arr.dtype.type()
+         mask_info = None, False
+     else:
+         # check for promotion based on types only (do this first because
+         # it's faster than computing a mask)
+         dtype, fill_value = maybe_promote(arr.dtype, fill_value)
+         if dtype != arr.dtype:
+             # check if promotion is actually required based on indexer
+             if mask is not None:
+                 needs_masking = True
+             else:
+                 mask = indexer == -1
+                 needs_masking = bool(mask.any())
+             mask_info = mask, needs_masking
+             if not needs_masking:
+                 # if not, then depromote, set fill_value to dummy
+                 # (it won't be used but we don't want the cython code
+                 # to crash when trying to cast it to dtype)
+                 dtype, fill_value = arr.dtype, arr.dtype.type()
+
+     return dtype, fill_value, mask_info
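The dispatch tables above map a ``(values dtype, out dtype)`` pair to a specialized Cython kernel, and ``_take_preprocess_indexer_and_fill_value`` decides whether a requested ``fill_value`` forces dtype promotion. A minimal sketch of that behavior through the public ``pandas.api.extensions.take`` wrapper (inputs and printed outputs are illustrative, not part of the source):

```python
import numpy as np
from pandas.api.extensions import take

arr = np.array([1, 2, 3], dtype=np.int64)

# Without allow_fill, -1 indexes from the end, as in plain NumPy.
print(take(arr, [0, -1]))                   # [1 3]

# With allow_fill=True, -1 marks missing slots; maybe_promote upcasts
# int64 to float64 so the result can hold the NaN fill value.
print(take(arr, [0, -1], allow_fill=True))  # [ 1. nan]
```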
venv/lib/python3.10/site-packages/pandas/core/array_algos/transforms.py ADDED
@@ -0,0 +1,50 @@
+ """
+ transforms.py is for shape-preserving functions.
+ """
+
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+ import numpy as np
+
+ if TYPE_CHECKING:
+     from pandas._typing import (
+         AxisInt,
+         Scalar,
+     )
+
+
+ def shift(
+     values: np.ndarray, periods: int, axis: AxisInt, fill_value: Scalar
+ ) -> np.ndarray:
+     new_values = values
+
+     if periods == 0 or values.size == 0:
+         return new_values.copy()
+
+     # make sure array sent to np.roll is c_contiguous
+     f_ordered = values.flags.f_contiguous
+     if f_ordered:
+         new_values = new_values.T
+         axis = new_values.ndim - axis - 1
+
+     if new_values.size:
+         new_values = np.roll(
+             new_values,
+             np.intp(periods),
+             axis=axis,
+         )
+
+     axis_indexer = [slice(None)] * values.ndim
+     if periods > 0:
+         axis_indexer[axis] = slice(None, periods)
+     else:
+         axis_indexer[axis] = slice(periods, None)
+     new_values[tuple(axis_indexer)] = fill_value
+
+     # restore original order
+     if f_ordered:
+         new_values = new_values.T
+
+     return new_values
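``shift`` is essentially ``np.roll`` plus overwriting the wrapped-around slots with ``fill_value``. A minimal 1-D sketch of the same computation (values are illustrative):

```python
import numpy as np

values = np.array([1.0, 2.0, 3.0, 4.0])
periods = 2

shifted = np.roll(values, periods)
shifted[:periods] = np.nan  # slots vacated by a positive shift
print(shifted)  # [nan nan  1.  2.]

# Matches pd.Series([1.0, 2.0, 3.0, 4.0]).shift(2)
```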
venv/lib/python3.10/site-packages/pandas/core/dtypes/cast.py ADDED
@@ -0,0 +1,1973 @@
+ """
+ Routines for casting.
+ """
+
+ from __future__ import annotations
+
+ import datetime as dt
+ import functools
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Literal,
+     TypeVar,
+     cast,
+     overload,
+ )
+ import warnings
+
+ import numpy as np
+
+ from pandas._config import using_pyarrow_string_dtype
+
+ from pandas._libs import (
+     Interval,
+     Period,
+     lib,
+ )
+ from pandas._libs.missing import (
+     NA,
+     NAType,
+     checknull,
+ )
+ from pandas._libs.tslibs import (
+     NaT,
+     OutOfBoundsDatetime,
+     OutOfBoundsTimedelta,
+     Timedelta,
+     Timestamp,
+     is_supported_dtype,
+ )
+ from pandas._libs.tslibs.timedeltas import array_to_timedelta64
+ from pandas.compat.numpy import np_version_gt2
+ from pandas.errors import (
+     IntCastingNaNError,
+     LossySetitemError,
+ )
+
+ from pandas.core.dtypes.common import (
+     ensure_int8,
+     ensure_int16,
+     ensure_int32,
+     ensure_int64,
+     ensure_object,
+     ensure_str,
+     is_bool,
+     is_complex,
+     is_float,
+     is_integer,
+     is_object_dtype,
+     is_scalar,
+     is_string_dtype,
+     pandas_dtype as pandas_dtype_func,
+ )
+ from pandas.core.dtypes.dtypes import (
+     ArrowDtype,
+     BaseMaskedDtype,
+     CategoricalDtype,
+     DatetimeTZDtype,
+     ExtensionDtype,
+     IntervalDtype,
+     PandasExtensionDtype,
+     PeriodDtype,
+ )
+ from pandas.core.dtypes.generic import (
+     ABCExtensionArray,
+     ABCIndex,
+     ABCSeries,
+ )
+ from pandas.core.dtypes.inference import is_list_like
+ from pandas.core.dtypes.missing import (
+     is_valid_na_for_dtype,
+     isna,
+     na_value_for_dtype,
+     notna,
+ )
+
+ from pandas.io._util import _arrow_dtype_mapping
+
+ if TYPE_CHECKING:
+     from collections.abc import (
+         Sequence,
+         Sized,
+     )
+
+     from pandas._typing import (
+         ArrayLike,
+         Dtype,
+         DtypeObj,
+         NumpyIndexT,
+         Scalar,
+         npt,
+     )
+
+     from pandas import Index
+     from pandas.core.arrays import (
+         Categorical,
+         DatetimeArray,
+         ExtensionArray,
+         IntervalArray,
+         PeriodArray,
+         TimedeltaArray,
+     )
+
+
+ _int8_max = np.iinfo(np.int8).max
+ _int16_max = np.iinfo(np.int16).max
+ _int32_max = np.iinfo(np.int32).max
+
+ _dtype_obj = np.dtype(object)
+
+ NumpyArrayT = TypeVar("NumpyArrayT", bound=np.ndarray)
+
+
+ def maybe_convert_platform(
+     values: list | tuple | range | np.ndarray | ExtensionArray,
+ ) -> ArrayLike:
+     """try to do platform conversion, allow ndarray or list here"""
+     arr: ArrayLike
+
+     if isinstance(values, (list, tuple, range)):
+         arr = construct_1d_object_array_from_listlike(values)
+     else:
+         # The caller is responsible for ensuring that we have np.ndarray
+         # or ExtensionArray here.
+         arr = values
+
+     if arr.dtype == _dtype_obj:
+         arr = cast(np.ndarray, arr)
+         arr = lib.maybe_convert_objects(arr)
+
+     return arr
+
+
144
+ def is_nested_object(obj) -> bool:
+     """
+     return a boolean if we have a nested object, e.g. a Series with 1 or
+     more Series elements
+
+     This may not necessarily be performant.
+
+     """
+     return bool(
+         isinstance(obj, ABCSeries)
+         and is_object_dtype(obj.dtype)
+         and any(isinstance(v, ABCSeries) for v in obj._values)
+     )
+
+
159
+ def maybe_box_datetimelike(value: Scalar, dtype: Dtype | None = None) -> Scalar:
+     """
+     Cast scalar to Timestamp or Timedelta if scalar is datetime-like
+     and dtype is not object.
+
+     Parameters
+     ----------
+     value : scalar
+     dtype : Dtype, optional
+
+     Returns
+     -------
+     scalar
+     """
+     if dtype == _dtype_obj:
+         pass
+     elif isinstance(value, (np.datetime64, dt.datetime)):
+         value = Timestamp(value)
+     elif isinstance(value, (np.timedelta64, dt.timedelta)):
+         value = Timedelta(value)
+
+     return value
+
+
+ def maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType:
+     """
+     If passed a scalar, cast the scalar to a python native type.
+
+     Parameters
+     ----------
+     value : scalar
+
+     Returns
+     -------
+     scalar
+     """
+     if is_float(value):
+         value = float(value)
+     elif is_integer(value):
+         value = int(value)
+     elif is_bool(value):
+         value = bool(value)
+     elif isinstance(value, (np.datetime64, np.timedelta64)):
+         value = maybe_box_datetimelike(value)
+     elif value is NA:
+         value = None
+     return value
+
+
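A small sketch of what the two boxing helpers above do (they are private API; the outputs shown as comments are illustrative):

```python
import numpy as np
from pandas.core.dtypes.cast import maybe_box_datetimelike, maybe_box_native

print(maybe_box_datetimelike(np.datetime64("2021-01-01")))  # Timestamp('2021-01-01 00:00:00')
print(type(maybe_box_native(np.int64(3))))                  # <class 'int'>
print(type(maybe_box_native(np.float64(1.5))))              # <class 'float'>
```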
208
+ def _maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar:
+     """
+     Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting
+     into a numpy array. Failing to unbox would risk dropping nanoseconds.
+
+     Notes
+     -----
+     Caller is responsible for checking dtype.kind in "mM"
+     """
+     if is_valid_na_for_dtype(value, dtype):
+         # GH#36541: can't fill array directly with pd.NaT
+         # > np.empty(10, dtype="datetime64[ns]").fill(pd.NaT)
+         # ValueError: cannot convert float NaN to integer
+         value = dtype.type("NaT", "ns")
+     elif isinstance(value, Timestamp):
+         if value.tz is None:
+             value = value.to_datetime64()
+         elif not isinstance(dtype, DatetimeTZDtype):
+             raise TypeError("Cannot unbox tzaware Timestamp to tznaive dtype")
+     elif isinstance(value, Timedelta):
+         value = value.to_timedelta64()
+
+     _disallow_mismatched_datetimelike(value, dtype)
+     return value
+
+
+ def _disallow_mismatched_datetimelike(value, dtype: DtypeObj):
+     """
+     numpy allows np.array(dt64values, dtype="timedelta64[ns]") and
+     vice-versa, but we do not want to allow this, so we need to
+     check explicitly
+     """
+     vdtype = getattr(value, "dtype", None)
+     if vdtype is None:
+         return
+     elif (vdtype.kind == "m" and dtype.kind == "M") or (
+         vdtype.kind == "M" and dtype.kind == "m"
+     ):
+         raise TypeError(f"Cannot cast {repr(value)} to {dtype}")
+
+
249
+ @overload
+ def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray:
+     ...
+
+
+ @overload
+ def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike:
+     ...
+
+
+ def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike:
+     """
+     try to cast to the specified dtype (e.g. convert back to bool/int,
+     or could be an astype of float64->float32)
+     """
+     if isinstance(result, ABCSeries):
+         result = result._values
+     do_round = False
+
+     if isinstance(dtype, str):
+         if dtype == "infer":
+             inferred_type = lib.infer_dtype(result, skipna=False)
+             if inferred_type == "boolean":
+                 dtype = "bool"
+             elif inferred_type == "integer":
+                 dtype = "int64"
+             elif inferred_type == "datetime64":
+                 dtype = "datetime64[ns]"
+             elif inferred_type in ["timedelta", "timedelta64"]:
+                 dtype = "timedelta64[ns]"
+
+             # try to upcast here
+             elif inferred_type == "floating":
+                 dtype = "int64"
+                 if issubclass(result.dtype.type, np.number):
+                     do_round = True
+
+             else:
+                 # TODO: complex? what if result is already non-object?
+                 dtype = "object"
+
+         dtype = np.dtype(dtype)
+
+     if not isinstance(dtype, np.dtype):
+         # enforce our signature annotation
+         raise TypeError(dtype)  # pragma: no cover
+
+     converted = maybe_downcast_numeric(result, dtype, do_round)
+     if converted is not result:
+         return converted
+
+     # a datetimelike
+     # GH12821, iNaT is cast to float
+     if dtype.kind in "mM" and result.dtype.kind in "if":
+         result = result.astype(dtype)
+
+     elif dtype.kind == "m" and result.dtype == _dtype_obj:
+         # test_where_downcast_to_td64
+         result = cast(np.ndarray, result)
+         result = array_to_timedelta64(result)
+
+     elif dtype == np.dtype("M8[ns]") and result.dtype == _dtype_obj:
+         result = cast(np.ndarray, result)
+         return np.asarray(maybe_cast_to_datetime(result, dtype=dtype))
+
+     return result
+
+
317
+ @overload
+ def maybe_downcast_numeric(
+     result: np.ndarray, dtype: np.dtype, do_round: bool = False
+ ) -> np.ndarray:
+     ...
+
+
+ @overload
+ def maybe_downcast_numeric(
+     result: ExtensionArray, dtype: DtypeObj, do_round: bool = False
+ ) -> ArrayLike:
+     ...
+
+
+ def maybe_downcast_numeric(
+     result: ArrayLike, dtype: DtypeObj, do_round: bool = False
+ ) -> ArrayLike:
+     """
+     Subset of maybe_downcast_to_dtype restricted to numeric dtypes.
+
+     Parameters
+     ----------
+     result : ndarray or ExtensionArray
+     dtype : np.dtype or ExtensionDtype
+     do_round : bool
+
+     Returns
+     -------
+     ndarray or ExtensionArray
+     """
+     if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype):
+         # e.g. SparseDtype has no itemsize attr
+         return result
+
+     def trans(x):
+         if do_round:
+             return x.round()
+         return x
+
+     if dtype.kind == result.dtype.kind:
+         # don't allow upcasts here (except if empty)
+         if result.dtype.itemsize <= dtype.itemsize and result.size:
+             return result
+
+     if dtype.kind in "biu":
+         if not result.size:
+             # if we don't have any elements, just astype it
+             return trans(result).astype(dtype)
+
+         if isinstance(result, np.ndarray):
+             element = result.item(0)
+         else:
+             element = result.iloc[0]
+         if not isinstance(element, (np.integer, np.floating, int, float, bool)):
+             # a comparable, e.g. a Decimal may slip in here
+             return result
+
+         if (
+             issubclass(result.dtype.type, (np.object_, np.number))
+             and notna(result).all()
+         ):
+             new_result = trans(result).astype(dtype)
+             if new_result.dtype.kind == "O" or result.dtype.kind == "O":
+                 # np.allclose may raise TypeError on object-dtype
+                 if (new_result == result).all():
+                     return new_result
+             else:
+                 if np.allclose(new_result, result, rtol=0):
+                     return new_result
+
+     elif (
+         issubclass(dtype.type, np.floating)
+         and result.dtype.kind != "b"
+         and not is_string_dtype(result.dtype)
+     ):
+         with warnings.catch_warnings():
+             warnings.filterwarnings(
+                 "ignore", "overflow encountered in cast", RuntimeWarning
+             )
+             new_result = result.astype(dtype)
+
+         # Adjust tolerances based on floating point size
+         size_tols = {4: 5e-4, 8: 5e-8, 16: 5e-16}
+
+         atol = size_tols.get(new_result.dtype.itemsize, 0.0)
+
+         # Check downcast float values are still equal within 7 digits when
+         # converting from float64 to float32
+         if np.allclose(new_result, result, equal_nan=True, rtol=0.0, atol=atol):
+             return new_result
+
+     elif dtype.kind == result.dtype.kind == "c":
+         new_result = result.astype(dtype)
+
+         if np.array_equal(new_result, result, equal_nan=True):
+             # TODO: use tolerance like we do for float?
+             return new_result
+
+     return result
+
+
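A quick sketch of the lossless round-trip rule ``maybe_downcast_numeric`` enforces (private API; outputs shown as comments are illustrative):

```python
import numpy as np
from pandas.core.dtypes.cast import maybe_downcast_numeric

exact = np.array([1.0, 2.0, 3.0])
print(maybe_downcast_numeric(exact, np.dtype("int64")))  # [1 2 3] -- downcast is lossless

lossy = np.array([1.0, 2.5])
print(maybe_downcast_numeric(lossy, np.dtype("int64")))  # [1.  2.5] -- unchanged, 2.5 would be truncated
```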
418
+ def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT:
+     """
+     If array is an int/uint/float dtype with a bit size lower than 64 bits,
+     upcast it to 64 bits.
+
+     Parameters
+     ----------
+     arr : ndarray or ExtensionArray
+
+     Returns
+     -------
+     ndarray or ExtensionArray
+     """
+     dtype = arr.dtype
+     if dtype.kind == "i" and dtype != np.int64:
+         return arr.astype(np.int64)
+     elif dtype.kind == "u" and dtype != np.uint64:
+         return arr.astype(np.uint64)
+     elif dtype.kind == "f" and dtype != np.float64:
+         return arr.astype(np.float64)
+     else:
+         return arr
+
+
441
+ def maybe_cast_pointwise_result(
+     result: ArrayLike,
+     dtype: DtypeObj,
+     numeric_only: bool = False,
+     same_dtype: bool = True,
+ ) -> ArrayLike:
+     """
+     Try casting result of a pointwise operation back to the original dtype if
+     appropriate.
+
+     Parameters
+     ----------
+     result : array-like
+         Result to cast.
+     dtype : np.dtype or ExtensionDtype
+         Dtype of the input Series from which result was calculated.
+     numeric_only : bool, default False
+         Whether to cast only numerics or datetimes as well.
+     same_dtype : bool, default True
+         Specify dtype when calling _from_sequence.
+
+     Returns
+     -------
+     result : array-like
+         result possibly cast to the dtype.
+     """
+
+     if isinstance(dtype, ExtensionDtype):
+         cls = dtype.construct_array_type()
+         if same_dtype:
+             result = _maybe_cast_to_extension_array(cls, result, dtype=dtype)
+         else:
+             result = _maybe_cast_to_extension_array(cls, result)
+
+     elif (numeric_only and dtype.kind in "iufcb") or not numeric_only:
+         result = maybe_downcast_to_dtype(result, dtype)
+
+     return result
+
+
481
+ def _maybe_cast_to_extension_array(
+     cls: type[ExtensionArray], obj: ArrayLike, dtype: ExtensionDtype | None = None
+ ) -> ArrayLike:
+     """
+     Call to `_from_sequence` that returns the object unchanged on Exception.
+
+     Parameters
+     ----------
+     cls : class, subclass of ExtensionArray
+     obj : arraylike
+         Values to pass to cls._from_sequence
+     dtype : ExtensionDtype, optional
+
+     Returns
+     -------
+     ExtensionArray or obj
+     """
+     result: ArrayLike
+
+     if dtype is not None:
+         try:
+             result = cls._from_scalars(obj, dtype=dtype)
+         except (TypeError, ValueError):
+             return obj
+         return result
+
+     try:
+         result = cls._from_sequence(obj, dtype=dtype)
+     except Exception:
+         # We can't predict what downstream EA constructors may raise
+         result = obj
+     return result
+
+
515
+ @overload
+ def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype:
+     ...
+
+
+ @overload
+ def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype:
+     ...
+
+
+ def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj:
+     """
+     If we have a dtype that cannot hold NA values, find the best match that can.
+     """
+     if isinstance(dtype, ExtensionDtype):
+         if dtype._can_hold_na:
+             return dtype
+         elif isinstance(dtype, IntervalDtype):
+             # TODO(GH#45349): don't special-case IntervalDtype, allow
+             # overriding instead of returning object below.
+             return IntervalDtype(np.float64, closed=dtype.closed)
+         return _dtype_obj
+     elif dtype.kind == "b":
+         return _dtype_obj
+     elif dtype.kind in "iu":
+         return np.dtype(np.float64)
+     return dtype
+
+
544
+ _canonical_nans = {
+     np.datetime64: np.datetime64("NaT", "ns"),
+     np.timedelta64: np.timedelta64("NaT", "ns"),
+     type(np.nan): np.nan,
+ }
+
+
+ def maybe_promote(dtype: np.dtype, fill_value=np.nan):
+     """
+     Find the minimal dtype that can hold both the given dtype and fill_value.
+
+     Parameters
+     ----------
+     dtype : np.dtype
+     fill_value : scalar, default np.nan
+
+     Returns
+     -------
+     dtype
+         Upcasted from dtype argument if necessary.
+     fill_value
+         Upcasted from fill_value argument if necessary.
+
+     Raises
+     ------
+     ValueError
+         If fill_value is a non-scalar and dtype is not object.
+     """
+     orig = fill_value
+     orig_is_nat = False
+     if checknull(fill_value):
+         # https://github.com/pandas-dev/pandas/pull/39692#issuecomment-1441051740
+         # avoid cache misses with NaN/NaT values that are not singletons
+         if fill_value is not NA:
+             try:
+                 orig_is_nat = np.isnat(fill_value)
+             except TypeError:
+                 pass
+
+         fill_value = _canonical_nans.get(type(fill_value), fill_value)
+
+     # for performance, we are using a cached version of the actual implementation
+     # of the function in _maybe_promote. However, this doesn't always work (in case
+     # of non-hashable arguments), so we fallback to the actual implementation if needed
+     try:
+         # error: Argument 3 to "__call__" of "_lru_cache_wrapper" has incompatible type
+         # "Type[Any]"; expected "Hashable"  [arg-type]
+         dtype, fill_value = _maybe_promote_cached(
+             dtype, fill_value, type(fill_value)  # type: ignore[arg-type]
+         )
+     except TypeError:
+         # if fill_value is not hashable (required for caching)
+         dtype, fill_value = _maybe_promote(dtype, fill_value)
+
+     if (dtype == _dtype_obj and orig is not None) or (
+         orig_is_nat and np.datetime_data(orig)[0] != "ns"
+     ):
+         # GH#51592,53497 restore our potentially non-canonical fill_value
+         fill_value = orig
+     return dtype, fill_value
+
+
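Illustrative promotion results for ``maybe_promote`` (private API; outputs shown as comments):

```python
import numpy as np
from pandas.core.dtypes.cast import maybe_promote

print(maybe_promote(np.dtype("int64"), np.nan))  # (dtype('float64'), nan)
print(maybe_promote(np.dtype("int8"), 300))      # promotes past int8, e.g. (dtype('int32'), 300)
print(maybe_promote(np.dtype("bool"), 1.5))      # (dtype('O'), 1.5)
```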
606
+ @functools.lru_cache
+ def _maybe_promote_cached(dtype, fill_value, fill_value_type):
+     # The cached version of _maybe_promote below
+     # This also uses fill_value_type as an (unused) argument in the
+     # cache lookup -> to differentiate 1 and True
+     return _maybe_promote(dtype, fill_value)
+
+
614
+ def _maybe_promote(dtype: np.dtype, fill_value=np.nan):
+     # The actual implementation of the function, use `maybe_promote` above for
+     # a cached version.
+     if not is_scalar(fill_value):
+         # with object dtype there is nothing to promote, and the user can
+         # pass pretty much any weird fill_value they like
+         if dtype != object:
+             raise ValueError("fill_value must be a scalar")
+         dtype = _dtype_obj
+         return dtype, fill_value
+
+     if is_valid_na_for_dtype(fill_value, dtype) and dtype.kind in "iufcmM":
+         dtype = ensure_dtype_can_hold_na(dtype)
+         fv = na_value_for_dtype(dtype)
+         return dtype, fv
+
+     elif isinstance(dtype, CategoricalDtype):
+         if fill_value in dtype.categories or isna(fill_value):
+             return dtype, fill_value
+         else:
+             return object, ensure_object(fill_value)
+
+     elif isna(fill_value):
+         dtype = _dtype_obj
+         if fill_value is None:
+             # but we retain e.g. pd.NA
+             fill_value = np.nan
+         return dtype, fill_value
+
+     # returns tuple of (dtype, fill_value)
+     if issubclass(dtype.type, np.datetime64):
+         inferred, fv = infer_dtype_from_scalar(fill_value)
+         if inferred == dtype:
+             return dtype, fv
+
+         from pandas.core.arrays import DatetimeArray
+
+         dta = DatetimeArray._from_sequence([], dtype="M8[ns]")
+         try:
+             fv = dta._validate_setitem_value(fill_value)
+             return dta.dtype, fv
+         except (ValueError, TypeError):
+             return _dtype_obj, fill_value
+
+     elif issubclass(dtype.type, np.timedelta64):
+         inferred, fv = infer_dtype_from_scalar(fill_value)
+         if inferred == dtype:
+             return dtype, fv
+
+         elif inferred.kind == "m":
+             # different unit, e.g. passed np.timedelta64(24, "h") with dtype=m8[ns]
+             # see if we can losslessly cast it to our dtype
+             unit = np.datetime_data(dtype)[0]
+             try:
+                 td = Timedelta(fill_value).as_unit(unit, round_ok=False)
+             except OutOfBoundsTimedelta:
+                 return _dtype_obj, fill_value
+             else:
+                 return dtype, td.asm8
+
+         return _dtype_obj, fill_value
+
+     elif is_float(fill_value):
+         if issubclass(dtype.type, np.bool_):
+             dtype = np.dtype(np.object_)
+
+         elif issubclass(dtype.type, np.integer):
+             dtype = np.dtype(np.float64)
+
+         elif dtype.kind == "f":
+             mst = np.min_scalar_type(fill_value)
+             if mst > dtype:
+                 # e.g. mst is np.float64 and dtype is np.float32
+                 dtype = mst
+
+         elif dtype.kind == "c":
+             mst = np.min_scalar_type(fill_value)
+             dtype = np.promote_types(dtype, mst)
+
+     elif is_bool(fill_value):
+         if not issubclass(dtype.type, np.bool_):
+             dtype = np.dtype(np.object_)
+
+     elif is_integer(fill_value):
+         if issubclass(dtype.type, np.bool_):
+             dtype = np.dtype(np.object_)
+
+         elif issubclass(dtype.type, np.integer):
+             if not np_can_cast_scalar(fill_value, dtype):  # type: ignore[arg-type]
+                 # upcast to prevent overflow
+                 mst = np.min_scalar_type(fill_value)
+                 dtype = np.promote_types(dtype, mst)
+                 if dtype.kind == "f":
+                     # Case where we disagree with numpy
+                     dtype = np.dtype(np.object_)
+
+     elif is_complex(fill_value):
+         if issubclass(dtype.type, np.bool_):
+             dtype = np.dtype(np.object_)
+
+         elif issubclass(dtype.type, (np.integer, np.floating)):
+             mst = np.min_scalar_type(fill_value)
+             dtype = np.promote_types(dtype, mst)
+
+         elif dtype.kind == "c":
+             mst = np.min_scalar_type(fill_value)
+             if mst > dtype:
+                 # e.g. mst is np.complex128 and dtype is np.complex64
+                 dtype = mst
+
+     else:
+         dtype = np.dtype(np.object_)
+
+     # in case we have a string that looked like a number
+     if issubclass(dtype.type, (bytes, str)):
+         dtype = np.dtype(np.object_)
+
+     fill_value = _ensure_dtype_type(fill_value, dtype)
+     return dtype, fill_value
+
+
737
+ def _ensure_dtype_type(value, dtype: np.dtype):
+     """
+     Ensure that the given value is an instance of the given dtype.
+
+     e.g. if our dtype is np.complex64, we should have an instance of that
+     as opposed to a python complex object.
+
+     Parameters
+     ----------
+     value : object
+     dtype : np.dtype
+
+     Returns
+     -------
+     object
+     """
+     # Start with exceptions in which we do _not_ cast to numpy types
+
+     if dtype == _dtype_obj:
+         return value
+
+     # Note: before we get here we have already excluded isna(value)
+     return dtype.type(value)
+
+
762
+ def infer_dtype_from(val) -> tuple[DtypeObj, Any]:
+     """
+     Interpret the dtype from a scalar or array.
+
+     Parameters
+     ----------
+     val : object
+     """
+     if not is_list_like(val):
+         return infer_dtype_from_scalar(val)
+     return infer_dtype_from_array(val)
+
+
775
+ def infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]:
+     """
+     Interpret the dtype from a scalar.
+
+     Parameters
+     ----------
+     val : object
+     """
+     dtype: DtypeObj = _dtype_obj
+
+     # a 1-element ndarray
+     if isinstance(val, np.ndarray):
+         if val.ndim != 0:
+             msg = "invalid ndarray passed to infer_dtype_from_scalar"
+             raise ValueError(msg)
+
+         dtype = val.dtype
+         val = lib.item_from_zerodim(val)
+
+     elif isinstance(val, str):
+         # If we create an empty array using a string to infer
+         # the dtype, NumPy will only allocate one character per entry
+         # so this is kind of bad. Alternately we could use np.repeat
+         # instead of np.empty (but then you still don't want things
+         # coming out as np.str_!)
+
+         dtype = _dtype_obj
+         if using_pyarrow_string_dtype():
+             from pandas.core.arrays.string_ import StringDtype
+
+             dtype = StringDtype(storage="pyarrow_numpy")
+
+     elif isinstance(val, (np.datetime64, dt.datetime)):
+         try:
+             val = Timestamp(val)
+         except OutOfBoundsDatetime:
+             return _dtype_obj, val
+
+         if val is NaT or val.tz is None:
+             val = val.to_datetime64()
+             dtype = val.dtype
+             # TODO: test with datetime(2920, 10, 1) based on test_replace_dtypes
+         else:
+             dtype = DatetimeTZDtype(unit=val.unit, tz=val.tz)
+
+     elif isinstance(val, (np.timedelta64, dt.timedelta)):
+         try:
+             val = Timedelta(val)
+         except (OutOfBoundsTimedelta, OverflowError):
+             dtype = _dtype_obj
+         else:
+             if val is NaT:
+                 val = np.timedelta64("NaT", "ns")
+             else:
+                 val = val.asm8
+             dtype = val.dtype
+
+     elif is_bool(val):
+         dtype = np.dtype(np.bool_)
+
+     elif is_integer(val):
+         if isinstance(val, np.integer):
+             dtype = np.dtype(type(val))
+         else:
+             dtype = np.dtype(np.int64)
+
+         try:
+             np.array(val, dtype=dtype)
+         except OverflowError:
+             dtype = np.array(val).dtype
+
+     elif is_float(val):
+         if isinstance(val, np.floating):
+             dtype = np.dtype(type(val))
+         else:
+             dtype = np.dtype(np.float64)
+
+     elif is_complex(val):
+         dtype = np.dtype(np.complex128)
+
+     if isinstance(val, Period):
+         dtype = PeriodDtype(freq=val.freq)
+     elif isinstance(val, Interval):
+         subtype = infer_dtype_from_scalar(val.left)[0]
+         dtype = IntervalDtype(subtype=subtype, closed=val.closed)
+
+     return dtype, val
+
+
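Illustrative inference results for ``infer_dtype_from_scalar`` (private API; outputs shown as comments):

```python
import numpy as np
from pandas.core.dtypes.cast import infer_dtype_from_scalar

print(infer_dtype_from_scalar(3))     # (dtype('int64'), 3)
print(infer_dtype_from_scalar(3.5))   # (dtype('float64'), 3.5)
print(infer_dtype_from_scalar(True))  # (dtype('bool'), True)
```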
864
+ def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]:
+     """
+     Convert datetimelike-keyed dicts to a Timestamp-keyed dict.
+
+     Parameters
+     ----------
+     d : dict-like object
+
+     Returns
+     -------
+     dict
+     """
+     return {maybe_box_datetimelike(key): value for key, value in d.items()}
+
+
879
+ def infer_dtype_from_array(arr) -> tuple[DtypeObj, ArrayLike]:
+     """
+     Infer the dtype from an array.
+
+     Parameters
+     ----------
+     arr : array
+
+     Returns
+     -------
+     tuple (pandas-compat dtype, array)
+
+     Examples
+     --------
+     >>> np.asarray([1, '1'])
+     array(['1', '1'], dtype='<U21')
+
+     >>> infer_dtype_from_array([1, '1'])
+     (dtype('O'), [1, '1'])
+     """
+     if isinstance(arr, np.ndarray):
+         return arr.dtype, arr
+
+     if not is_list_like(arr):
+         raise TypeError("'arr' must be list-like")
+
+     arr_dtype = getattr(arr, "dtype", None)
+     if isinstance(arr_dtype, ExtensionDtype):
+         return arr.dtype, arr
+
+     elif isinstance(arr, ABCSeries):
+         return arr.dtype, np.asarray(arr)
+
+     # don't force numpy coerce with nan's
+     inferred = lib.infer_dtype(arr, skipna=False)
+     if inferred in ["string", "bytes", "mixed", "mixed-integer"]:
+         return (np.dtype(np.object_), arr)
+
+     arr = np.asarray(arr)
+     return arr.dtype, arr
+
+
922
+ def _maybe_infer_dtype_type(element):
+     """
+     Try to infer an object's dtype, for use in arithmetic ops.
+
+     Uses `element.dtype` if that's available.
+     Objects implementing the iterator protocol are cast to a NumPy array,
+     and from there the array's type is used.
+
+     Parameters
+     ----------
+     element : object
+         Possibly has a `.dtype` attribute, and possibly the iterator
+         protocol.
+
+     Returns
+     -------
+     tipo : type
+
+     Examples
+     --------
+     >>> from collections import namedtuple
+     >>> Foo = namedtuple("Foo", "dtype")
+     >>> _maybe_infer_dtype_type(Foo(np.dtype("i8")))
+     dtype('int64')
+     """
+     tipo = None
+     if hasattr(element, "dtype"):
+         tipo = element.dtype
+     elif is_list_like(element):
+         element = np.asarray(element)
+         tipo = element.dtype
+     return tipo
+
+
956
+ def invalidate_string_dtypes(dtype_set: set[DtypeObj]) -> None:
+     """
+     Change string like dtypes to object for
+     ``DataFrame.select_dtypes()``.
+     """
+     # error: Argument 1 to <set> has incompatible type "Type[generic]"; expected
+     # "Union[dtype[Any], ExtensionDtype, None]"
+     # error: Argument 2 to <set> has incompatible type "Type[generic]"; expected
+     # "Union[dtype[Any], ExtensionDtype, None]"
+     non_string_dtypes = dtype_set - {
+         np.dtype("S").type,  # type: ignore[arg-type]
+         np.dtype("<U").type,  # type: ignore[arg-type]
+     }
+     if non_string_dtypes != dtype_set:
+         raise TypeError("string dtypes are not allowed, use 'object' instead")
+
+
973
+ def coerce_indexer_dtype(indexer, categories) -> np.ndarray:
+     """coerce the indexer input array to the smallest dtype possible"""
+     length = len(categories)
+     if length < _int8_max:
+         return ensure_int8(indexer)
+     elif length < _int16_max:
+         return ensure_int16(indexer)
+     elif length < _int32_max:
+         return ensure_int32(indexer)
+     return ensure_int64(indexer)
+
+
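This is why ``Categorical.codes`` come out in the smallest signed integer dtype that can index the categories; a short illustrative check:

```python
import pandas as pd

cat = pd.Categorical(["a", "b", "a", "c"])
print(cat.codes.dtype)  # int8 -- only 3 categories, so int8 suffices
```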
985
+ def convert_dtypes(
+     input_array: ArrayLike,
+     convert_string: bool = True,
+     convert_integer: bool = True,
+     convert_boolean: bool = True,
+     convert_floating: bool = True,
+     infer_objects: bool = False,
+     dtype_backend: Literal["numpy_nullable", "pyarrow"] = "numpy_nullable",
+ ) -> DtypeObj:
+     """
+     Convert objects to best possible type, and optionally,
+     to types supporting ``pd.NA``.
+
+     Parameters
+     ----------
+     input_array : ExtensionArray or np.ndarray
+     convert_string : bool, default True
+         Whether object dtypes should be converted to ``StringDtype()``.
+     convert_integer : bool, default True
+         Whether, if possible, conversion can be done to integer extension types.
+     convert_boolean : bool, default True
+         Whether object dtypes should be converted to ``BooleanDtype()``.
+     convert_floating : bool, default True
+         Whether, if possible, conversion can be done to floating extension types.
+         If `convert_integer` is also True, preference will be given to integer
+         dtypes if the floats can be faithfully cast to integers.
+     infer_objects : bool, default False
+         Whether to also infer objects to float/int if possible. Is only hit if the
+         object array contains pd.NA.
+     dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable'
+         Back-end data type applied to the resultant :class:`DataFrame`
+         (still experimental). Behaviour is as follows:
+
+         * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame`
+           (default).
+         * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype`
+           DataFrame.
+
+         .. versionadded:: 2.0
+
+     Returns
+     -------
+     np.dtype, or ExtensionDtype
+     """
+     inferred_dtype: str | DtypeObj
+
+     if (
+         convert_string or convert_integer or convert_boolean or convert_floating
+     ) and isinstance(input_array, np.ndarray):
+         if input_array.dtype == object:
+             inferred_dtype = lib.infer_dtype(input_array)
+         else:
+             inferred_dtype = input_array.dtype
+
+         if is_string_dtype(inferred_dtype):
+             if not convert_string or inferred_dtype == "bytes":
+                 inferred_dtype = input_array.dtype
+             else:
+                 inferred_dtype = pandas_dtype_func("string")
+
+         if convert_integer:
+             target_int_dtype = pandas_dtype_func("Int64")
+
+             if input_array.dtype.kind in "iu":
+                 from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE
+
+                 inferred_dtype = NUMPY_INT_TO_DTYPE.get(
+                     input_array.dtype, target_int_dtype
+                 )
+             elif input_array.dtype.kind in "fcb":
+                 # TODO: de-dup with maybe_cast_to_integer_array?
+                 arr = input_array[notna(input_array)]
+                 if (arr.astype(int) == arr).all():
+                     inferred_dtype = target_int_dtype
+                 else:
+                     inferred_dtype = input_array.dtype
+             elif (
+                 infer_objects
+                 and input_array.dtype == object
+                 and (isinstance(inferred_dtype, str) and inferred_dtype == "integer")
+             ):
+                 inferred_dtype = target_int_dtype
+
+         if convert_floating:
+             if input_array.dtype.kind in "fcb":
+                 # i.e. numeric but not integer
+                 from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE
+
+                 inferred_float_dtype: DtypeObj = NUMPY_FLOAT_TO_DTYPE.get(
+                     input_array.dtype, pandas_dtype_func("Float64")
+                 )
+                 # if we could also convert to integer, check if all floats
+                 # are actually integers
+                 if convert_integer:
+                     # TODO: de-dup with maybe_cast_to_integer_array?
+                     arr = input_array[notna(input_array)]
+                     if (arr.astype(int) == arr).all():
+                         inferred_dtype = pandas_dtype_func("Int64")
+                     else:
+                         inferred_dtype = inferred_float_dtype
+                 else:
+                     inferred_dtype = inferred_float_dtype
+             elif (
+                 infer_objects
+                 and input_array.dtype == object
+                 and (
+                     isinstance(inferred_dtype, str)
+                     and inferred_dtype == "mixed-integer-float"
+                 )
+             ):
+                 inferred_dtype = pandas_dtype_func("Float64")
+
+         if convert_boolean:
+             if input_array.dtype.kind == "b":
+                 inferred_dtype = pandas_dtype_func("boolean")
+             elif isinstance(inferred_dtype, str) and inferred_dtype == "boolean":
+                 inferred_dtype = pandas_dtype_func("boolean")
+
+         if isinstance(inferred_dtype, str):
+             # If we couldn't do anything else, then we retain the dtype
+             inferred_dtype = input_array.dtype
+
+     else:
+         inferred_dtype = input_array.dtype
+
+     if dtype_backend == "pyarrow":
+         from pandas.core.arrays.arrow.array import to_pyarrow_type
+         from pandas.core.arrays.string_ import StringDtype
+
+         assert not isinstance(inferred_dtype, str)
+
+         if (
+             (convert_integer and inferred_dtype.kind in "iu")
+             or (convert_floating and inferred_dtype.kind in "fc")
+             or (convert_boolean and inferred_dtype.kind == "b")
+             or (convert_string and isinstance(inferred_dtype, StringDtype))
+             or (
+                 inferred_dtype.kind not in "iufcb"
+                 and not isinstance(inferred_dtype, StringDtype)
+             )
+         ):
+             if isinstance(inferred_dtype, PandasExtensionDtype) and not isinstance(
+                 inferred_dtype, DatetimeTZDtype
+             ):
+                 base_dtype = inferred_dtype.base
+             elif isinstance(inferred_dtype, (BaseMaskedDtype, ArrowDtype)):
+                 base_dtype = inferred_dtype.numpy_dtype
+             elif isinstance(inferred_dtype, StringDtype):
+                 base_dtype = np.dtype(str)
+             else:
+                 base_dtype = inferred_dtype
+             if (
+                 base_dtype.kind == "O"  # type: ignore[union-attr]
+                 and input_array.size > 0
+                 and isna(input_array).all()
+             ):
+                 import pyarrow as pa
+
+                 pa_type = pa.null()
+             else:
+                 pa_type = to_pyarrow_type(base_dtype)
+             if pa_type is not None:
+                 inferred_dtype = ArrowDtype(pa_type)
+     elif dtype_backend == "numpy_nullable" and isinstance(inferred_dtype, ArrowDtype):
+         # GH 53648
+         inferred_dtype = _arrow_dtype_mapping()[inferred_dtype.pyarrow_dtype]
+
+     # error: Incompatible return value type (got "Union[str, Union[dtype[Any],
+     # ExtensionDtype]]", expected "Union[dtype[Any], ExtensionDtype]")
+     return inferred_dtype  # type: ignore[return-value]
+
+
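The public entry point that drives this helper is ``DataFrame.convert_dtypes()``; a short illustrative sketch (the exact dtype reprs may vary by pandas build):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2, None], "b": [1.0, 2.0, 3.0], "c": ["x", "y", None]})
print(df.convert_dtypes().dtypes)
# a: Int64, b: Int64 (floats that are faithfully integers), c: nullable string
```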
1157
+ def maybe_infer_to_datetimelike(
+     value: npt.NDArray[np.object_],
+ ) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray:
+     """
+     We might have an array (or single object) that is datetime-like; if no
+     dtype is passed, don't change the values unless we find a
+     datetime/timedelta set.
+
+     This is pretty strict in that a datetime/timedelta is REQUIRED
+     in addition to possible nulls/string likes.
+
+     Parameters
+     ----------
+     value : np.ndarray[object]
+
+     Returns
+     -------
+     np.ndarray, DatetimeArray, TimedeltaArray, PeriodArray, or IntervalArray
+     """
+     if not isinstance(value, np.ndarray) or value.dtype != object:
+         # Caller is responsible for passing only ndarray[object]
+         raise TypeError(type(value))  # pragma: no cover
+     if value.ndim != 1:
+         # Caller is responsible
+         raise ValueError(value.ndim)  # pragma: no cover
+
+     if not len(value):
+         return value
+
+     # error: Incompatible return value type (got "Union[ExtensionArray,
+     # ndarray[Any, Any]]", expected "Union[ndarray[Any, Any], DatetimeArray,
+     # TimedeltaArray, PeriodArray, IntervalArray]")
+     return lib.maybe_convert_objects(  # type: ignore[return-value]
+         value,
+         # Here we do not convert numeric dtypes, as if we wanted that,
+         # numpy would have done it for us.
+         convert_numeric=False,
+         convert_non_numeric=True,
+         dtype_if_all_nat=np.dtype("M8[ns]"),
+     )
+
+
1200
+ def maybe_cast_to_datetime(
+     value: np.ndarray | list, dtype: np.dtype
+ ) -> ExtensionArray | np.ndarray:
+     """
+     try to cast the array/value to a datetimelike dtype, converting float
+     nan to iNaT
+
+     Caller is responsible for handling ExtensionDtype cases and non dt64/td64
+     cases.
+     """
+     from pandas.core.arrays.datetimes import DatetimeArray
+     from pandas.core.arrays.timedeltas import TimedeltaArray
+
+     assert dtype.kind in "mM"
+     if not is_list_like(value):
+         raise TypeError("value must be listlike")
+
+     # TODO: _from_sequence would raise ValueError in cases where
+     # _ensure_nanosecond_dtype raises TypeError
+     _ensure_nanosecond_dtype(dtype)
+
+     if lib.is_np_dtype(dtype, "m"):
+         res = TimedeltaArray._from_sequence(value, dtype=dtype)
+         return res
+     else:
+         try:
+             dta = DatetimeArray._from_sequence(value, dtype=dtype)
+         except ValueError as err:
+             # We can give a Series-specific exception message.
+             if "cannot supply both a tz and a timezone-naive dtype" in str(err):
+                 raise ValueError(
+                     "Cannot convert timezone-aware data to "
+                     "timezone-naive dtype. Use "
+                     "pd.Series(values).dt.tz_localize(None) instead."
+                 ) from err
+             raise
+
+         return dta
+
+
1240
+ def _ensure_nanosecond_dtype(dtype: DtypeObj) -> None:
+     """
+     Convert dtypes with granularity less than nanosecond to nanosecond
+
+     >>> _ensure_nanosecond_dtype(np.dtype("M8[us]"))
+
+     >>> _ensure_nanosecond_dtype(np.dtype("M8[D]"))
+     Traceback (most recent call last):
+         ...
+     TypeError: dtype=datetime64[D] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns'
+
+     >>> _ensure_nanosecond_dtype(np.dtype("m8[ps]"))
+     Traceback (most recent call last):
+         ...
+     TypeError: dtype=timedelta64[ps] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns'
+     """  # noqa: E501
+     msg = (
+         f"The '{dtype.name}' dtype has no unit. "
+         f"Please pass in '{dtype.name}[ns]' instead."
+     )
+
+     # unpack e.g. SparseDtype
+     dtype = getattr(dtype, "subtype", dtype)
+
+     if not isinstance(dtype, np.dtype):
+         # i.e. datetime64tz
+         pass
+
+     elif dtype.kind in "mM":
+         if not is_supported_dtype(dtype):
+             # pre-2.0 we would silently swap in nanos for lower-resolutions,
+             # raise for above-nano resolutions
+             if dtype.name in ["datetime64", "timedelta64"]:
+                 raise ValueError(msg)
+             # TODO: ValueError or TypeError? existing test
+             # test_constructor_generic_timestamp_bad_frequency expects TypeError
+             raise TypeError(
+                 f"dtype={dtype} is not supported. Supported resolutions are 's', "
+                 "'ms', 'us', and 'ns'"
+             )
+
+
1282
+ # TODO: other value-dependent functions to standardize here include
+ #  Index._find_common_type_compat
+ def find_result_type(left_dtype: DtypeObj, right: Any) -> DtypeObj:
+     """
+     Find the type/dtype for the result of an operation between objects.
+
+     This is similar to find_common_type, but looks at the right object instead
+     of just its dtype. This can be useful in particular when the right
+     object does not have a `dtype`.
+
+     Parameters
+     ----------
+     left_dtype : np.dtype or ExtensionDtype
+     right : Any
+
+     Returns
+     -------
+     np.dtype or ExtensionDtype
+
+     See Also
+     --------
+     find_common_type
+     numpy.result_type
+     """
+     new_dtype: DtypeObj
+
+     if (
+         isinstance(left_dtype, np.dtype)
+         and left_dtype.kind in "iuc"
+         and (lib.is_integer(right) or lib.is_float(right))
+     ):
+         # e.g. with int8 dtype and right=512, we want to end up with
+         # np.int16, whereas infer_dtype_from(512) gives np.int64,
+         # which will make us upcast too far.
+         if lib.is_float(right) and right.is_integer() and left_dtype.kind != "f":
+             right = int(right)
+         # After NEP 50, numpy won't inspect Python scalars
+         # TODO: do we need to recreate numpy's inspection logic for floats too
+         # (this breaks some tests)
+         if isinstance(right, int) and not isinstance(right, np.integer):
+             # This gives an unsigned type by default
+             # (if our number is positive)
+
+             # If our left dtype is signed, we might not want this since
+             # this might give us 1 dtype too big
+             # We should check if the corresponding int dtype (e.g. int64 for uint64)
+             # can hold the number
+             right_dtype = np.min_scalar_type(right)
+             if right == 0:
+                 # Special case 0
+                 right = left_dtype
+             elif (
+                 not np.issubdtype(left_dtype, np.unsignedinteger)
+                 and 0 < right <= np.iinfo(right_dtype).max
+             ):
+                 # If left dtype isn't unsigned, check if it fits in the signed dtype
+                 right = np.dtype(f"i{right_dtype.itemsize}")
+             else:
+                 right = right_dtype
+
+         new_dtype = np.result_type(left_dtype, right)
+
+     elif is_valid_na_for_dtype(right, left_dtype):
+         # e.g. IntervalDtype[int] and None/np.nan
+         new_dtype = ensure_dtype_can_hold_na(left_dtype)
+
+     else:
+         dtype, _ = infer_dtype_from(right)
+         new_dtype = find_common_type([left_dtype, dtype])
+
+     return new_dtype
+
+
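Illustrative value-dependent promotion from ``find_result_type`` (private API; outputs shown as comments):

```python
import numpy as np
from pandas.core.dtypes.cast import find_result_type

print(find_result_type(np.dtype("int8"), 512))   # int16 -- not int64, per the comment above
print(find_result_type(np.dtype("int8"), None))  # float64, so the NA value fits
```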
1355
+ def common_dtype_categorical_compat(
+     objs: Sequence[Index | ArrayLike], dtype: DtypeObj
+ ) -> DtypeObj:
+     """
+     Update the result of find_common_type to account for NAs in a Categorical.
+
+     Parameters
+     ----------
+     objs : list[np.ndarray | ExtensionArray | Index]
+     dtype : np.dtype or ExtensionDtype
+
+     Returns
+     -------
+     np.dtype or ExtensionDtype
+     """
+     # GH#38240
+
+     # TODO: more generally, could do `not can_hold_na(dtype)`
+     if lib.is_np_dtype(dtype, "iu"):
+         for obj in objs:
+             # We don't want to accidentally allow e.g. "categorical" str here
+             obj_dtype = getattr(obj, "dtype", None)
+             if isinstance(obj_dtype, CategoricalDtype):
+                 if isinstance(obj, ABCIndex):
+                     # This check may already be cached
+                     hasnas = obj.hasnans
+                 else:
+                     # Categorical
+                     hasnas = cast("Categorical", obj)._hasna
+
+                 if hasnas:
+                     # see test_union_int_categorical_with_nan
+                     dtype = np.dtype(np.float64)
+                     break
+     return dtype
+
+
1392
+ def np_find_common_type(*dtypes: np.dtype) -> np.dtype:
+     """
+     np.find_common_type implementation pre-1.25 deprecation using np.result_type
+     https://github.com/pandas-dev/pandas/pull/49569#issuecomment-1308300065
+
+     Parameters
+     ----------
+     dtypes : np.dtypes
+
+     Returns
+     -------
+     np.dtype
+     """
+     try:
+         common_dtype = np.result_type(*dtypes)
+         if common_dtype.kind in "mMSU":
+             # NumPy promotion currently (1.25) misbehaves for times and strings,
+             # so fall back to object (find_common_type did unless there
+             # was only one dtype)
+             common_dtype = np.dtype("O")
+
+     except TypeError:
+         common_dtype = np.dtype("O")
+     return common_dtype
+
+
1418
+ @overload
+ def find_common_type(types: list[np.dtype]) -> np.dtype:
+     ...
+
+
+ @overload
+ def find_common_type(types: list[ExtensionDtype]) -> DtypeObj:
+     ...
+
+
+ @overload
+ def find_common_type(types: list[DtypeObj]) -> DtypeObj:
+     ...
+
+
+ def find_common_type(types):
+     """
+     Find a common data type among the given dtypes.
+
+     Parameters
+     ----------
+     types : list of dtypes
+
+     Returns
+     -------
+     pandas extension or numpy dtype
+
+     See Also
+     --------
+     numpy.find_common_type
+
+     """
+     if not types:
+         raise ValueError("no types given")
+
+     first = types[0]
+
+     # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2)
+     # => object
+     if lib.dtypes_all_equal(list(types)):
+         return first
+
+     # get unique types (dict.fromkeys is used as order-preserving set())
+     types = list(dict.fromkeys(types).keys())
+
+     if any(isinstance(t, ExtensionDtype) for t in types):
+         for t in types:
+             if isinstance(t, ExtensionDtype):
+                 res = t._get_common_dtype(types)
+                 if res is not None:
+                     return res
+         return np.dtype("object")
+
+     # take lowest unit
+     if all(lib.is_np_dtype(t, "M") for t in types):
+         return np.dtype(max(types))
+     if all(lib.is_np_dtype(t, "m") for t in types):
+         return np.dtype(max(types))
+
+     # don't mix bool / int or float or complex
+     # this is different from numpy, which casts bool with float/int as int
+     has_bools = any(t.kind == "b" for t in types)
+     if has_bools:
+         for t in types:
+             if t.kind in "iufc":
+                 return np.dtype("object")
+
+     return np_find_common_type(*types)
+
+
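Illustrative common-type results (private API; outputs shown as comments):

```python
import numpy as np
from pandas.core.dtypes.cast import find_common_type

print(find_common_type([np.dtype("int64"), np.dtype("float32")]))  # float64
print(find_common_type([np.dtype("bool"), np.dtype("int8")]))      # object -- bools don't mix with ints here
```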
1488
+ def construct_2d_arraylike_from_scalar(
+     value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool
+ ) -> np.ndarray:
+     shape = (length, width)
+
+     if dtype.kind in "mM":
+         value = _maybe_box_and_unbox_datetimelike(value, dtype)
+     elif dtype == _dtype_obj:
+         if isinstance(value, (np.timedelta64, np.datetime64)):
+             # calling np.array below would cast to pytimedelta/pydatetime
+             out = np.empty(shape, dtype=object)
+             out.fill(value)
+             return out
+
+     # Attempt to coerce to a numpy array
+     try:
+         if not copy:
+             arr = np.asarray(value, dtype=dtype)
+         else:
+             arr = np.array(value, dtype=dtype, copy=copy)
+     except (ValueError, TypeError) as err:
+         raise TypeError(
+             f"DataFrame constructor called with incompatible data and dtype: {err}"
+         ) from err
+
+     if arr.ndim != 0:
+         raise ValueError("DataFrame constructor not properly called!")
+
+     return np.full(shape, arr)
+
+
1519
+ def construct_1d_arraylike_from_scalar(
+     value: Scalar, length: int, dtype: DtypeObj | None
+ ) -> ArrayLike:
+     """
+     create a np.ndarray / pandas type of specified shape and dtype
+     filled with values
+
+     Parameters
+     ----------
+     value : scalar value
+     length : int
+     dtype : pandas_dtype or np.dtype
+
+     Returns
+     -------
+     np.ndarray / pandas type of length, filled with value
+
+     """
+
+     if dtype is None:
+         try:
+             dtype, value = infer_dtype_from_scalar(value)
+         except OutOfBoundsDatetime:
+             dtype = _dtype_obj
+
+     if isinstance(dtype, ExtensionDtype):
+         cls = dtype.construct_array_type()
+         seq = [] if length == 0 else [value]
+         subarr = cls._from_sequence(seq, dtype=dtype).repeat(length)
+
+     else:
+         if length and dtype.kind in "iu" and isna(value):
+             # coerce if we have nan for an integer dtype
+             dtype = np.dtype("float64")
+         elif lib.is_np_dtype(dtype, "US"):
+             # coerce to object dtype so numpy takes our string as a scalar
+             # value instead of truncating it to the dtype's width
+             dtype = np.dtype("object")
+             if not isna(value):
+                 value = ensure_str(value)
+         elif dtype.kind in "mM":
+             value = _maybe_box_and_unbox_datetimelike(value, dtype)
+
+         subarr = np.empty(length, dtype=dtype)
+         if length:
+             # GH 47391: numpy > 1.24 will raise filling np.nan into int dtypes
+             subarr.fill(value)
+
+     return subarr
+
+
1570
+ def _maybe_box_and_unbox_datetimelike(value: Scalar, dtype: DtypeObj):
+     # Caller is responsible for checking dtype.kind in "mM"
+
+     if isinstance(value, dt.datetime):
+         # we don't want to box dt64, in particular datetime64("NaT")
+         value = maybe_box_datetimelike(value, dtype)
+
+     return _maybe_unbox_datetimelike(value, dtype)
+
+
1580
+ def construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray:
+     """
+     Transform any list-like object into a 1-dimensional numpy array of object
+     dtype.
+
+     Parameters
+     ----------
+     values : any iterable which has a len()
+
+     Raises
+     ------
+     TypeError
+         * If `values` does not have a len()
+
+     Returns
+     -------
+     1-dimensional numpy array of dtype object
+     """
+     # numpy will try to interpret nested lists as further dimensions, hence
+     # making a 1D array that contains list-likes is a bit tricky:
+     result = np.empty(len(values), dtype="object")
+     result[:] = values
+     return result
+
+
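A short illustration of why the empty-then-assign trick is needed: ``np.array`` would interpret a list of equal-length lists as a 2-D array, while preallocating an object array keeps it 1-D:

```python
import numpy as np

values = [[1, 2], [3, 4]]
print(np.array(values).shape)  # (2, 2) -- nested lists became a second dimension

result = np.empty(len(values), dtype="object")
result[:] = values
print(result.shape)  # (2,) -- each element is a list
```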
+ def maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.ndarray:
+     """
+     Takes an array and returns the casted version, raising when the data is
+     incompatible with integer/unsigned integer dtypes.
+
+     Parameters
+     ----------
+     arr : np.ndarray or list
+         The array to cast.
+     dtype : np.dtype
+         The integer dtype to cast the array to.
+
+     Returns
+     -------
+     ndarray
+         Array of integer or unsigned integer dtype.
+
+     Raises
+     ------
+     OverflowError : the dtype is incompatible with the data
+     ValueError : loss of precision has occurred during casting
+
+     Examples
+     --------
+     If you try to coerce negative values to unsigned integers, it raises:
+
+     >>> pd.Series([-1], dtype="uint64")
+     Traceback (most recent call last):
+         ...
+     OverflowError: Trying to coerce negative values to unsigned integers
+
+     Also, if you try to coerce float values to integers, it raises:
+
+     >>> maybe_cast_to_integer_array([1, 2, 3.5], dtype=np.dtype("int64"))
+     Traceback (most recent call last):
+         ...
+     ValueError: Trying to coerce float values to integers
+     """
+     assert dtype.kind in "iu"
+
+     try:
+         if not isinstance(arr, np.ndarray):
+             with warnings.catch_warnings():
+                 # We already disallow dtype=uint w/ negative numbers
+                 # (test_constructor_coercion_signed_to_unsigned) so safe to ignore.
+                 if not np_version_gt2:
+                     warnings.filterwarnings(
+                         "ignore",
+                         "NumPy will stop allowing conversion of "
+                         "out-of-bound Python int",
+                         DeprecationWarning,
+                     )
+                 casted = np.asarray(arr, dtype=dtype)
+         else:
+             with warnings.catch_warnings():
+                 warnings.filterwarnings("ignore", category=RuntimeWarning)
+                 casted = arr.astype(dtype, copy=False)
+     except OverflowError as err:
+         raise OverflowError(
+             "The elements provided in the data cannot all be "
+             f"casted to the dtype {dtype}"
+         ) from err
+
+     if isinstance(arr, np.ndarray) and arr.dtype == dtype:
+         # avoid expensive array_equal check
+         return casted
+
+     with warnings.catch_warnings():
+         warnings.filterwarnings("ignore", category=RuntimeWarning)
+         warnings.filterwarnings(
+             "ignore", "elementwise comparison failed", FutureWarning
+         )
+         if np.array_equal(arr, casted):
+             return casted
+
+     # We do this casting to allow for proper
+     # data and dtype checking.
+     #
+     # We didn't do this earlier because NumPy
+     # doesn't handle `uint64` correctly.
+     arr = np.asarray(arr)
+
+     if np.issubdtype(arr.dtype, str):
+         # TODO(numpy-2.0 min): This case will raise an OverflowError above
+         if (casted.astype(str) == arr).all():
+             return casted
+         raise ValueError(f"string values cannot be losslessly cast to {dtype}")
+
+     if dtype.kind == "u" and (arr < 0).any():
+         # TODO: can this be hit anymore after numpy 2.0?
+         raise OverflowError("Trying to coerce negative values to unsigned integers")
+
+     if arr.dtype.kind == "f":
+         if not np.isfinite(arr).all():
+             raise IntCastingNaNError(
+                 "Cannot convert non-finite values (NA or inf) to integer"
+             )
+         raise ValueError("Trying to coerce float values to integers")
+     if arr.dtype == object:
+         raise ValueError("Trying to coerce float values to integers")
+
+     if casted.dtype < arr.dtype:
+         # TODO: Can this path be hit anymore with numpy > 2
+         # GH#41734 e.g. [1, 200, 923442] and dtype="int8" -> overflows
+         raise ValueError(
+             f"Values are too large to be losslessly converted to {dtype}. "
+             f"To cast anyway, use pd.Series(values).astype({dtype})"
+         )
+
+     if arr.dtype.kind in "mM":
+         # test_constructor_maskedarray_nonfloat
+         raise TypeError(
+             f"Constructing a Series or DataFrame from {arr.dtype} values and "
+             f"dtype={dtype} is not supported. Use values.view({dtype}) instead."
+         )
+
+     # No known cases that get here, but raising explicitly to cover our bases.
+     raise ValueError(f"values cannot be losslessly cast to {dtype}")
+
+
+ def can_hold_element(arr: ArrayLike, element: Any) -> bool:
+     """
+     Can we do an inplace setitem with this element in an array with this dtype?
+
+     Parameters
+     ----------
+     arr : np.ndarray or ExtensionArray
+     element : Any
+
+     Returns
+     -------
+     bool
+     """
+     dtype = arr.dtype
+     if not isinstance(dtype, np.dtype) or dtype.kind in "mM":
+         if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)):
+             # np.dtype here catches datetime64ns and timedelta64ns; we assume
+             # in this case that we have DatetimeArray/TimedeltaArray
+             arr = cast(
+                 "PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray", arr
+             )
+             try:
+                 arr._validate_setitem_value(element)
+                 return True
+             except (ValueError, TypeError):
+                 return False
+
+         # This is technically incorrect, but maintains the behavior of
+         # ExtensionBlock._can_hold_element
+         return True
+
+     try:
+         np_can_hold_element(dtype, element)
+         return True
+     except (TypeError, LossySetitemError):
+         return False
+
+
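Editor's note: a minimal sketch (not part of the diff) of what can_hold_element above reports, assuming the vendored pandas exposes it from pandas.core.dtypes.cast as shown:

import numpy as np
from pandas.core.dtypes.cast import can_hold_element

arr = np.array([1, 2, 3], dtype="int8")
print(can_hold_element(arr, 5))     # True: 5 fits losslessly in int8
print(can_hold_element(arr, 1000))  # False: 1000 overflows int8
print(can_hold_element(arr, 0.5))   # False: the fraction would be lost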
+ def np_can_hold_element(dtype: np.dtype, element: Any) -> Any:
+     """
+     Raise if we cannot losslessly set this element into an ndarray with this dtype.
+
+     Specifically about places where we disagree with numpy. i.e. there are
+     cases where numpy will raise in doing the setitem that we do not check
+     for here, e.g. setting str "X" into a numeric ndarray.
+
+     Returns
+     -------
+     Any
+         The element, potentially cast to the dtype.
+
+     Raises
+     ------
+     ValueError : If we cannot losslessly store this element with this dtype.
+     """
+     if dtype == _dtype_obj:
+         return element
+
+     tipo = _maybe_infer_dtype_type(element)
+
+     if dtype.kind in "iu":
+         if isinstance(element, range):
+             if _dtype_can_hold_range(element, dtype):
+                 return element
+             raise LossySetitemError
+
+         if is_integer(element) or (is_float(element) and element.is_integer()):
+             # e.g. test_setitem_series_int8 if we have a python int 1
+             # tipo may be np.int32, despite the fact that it will fit
+             # in smaller int dtypes.
+             info = np.iinfo(dtype)
+             if info.min <= element <= info.max:
+                 return dtype.type(element)
+             raise LossySetitemError
+
+         if tipo is not None:
+             if tipo.kind not in "iu":
+                 if isinstance(element, np.ndarray) and element.dtype.kind == "f":
+                     # If all can be losslessly cast to integers, then we can hold them
+                     with np.errstate(invalid="ignore"):
+                         # We check afterwards whether the cast was lossless, so no
+                         # need to show the warning
+                         casted = element.astype(dtype)
+                     comp = casted == element
+                     if comp.all():
+                         # Return the casted values because they can be passed to
+                         # np.putmask, whereas the raw values cannot.
+                         # see TestSetitemFloatNDarrayIntoIntegerSeries
+                         return casted
+                     raise LossySetitemError
+
+                 elif isinstance(element, ABCExtensionArray) and isinstance(
+                     element.dtype, CategoricalDtype
+                 ):
+                     # GH#52927 setting Categorical value into non-EA frame
+                     # TODO: general-case for EAs?
+                     try:
+                         casted = element.astype(dtype)
+                     except (ValueError, TypeError):
+                         raise LossySetitemError
+                     # Check for cases of either
+                     # a) lossy overflow/rounding or
+                     # b) semantic changes like dt64->int64
+                     comp = casted == element
+                     if not comp.all():
+                         raise LossySetitemError
+                     return casted
+
+                 # Anything other than integer we cannot hold
+                 raise LossySetitemError
+             if (
+                 dtype.kind == "u"
+                 and isinstance(element, np.ndarray)
+                 and element.dtype.kind == "i"
+             ):
+                 # see test_where_uint64
+                 casted = element.astype(dtype)
+                 if (casted == element).all():
+                     # TODO: faster to check (element >= 0).all()? potential
+                     # itemsize issues there?
+                     return casted
+                 raise LossySetitemError
+             if dtype.itemsize < tipo.itemsize:
+                 raise LossySetitemError
+             if not isinstance(tipo, np.dtype):
+                 # i.e. nullable IntegerDtype; we can put this into an ndarray
+                 # losslessly iff it has no NAs
+                 arr = element._values if isinstance(element, ABCSeries) else element
+                 if arr._hasna:
+                     raise LossySetitemError
+                 return element
+
+             return element
+
+         raise LossySetitemError
+
+     if dtype.kind == "f":
+         if lib.is_integer(element) or lib.is_float(element):
+             casted = dtype.type(element)
+             if np.isnan(casted) or casted == element:
+                 return casted
+             # otherwise e.g. overflow see TestCoercionFloat32
+             raise LossySetitemError
+
+         if tipo is not None:
+             # TODO: itemsize check?
+             if tipo.kind not in "iuf":
+                 # Anything other than float/integer we cannot hold
+                 raise LossySetitemError
+             if not isinstance(tipo, np.dtype):
+                 # i.e. nullable IntegerDtype or FloatingDtype;
+                 # we can put this into an ndarray losslessly iff it has no NAs
+                 if element._hasna:
+                     raise LossySetitemError
+                 return element
+             elif tipo.itemsize > dtype.itemsize or tipo.kind != dtype.kind:
+                 if isinstance(element, np.ndarray):
+                     # e.g. TestDataFrameIndexingWhere::test_where_alignment
+                     casted = element.astype(dtype)
+                     if np.array_equal(casted, element, equal_nan=True):
+                         return casted
+                     raise LossySetitemError
+
+             return element
+
+         raise LossySetitemError
+
+     if dtype.kind == "c":
+         if lib.is_integer(element) or lib.is_complex(element) or lib.is_float(element):
+             if np.isnan(element):
+                 # see test_where_complex GH#6345
+                 return dtype.type(element)
+
+             with warnings.catch_warnings():
+                 warnings.filterwarnings("ignore")
+                 casted = dtype.type(element)
+             if casted == element:
+                 return casted
+             # otherwise e.g. overflow see test_32878_complex_itemsize
+             raise LossySetitemError
+
+         if tipo is not None:
+             if tipo.kind in "iufc":
+                 return element
+             raise LossySetitemError
+         raise LossySetitemError
+
+     if dtype.kind == "b":
+         if tipo is not None:
+             if tipo.kind == "b":
+                 if not isinstance(tipo, np.dtype):
+                     # i.e. we have a BooleanArray
+                     if element._hasna:
+                         # i.e. there are pd.NA elements
+                         raise LossySetitemError
+                 return element
+             raise LossySetitemError
+         if lib.is_bool(element):
+             return element
+         raise LossySetitemError
+
+     if dtype.kind == "S":
+         # TODO: test tests.frame.methods.test_replace tests get here,
+         # need more targeted tests. xref phofl has a PR about this
+         if tipo is not None:
+             if tipo.kind == "S" and tipo.itemsize <= dtype.itemsize:
+                 return element
+             raise LossySetitemError
+         if isinstance(element, bytes) and len(element) <= dtype.itemsize:
+             return element
+         raise LossySetitemError
+
+     if dtype.kind == "V":
+         # i.e. np.void, which cannot hold _anything_
+         raise LossySetitemError
+
+     raise NotImplementedError(dtype)
+
+
+ def _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool:
+     """
+     _maybe_infer_dtype_type infers to int64 (and float64 for very large endpoints),
+     but in many cases a range can be held by a smaller integer dtype.
+     Check if this is one of those cases.
+     """
+     if not len(rng):
+         return True
+     return np_can_cast_scalar(rng.start, dtype) and np_can_cast_scalar(rng.stop, dtype)
+
+
+ def np_can_cast_scalar(element: Scalar, dtype: np.dtype) -> bool:
+     """
+     np.can_cast pandas-equivalent for pre 2-0 behavior that allowed scalar
+     inference
+
+     Parameters
+     ----------
+     element : Scalar
+     dtype : np.dtype
+
+     Returns
+     -------
+     bool
+     """
+     try:
+         np_can_hold_element(dtype, element)
+         return True
+     except (LossySetitemError, NotImplementedError):
+         return False
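Editor's note: np_can_cast_scalar above is the scalar-inference analogue of np.can_cast. A minimal sketch (not part of the diff), assuming it is importable from pandas.core.dtypes.cast as shown:

import numpy as np
from pandas.core.dtypes.cast import np_can_cast_scalar

print(np_can_cast_scalar(255, np.dtype("uint8")))  # True: value fits in uint8
print(np_can_cast_scalar(256, np.dtype("uint8")))  # False: overflows uint8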
venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes).
venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/accessors.cpython-310.pyc ADDED
Binary file (17.3 kB).
venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/api.cpython-310.pyc ADDED
Binary file (10.6 kB).
venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/multi.cpython-310.pyc ADDED
Binary file (108 kB).
venv/lib/python3.10/site-packages/pandas/core/indexes/__pycache__/timedeltas.cpython-310.pyc ADDED
Binary file (10.1 kB).
venv/lib/python3.10/site-packages/pandas/core/internals/__init__.py ADDED
@@ -0,0 +1,85 @@
+ from pandas.core.internals.api import make_block  # 2023-09-18 pyarrow uses this
+ from pandas.core.internals.array_manager import (
+     ArrayManager,
+     SingleArrayManager,
+ )
+ from pandas.core.internals.base import (
+     DataManager,
+     SingleDataManager,
+ )
+ from pandas.core.internals.concat import concatenate_managers
+ from pandas.core.internals.managers import (
+     BlockManager,
+     SingleBlockManager,
+ )
+
+ __all__ = [
+     "Block",  # pylint: disable=undefined-all-variable
+     "DatetimeTZBlock",  # pylint: disable=undefined-all-variable
+     "ExtensionBlock",  # pylint: disable=undefined-all-variable
+     "make_block",
+     "DataManager",
+     "ArrayManager",
+     "BlockManager",
+     "SingleDataManager",
+     "SingleBlockManager",
+     "SingleArrayManager",
+     "concatenate_managers",
+ ]
+
+
+ def __getattr__(name: str):
+     # GH#55139
+     import warnings
+
+     if name == "create_block_manager_from_blocks":
+         # GH#33892
+         warnings.warn(
+             f"{name} is deprecated and will be removed in a future version. "
+             "Use public APIs instead.",
+             DeprecationWarning,
+             # https://github.com/pandas-dev/pandas/pull/55139#pullrequestreview-1720690758
+             # on hard-coding stacklevel
+             stacklevel=2,
+         )
+         from pandas.core.internals.managers import create_block_manager_from_blocks
+
+         return create_block_manager_from_blocks
+
+     if name in [
+         "NumericBlock",
+         "ObjectBlock",
+         "Block",
+         "ExtensionBlock",
+         "DatetimeTZBlock",
+     ]:
+         warnings.warn(
+             f"{name} is deprecated and will be removed in a future version. "
+             "Use public APIs instead.",
+             DeprecationWarning,
+             # https://github.com/pandas-dev/pandas/pull/55139#pullrequestreview-1720690758
+             # on hard-coding stacklevel
+             stacklevel=2,
+         )
+         if name == "NumericBlock":
+             from pandas.core.internals.blocks import NumericBlock
+
+             return NumericBlock
+         elif name == "DatetimeTZBlock":
+             from pandas.core.internals.blocks import DatetimeTZBlock
+
+             return DatetimeTZBlock
+         elif name == "ExtensionBlock":
+             from pandas.core.internals.blocks import ExtensionBlock
+
+             return ExtensionBlock
+         elif name == "Block":
+             from pandas.core.internals.blocks import Block
+
+             return Block
+         else:
+             from pandas.core.internals.blocks import ObjectBlock
+
+             return ObjectBlock
+
+     raise AttributeError(f"module 'pandas.core.internals' has no attribute '{name}'")
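Editor's note: the module-level __getattr__ above keeps deprecated block names importable while warning (GH#55139). A minimal sketch (not part of the diff) of the observable behavior:

import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Deprecated access path; resolved lazily via the __getattr__ above.
    from pandas.core.internals import ExtensionBlock  # noqa: F401

print(caught[0].category.__name__)  # DeprecationWarning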
venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.55 kB).
venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/api.cpython-310.pyc ADDED
Binary file (3.17 kB).
venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/base.cpython-310.pyc ADDED
Binary file (10.7 kB).
venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/construction.cpython-310.pyc ADDED
Binary file (23.4 kB).
venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/managers.cpython-310.pyc ADDED
Binary file (60.2 kB).
venv/lib/python3.10/site-packages/pandas/core/internals/__pycache__/ops.cpython-310.pyc ADDED
Binary file (3.19 kB).
venv/lib/python3.10/site-packages/pandas/core/internals/api.py ADDED
@@ -0,0 +1,156 @@
+ """
+ This is a pseudo-public API for downstream libraries. We ask that downstream
+ authors
+
+ 1) Try to avoid using internals directly altogether, and failing that,
+ 2) Use only functions exposed here (or in core.internals)
+
+ """
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+ import numpy as np
+
+ from pandas._libs.internals import BlockPlacement
+
+ from pandas.core.dtypes.common import pandas_dtype
+ from pandas.core.dtypes.dtypes import (
+     DatetimeTZDtype,
+     PeriodDtype,
+ )
+
+ from pandas.core.arrays import DatetimeArray
+ from pandas.core.construction import extract_array
+ from pandas.core.internals.blocks import (
+     check_ndim,
+     ensure_block_shape,
+     extract_pandas_array,
+     get_block_type,
+     maybe_coerce_values,
+ )
+
+ if TYPE_CHECKING:
+     from pandas._typing import Dtype
+
+     from pandas.core.internals.blocks import Block
+
+
+ def make_block(
+     values, placement, klass=None, ndim=None, dtype: Dtype | None = None
+ ) -> Block:
+     """
+     This is a pseudo-public analogue to blocks.new_block.
+
+     We ask that downstream libraries use this rather than any fully-internal
+     APIs, including but not limited to:
+
+     - core.internals.blocks.make_block
+     - Block.make_block
+     - Block.make_block_same_class
+     - Block.__init__
+     """
+     if dtype is not None:
+         dtype = pandas_dtype(dtype)
+
+     values, dtype = extract_pandas_array(values, dtype, ndim)
+
+     from pandas.core.internals.blocks import (
+         DatetimeTZBlock,
+         ExtensionBlock,
+     )
+
+     if klass is ExtensionBlock and isinstance(values.dtype, PeriodDtype):
+         # GH-44681 changed PeriodArray to be stored in the 2D
+         # NDArrayBackedExtensionBlock instead of ExtensionBlock
+         # -> still allow ExtensionBlock to be passed in this case for back compat
+         klass = None
+
+     if klass is None:
+         dtype = dtype or values.dtype
+         klass = get_block_type(dtype)
+
+     elif klass is DatetimeTZBlock and not isinstance(values.dtype, DatetimeTZDtype):
+         # pyarrow calls get here
+         values = DatetimeArray._simple_new(
+             # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
+             # incompatible type "Union[ExtensionDtype, dtype[Any], None]";
+             # expected "Union[dtype[datetime64], DatetimeTZDtype]"
+             values,
+             dtype=dtype,  # type: ignore[arg-type]
+         )
+
+     if not isinstance(placement, BlockPlacement):
+         placement = BlockPlacement(placement)
+
+     ndim = maybe_infer_ndim(values, placement, ndim)
+     if isinstance(values.dtype, (PeriodDtype, DatetimeTZDtype)):
+         # GH#41168 ensure we can pass 1D dt64tz values
+         # More generally, any EA dtype that isn't is_1d_only_ea_dtype
+         values = extract_array(values, extract_numpy=True)
+         values = ensure_block_shape(values, ndim)
+
+     check_ndim(values, placement, ndim)
+     values = maybe_coerce_values(values)
+     return klass(values, ndim=ndim, placement=placement)
+
+
+ def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int:
+     """
+     If `ndim` is not provided, infer it from placement and values.
+     """
+     if ndim is None:
+         # GH#38134 Block constructor now assumes ndim is not None
+         if not isinstance(values.dtype, np.dtype):
+             if len(placement) != 1:
+                 ndim = 1
+             else:
+                 ndim = 2
+         else:
+             ndim = values.ndim
+     return ndim
+
+
+ def __getattr__(name: str):
+     # GH#55139
+     import warnings
+
+     if name in [
+         "Block",
+         "ExtensionBlock",
+         "DatetimeTZBlock",
+         "create_block_manager_from_blocks",
+     ]:
+         # GH#33892
+         warnings.warn(
+             f"{name} is deprecated and will be removed in a future version. "
+             "Use public APIs instead.",
+             DeprecationWarning,
+             # https://github.com/pandas-dev/pandas/pull/55139#pullrequestreview-1720690758
+             # on hard-coding stacklevel
+             stacklevel=2,
+         )
+
+         if name == "create_block_manager_from_blocks":
+             from pandas.core.internals.managers import create_block_manager_from_blocks
+
+             return create_block_manager_from_blocks
+
+         elif name == "Block":
+             from pandas.core.internals.blocks import Block
+
+             return Block
+
+         elif name == "DatetimeTZBlock":
+             from pandas.core.internals.blocks import DatetimeTZBlock
+
+             return DatetimeTZBlock
+
+         elif name == "ExtensionBlock":
+             from pandas.core.internals.blocks import ExtensionBlock
+
+             return ExtensionBlock
+
+     raise AttributeError(
+         f"module 'pandas.core.internals.api' has no attribute '{name}'"
+     )
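Editor's note: a minimal sketch (not part of the diff) of how a downstream library might call the pseudo-public make_block documented above; the placement and array shape chosen here are illustrative assumptions, not anything prescribed by the source:

import numpy as np
from pandas.core.internals import make_block

# Block values are laid out as (n_columns_in_block, n_rows).
values = np.arange(6, dtype="int64").reshape(1, 6)
blk = make_block(values, placement=slice(0, 1))  # a one-column block
print(type(blk).__name__, blk.shape)  # a Block subclass with shape (1, 6)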
venv/lib/python3.10/site-packages/pandas/core/internals/array_manager.py ADDED
@@ -0,0 +1,1340 @@
+ """
+ Experimental manager based on storing a collection of 1D arrays
+ """
+ from __future__ import annotations
+
+ import itertools
+ from typing import (
+     TYPE_CHECKING,
+     Callable,
+     Literal,
+ )
+
+ import numpy as np
+
+ from pandas._libs import (
+     NaT,
+     lib,
+ )
+
+ from pandas.core.dtypes.astype import (
+     astype_array,
+     astype_array_safe,
+ )
+ from pandas.core.dtypes.cast import (
+     ensure_dtype_can_hold_na,
+     find_common_type,
+     infer_dtype_from_scalar,
+     np_find_common_type,
+ )
+ from pandas.core.dtypes.common import (
+     ensure_platform_int,
+     is_datetime64_ns_dtype,
+     is_integer,
+     is_numeric_dtype,
+     is_object_dtype,
+     is_timedelta64_ns_dtype,
+ )
+ from pandas.core.dtypes.dtypes import ExtensionDtype
+ from pandas.core.dtypes.generic import (
+     ABCDataFrame,
+     ABCSeries,
+ )
+ from pandas.core.dtypes.missing import (
+     array_equals,
+     isna,
+     na_value_for_dtype,
+ )
+
+ import pandas.core.algorithms as algos
+ from pandas.core.array_algos.quantile import quantile_compat
+ from pandas.core.array_algos.take import take_1d
+ from pandas.core.arrays import (
+     DatetimeArray,
+     ExtensionArray,
+     NumpyExtensionArray,
+     TimedeltaArray,
+ )
+ from pandas.core.construction import (
+     ensure_wrapped_if_datetimelike,
+     extract_array,
+     sanitize_array,
+ )
+ from pandas.core.indexers import (
+     maybe_convert_indices,
+     validate_indices,
+ )
+ from pandas.core.indexes.api import (
+     Index,
+     ensure_index,
+ )
+ from pandas.core.indexes.base import get_values_for_csv
+ from pandas.core.internals.base import (
+     DataManager,
+     SingleDataManager,
+     ensure_np_dtype,
+     interleaved_dtype,
+ )
+ from pandas.core.internals.blocks import (
+     BlockPlacement,
+     ensure_block_shape,
+     external_values,
+     extract_pandas_array,
+     maybe_coerce_values,
+     new_block,
+ )
+ from pandas.core.internals.managers import make_na_array
+
+ if TYPE_CHECKING:
+     from collections.abc import Hashable
+
+     from pandas._typing import (
+         ArrayLike,
+         AxisInt,
+         DtypeObj,
+         QuantileInterpolation,
+         Self,
+         npt,
+     )
+
+
+ class BaseArrayManager(DataManager):
+     """
+     Core internal data structure to implement DataFrame and Series.
+
+     Alternative to the BlockManager, storing a list of 1D arrays instead of
+     Blocks.
+
+     This is *not* a public API class
+
+     Parameters
+     ----------
+     arrays : Sequence of arrays
+     axes : Sequence of Index
+     verify_integrity : bool, default True
+
+     """
+
+     __slots__ = [
+         "_axes",  # private attribute, because 'axes' has different order, see below
+         "arrays",
+     ]
+
+     arrays: list[np.ndarray | ExtensionArray]
+     _axes: list[Index]
+
+     def __init__(
+         self,
+         arrays: list[np.ndarray | ExtensionArray],
+         axes: list[Index],
+         verify_integrity: bool = True,
+     ) -> None:
+         raise NotImplementedError
+
+     def make_empty(self, axes=None) -> Self:
+         """Return an empty ArrayManager with the items axis of len 0 (no columns)"""
+         if axes is None:
+             axes = [self.axes[1:], Index([])]
+
+         arrays: list[np.ndarray | ExtensionArray] = []
+         return type(self)(arrays, axes)
+
+     @property
+     def items(self) -> Index:
+         return self._axes[-1]
+
+     @property
+     # error: Signature of "axes" incompatible with supertype "DataManager"
+     def axes(self) -> list[Index]:  # type: ignore[override]
+         # mypy doesn't work to override attribute with property
+         # see https://github.com/python/mypy/issues/4125
+         """Axes is BlockManager-compatible order (columns, rows)"""
+         return [self._axes[1], self._axes[0]]
+
+     @property
+     def shape_proper(self) -> tuple[int, ...]:
+         # this returns (n_rows, n_columns)
+         return tuple(len(ax) for ax in self._axes)
+
+     @staticmethod
+     def _normalize_axis(axis: AxisInt) -> int:
+         # switch axis
+         axis = 1 if axis == 0 else 0
+         return axis
+
+     def set_axis(self, axis: AxisInt, new_labels: Index) -> None:
+         # Caller is responsible for ensuring we have an Index object.
+         self._validate_set_axis(axis, new_labels)
+         axis = self._normalize_axis(axis)
+         self._axes[axis] = new_labels
+
+     def get_dtypes(self) -> npt.NDArray[np.object_]:
+         return np.array([arr.dtype for arr in self.arrays], dtype="object")
+
+     def add_references(self, mgr: BaseArrayManager) -> None:
+         """
+         Only implemented on the BlockManager level
+         """
+         return
+
+     def __getstate__(self):
+         return self.arrays, self._axes
+
+     def __setstate__(self, state) -> None:
+         self.arrays = state[0]
+         self._axes = state[1]
+
+     def __repr__(self) -> str:
+         output = type(self).__name__
+         output += f"\nIndex: {self._axes[0]}"
+         if self.ndim == 2:
+             output += f"\nColumns: {self._axes[1]}"
+         output += f"\n{len(self.arrays)} arrays:"
+         for arr in self.arrays:
+             output += f"\n{arr.dtype}"
+         return output
+
+     def apply(
+         self,
+         f,
+         align_keys: list[str] | None = None,
+         **kwargs,
+     ) -> Self:
+         """
+         Iterate over the arrays, collect and create a new ArrayManager.
+
+         Parameters
+         ----------
+         f : str or callable
+             Name of the Array method to apply.
+         align_keys: List[str] or None, default None
+         **kwargs
+             Keywords to pass to `f`
+
+         Returns
+         -------
+         ArrayManager
+         """
+         assert "filter" not in kwargs
+
+         align_keys = align_keys or []
+         result_arrays: list[ArrayLike] = []
+         # fillna: Series/DataFrame is responsible for making sure value is aligned
+
+         aligned_args = {k: kwargs[k] for k in align_keys}
+
+         if f == "apply":
+             f = kwargs.pop("func")
+
+         for i, arr in enumerate(self.arrays):
+             if aligned_args:
+                 for k, obj in aligned_args.items():
+                     if isinstance(obj, (ABCSeries, ABCDataFrame)):
+                         # The caller is responsible for ensuring that
+                         # obj.axes[-1].equals(self.items)
+                         if obj.ndim == 1:
+                             kwargs[k] = obj.iloc[i]
+                         else:
+                             kwargs[k] = obj.iloc[:, i]._values
+                     else:
+                         # otherwise we have an array-like
+                         kwargs[k] = obj[i]
+
+             if callable(f):
+                 applied = f(arr, **kwargs)
+             else:
+                 applied = getattr(arr, f)(**kwargs)
+
+             result_arrays.append(applied)
+
+         new_axes = self._axes
+         return type(self)(result_arrays, new_axes)
+
+     def apply_with_block(self, f, align_keys=None, **kwargs) -> Self:
+         # switch axis to follow BlockManager logic
+         swap_axis = True
+         if f == "interpolate":
+             swap_axis = False
+         if swap_axis and "axis" in kwargs and self.ndim == 2:
+             kwargs["axis"] = 1 if kwargs["axis"] == 0 else 0
+
+         align_keys = align_keys or []
+         aligned_args = {k: kwargs[k] for k in align_keys}
+
+         result_arrays = []
+
+         for i, arr in enumerate(self.arrays):
+             if aligned_args:
+                 for k, obj in aligned_args.items():
+                     if isinstance(obj, (ABCSeries, ABCDataFrame)):
+                         # The caller is responsible for ensuring that
+                         # obj.axes[-1].equals(self.items)
+                         if obj.ndim == 1:
+                             if self.ndim == 2:
+                                 kwargs[k] = obj.iloc[slice(i, i + 1)]._values
+                             else:
+                                 kwargs[k] = obj.iloc[:]._values
+                         else:
+                             kwargs[k] = obj.iloc[:, [i]]._values
+                     else:
+                         # otherwise we have an ndarray
+                         if obj.ndim == 2:
+                             kwargs[k] = obj[[i]]
+
+             if isinstance(arr.dtype, np.dtype) and not isinstance(arr, np.ndarray):
+                 # i.e. TimedeltaArray, DatetimeArray with tz=None. Need to
+                 # convert for the Block constructors.
+                 arr = np.asarray(arr)
+
+             arr = maybe_coerce_values(arr)
+             if self.ndim == 2:
+                 arr = ensure_block_shape(arr, 2)
+                 bp = BlockPlacement(slice(0, 1, 1))
+                 block = new_block(arr, placement=bp, ndim=2)
+             else:
+                 bp = BlockPlacement(slice(0, len(self), 1))
+                 block = new_block(arr, placement=bp, ndim=1)
+
+             applied = getattr(block, f)(**kwargs)
+             if isinstance(applied, list):
+                 applied = applied[0]
+             arr = applied.values
+             if self.ndim == 2 and arr.ndim == 2:
+                 # 2D for np.ndarray or DatetimeArray/TimedeltaArray
+                 assert len(arr) == 1
+                 # error: No overload variant of "__getitem__" of "ExtensionArray"
+                 # matches argument type "Tuple[int, slice]"
+                 arr = arr[0, :]  # type: ignore[call-overload]
+             result_arrays.append(arr)
+
+         return type(self)(result_arrays, self._axes)
+
+     def setitem(self, indexer, value, warn: bool = True) -> Self:
+         return self.apply_with_block("setitem", indexer=indexer, value=value)
+
+     def diff(self, n: int) -> Self:
+         assert self.ndim == 2  # caller ensures
+         return self.apply(algos.diff, n=n)
+
+     def astype(self, dtype, copy: bool | None = False, errors: str = "raise") -> Self:
+         if copy is None:
+             copy = True
+
+         return self.apply(astype_array_safe, dtype=dtype, copy=copy, errors=errors)
+
+     def convert(self, copy: bool | None) -> Self:
+         if copy is None:
+             copy = True
+
+         def _convert(arr):
+             if is_object_dtype(arr.dtype):
+                 # extract NumpyExtensionArray for tests that patch
+                 # NumpyExtensionArray._typ
+                 arr = np.asarray(arr)
+                 result = lib.maybe_convert_objects(
+                     arr,
+                     convert_non_numeric=True,
+                 )
+                 if result is arr and copy:
+                     return arr.copy()
+                 return result
+             else:
+                 return arr.copy() if copy else arr
+
+         return self.apply(_convert)
+
+     def get_values_for_csv(
+         self, *, float_format, date_format, decimal, na_rep: str = "nan", quoting=None
+     ) -> Self:
+         return self.apply(
+             get_values_for_csv,
+             na_rep=na_rep,
+             quoting=quoting,
+             float_format=float_format,
+             date_format=date_format,
+             decimal=decimal,
+         )
+
+     @property
+     def any_extension_types(self) -> bool:
+         """Whether any of the blocks in this manager are extension blocks"""
+         return False  # any(block.is_extension for block in self.blocks)
+
+     @property
+     def is_view(self) -> bool:
+         """return a boolean if we are a single block and are a view"""
+         # TODO what is this used for?
+         return False
+
+     @property
+     def is_single_block(self) -> bool:
+         return len(self.arrays) == 1
+
+     def _get_data_subset(self, predicate: Callable) -> Self:
+         indices = [i for i, arr in enumerate(self.arrays) if predicate(arr)]
+         arrays = [self.arrays[i] for i in indices]
+         # TODO copy?
+         # Note: using Index.take ensures we can retain e.g. DatetimeIndex.freq,
+         # see test_describe_datetime_columns
+         taker = np.array(indices, dtype="intp")
+         new_cols = self._axes[1].take(taker)
+         new_axes = [self._axes[0], new_cols]
+         return type(self)(arrays, new_axes, verify_integrity=False)
+
+     def get_bool_data(self, copy: bool = False) -> Self:
+         """
+         Select columns that are bool-dtype and object-dtype columns that are all-bool.
+
+         Parameters
+         ----------
+         copy : bool, default False
+             Whether to copy the blocks
+         """
+         return self._get_data_subset(lambda x: x.dtype == np.dtype(bool))
+
+     def get_numeric_data(self, copy: bool = False) -> Self:
+         """
+         Select columns that have a numeric dtype.
+
+         Parameters
+         ----------
+         copy : bool, default False
+             Whether to copy the blocks
+         """
+         return self._get_data_subset(
+             lambda arr: is_numeric_dtype(arr.dtype)
+             or getattr(arr.dtype, "_is_numeric", False)
+         )
+
+     def copy(self, deep: bool | Literal["all"] | None = True) -> Self:
+         """
+         Make deep or shallow copy of ArrayManager
+
+         Parameters
+         ----------
+         deep : bool or string, default True
+             If False, return shallow copy (do not copy data)
+             If 'all', copy data and a deep copy of the index
+
+         Returns
+         -------
+         BlockManager
+         """
+         if deep is None:
+             # ArrayManager does not yet support CoW, so deep=None always means
+             # deep=True for now
+             deep = True
+
+         # this preserves the notion of view copying of axes
+         if deep:
+             # hit in e.g. tests.io.json.test_pandas
+
+             def copy_func(ax):
+                 return ax.copy(deep=True) if deep == "all" else ax.view()
+
+             new_axes = [copy_func(ax) for ax in self._axes]
+         else:
+             new_axes = list(self._axes)
+
+         if deep:
+             new_arrays = [arr.copy() for arr in self.arrays]
+         else:
+             new_arrays = list(self.arrays)
+         return type(self)(new_arrays, new_axes, verify_integrity=False)
+
+     def reindex_indexer(
+         self,
+         new_axis,
+         indexer,
+         axis: AxisInt,
+         fill_value=None,
+         allow_dups: bool = False,
+         copy: bool | None = True,
+         # ignored keywords
+         only_slice: bool = False,
+         # ArrayManager specific keywords
+         use_na_proxy: bool = False,
+     ) -> Self:
+         axis = self._normalize_axis(axis)
+         return self._reindex_indexer(
+             new_axis,
+             indexer,
+             axis,
+             fill_value,
+             allow_dups,
+             copy,
+             use_na_proxy,
+         )
+
+     def _reindex_indexer(
+         self,
+         new_axis,
+         indexer: npt.NDArray[np.intp] | None,
+         axis: AxisInt,
+         fill_value=None,
+         allow_dups: bool = False,
+         copy: bool | None = True,
+         use_na_proxy: bool = False,
+     ) -> Self:
+         """
+         Parameters
+         ----------
+         new_axis : Index
+         indexer : ndarray[intp] or None
+         axis : int
+         fill_value : object, default None
+         allow_dups : bool, default False
+         copy : bool, default True
+
+
+         pandas-indexer with -1's only.
+         """
+         if copy is None:
+             # ArrayManager does not yet support CoW, so deep=None always means
+             # deep=True for now
+             copy = True
+
+         if indexer is None:
+             if new_axis is self._axes[axis] and not copy:
+                 return self
+
+             result = self.copy(deep=copy)
+             result._axes = list(self._axes)
+             result._axes[axis] = new_axis
+             return result
+
+         # some axes don't allow reindexing with dups
+         if not allow_dups:
+             self._axes[axis]._validate_can_reindex(indexer)
+
+         if axis >= self.ndim:
+             raise IndexError("Requested axis not found in manager")
+
+         if axis == 1:
+             new_arrays = []
+             for i in indexer:
+                 if i == -1:
+                     arr = self._make_na_array(
+                         fill_value=fill_value, use_na_proxy=use_na_proxy
+                     )
+                 else:
+                     arr = self.arrays[i]
+                     if copy:
+                         arr = arr.copy()
+                 new_arrays.append(arr)
+
+         else:
+             validate_indices(indexer, len(self._axes[0]))
+             indexer = ensure_platform_int(indexer)
+             mask = indexer == -1
+             needs_masking = mask.any()
+             new_arrays = [
+                 take_1d(
+                     arr,
+                     indexer,
+                     allow_fill=needs_masking,
+                     fill_value=fill_value,
+                     mask=mask,
+                     # if fill_value is not None else blk.fill_value
+                 )
+                 for arr in self.arrays
+             ]
+
+         new_axes = list(self._axes)
+         new_axes[axis] = new_axis
+
+         return type(self)(new_arrays, new_axes, verify_integrity=False)
+
+     def take(
+         self,
+         indexer: npt.NDArray[np.intp],
+         axis: AxisInt = 1,
+         verify: bool = True,
+     ) -> Self:
+         """
+         Take items along any axis.
+         """
+         assert isinstance(indexer, np.ndarray), type(indexer)
+         assert indexer.dtype == np.intp, indexer.dtype
+
+         axis = self._normalize_axis(axis)
+
+         if not indexer.ndim == 1:
+             raise ValueError("indexer should be 1-dimensional")
+
+         n = self.shape_proper[axis]
+         indexer = maybe_convert_indices(indexer, n, verify=verify)
+
+         new_labels = self._axes[axis].take(indexer)
+         return self._reindex_indexer(
+             new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True
+         )
+
+     def _make_na_array(self, fill_value=None, use_na_proxy: bool = False):
+         if use_na_proxy:
+             assert fill_value is None
+             return NullArrayProxy(self.shape_proper[0])
+
+         if fill_value is None:
+             fill_value = np.nan
+
+         dtype, fill_value = infer_dtype_from_scalar(fill_value)
+         array_values = make_na_array(dtype, self.shape_proper[:1], fill_value)
+         return array_values
+
+     def _equal_values(self, other) -> bool:
+         """
+         Used in .equals defined in base class. Only check the column values
+         assuming shape and indexes have already been checked.
+         """
+         for left, right in zip(self.arrays, other.arrays):
+             if not array_equals(left, right):
+                 return False
+         return True
+
+     # TODO
+     # to_dict
+
+
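Editor's note: BaseArrayManager above is the shared base for the experimental 1D-array-backed managers. A minimal sketch (not part of the diff) of selecting this backend, assuming the vendored pandas is a 2.x release where the since-deprecated mode.data_manager option still exists:

import pandas as pd

pd.set_option("mode.data_manager", "array")  # deprecated in pandas 2.2
df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})
print(type(df._mgr).__name__)  # ArrayManager: one 1D array per column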
+ class ArrayManager(BaseArrayManager):
+     @property
+     def ndim(self) -> Literal[2]:
+         return 2
+
+     def __init__(
+         self,
+         arrays: list[np.ndarray | ExtensionArray],
+         axes: list[Index],
+         verify_integrity: bool = True,
+     ) -> None:
+         # Note: we are storing the axes in "_axes" in the (row, columns) order,
+         # which contrasts with the order in which they are stored in BlockManager
+         self._axes = axes
+         self.arrays = arrays
+
+         if verify_integrity:
+             self._axes = [ensure_index(ax) for ax in axes]
+             arrays = [extract_pandas_array(x, None, 1)[0] for x in arrays]
+             self.arrays = [maybe_coerce_values(arr) for arr in arrays]
+             self._verify_integrity()
+
+     def _verify_integrity(self) -> None:
+         n_rows, n_columns = self.shape_proper
+         if not len(self.arrays) == n_columns:
+             raise ValueError(
+                 "Number of passed arrays must equal the size of the column Index: "
+                 f"{len(self.arrays)} arrays vs {n_columns} columns."
+             )
+         for arr in self.arrays:
+             if not len(arr) == n_rows:
+                 raise ValueError(
+                     "Passed arrays should have the same length as the rows Index: "
+                     f"{len(arr)} vs {n_rows} rows"
+                 )
+             if not isinstance(arr, (np.ndarray, ExtensionArray)):
+                 raise ValueError(
+                     "Passed arrays should be np.ndarray or ExtensionArray instances, "
+                     f"got {type(arr)} instead"
+                 )
+             if not arr.ndim == 1:
+                 raise ValueError(
+                     "Passed arrays should be 1-dimensional, got array with "
+                     f"{arr.ndim} dimensions instead."
+                 )
+
+     # --------------------------------------------------------------------
+     # Indexing
+
+     def fast_xs(self, loc: int) -> SingleArrayManager:
+         """
+         Return the array corresponding to `frame.iloc[loc]`.
+
+         Parameters
+         ----------
+         loc : int
+
+         Returns
+         -------
+         np.ndarray or ExtensionArray
+         """
+         dtype = interleaved_dtype([arr.dtype for arr in self.arrays])
+
+         values = [arr[loc] for arr in self.arrays]
+         if isinstance(dtype, ExtensionDtype):
+             result = dtype.construct_array_type()._from_sequence(values, dtype=dtype)
+         # for datetime64/timedelta64, the np.ndarray constructor cannot handle pd.NaT
+         elif is_datetime64_ns_dtype(dtype):
+             result = DatetimeArray._from_sequence(values, dtype=dtype)._ndarray
+         elif is_timedelta64_ns_dtype(dtype):
+             result = TimedeltaArray._from_sequence(values, dtype=dtype)._ndarray
+         else:
+             result = np.array(values, dtype=dtype)
+         return SingleArrayManager([result], [self._axes[1]])
+
+     def get_slice(self, slobj: slice, axis: AxisInt = 0) -> ArrayManager:
+         axis = self._normalize_axis(axis)
+
+         if axis == 0:
+             arrays = [arr[slobj] for arr in self.arrays]
+         elif axis == 1:
+             arrays = self.arrays[slobj]
+
+         new_axes = list(self._axes)
+         new_axes[axis] = new_axes[axis]._getitem_slice(slobj)
+
+         return type(self)(arrays, new_axes, verify_integrity=False)
+
+     def iget(self, i: int) -> SingleArrayManager:
+         """
+         Return the data as a SingleArrayManager.
+         """
+         values = self.arrays[i]
+         return SingleArrayManager([values], [self._axes[0]])
+
+     def iget_values(self, i: int) -> ArrayLike:
+         """
+         Return the data for column i as the values (ndarray or ExtensionArray).
+         """
+         return self.arrays[i]
+
+     @property
+     def column_arrays(self) -> list[ArrayLike]:
+         """
+         Used in the JSON C code to access column arrays.
+         """
+
+         return [np.asarray(arr) for arr in self.arrays]
+
+     def iset(
+         self,
+         loc: int | slice | np.ndarray,
+         value: ArrayLike,
+         inplace: bool = False,
+         refs=None,
+     ) -> None:
+         """
+         Set new column(s).
+
+         This changes the ArrayManager in-place, but replaces (an) existing
+         column(s), not changing column values in-place.
+
+         Parameters
+         ----------
+         loc : integer, slice or boolean mask
+             Positional location (already bounds checked)
+         value : np.ndarray or ExtensionArray
+         inplace : bool, default False
+             Whether to overwrite the existing array, as opposed to replacing it.
+         """
+         # single column -> single integer index
+         if lib.is_integer(loc):
+             # TODO can we avoid needing to unpack this here? That means converting
+             # DataFrame into 1D array when loc is an integer
+             if isinstance(value, np.ndarray) and value.ndim == 2:
+                 assert value.shape[1] == 1
+                 value = value[:, 0]
+
+             # TODO we receive a datetime/timedelta64 ndarray from DataFrame._iset_item
+             # but we should avoid that and pass directly the proper array
+             value = maybe_coerce_values(value)
+
+             assert isinstance(value, (np.ndarray, ExtensionArray))
+             assert value.ndim == 1
+             assert len(value) == len(self._axes[0])
+             self.arrays[loc] = value
+             return
+
+         # multiple columns -> convert slice or array to integer indices
+         elif isinstance(loc, slice):
+             indices: range | np.ndarray = range(
+                 loc.start if loc.start is not None else 0,
+                 loc.stop if loc.stop is not None else self.shape_proper[1],
+                 loc.step if loc.step is not None else 1,
+             )
+         else:
+             assert isinstance(loc, np.ndarray)
+             assert loc.dtype == "bool"
+             indices = np.nonzero(loc)[0]
+
+         assert value.ndim == 2
+         assert value.shape[0] == len(self._axes[0])
+
+         for value_idx, mgr_idx in enumerate(indices):
+             # error: No overload variant of "__getitem__" of "ExtensionArray" matches
+             # argument type "Tuple[slice, int]"
+             value_arr = value[:, value_idx]  # type: ignore[call-overload]
+             self.arrays[mgr_idx] = value_arr
+         return
+
+     def column_setitem(
+         self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False
+     ) -> None:
+         """
+         Set values ("setitem") into a single column (not setting the full column).
+
+         This is a method on the ArrayManager level, to avoid creating an
+         intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`)
+         """
+         if not is_integer(loc):
+             raise TypeError("The column index should be an integer")
+         arr = self.arrays[loc]
+         mgr = SingleArrayManager([arr], [self._axes[0]])
+         if inplace_only:
+             mgr.setitem_inplace(idx, value)
+         else:
+             new_mgr = mgr.setitem((idx,), value)
+             # update existing ArrayManager in-place
+             self.arrays[loc] = new_mgr.arrays[0]
+
+     def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None:
+         """
+         Insert item at selected position.
+
+         Parameters
+         ----------
+         loc : int
+         item : hashable
+         value : np.ndarray or ExtensionArray
+         """
+         # insert to the axis; this could possibly raise a TypeError
+         new_axis = self.items.insert(loc, item)
+
+         value = extract_array(value, extract_numpy=True)
+         if value.ndim == 2:
+             if value.shape[0] == 1:
+                 # error: No overload variant of "__getitem__" of "ExtensionArray"
+                 # matches argument type "Tuple[int, slice]"
+                 value = value[0, :]  # type: ignore[call-overload]
+             else:
+                 raise ValueError(
+                     f"Expected a 1D array, got an array with shape {value.shape}"
+                 )
+         value = maybe_coerce_values(value)
+
+         # TODO self.arrays can be empty
+         # assert len(value) == len(self.arrays[0])
+
+         # TODO is this copy needed?
+         arrays = self.arrays.copy()
+         arrays.insert(loc, value)
+
+         self.arrays = arrays
+         self._axes[1] = new_axis
+
+     def idelete(self, indexer) -> ArrayManager:
+         """
+         Delete selected locations in-place (new block and array, same BlockManager)
+         """
+         to_keep = np.ones(self.shape[0], dtype=np.bool_)
+         to_keep[indexer] = False
+
+         self.arrays = [self.arrays[i] for i in np.nonzero(to_keep)[0]]
+         self._axes = [self._axes[0], self._axes[1][to_keep]]
+         return self
+
+     # --------------------------------------------------------------------
+     # Array-wise Operation
+
+     def grouped_reduce(self, func: Callable) -> Self:
+         """
+         Apply grouped reduction function columnwise, returning a new ArrayManager.
+
+         Parameters
+         ----------
+         func : grouped reduction function
+
+         Returns
+         -------
+         ArrayManager
+         """
+         result_arrays: list[np.ndarray] = []
+         result_indices: list[int] = []
+
+         for i, arr in enumerate(self.arrays):
+             # grouped_reduce functions all expect 2D arrays
+             arr = ensure_block_shape(arr, ndim=2)
+             res = func(arr)
+             if res.ndim == 2:
+                 # reverse of ensure_block_shape
+                 assert res.shape[0] == 1
+                 res = res[0]
+
+             result_arrays.append(res)
+             result_indices.append(i)
+
+         if len(result_arrays) == 0:
+             nrows = 0
+         else:
+             nrows = result_arrays[0].shape[0]
+         index = Index(range(nrows))
+
+         columns = self.items
+
+         # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";
+         # expected "List[Union[ndarray, ExtensionArray]]"
+         return type(self)(result_arrays, [index, columns])  # type: ignore[arg-type]
+
+     def reduce(self, func: Callable) -> Self:
+         """
+         Apply reduction function column-wise, returning a single-row ArrayManager.
+
+         Parameters
+         ----------
+         func : reduction function
+
+         Returns
+         -------
+         ArrayManager
+         """
+         result_arrays: list[np.ndarray] = []
+         for i, arr in enumerate(self.arrays):
+             res = func(arr, axis=0)
+
+             # TODO NaT doesn't preserve dtype, so we need to ensure to create
+             # a timedelta result array if original was timedelta
+             # what if datetime results in timedelta? (eg std)
+             dtype = arr.dtype if res is NaT else None
+             result_arrays.append(
+                 sanitize_array([res], None, dtype=dtype)  # type: ignore[arg-type]
+             )
+
+         index = Index._simple_new(np.array([None], dtype=object))  # placeholder
+         columns = self.items
+
+         # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";
+         # expected "List[Union[ndarray, ExtensionArray]]"
+         new_mgr = type(self)(result_arrays, [index, columns])  # type: ignore[arg-type]
+         return new_mgr
+
+     def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager:
+         """
+         Apply array_op blockwise with another (aligned) BlockManager.
+         """
+         # TODO what if `other` is BlockManager ?
+         left_arrays = self.arrays
+         right_arrays = other.arrays
+         result_arrays = [
+             array_op(left, right) for left, right in zip(left_arrays, right_arrays)
+         ]
+         return type(self)(result_arrays, self._axes)
+
+     def quantile(
+         self,
+         *,
+         qs: Index,  # with dtype float64
+         transposed: bool = False,
+         interpolation: QuantileInterpolation = "linear",
+     ) -> ArrayManager:
+         arrs = [ensure_block_shape(x, 2) for x in self.arrays]
+         new_arrs = [
+             quantile_compat(x, np.asarray(qs._values), interpolation) for x in arrs
+         ]
+         for i, arr in enumerate(new_arrs):
+             if arr.ndim == 2:
+                 assert arr.shape[0] == 1, arr.shape
+                 new_arrs[i] = arr[0]
+
+         axes = [qs, self._axes[1]]
+         return type(self)(new_arrs, axes)
+
+     # ----------------------------------------------------------------
+
+     def unstack(self, unstacker, fill_value) -> ArrayManager:
+         """
+         Return a BlockManager with all blocks unstacked.
+
+         Parameters
+         ----------
+         unstacker : reshape._Unstacker
+         fill_value : Any
+             fill_value for newly introduced missing values.
+
+         Returns
+         -------
+         unstacked : BlockManager
+         """
+         indexer, _ = unstacker._indexer_and_to_sort
+         if unstacker.mask.all():
+             new_indexer = indexer
+             allow_fill = False
+             new_mask2D = None
+             needs_masking = None
+         else:
+             new_indexer = np.full(unstacker.mask.shape, -1)
+             new_indexer[unstacker.mask] = indexer
+             allow_fill = True
+             # calculating the full mask once and passing it to take_1d is faster
+             # than letting take_1d calculate it in each repeated call
+             new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape)
+             needs_masking = new_mask2D.any(axis=0)
+         new_indexer2D = new_indexer.reshape(*unstacker.full_shape)
+         new_indexer2D = ensure_platform_int(new_indexer2D)
+
+         new_arrays = []
+         for arr in self.arrays:
+             for i in range(unstacker.full_shape[1]):
+                 if allow_fill:
+                     # error: Value of type "Optional[Any]" is not indexable  [index]
+                     new_arr = take_1d(
+                         arr,
+                         new_indexer2D[:, i],
+                         allow_fill=needs_masking[i],  # type: ignore[index]
+                         fill_value=fill_value,
+                         mask=new_mask2D[:, i],  # type: ignore[index]
+                     )
+                 else:
+                     new_arr = take_1d(arr, new_indexer2D[:, i], allow_fill=False)
+                 new_arrays.append(new_arr)
+
+         new_index = unstacker.new_index
+         new_columns = unstacker.get_new_columns(self._axes[1])
+         new_axes = [new_index, new_columns]
+
+         return type(self)(new_arrays, new_axes, verify_integrity=False)
+
+     def as_array(
+         self,
+         dtype=None,
+         copy: bool = False,
+         na_value: object = lib.no_default,
+     ) -> np.ndarray:
+         """
+         Convert the blockmanager data into a numpy array.
+
+         Parameters
+         ----------
+         dtype : object, default None
+             Data type of the return array.
+         copy : bool, default False
+             If True then guarantee that a copy is returned. A value of
+             False does not guarantee that the underlying data is not
+             copied.
+         na_value : object, default lib.no_default
+             Value to be used as the missing value sentinel.
+
+         Returns
+         -------
+         arr : ndarray
+         """
+         if len(self.arrays) == 0:
+             empty_arr = np.empty(self.shape, dtype=float)
+             return empty_arr.transpose()
+
+         # We want to copy when na_value is provided to avoid
+         # mutating the original object
+         copy = copy or na_value is not lib.no_default
+
+         if not dtype:
+             dtype = interleaved_dtype([arr.dtype for arr in self.arrays])
+
+         dtype = ensure_np_dtype(dtype)
+
+         result = np.empty(self.shape_proper, dtype=dtype)
+
+         for i, arr in enumerate(self.arrays):
+             arr = arr.astype(dtype, copy=copy)
+             result[:, i] = arr
+
+         if na_value is not lib.no_default:
+             result[isna(result)] = na_value
+
+         return result
+
+     @classmethod
+     def concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self:
+         """
+         Concatenate uniformly-indexed ArrayManagers horizontally.
+         """
+         # concatting along the columns -> combine reindexed arrays in a single manager
+         arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))
+         new_mgr = cls(arrays, [axes[1], axes[0]], verify_integrity=False)
+         return new_mgr
+
+     @classmethod
+     def concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self:
+         """
+         Concatenate uniformly-indexed ArrayManagers vertically.
+         """
+         # concatting along the rows -> concat the reindexed arrays
+         # TODO(ArrayManager) doesn't yet preserve the correct dtype
+         arrays = [
+             concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))])
+             for j in range(len(mgrs[0].arrays))
+         ]
+         new_mgr = cls(arrays, [axes[1], axes[0]], verify_integrity=False)
+         return new_mgr
+
+
1068
+ class SingleArrayManager(BaseArrayManager, SingleDataManager):
1069
+ __slots__ = [
1070
+ "_axes", # private attribute, because 'axes' has different order, see below
1071
+ "arrays",
1072
+ ]
1073
+
1074
+ arrays: list[np.ndarray | ExtensionArray]
1075
+ _axes: list[Index]
1076
+
1077
+ @property
1078
+ def ndim(self) -> Literal[1]:
1079
+ return 1
1080
+
1081
+ def __init__(
1082
+ self,
1083
+ arrays: list[np.ndarray | ExtensionArray],
1084
+ axes: list[Index],
1085
+ verify_integrity: bool = True,
1086
+ ) -> None:
1087
+ self._axes = axes
1088
+ self.arrays = arrays
1089
+
1090
+ if verify_integrity:
1091
+ assert len(axes) == 1
1092
+ assert len(arrays) == 1
1093
+ self._axes = [ensure_index(ax) for ax in self._axes]
1094
+ arr = arrays[0]
1095
+ arr = maybe_coerce_values(arr)
1096
+ arr = extract_pandas_array(arr, None, 1)[0]
1097
+ self.arrays = [arr]
1098
+ self._verify_integrity()
1099
+
1100
+ def _verify_integrity(self) -> None:
1101
+ (n_rows,) = self.shape
1102
+ assert len(self.arrays) == 1
1103
+ arr = self.arrays[0]
1104
+ assert len(arr) == n_rows
1105
+ if not arr.ndim == 1:
1106
+ raise ValueError(
1107
+ "Passed array should be 1-dimensional, got array with "
1108
+ f"{arr.ndim} dimensions instead."
1109
+ )
1110
+
1111
+ @staticmethod
1112
+ def _normalize_axis(axis):
1113
+ return axis
1114
+
1115
+ def make_empty(self, axes=None) -> Self:
1116
+ """Return an empty ArrayManager with index/array of length 0"""
1117
+ if axes is None:
1118
+ axes = [Index([], dtype=object)]
1119
+ array: np.ndarray = np.array([], dtype=self.dtype)
1120
+ return type(self)([array], axes)
1121
+
1122
+ @classmethod
1123
+ def from_array(cls, array, index) -> SingleArrayManager:
1124
+ return cls([array], [index])
1125
+
1126
+ # error: Cannot override writeable attribute with read-only property
1127
+ @property
1128
+ def axes(self) -> list[Index]: # type: ignore[override]
1129
+ return self._axes
1130
+
1131
+ @property
1132
+ def index(self) -> Index:
1133
+ return self._axes[0]
1134
+
1135
+ @property
1136
+ def dtype(self):
1137
+ return self.array.dtype
1138
+
1139
+ def external_values(self):
1140
+ """The array that Series.values returns"""
1141
+ return external_values(self.array)
1142
+
1143
+ def internal_values(self):
1144
+ """The array that Series._values returns"""
1145
+ return self.array
1146
+
1147
+ def array_values(self):
1148
+ """The array that Series.array returns"""
1149
+ arr = self.array
1150
+ if isinstance(arr, np.ndarray):
1151
+ arr = NumpyExtensionArray(arr)
1152
+ return arr
1153
+
1154
+ @property
1155
+ def _can_hold_na(self) -> bool:
1156
+ if isinstance(self.array, np.ndarray):
1157
+ return self.array.dtype.kind not in "iub"
1158
+ else:
1159
+ # ExtensionArray
1160
+ return self.array._can_hold_na
1161
+
1162
+ @property
1163
+ def is_single_block(self) -> bool:
1164
+ return True
1165
+
1166
+ def fast_xs(self, loc: int) -> SingleArrayManager:
1167
+ raise NotImplementedError("Use series._values[loc] instead")
1168
+
1169
+ def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleArrayManager:
1170
+ if axis >= self.ndim:
1171
+ raise IndexError("Requested axis not found in manager")
1172
+
1173
+ new_array = self.array[slobj]
1174
+ new_index = self.index._getitem_slice(slobj)
1175
+ return type(self)([new_array], [new_index], verify_integrity=False)
1176
+
1177
+ def get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> SingleArrayManager:
1178
+ new_array = self.array[indexer]
1179
+ new_index = self.index[indexer]
1180
+ return type(self)([new_array], [new_index])
1181
+
1182
+ # error: Signature of "apply" incompatible with supertype "BaseArrayManager"
1183
+ def apply(self, func, **kwargs) -> Self: # type: ignore[override]
1184
+ if callable(func):
1185
+ new_array = func(self.array, **kwargs)
1186
+ else:
1187
+ new_array = getattr(self.array, func)(**kwargs)
1188
+ return type(self)([new_array], self._axes)
1189
+
1190
+ def setitem(self, indexer, value, warn: bool = True) -> SingleArrayManager:
1191
+ """
1192
+ Set values with indexer.
1193
+
1194
+ For SingleArrayManager, this backs s[indexer] = value
1195
+
1196
+ See `setitem_inplace` for a version that works inplace and doesn't
1197
+ return a new Manager.
1198
+ """
1199
+ if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim:
1200
+ raise ValueError(f"Cannot set values with ndim > {self.ndim}")
1201
+ return self.apply_with_block("setitem", indexer=indexer, value=value)
1202
+
1203
+ def idelete(self, indexer) -> SingleArrayManager:
1204
+ """
1205
+ Delete selected locations in-place (new array, same ArrayManager)
1206
+ """
1207
+ to_keep = np.ones(self.shape[0], dtype=np.bool_)
1208
+ to_keep[indexer] = False
1209
+
1210
+ self.arrays = [self.arrays[0][to_keep]]
1211
+ self._axes = [self._axes[0][to_keep]]
1212
+ return self
1213
+
1214
+ def _get_data_subset(self, predicate: Callable) -> SingleArrayManager:
1215
+ # used in get_numeric_data / get_bool_data
1216
+ if predicate(self.array):
1217
+ return type(self)(self.arrays, self._axes, verify_integrity=False)
1218
+ else:
1219
+ return self.make_empty()
1220
+
1221
+ def set_values(self, values: ArrayLike) -> None:
1222
+ """
1223
+ Set (replace) the values of the SingleArrayManager in place.
1224
+
1225
+ Use at your own risk! This does not check if the passed values are
1226
+ valid for the current SingleArrayManager (length, dtype, etc).
1227
+ """
1228
+ self.arrays[0] = values
1229
+
1230
+ def to_2d_mgr(self, columns: Index) -> ArrayManager:
1231
+ """
1232
+ Manager analogue of Series.to_frame
1233
+ """
1234
+ arrays = [self.arrays[0]]
1235
+ axes = [self.axes[0], columns]
1236
+
1237
+ return ArrayManager(arrays, axes, verify_integrity=False)
1238
+
1239
+
1240
+ class NullArrayProxy:
1241
+ """
1242
+ Proxy object for an all-NA array.
1243
+
1244
+ Only stores the length of the array, and not the dtype. The dtype
1245
+ will only be known when actually concatenating (after determining the
1246
+ common dtype, for which this proxy is ignored).
1247
+ Using this object avoids that the internals/concat.py needs to determine
1248
+ the proper dtype and array type.
1249
+ """
1250
+
1251
+ ndim = 1
1252
+
1253
+ def __init__(self, n: int) -> None:
1254
+ self.n = n
1255
+
1256
+ @property
1257
+ def shape(self) -> tuple[int]:
1258
+ return (self.n,)
1259
+
1260
+ def to_array(self, dtype: DtypeObj) -> ArrayLike:
1261
+ """
1262
+ Helper function to create the actual all-NA array from the NullArrayProxy
1263
+ object.
1264
+
1265
+ Parameters
1266
+ ----------
1267
+ arr : NullArrayProxy
1268
+ dtype : the dtype for the resulting array
1269
+
1270
+ Returns
1271
+ -------
1272
+ np.ndarray or ExtensionArray
1273
+ """
1274
+ if isinstance(dtype, ExtensionDtype):
1275
+ empty = dtype.construct_array_type()._from_sequence([], dtype=dtype)
1276
+ indexer = -np.ones(self.n, dtype=np.intp)
1277
+ return empty.take(indexer, allow_fill=True)
1278
+ else:
1279
+ # when introducing missing values, int becomes float, bool becomes object
1280
+ dtype = ensure_dtype_can_hold_na(dtype)
1281
+ fill_value = na_value_for_dtype(dtype)
1282
+ arr = np.empty(self.n, dtype=dtype)
1283
+ arr.fill(fill_value)
1284
+ return ensure_wrapped_if_datetimelike(arr)
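
The ExtensionDtype branch of `to_array` materializes the all-NA array by taking with a -1 indexer; the same pattern works on any public extension array, e.g. (illustrative values):

    import numpy as np
    import pandas as pd

    # Taking with -1 positions and allow_fill=True from an empty array
    # yields an all-NA array of the requested length and dtype.
    empty = pd.array([], dtype="Int64")
    indexer = -np.ones(3, dtype=np.intp)
    print(empty.take(indexer, allow_fill=True))  # [<NA>, <NA>, <NA>], dtype Int64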
+ 
+ 
+ def concat_arrays(to_concat: list) -> ArrayLike:
+     """
+     Alternative for concat_compat but specialized for use in the ArrayManager.
+ 
+     Differences: only deals with 1D arrays (no axis keyword), assumes
+     ensure_wrapped_if_datetimelike and does not skip empty arrays to determine
+     the dtype.
+     In addition ensures that all NullArrayProxies get replaced with actual
+     arrays.
+ 
+     Parameters
+     ----------
+     to_concat : list of arrays
+ 
+     Returns
+     -------
+     np.ndarray or ExtensionArray
+     """
+     # ignore the all-NA proxies to determine the resulting dtype
+     to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)]
+ 
+     dtypes = {x.dtype for x in to_concat_no_proxy}
+     single_dtype = len(dtypes) == 1
+ 
+     if single_dtype:
+         target_dtype = to_concat_no_proxy[0].dtype
+     elif all(lib.is_np_dtype(x, "iub") for x in dtypes):
+         # GH#42092
+         target_dtype = np_find_common_type(*dtypes)
+     else:
+         target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy])
+ 
+     to_concat = [
+         arr.to_array(target_dtype)
+         if isinstance(arr, NullArrayProxy)
+         else astype_array(arr, target_dtype, copy=False)
+         for arr in to_concat
+     ]
+ 
+     if isinstance(to_concat[0], ExtensionArray):
+         cls = type(to_concat[0])
+         return cls._concat_same_type(to_concat)
+ 
+     result = np.concatenate(to_concat)
+ 
+     # TODO decide on exact behaviour (we shouldn't do this only for empty result)
+     # see https://github.com/pandas-dev/pandas/issues/39817
+     if len(result) == 0:
+         # all empties -> check for bool to not coerce to float
+         kinds = {obj.dtype.kind for obj in to_concat_no_proxy}
+         if len(kinds) != 1:
+             if "b" in kinds:
+                 result = result.astype(object)
+     return result
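
The dtype resolution above has three tiers: a single shared dtype is reused as-is, all-numpy integer/unsigned/bool dtypes go through numpy promotion (GH#42092), and everything else falls back to `find_common_type`. A sketch of the middle tier using numpy's public promotion rules, which should behave the same way for these dtype kinds:

    import numpy as np

    # int64 + uint64 has no common integer wide enough, so numpy promotes
    # to float64; bool promotes into any integer kind.
    print(np.result_type(np.int64, np.uint64))  # float64
    print(np.result_type(np.bool_, np.int8))    # int8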
venv/lib/python3.10/site-packages/pandas/core/internals/base.py ADDED
@@ -0,0 +1,407 @@
+ """
+ Base class for the internal managers. Both BlockManager and ArrayManager
+ inherit from this class.
+ """
+ from __future__ import annotations
+ 
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+     Literal,
+     cast,
+     final,
+ )
+ 
+ import numpy as np
+ 
+ from pandas._config import (
+     using_copy_on_write,
+     warn_copy_on_write,
+ )
+ 
+ from pandas._libs import (
+     algos as libalgos,
+     lib,
+ )
+ from pandas.errors import AbstractMethodError
+ from pandas.util._validators import validate_bool_kwarg
+ 
+ from pandas.core.dtypes.cast import (
+     find_common_type,
+     np_can_hold_element,
+ )
+ from pandas.core.dtypes.dtypes import (
+     ExtensionDtype,
+     SparseDtype,
+ )
+ 
+ from pandas.core.base import PandasObject
+ from pandas.core.construction import extract_array
+ from pandas.core.indexes.api import (
+     Index,
+     default_index,
+ )
+ 
+ if TYPE_CHECKING:
+     from pandas._typing import (
+         ArrayLike,
+         AxisInt,
+         DtypeObj,
+         Self,
+         Shape,
+     )
+ 
+ 
+ class _AlreadyWarned:
+     def __init__(self):
+         # This object is passed from the manager level to the block level to
+         # ensure that we warn only once. The block method can update the
+         # warned_already option without returning a value to keep the
+         # interface consistent. This is only a temporary solution for
+         # CoW warnings.
+         self.warned_already = False
+ 
+ 
+ class DataManager(PandasObject):
+     # TODO share more methods/attributes
+ 
+     axes: list[Index]
+ 
+     @property
+     def items(self) -> Index:
+         raise AbstractMethodError(self)
+ 
+     @final
+     def __len__(self) -> int:
+         return len(self.items)
+ 
+     @property
+     def ndim(self) -> int:
+         return len(self.axes)
+ 
+     @property
+     def shape(self) -> Shape:
+         return tuple(len(ax) for ax in self.axes)
+ 
+     @final
+     def _validate_set_axis(self, axis: AxisInt, new_labels: Index) -> None:
+         # Caller is responsible for ensuring we have an Index object.
+         old_len = len(self.axes[axis])
+         new_len = len(new_labels)
+ 
+         if axis == 1 and len(self.items) == 0:
+             # If we are setting the index on a DataFrame with no columns,
+             # it is OK to change the length.
+             pass
+ 
+         elif new_len != old_len:
+             raise ValueError(
+                 f"Length mismatch: Expected axis has {old_len} elements, new "
+                 f"values have {new_len} elements"
+             )
+ 
+     def reindex_indexer(
+         self,
+         new_axis,
+         indexer,
+         axis: AxisInt,
+         fill_value=None,
+         allow_dups: bool = False,
+         copy: bool = True,
+         only_slice: bool = False,
+     ) -> Self:
+         raise AbstractMethodError(self)
+ 
+     @final
+     def reindex_axis(
+         self,
+         new_index: Index,
+         axis: AxisInt,
+         fill_value=None,
+         only_slice: bool = False,
+     ) -> Self:
+         """
+         Conform data manager to new index.
+         """
+         new_index, indexer = self.axes[axis].reindex(new_index)
+ 
+         return self.reindex_indexer(
+             new_index,
+             indexer,
+             axis=axis,
+             fill_value=fill_value,
+             copy=False,
+             only_slice=only_slice,
+         )
+ 
+     def _equal_values(self, other: Self) -> bool:
+         """
+         To be implemented by the subclasses. Only check the column values
+         assuming shape and indexes have already been checked.
+         """
+         raise AbstractMethodError(self)
+ 
+     @final
+     def equals(self, other: object) -> bool:
+         """
+         Implementation for DataFrame.equals
+         """
+         if not isinstance(other, type(self)):
+             return False
+ 
+         self_axes, other_axes = self.axes, other.axes
+         if len(self_axes) != len(other_axes):
+             return False
+         if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
+             return False
+ 
+         return self._equal_values(other)
+ 
+     def apply(
+         self,
+         f,
+         align_keys: list[str] | None = None,
+         **kwargs,
+     ) -> Self:
+         raise AbstractMethodError(self)
+ 
+     def apply_with_block(
+         self,
+         f,
+         align_keys: list[str] | None = None,
+         **kwargs,
+     ) -> Self:
+         raise AbstractMethodError(self)
+ 
+     @final
+     def isna(self, func) -> Self:
+         return self.apply("apply", func=func)
+ 
+     @final
+     def fillna(self, value, limit: int | None, inplace: bool, downcast) -> Self:
+         if limit is not None:
+             # Do this validation even if we go through one of the no-op paths
+             limit = libalgos.validate_limit(None, limit=limit)
+ 
+         return self.apply_with_block(
+             "fillna",
+             value=value,
+             limit=limit,
+             inplace=inplace,
+             downcast=downcast,
+             using_cow=using_copy_on_write(),
+             already_warned=_AlreadyWarned(),
+         )
+ 
+     @final
+     def where(self, other, cond, align: bool) -> Self:
+         if align:
+             align_keys = ["other", "cond"]
+         else:
+             align_keys = ["cond"]
+             other = extract_array(other, extract_numpy=True)
+ 
+         return self.apply_with_block(
+             "where",
+             align_keys=align_keys,
+             other=other,
+             cond=cond,
+             using_cow=using_copy_on_write(),
+         )
+ 
+     @final
+     def putmask(self, mask, new, align: bool = True, warn: bool = True) -> Self:
+         if align:
+             align_keys = ["new", "mask"]
+         else:
+             align_keys = ["mask"]
+             new = extract_array(new, extract_numpy=True)
+ 
+         already_warned = None
+         if warn_copy_on_write():
+             already_warned = _AlreadyWarned()
+             if not warn:
+                 already_warned.warned_already = True
+ 
+         return self.apply_with_block(
+             "putmask",
+             align_keys=align_keys,
+             mask=mask,
+             new=new,
+             using_cow=using_copy_on_write(),
+             already_warned=already_warned,
+         )
+ 
+     @final
+     def round(self, decimals: int, using_cow: bool = False) -> Self:
+         return self.apply_with_block(
+             "round",
+             decimals=decimals,
+             using_cow=using_cow,
+         )
+ 
+     @final
+     def replace(self, to_replace, value, inplace: bool) -> Self:
+         inplace = validate_bool_kwarg(inplace, "inplace")
+         # NDFrame.replace ensures that neither to_replace nor value is list-like here
+         assert not lib.is_list_like(to_replace)
+         assert not lib.is_list_like(value)
+         return self.apply_with_block(
+             "replace",
+             to_replace=to_replace,
+             value=value,
+             inplace=inplace,
+             using_cow=using_copy_on_write(),
+             already_warned=_AlreadyWarned(),
+         )
+ 
+     @final
+     def replace_regex(self, **kwargs) -> Self:
+         return self.apply_with_block(
+             "_replace_regex",
+             **kwargs,
+             using_cow=using_copy_on_write(),
+             already_warned=_AlreadyWarned(),
+         )
+ 
+     @final
+     def replace_list(
+         self,
+         src_list: list[Any],
+         dest_list: list[Any],
+         inplace: bool = False,
+         regex: bool = False,
+     ) -> Self:
+         """do a list replace"""
+         inplace = validate_bool_kwarg(inplace, "inplace")
+ 
+         bm = self.apply_with_block(
+             "replace_list",
+             src_list=src_list,
+             dest_list=dest_list,
+             inplace=inplace,
+             regex=regex,
+             using_cow=using_copy_on_write(),
+             already_warned=_AlreadyWarned(),
+         )
+         bm._consolidate_inplace()
+         return bm
+ 
+     def interpolate(self, inplace: bool, **kwargs) -> Self:
+         return self.apply_with_block(
+             "interpolate",
+             inplace=inplace,
+             **kwargs,
+             using_cow=using_copy_on_write(),
+             already_warned=_AlreadyWarned(),
+         )
+ 
+     def pad_or_backfill(self, inplace: bool, **kwargs) -> Self:
+         return self.apply_with_block(
+             "pad_or_backfill",
+             inplace=inplace,
+             **kwargs,
+             using_cow=using_copy_on_write(),
+             already_warned=_AlreadyWarned(),
+         )
+ 
+     def shift(self, periods: int, fill_value) -> Self:
+         if fill_value is lib.no_default:
+             fill_value = None
+ 
+         return self.apply_with_block("shift", periods=periods, fill_value=fill_value)
+ 
+     # --------------------------------------------------------------------
+     # Consolidation: No-ops for all but BlockManager
+ 
+     def is_consolidated(self) -> bool:
+         return True
+ 
+     def consolidate(self) -> Self:
+         return self
+ 
+     def _consolidate_inplace(self) -> None:
+         return
+ 
+ 
+ class SingleDataManager(DataManager):
+     @property
+     def ndim(self) -> Literal[1]:
+         return 1
+ 
+     @final
+     @property
+     def array(self) -> ArrayLike:
+         """
+         Quick access to the backing array of the Block or SingleArrayManager.
+         """
+         # error: "SingleDataManager" has no attribute "arrays"; maybe "array"
+         return self.arrays[0]  # type: ignore[attr-defined]
+ 
+     def setitem_inplace(self, indexer, value, warn: bool = True) -> None:
+         """
+         Set values with indexer.
+ 
+         For Single[Block/Array]Manager, this backs s[indexer] = value
+ 
+         This is an inplace version of `setitem()`, mutating the manager/values
+         in place, not returning a new Manager (and Block), and thus never changing
+         the dtype.
+         """
+         arr = self.array
+ 
+         # EAs will do this validation in their own __setitem__ methods.
+         if isinstance(arr, np.ndarray):
+             # Note: checking for ndarray instead of np.dtype means we exclude
+             #  dt64/td64, which do their own validation.
+             value = np_can_hold_element(arr.dtype, value)
+ 
+         if isinstance(value, np.ndarray) and value.ndim == 1 and len(value) == 1:
+             # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615
+             value = value[0, ...]
+ 
+         arr[indexer] = value
+ 
+     def grouped_reduce(self, func):
+         arr = self.array
+         res = func(arr)
+         index = default_index(len(res))
+ 
+         mgr = type(self).from_array(res, index)
+         return mgr
+ 
+     @classmethod
+     def from_array(cls, arr: ArrayLike, index: Index):
+         raise AbstractMethodError(cls)
+ 
+ 
+ def interleaved_dtype(dtypes: list[DtypeObj]) -> DtypeObj | None:
+     """
+     Find the common dtype for `dtypes`.
+ 
+     Parameters
+     ----------
+     dtypes : list[DtypeObj]
+ 
+     Returns
+     -------
+     dtype : np.dtype, ExtensionDtype, or None
+         None is returned when `dtypes` is empty.
+     """
+     if not len(dtypes):
+         return None
+ 
+     return find_common_type(dtypes)
+ 
+ 
+ def ensure_np_dtype(dtype: DtypeObj) -> np.dtype:
+     # TODO: https://github.com/pandas-dev/pandas/issues/22791
+     # Give EAs some input on what happens here. Sparse needs this.
+     if isinstance(dtype, SparseDtype):
+         dtype = dtype.subtype
+         dtype = cast(np.dtype, dtype)
+     elif isinstance(dtype, ExtensionDtype):
+         dtype = np.dtype("object")
+     elif dtype == np.dtype(str):
+         dtype = np.dtype("object")
+     return dtype
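
A short sketch of the two helpers above; the import path is the file this diff adds, i.e. internal API that may change between versions:

    import numpy as np
    import pandas as pd
    from pandas.core.internals.base import ensure_np_dtype, interleaved_dtype

    print(interleaved_dtype([np.dtype("int64"), np.dtype("float32")]))  # float64
    print(interleaved_dtype([]))                                        # None
    print(ensure_np_dtype(pd.Int64Dtype()))  # object: EA dtypes fall back to object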
venv/lib/python3.10/site-packages/pandas/core/internals/blocks.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/pandas/core/internals/concat.py ADDED
@@ -0,0 +1,598 @@
+ from __future__ import annotations
+ 
+ from typing import (
+     TYPE_CHECKING,
+     cast,
+ )
+ import warnings
+ 
+ import numpy as np
+ 
+ from pandas._libs import (
+     NaT,
+     algos as libalgos,
+     internals as libinternals,
+     lib,
+ )
+ from pandas._libs.missing import NA
+ from pandas.util._decorators import cache_readonly
+ from pandas.util._exceptions import find_stack_level
+ 
+ from pandas.core.dtypes.cast import (
+     ensure_dtype_can_hold_na,
+     find_common_type,
+ )
+ from pandas.core.dtypes.common import (
+     is_1d_only_ea_dtype,
+     is_scalar,
+     needs_i8_conversion,
+ )
+ from pandas.core.dtypes.concat import concat_compat
+ from pandas.core.dtypes.dtypes import (
+     ExtensionDtype,
+     SparseDtype,
+ )
+ from pandas.core.dtypes.missing import (
+     is_valid_na_for_dtype,
+     isna,
+     isna_all,
+ )
+ 
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
+ from pandas.core.internals.array_manager import ArrayManager
+ from pandas.core.internals.blocks import (
+     ensure_block_shape,
+     new_block_2d,
+ )
+ from pandas.core.internals.managers import (
+     BlockManager,
+     make_na_array,
+ )
+ 
+ if TYPE_CHECKING:
+     from collections.abc import Sequence
+ 
+     from pandas._typing import (
+         ArrayLike,
+         AxisInt,
+         DtypeObj,
+         Manager2D,
+         Shape,
+     )
+ 
+     from pandas import Index
+     from pandas.core.internals.blocks import (
+         Block,
+         BlockPlacement,
+     )
+ 
+ 
+ def _concatenate_array_managers(
+     mgrs: list[ArrayManager], axes: list[Index], concat_axis: AxisInt
+ ) -> Manager2D:
+     """
+     Concatenate array managers into one.
+ 
+     Parameters
+     ----------
+     mgrs : list of ArrayManager
+     axes : list of Index
+     concat_axis : int
+ 
+     Returns
+     -------
+     ArrayManager
+     """
+     if concat_axis == 1:
+         return mgrs[0].concat_vertical(mgrs, axes)
+     else:
+         # concatting along the columns -> combine reindexed arrays in a single manager
+         assert concat_axis == 0
+         return mgrs[0].concat_horizontal(mgrs, axes)
+ 
+ 
+ def concatenate_managers(
+     mgrs_indexers, axes: list[Index], concat_axis: AxisInt, copy: bool
+ ) -> Manager2D:
+     """
+     Concatenate block managers into one.
+ 
+     Parameters
+     ----------
+     mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
+     axes : list of Index
+     concat_axis : int
+     copy : bool
+ 
+     Returns
+     -------
+     BlockManager
+     """
+ 
+     needs_copy = copy and concat_axis == 0
+ 
+     # TODO(ArrayManager) this assumes that all managers are of the same type
+     if isinstance(mgrs_indexers[0][0], ArrayManager):
+         mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy)
+         # error: Argument 1 to "_concatenate_array_managers" has incompatible
+         # type "List[BlockManager]"; expected "List[Union[ArrayManager,
+         # SingleArrayManager, BlockManager, SingleBlockManager]]"
+         return _concatenate_array_managers(
+             mgrs, axes, concat_axis  # type: ignore[arg-type]
+         )
+ 
+     # Assertions disabled for performance
+     # for tup in mgrs_indexers:
+     #    # caller is responsible for ensuring this
+     #    indexers = tup[1]
+     #    assert concat_axis not in indexers
+ 
+     if concat_axis == 0:
+         mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy)
+         return mgrs[0].concat_horizontal(mgrs, axes)
+ 
+     if len(mgrs_indexers) > 0 and mgrs_indexers[0][0].nblocks > 0:
+         first_dtype = mgrs_indexers[0][0].blocks[0].dtype
+         if first_dtype in [np.float64, np.float32]:
+             # TODO: support more dtypes here. This will be simpler once
+             #  JoinUnit.is_na behavior is deprecated.
+             if (
+                 all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in mgrs_indexers)
+                 and len(mgrs_indexers) > 1
+             ):
+                 # Fastpath!
+                 # Length restriction is just to avoid having to worry about 'copy'
+                 shape = tuple(len(x) for x in axes)
+                 nb = _concat_homogeneous_fastpath(mgrs_indexers, shape, first_dtype)
+                 return BlockManager((nb,), axes)
+ 
+     mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy)
+ 
+     if len(mgrs) == 1:
+         mgr = mgrs[0]
+         out = mgr.copy(deep=False)
+         out.axes = axes
+         return out
+ 
+     concat_plan = _get_combined_plan(mgrs)
+ 
+     blocks = []
+     values: ArrayLike
+ 
+     for placement, join_units in concat_plan:
+         unit = join_units[0]
+         blk = unit.block
+ 
+         if _is_uniform_join_units(join_units):
+             vals = [ju.block.values for ju in join_units]
+ 
+             if not blk.is_extension:
+                 # _is_uniform_join_units ensures a single dtype, so
+                 #  we can use np.concatenate, which is more performant
+                 #  than concat_compat
+                 # error: Argument 1 to "concatenate" has incompatible type
+                 #  "List[Union[ndarray[Any, Any], ExtensionArray]]";
+                 #  expected "Union[_SupportsArray[dtype[Any]],
+                 #  _NestedSequence[_SupportsArray[dtype[Any]]]]"
+                 values = np.concatenate(vals, axis=1)  # type: ignore[arg-type]
+             elif is_1d_only_ea_dtype(blk.dtype):
+                 # TODO(EA2D): special-casing not needed with 2D EAs
+                 values = concat_compat(vals, axis=0, ea_compat_axis=True)
+                 values = ensure_block_shape(values, ndim=2)
+             else:
+                 values = concat_compat(vals, axis=1)
+ 
+             values = ensure_wrapped_if_datetimelike(values)
+ 
+             fastpath = blk.values.dtype == values.dtype
+         else:
+             values = _concatenate_join_units(join_units, copy=copy)
+             fastpath = False
+ 
+         if fastpath:
+             b = blk.make_block_same_class(values, placement=placement)
+         else:
+             b = new_block_2d(values, placement=placement)
+ 
+         blocks.append(b)
+ 
+     return BlockManager(tuple(blocks), axes)
+ 
+ 
+ def _maybe_reindex_columns_na_proxy(
+     axes: list[Index],
+     mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]],
+     needs_copy: bool,
+ ) -> list[BlockManager]:
+     """
+     Reindex along columns so that all of the BlockManagers being concatenated
+     have matching columns.
+ 
+     Columns added in this reindexing have dtype=np.void, indicating they
+     should be ignored when choosing a column's final dtype.
+     """
+     new_mgrs = []
+ 
+     for mgr, indexers in mgrs_indexers:
+         # For axis=0 (i.e. columns) we use_na_proxy and only_slice, so this
+         #  is a cheap reindexing.
+         for i, indexer in indexers.items():
+             mgr = mgr.reindex_indexer(
+                 axes[i],
+                 indexers[i],
+                 axis=i,
+                 copy=False,
+                 only_slice=True,  # only relevant for i==0
+                 allow_dups=True,
+                 use_na_proxy=True,  # only relevant for i==0
+             )
+         if needs_copy and not indexers:
+             mgr = mgr.copy()
+ 
+         new_mgrs.append(mgr)
+     return new_mgrs
+ 
+ 
+ def _is_homogeneous_mgr(mgr: BlockManager, first_dtype: DtypeObj) -> bool:
+     """
+     Check if this Manager can be treated as a single ndarray.
+     """
+     if mgr.nblocks != 1:
+         return False
+     blk = mgr.blocks[0]
+     if not (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1):
+         return False
+ 
+     return blk.dtype == first_dtype
+ 
+ 
+ def _concat_homogeneous_fastpath(
+     mgrs_indexers, shape: Shape, first_dtype: np.dtype
+ ) -> Block:
+     """
+     With single-Block managers with homogeneous dtypes (that can already hold nan),
+     we avoid [...]
+     """
+     # assumes
+     #  all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in mgrs_indexers)
+ 
+     if all(not indexers for _, indexers in mgrs_indexers):
+         # https://github.com/pandas-dev/pandas/pull/52685#issuecomment-1523287739
+         arrs = [mgr.blocks[0].values.T for mgr, _ in mgrs_indexers]
+         arr = np.concatenate(arrs).T
+         bp = libinternals.BlockPlacement(slice(shape[0]))
+         nb = new_block_2d(arr, bp)
+         return nb
+ 
+     arr = np.empty(shape, dtype=first_dtype)
+ 
+     if first_dtype == np.float64:
+         take_func = libalgos.take_2d_axis0_float64_float64
+     else:
+         take_func = libalgos.take_2d_axis0_float32_float32
+ 
+     start = 0
+     for mgr, indexers in mgrs_indexers:
+         mgr_len = mgr.shape[1]
+         end = start + mgr_len
+ 
+         if 0 in indexers:
+             take_func(
+                 mgr.blocks[0].values,
+                 indexers[0],
+                 arr[:, start:end],
+             )
+         else:
+             # No reindexing necessary, we can copy values directly
+             arr[:, start:end] = mgr.blocks[0].values
+ 
+         start += mgr_len
+ 
+     bp = libinternals.BlockPlacement(slice(shape[0]))
+     nb = new_block_2d(arr, bp)
+     return nb
+ 
+ 
+ def _get_combined_plan(
+     mgrs: list[BlockManager],
+ ) -> list[tuple[BlockPlacement, list[JoinUnit]]]:
+     plan = []
+ 
+     max_len = mgrs[0].shape[0]
+ 
+     blknos_list = [mgr.blknos for mgr in mgrs]
+     pairs = libinternals.get_concat_blkno_indexers(blknos_list)
+     for ind, (blknos, bp) in enumerate(pairs):
+         # assert bp.is_slice_like
+         # assert len(bp) > 0
+ 
+         units_for_bp = []
+         for k, mgr in enumerate(mgrs):
+             blkno = blknos[k]
+ 
+             nb = _get_block_for_concat_plan(mgr, bp, blkno, max_len=max_len)
+             unit = JoinUnit(nb)
+             units_for_bp.append(unit)
+ 
+         plan.append((bp, units_for_bp))
+ 
+     return plan
+ 
+ 
+ def _get_block_for_concat_plan(
+     mgr: BlockManager, bp: BlockPlacement, blkno: int, *, max_len: int
+ ) -> Block:
+     blk = mgr.blocks[blkno]
+     # Assertions disabled for performance:
+     #  assert bp.is_slice_like
+     #  assert blkno != -1
+     #  assert (mgr.blknos[bp] == blkno).all()
+ 
+     if len(bp) == len(blk.mgr_locs) and (
+         blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1
+     ):
+         nb = blk
+     else:
+         ax0_blk_indexer = mgr.blklocs[bp.indexer]
+ 
+         slc = lib.maybe_indices_to_slice(ax0_blk_indexer, max_len)
+         # TODO: in all extant test cases 2023-04-08 we have a slice here.
+         #  Will this always be the case?
+         if isinstance(slc, slice):
+             nb = blk.slice_block_columns(slc)
+         else:
+             nb = blk.take_block_columns(slc)
+ 
+     # assert nb.shape == (len(bp), mgr.shape[1])
+     return nb
+ 
+ 
+ class JoinUnit:
+     def __init__(self, block: Block) -> None:
+         self.block = block
+ 
+     def __repr__(self) -> str:
+         return f"{type(self).__name__}({repr(self.block)})"
+ 
+     def _is_valid_na_for(self, dtype: DtypeObj) -> bool:
+         """
+         Check that we are all-NA of a type/dtype that is compatible with this dtype.
+         Augments `self.is_na` with an additional check of the type of NA values.
+         """
+         if not self.is_na:
+             return False
+ 
+         blk = self.block
+         if blk.dtype.kind == "V":
+             return True
+ 
+         if blk.dtype == object:
+             values = blk.values
+             return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K"))
+ 
+         na_value = blk.fill_value
+         if na_value is NaT and blk.dtype != dtype:
+             # e.g. we are dt64 and other is td64
+             #  fill_values match but we should not cast blk.values to dtype
+             # TODO: this will need updating if we ever have non-nano dt64/td64
+             return False
+ 
+         if na_value is NA and needs_i8_conversion(dtype):
+             # FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat
+             #  e.g. blk.dtype == "Int64" and dtype is td64, we dont want
+             #  to consider these as matching
+             return False
+ 
+         # TODO: better to use can_hold_element?
+         return is_valid_na_for_dtype(na_value, dtype)
+ 
+     @cache_readonly
+     def is_na(self) -> bool:
+         blk = self.block
+         if blk.dtype.kind == "V":
+             return True
+ 
+         if not blk._can_hold_na:
+             return False
+ 
+         values = blk.values
+         if values.size == 0:
+             # GH#39122 this case will return False once deprecation is enforced
+             return True
+ 
+         if isinstance(values.dtype, SparseDtype):
+             return False
+ 
+         if values.ndim == 1:
+             # TODO(EA2D): no need for special case with 2D EAs
+             val = values[0]
+             if not is_scalar(val) or not isna(val):
+                 # ideally isna_all would do this short-circuiting
+                 return False
+             return isna_all(values)
+         else:
+             val = values[0][0]
+             if not is_scalar(val) or not isna(val):
+                 # ideally isna_all would do this short-circuiting
+                 return False
+             return all(isna_all(row) for row in values)
+ 
+     @cache_readonly
+     def is_na_after_size_and_isna_all_deprecation(self) -> bool:
+         """
+         Will self.is_na be True after values.size == 0 deprecation and isna_all
+         deprecation are enforced?
+         """
+         blk = self.block
+         if blk.dtype.kind == "V":
+             return True
+         return False
+ 
+     def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
+         values: ArrayLike
+ 
+         if upcasted_na is None and self.block.dtype.kind != "V":
+             # No upcasting is necessary
+             return self.block.values
+         else:
+             fill_value = upcasted_na
+ 
+             if self._is_valid_na_for(empty_dtype):
+                 # note: always holds when self.block.dtype.kind == "V"
+                 blk_dtype = self.block.dtype
+ 
+                 if blk_dtype == np.dtype("object"):
+                     # we want to avoid filling with np.nan if we are
+                     # using None; we already know that we are all
+                     # nulls
+                     values = cast(np.ndarray, self.block.values)
+                     if values.size and values[0, 0] is None:
+                         fill_value = None
+ 
+                 return make_na_array(empty_dtype, self.block.shape, fill_value)
+ 
+             return self.block.values
+ 
+ 
+ def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike:
+     """
+     Concatenate values from several join units along axis=1.
+     """
+     empty_dtype, empty_dtype_future = _get_empty_dtype(join_units)
+ 
+     has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)
+     upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks)
+ 
+     to_concat = [
+         ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)
+         for ju in join_units
+     ]
+ 
+     if any(is_1d_only_ea_dtype(t.dtype) for t in to_concat):
+         # TODO(EA2D): special case not needed if all EAs used HybridBlocks
+ 
+         # error: No overload variant of "__getitem__" of "ExtensionArray" matches
+         # argument type "Tuple[int, slice]"
+         to_concat = [
+             t
+             if is_1d_only_ea_dtype(t.dtype)
+             else t[0, :]  # type: ignore[call-overload]
+             for t in to_concat
+         ]
+         concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True)
+         concat_values = ensure_block_shape(concat_values, 2)
+ 
+     else:
+         concat_values = concat_compat(to_concat, axis=1)
+ 
+     if empty_dtype != empty_dtype_future:
+         if empty_dtype == concat_values.dtype:
+             # GH#39122, GH#40893
+             warnings.warn(
+                 "The behavior of DataFrame concatenation with empty or all-NA "
+                 "entries is deprecated. In a future version, this will no longer "
+                 "exclude empty or all-NA columns when determining the result dtypes. "
+                 "To retain the old behavior, exclude the relevant entries before "
+                 "the concat operation.",
+                 FutureWarning,
+                 stacklevel=find_stack_level(),
+             )
+     return concat_values
+ 
+ 
+ def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool):
+     """
+     Find the NA value to go with this dtype.
+     """
+     if isinstance(dtype, ExtensionDtype):
+         return dtype.na_value
+     elif dtype.kind in "mM":
+         return dtype.type("NaT")
+     elif dtype.kind in "fc":
+         return dtype.type("NaN")
+     elif dtype.kind == "b":
+         # different from missing.na_value_for_dtype
+         return None
+     elif dtype.kind in "iu":
+         if not has_none_blocks:
+             # different from missing.na_value_for_dtype
+             return None
+         return np.nan
+     elif dtype.kind == "O":
+         return np.nan
+     raise NotImplementedError
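
Concretely, the mapping above looks like this (calling the private helper defined in this diff, so version-specific); note integers only get an NA fill value when a dtype=void proxy column is present:

    import numpy as np
    from pandas.core.internals.concat import _dtype_to_na_value

    print(_dtype_to_na_value(np.dtype("datetime64[ns]"), False))  # NaT
    print(_dtype_to_na_value(np.dtype("float64"), False))         # nan
    print(_dtype_to_na_value(np.dtype("int64"), False))           # None -> no filling
    print(_dtype_to_na_value(np.dtype("int64"), True))            # nan -> upcast needed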
+ 
+ 
+ def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> tuple[DtypeObj, DtypeObj]:
+     """
+     Return dtype and N/A values to use when concatenating specified units.
+ 
+     Returned N/A value may be None which means there was no casting involved.
+ 
+     Returns
+     -------
+     dtype
+     """
+     if lib.dtypes_all_equal([ju.block.dtype for ju in join_units]):
+         empty_dtype = join_units[0].block.dtype
+         return empty_dtype, empty_dtype
+ 
+     has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)
+ 
+     dtypes = [unit.block.dtype for unit in join_units if not unit.is_na]
+     if not len(dtypes):
+         dtypes = [
+             unit.block.dtype for unit in join_units if unit.block.dtype.kind != "V"
+         ]
+ 
+     dtype = find_common_type(dtypes)
+     if has_none_blocks:
+         dtype = ensure_dtype_can_hold_na(dtype)
+ 
+     dtype_future = dtype
+     if len(dtypes) != len(join_units):
+         dtypes_future = [
+             unit.block.dtype
+             for unit in join_units
+             if not unit.is_na_after_size_and_isna_all_deprecation
+         ]
+         if not len(dtypes_future):
+             dtypes_future = [
+                 unit.block.dtype for unit in join_units if unit.block.dtype.kind != "V"
+             ]
+ 
+         if len(dtypes) != len(dtypes_future):
+             dtype_future = find_common_type(dtypes_future)
+             if has_none_blocks:
+                 dtype_future = ensure_dtype_can_hold_na(dtype_future)
+ 
+     return dtype, dtype_future
+ 
+ 
+ def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool:
+     """
+     Check if the join units consist of blocks of uniform type that can
+     be concatenated using Block.concat_same_type instead of the generic
+     _concatenate_join_units (which uses `concat_compat`).
+     """
+     first = join_units[0].block
+     if first.dtype.kind == "V":
+         return False
+     return (
+         # exclude cases where a) ju.block is None or b) we have e.g. Int64+int64
+         all(type(ju.block) is type(first) for ju in join_units)
+         and
+         # e.g. DatetimeLikeBlock can be dt64 or td64, but these are not uniform
+         all(
+             ju.block.dtype == first.dtype
+             # GH#42092 we only want the dtype_equal check for non-numeric blocks
+             #  (for now, may change but that would need a deprecation)
+             or ju.block.dtype.kind in "iub"
+             for ju in join_units
+         )
+         and
+         # no blocks that would get missing values (can lead to type upcasts)
+         # unless we're an extension dtype.
+         all(not ju.is_na or ju.block.is_extension for ju in join_units)
+     )
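
For context, the public-API behavior these helpers implement: on pandas versions matching this diff, an all-NA column is still excluded when choosing the result dtype, and the FutureWarning quoted above fires. A sketch with illustrative data:

    import numpy as np
    import pandas as pd

    df1 = pd.DataFrame({"x": pd.to_datetime(["2020-01-01"])})
    df2 = pd.DataFrame({"x": [np.nan]})  # all-NA, float64

    out = pd.concat([df1, df2])  # emits the FutureWarning quoted above
    print(out["x"].dtype)  # datetime64[ns] today; object once the change lands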
venv/lib/python3.10/site-packages/pandas/core/internals/construction.py ADDED
@@ -0,0 +1,1072 @@
+ """
+ Functions for preparing various inputs passed to the DataFrame or Series
+ constructors before passing them to a BlockManager.
+ """
+ from __future__ import annotations
+ 
+ from collections import abc
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+ )
+ 
+ import numpy as np
+ from numpy import ma
+ 
+ from pandas._config import using_pyarrow_string_dtype
+ 
+ from pandas._libs import lib
+ 
+ from pandas.core.dtypes.astype import astype_is_view
+ from pandas.core.dtypes.cast import (
+     construct_1d_arraylike_from_scalar,
+     dict_compat,
+     maybe_cast_to_datetime,
+     maybe_convert_platform,
+     maybe_infer_to_datetimelike,
+ )
+ from pandas.core.dtypes.common import (
+     is_1d_only_ea_dtype,
+     is_integer_dtype,
+     is_list_like,
+     is_named_tuple,
+     is_object_dtype,
+ )
+ from pandas.core.dtypes.dtypes import ExtensionDtype
+ from pandas.core.dtypes.generic import (
+     ABCDataFrame,
+     ABCSeries,
+ )
+ 
+ from pandas.core import (
+     algorithms,
+     common as com,
+ )
+ from pandas.core.arrays import ExtensionArray
+ from pandas.core.arrays.string_ import StringDtype
+ from pandas.core.construction import (
+     array as pd_array,
+     ensure_wrapped_if_datetimelike,
+     extract_array,
+     range_to_ndarray,
+     sanitize_array,
+ )
+ from pandas.core.indexes.api import (
+     DatetimeIndex,
+     Index,
+     TimedeltaIndex,
+     default_index,
+     ensure_index,
+     get_objs_combined_axis,
+     union_indexes,
+ )
+ from pandas.core.internals.array_manager import (
+     ArrayManager,
+     SingleArrayManager,
+ )
+ from pandas.core.internals.blocks import (
+     BlockPlacement,
+     ensure_block_shape,
+     new_block,
+     new_block_2d,
+ )
+ from pandas.core.internals.managers import (
+     BlockManager,
+     SingleBlockManager,
+     create_block_manager_from_blocks,
+     create_block_manager_from_column_arrays,
+ )
+ 
+ if TYPE_CHECKING:
+     from collections.abc import (
+         Hashable,
+         Sequence,
+     )
+ 
+     from pandas._typing import (
+         ArrayLike,
+         DtypeObj,
+         Manager,
+         npt,
+     )
+ # ---------------------------------------------------------------------
+ # BlockManager Interface
+ 
+ 
+ def arrays_to_mgr(
+     arrays,
+     columns: Index,
+     index,
+     *,
+     dtype: DtypeObj | None = None,
+     verify_integrity: bool = True,
+     typ: str | None = None,
+     consolidate: bool = True,
+ ) -> Manager:
+     """
+     Segregate Series based on type and coerce into matrices.
+ 
+     Needs to handle a lot of exceptional cases.
+     """
+     if verify_integrity:
+         # figure out the index, if necessary
+         if index is None:
+             index = _extract_index(arrays)
+         else:
+             index = ensure_index(index)
+ 
+         # don't force copy because getting jammed in an ndarray anyway
+         arrays, refs = _homogenize(arrays, index, dtype)
+         # _homogenize ensures
+         #  - all(len(x) == len(index) for x in arrays)
+         #  - all(x.ndim == 1 for x in arrays)
+         #  - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays)
+         #  - all(type(x) is not NumpyExtensionArray for x in arrays)
+ 
+     else:
+         index = ensure_index(index)
+         arrays = [extract_array(x, extract_numpy=True) for x in arrays]
+         # with _from_arrays, the passed arrays should never be Series objects
+         refs = [None] * len(arrays)
+ 
+         # Reached via DataFrame._from_arrays; we do minimal validation here
+         for arr in arrays:
+             if (
+                 not isinstance(arr, (np.ndarray, ExtensionArray))
+                 or arr.ndim != 1
+                 or len(arr) != len(index)
+             ):
+                 raise ValueError(
+                     "Arrays must be 1-dimensional np.ndarray or ExtensionArray "
+                     "with length matching len(index)"
+                 )
+ 
+     columns = ensure_index(columns)
+     if len(columns) != len(arrays):
+         raise ValueError("len(arrays) must match len(columns)")
+ 
+     # from BlockManager perspective
+     axes = [columns, index]
+ 
+     if typ == "block":
+         return create_block_manager_from_column_arrays(
+             arrays, axes, consolidate=consolidate, refs=refs
+         )
+     elif typ == "array":
+         return ArrayManager(arrays, [index, columns])
+     else:
+         raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
+ 
+ 
+ def rec_array_to_mgr(
+     data: np.rec.recarray | np.ndarray,
+     index,
+     columns,
+     dtype: DtypeObj | None,
+     copy: bool,
+     typ: str,
+ ) -> Manager:
+     """
+     Extract from a masked rec array and create the manager.
+     """
+     # essentially process a record array then fill it
+     fdata = ma.getdata(data)
+     if index is None:
+         index = default_index(len(fdata))
+     else:
+         index = ensure_index(index)
+ 
+     if columns is not None:
+         columns = ensure_index(columns)
+     arrays, arr_columns = to_arrays(fdata, columns)
+ 
+     # create the manager
+ 
+     arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, len(index))
+     if columns is None:
+         columns = arr_columns
+ 
+     mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ)
+ 
+     if copy:
+         mgr = mgr.copy()
+     return mgr
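
`rec_array_to_mgr` backs DataFrame construction from numpy record arrays; a minimal sketch with illustrative data:

    import numpy as np
    import pandas as pd

    rec = np.rec.fromarrays([[1, 2], ["a", "b"]], names=["n", "s"])
    df = pd.DataFrame(rec)  # field names become the columns
    print(df.columns.tolist())  # ['n', 's']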
194
+
195
+
196
+ def mgr_to_mgr(mgr, typ: str, copy: bool = True) -> Manager:
197
+ """
198
+ Convert to specific type of Manager. Does not copy if the type is already
199
+ correct. Does not guarantee a copy otherwise. `copy` keyword only controls
200
+ whether conversion from Block->ArrayManager copies the 1D arrays.
201
+ """
202
+ new_mgr: Manager
203
+
204
+ if typ == "block":
205
+ if isinstance(mgr, BlockManager):
206
+ new_mgr = mgr
207
+ else:
208
+ if mgr.ndim == 2:
209
+ new_mgr = arrays_to_mgr(
210
+ mgr.arrays, mgr.axes[0], mgr.axes[1], typ="block"
211
+ )
212
+ else:
213
+ new_mgr = SingleBlockManager.from_array(mgr.arrays[0], mgr.index)
214
+ elif typ == "array":
215
+ if isinstance(mgr, ArrayManager):
216
+ new_mgr = mgr
217
+ else:
218
+ if mgr.ndim == 2:
219
+ arrays = [mgr.iget_values(i) for i in range(len(mgr.axes[0]))]
220
+ if copy:
221
+ arrays = [arr.copy() for arr in arrays]
222
+ new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]])
223
+ else:
224
+ array = mgr.internal_values()
225
+ if copy:
226
+ array = array.copy()
227
+ new_mgr = SingleArrayManager([array], [mgr.index])
228
+ else:
229
+ raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
230
+ return new_mgr
231
+
232
+
233
+ # ---------------------------------------------------------------------
234
+ # DataFrame Constructor Interface
235
+
236
+
237
+ def ndarray_to_mgr(
238
+ values, index, columns, dtype: DtypeObj | None, copy: bool, typ: str
239
+ ) -> Manager:
240
+ # used in DataFrame.__init__
241
+ # input must be a ndarray, list, Series, Index, ExtensionArray
242
+
243
+ if isinstance(values, ABCSeries):
244
+ if columns is None:
245
+ if values.name is not None:
246
+ columns = Index([values.name])
247
+ if index is None:
248
+ index = values.index
249
+ else:
250
+ values = values.reindex(index)
251
+
252
+ # zero len case (GH #2234)
253
+ if not len(values) and columns is not None and len(columns):
254
+ values = np.empty((0, 1), dtype=object)
255
+
256
+ # if the array preparation does a copy -> avoid this for ArrayManager,
257
+ # since the copy is done on conversion to 1D arrays
258
+ copy_on_sanitize = False if typ == "array" else copy
259
+
260
+ vdtype = getattr(values, "dtype", None)
261
+ refs = None
262
+ if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype):
263
+ # GH#19157
264
+
265
+ if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1:
266
+ # GH#12513 a EA dtype passed with a 2D array, split into
267
+ # multiple EAs that view the values
268
+ # error: No overload variant of "__getitem__" of "ExtensionArray"
269
+ # matches argument type "Tuple[slice, int]"
270
+ values = [
271
+ values[:, n] # type: ignore[call-overload]
272
+ for n in range(values.shape[1])
273
+ ]
274
+ else:
275
+ values = [values]
276
+
277
+ if columns is None:
278
+ columns = Index(range(len(values)))
279
+ else:
280
+ columns = ensure_index(columns)
281
+
282
+ return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ)
283
+
284
+ elif isinstance(vdtype, ExtensionDtype):
285
+ # i.e. Datetime64TZ, PeriodDtype; cases with is_1d_only_ea_dtype(vdtype)
286
+ # are already caught above
287
+ values = extract_array(values, extract_numpy=True)
288
+ if copy:
289
+ values = values.copy()
290
+ if values.ndim == 1:
291
+ values = values.reshape(-1, 1)
292
+
293
+ elif isinstance(values, (ABCSeries, Index)):
294
+ if not copy_on_sanitize and (
295
+ dtype is None or astype_is_view(values.dtype, dtype)
296
+ ):
297
+ refs = values._references
298
+
299
+ if copy_on_sanitize:
300
+ values = values._values.copy()
301
+ else:
302
+ values = values._values
303
+
304
+ values = _ensure_2d(values)
305
+
306
+ elif isinstance(values, (np.ndarray, ExtensionArray)):
307
+ # drop subclass info
308
+ _copy = (
309
+ copy_on_sanitize
310
+ if (dtype is None or astype_is_view(values.dtype, dtype))
311
+ else False
312
+ )
313
+ values = np.array(values, copy=_copy)
314
+ values = _ensure_2d(values)
315
+
316
+ else:
317
+ # by definition an array here
318
+ # the dtypes will be coerced to a single dtype
319
+ values = _prep_ndarraylike(values, copy=copy_on_sanitize)
320
+
321
+ if dtype is not None and values.dtype != dtype:
322
+ # GH#40110 see similar check inside sanitize_array
323
+ values = sanitize_array(
324
+ values,
325
+ None,
326
+ dtype=dtype,
327
+ copy=copy_on_sanitize,
328
+ allow_2d=True,
329
+ )
330
+
331
+ # _prep_ndarraylike ensures that values.ndim == 2 at this point
332
+ index, columns = _get_axes(
333
+ values.shape[0], values.shape[1], index=index, columns=columns
334
+ )
335
+
336
+ _check_values_indices_shape_match(values, index, columns)
337
+
338
+ if typ == "array":
339
+ if issubclass(values.dtype.type, str):
340
+ values = np.array(values, dtype=object)
341
+
342
+ if dtype is None and is_object_dtype(values.dtype):
343
+ arrays = [
344
+ ensure_wrapped_if_datetimelike(
345
+ maybe_infer_to_datetimelike(values[:, i])
346
+ )
347
+ for i in range(values.shape[1])
348
+ ]
349
+ else:
350
+ if lib.is_np_dtype(values.dtype, "mM"):
351
+ values = ensure_wrapped_if_datetimelike(values)
352
+ arrays = [values[:, i] for i in range(values.shape[1])]
353
+
354
+ if copy:
355
+ arrays = [arr.copy() for arr in arrays]
356
+
357
+ return ArrayManager(arrays, [index, columns], verify_integrity=False)
358
+
359
+ values = values.T
360
+
361
+ # if we don't have a dtype specified, then try to convert objects
362
+ # on the entire block; this is to convert if we have datetimelike's
363
+ # embedded in an object type
364
+ if dtype is None and is_object_dtype(values.dtype):
365
+ obj_columns = list(values)
366
+ maybe_datetime = [maybe_infer_to_datetimelike(x) for x in obj_columns]
367
+ # don't convert (and copy) the objects if no type inference occurs
368
+ if any(x is not y for x, y in zip(obj_columns, maybe_datetime)):
369
+ dvals_list = [ensure_block_shape(dval, 2) for dval in maybe_datetime]
370
+ block_values = [
371
+ new_block_2d(dvals_list[n], placement=BlockPlacement(n))
372
+ for n in range(len(dvals_list))
373
+ ]
374
+ else:
375
+ bp = BlockPlacement(slice(len(columns)))
376
+ nb = new_block_2d(values, placement=bp, refs=refs)
377
+ block_values = [nb]
378
+ elif dtype is None and values.dtype.kind == "U" and using_pyarrow_string_dtype():
379
+ dtype = StringDtype(storage="pyarrow_numpy")
380
+
381
+ obj_columns = list(values)
382
+ block_values = [
383
+ new_block(
384
+ dtype.construct_array_type()._from_sequence(data, dtype=dtype),
385
+ BlockPlacement(slice(i, i + 1)),
386
+ ndim=2,
387
+ )
388
+ for i, data in enumerate(obj_columns)
389
+ ]
390
+
391
+ else:
392
+ bp = BlockPlacement(slice(len(columns)))
393
+ nb = new_block_2d(values, placement=bp, refs=refs)
394
+ block_values = [nb]
395
+
396
+ if len(columns) == 0:
397
+ # TODO: check len(values) == 0?
398
+ block_values = []
399
+
400
+ return create_block_manager_from_blocks(
401
+ block_values, [columns, index], verify_integrity=False
402
+ )
403
+
404
+
405
+ def _check_values_indices_shape_match(
406
+ values: np.ndarray, index: Index, columns: Index
407
+ ) -> None:
408
+ """
409
+ Check that the shape implied by our axes matches the actual shape of the
410
+ data.
411
+ """
412
+ if values.shape[1] != len(columns) or values.shape[0] != len(index):
413
+ # Could let this raise in Block constructor, but we get a more
414
+ # helpful exception message this way.
415
+ if values.shape[0] == 0 < len(index):
416
+ raise ValueError("Empty data passed with indices specified.")
417
+
418
+ passed = values.shape
419
+ implied = (len(index), len(columns))
420
+ raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
421
+
422
+
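The ValueError raised above is observable from the public constructor; a short illustration (assuming the default block path):

import numpy as np
import pandas as pd

try:
    pd.DataFrame(np.zeros((2, 2)), columns=["a", "b", "c"])
except ValueError as err:
    # Shape of passed values is (2, 2), indices imply (2, 3)
    print(err)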
423
+ def dict_to_mgr(
424
+ data: dict,
425
+ index,
426
+ columns,
427
+ *,
428
+ dtype: DtypeObj | None = None,
429
+ typ: str = "block",
430
+ copy: bool = True,
431
+ ) -> Manager:
432
+ """
433
+ Segregate Series based on type and coerce into matrices.
434
+ Needs to handle a lot of exceptional cases.
435
+
436
+ Used in DataFrame.__init__
437
+ """
438
+ arrays: Sequence[Any] | Series
439
+
440
+ if columns is not None:
441
+ from pandas.core.series import Series
442
+
443
+ arrays = Series(data, index=columns, dtype=object)
444
+ missing = arrays.isna()
445
+ if index is None:
446
+ # GH10856
447
+ # raise ValueError if only scalars in dict
448
+ index = _extract_index(arrays[~missing])
449
+ else:
450
+ index = ensure_index(index)
451
+
452
+ # no obvious "empty" int column
453
+ if missing.any() and not is_integer_dtype(dtype):
454
+ nan_dtype: DtypeObj
455
+
456
+ if dtype is not None:
457
+ # calling sanitize_array ensures we don't mix-and-match
458
+ # NA dtypes
459
+ midxs = missing.values.nonzero()[0]
460
+ for i in midxs:
461
+ arr = sanitize_array(arrays.iat[i], index, dtype=dtype)
462
+ arrays.iat[i] = arr
463
+ else:
464
+ # GH#1783
465
+ nan_dtype = np.dtype("object")
466
+ val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
467
+ nmissing = missing.sum()
468
+ if copy:
469
+ rhs = [val] * nmissing
470
+ else:
471
+ # GH#45369
472
+ rhs = [val.copy() for _ in range(nmissing)]
473
+ arrays.loc[missing] = rhs
474
+
475
+ arrays = list(arrays)
476
+ columns = ensure_index(columns)
477
+
478
+ else:
479
+ keys = list(data.keys())
480
+ columns = Index(keys) if keys else default_index(0)
481
+ arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]
482
+
483
+ if copy:
484
+ if typ == "block":
485
+ # We only need to copy arrays that will not get consolidated, i.e.
486
+ # only EA arrays
487
+ arrays = [
488
+ x.copy()
489
+ if isinstance(x, ExtensionArray)
490
+ else x.copy(deep=True)
491
+ if (
492
+ isinstance(x, Index)
493
+ or isinstance(x, ABCSeries)
494
+ and is_1d_only_ea_dtype(x.dtype)
495
+ )
496
+ else x
497
+ for x in arrays
498
+ ]
499
+ else:
500
+ # dtype check to exclude e.g. range objects, scalars
501
+ arrays = [x.copy() if hasattr(x, "dtype") else x for x in arrays]
502
+
503
+ return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy)
504
+
505
+
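A brief illustration (not pandas source) of the missing-key handling above: a requested column absent from the dict is filled with NaN, per the GH#1783 object-dtype rule when no dtype is given:

import pandas as pd

df = pd.DataFrame({"a": [1, 2]}, columns=["a", "b"])
print(df["b"].isna().all())  # True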
506
+ def nested_data_to_arrays(
507
+ data: Sequence,
508
+ columns: Index | None,
509
+ index: Index | None,
510
+ dtype: DtypeObj | None,
511
+ ) -> tuple[list[ArrayLike], Index, Index]:
512
+ """
513
+ Convert a single sequence of arrays to multiple arrays.
514
+ """
515
+ # By the time we get here we have already checked treat_as_nested(data)
516
+
517
+ if is_named_tuple(data[0]) and columns is None:
518
+ columns = ensure_index(data[0]._fields)
519
+
520
+ arrays, columns = to_arrays(data, columns, dtype=dtype)
521
+ columns = ensure_index(columns)
522
+
523
+ if index is None:
524
+ if isinstance(data[0], ABCSeries):
525
+ index = _get_names_from_index(data)
526
+ else:
527
+ index = default_index(len(data))
528
+
529
+ return arrays, columns, index
530
+
531
+
532
+ def treat_as_nested(data) -> bool:
533
+ """
534
+ Check if we should use nested_data_to_arrays.
535
+ """
536
+ return (
537
+ len(data) > 0
538
+ and is_list_like(data[0])
539
+ and getattr(data[0], "ndim", 1) == 1
540
+ and not (isinstance(data, ExtensionArray) and data.ndim == 2)
541
+ )
542
+
543
+
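A hedged sketch of what the check above classifies as nested (a sequence of 1-D list-likes, treated as rows) versus flat:

import pandas as pd

print(pd.DataFrame([[1, 2], [3, 4]]).shape)  # (2, 2): nested, rows of values
print(pd.DataFrame([1, 2, 3]).shape)         # (3, 1): flat, a single column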
544
+ # ---------------------------------------------------------------------
545
+
546
+
547
+ def _prep_ndarraylike(values, copy: bool = True) -> np.ndarray:
548
+ # values is specifically _not_ ndarray, EA, Index, or Series
549
+ # We only get here with `not treat_as_nested(values)`
550
+
551
+ if len(values) == 0:
552
+ # TODO: check for length-zero range, in which case return int64 dtype?
553
+ # TODO: reuse anything in try_cast?
554
+ return np.empty((0, 0), dtype=object)
555
+ elif isinstance(values, range):
556
+ arr = range_to_ndarray(values)
557
+ return arr[..., np.newaxis]
558
+
559
+ def convert(v):
560
+ if not is_list_like(v) or isinstance(v, ABCDataFrame):
561
+ return v
562
+
563
+ v = extract_array(v, extract_numpy=True)
564
+ res = maybe_convert_platform(v)
565
+ # We don't do maybe_infer_to_datetimelike here bc we will end up doing
566
+ # it column-by-column in ndarray_to_mgr
567
+ return res
568
+
569
+ # we could have a 1-dim or 2-dim list here
570
+ # this is equiv of np.asarray, but does object conversion
571
+ # and platform dtype preservation
572
+ # does not convert e.g. [1, "a", True] to ["1", "a", "True"] like
573
+ # np.asarray would
574
+ if is_list_like(values[0]):
575
+ values = np.array([convert(v) for v in values])
576
+ elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
577
+ # GH#21861 see test_constructor_list_of_lists
578
+ values = np.array([convert(v) for v in values])
579
+ else:
580
+ values = convert(values)
581
+
582
+ return _ensure_2d(values)
583
+
584
+
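The np.asarray contrast mentioned in the comments above can be seen directly (illustrative, relying on NumPy's usual promotion rules):

import numpy as np

print(np.asarray([1, "a", True]))              # ['1' 'a' 'True'], stringified
print(np.array([1, "a", True], dtype=object))  # [1 'a' True], objects kept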
585
+ def _ensure_2d(values: np.ndarray) -> np.ndarray:
586
+ """
587
+ Reshape 1D values, raise on anything else other than 2D.
588
+ """
589
+ if values.ndim == 1:
590
+ values = values.reshape((values.shape[0], 1))
591
+ elif values.ndim != 2:
592
+ raise ValueError(f"Must pass 2-d input. shape={values.shape}")
593
+ return values
594
+
595
+
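A minimal sketch of the reshape rule _ensure_2d applies: 1-D input becomes a single-column 2-D array, 2-D passes through, anything else raises:

import numpy as np

arr = np.arange(3)
print(arr.reshape((arr.shape[0], 1)).shape)  # (3, 1), as in the 1-D branch
# a 3-D input would hit the ValueError branch above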
596
+ def _homogenize(
597
+ data, index: Index, dtype: DtypeObj | None
598
+ ) -> tuple[list[ArrayLike], list[Any]]:
599
+ oindex = None
600
+ homogenized = []
601
+ # if the original array-like in `data` is a Series, keep track of this Series' refs
602
+ refs: list[Any] = []
603
+
604
+ for val in data:
605
+ if isinstance(val, (ABCSeries, Index)):
606
+ if dtype is not None:
607
+ val = val.astype(dtype, copy=False)
608
+ if isinstance(val, ABCSeries) and val.index is not index:
609
+ # Forces alignment. No need to copy data since we
610
+ # are putting it into an ndarray later
611
+ val = val.reindex(index, copy=False)
612
+ refs.append(val._references)
613
+ val = val._values
614
+ else:
615
+ if isinstance(val, dict):
616
+ # GH#41785 this _should_ be equivalent to (but faster than)
617
+ # val = Series(val, index=index)._values
618
+ if oindex is None:
619
+ oindex = index.astype("O")
620
+
621
+ if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
622
+ # see test_constructor_dict_datetime64_index
623
+ val = dict_compat(val)
624
+ else:
625
+ # see test_constructor_subclass_dict
626
+ val = dict(val)
627
+ val = lib.fast_multiget(val, oindex._values, default=np.nan)
628
+
629
+ val = sanitize_array(val, index, dtype=dtype, copy=False)
630
+ com.require_length_match(val, index)
631
+ refs.append(None)
632
+
633
+ homogenized.append(val)
634
+
635
+ return homogenized, refs
636
+
637
+
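Series alignment as performed in the loop above, observable through the public constructor (a sketch, not the internal call itself):

import pandas as pd

s = pd.Series([1, 2], index=["x", "y"])
# The Series is reindexed to the frame's index; "z" has no match -> NaN.
print(pd.DataFrame({"a": s}, index=["y", "z"]))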
638
+ def _extract_index(data) -> Index:
639
+ """
640
+ Try to infer an Index from the passed data, raise ValueError on failure.
641
+ """
642
+ index: Index
643
+ if len(data) == 0:
644
+ return default_index(0)
645
+
646
+ raw_lengths = []
647
+ indexes: list[list[Hashable] | Index] = []
648
+
649
+ have_raw_arrays = False
650
+ have_series = False
651
+ have_dicts = False
652
+
653
+ for val in data:
654
+ if isinstance(val, ABCSeries):
655
+ have_series = True
656
+ indexes.append(val.index)
657
+ elif isinstance(val, dict):
658
+ have_dicts = True
659
+ indexes.append(list(val.keys()))
660
+ elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
661
+ have_raw_arrays = True
662
+ raw_lengths.append(len(val))
663
+ elif isinstance(val, np.ndarray) and val.ndim > 1:
664
+ raise ValueError("Per-column arrays must each be 1-dimensional")
665
+
666
+ if not indexes and not raw_lengths:
667
+ raise ValueError("If using all scalar values, you must pass an index")
668
+
669
+ if have_series:
670
+ index = union_indexes(indexes)
671
+ elif have_dicts:
672
+ index = union_indexes(indexes, sort=False)
673
+
674
+ if have_raw_arrays:
675
+ lengths = list(set(raw_lengths))
676
+ if len(lengths) > 1:
677
+ raise ValueError("All arrays must be of the same length")
678
+
679
+ if have_dicts:
680
+ raise ValueError(
681
+ "Mixing dicts with non-Series may lead to ambiguous ordering."
682
+ )
683
+
684
+ if have_series:
685
+ if lengths[0] != len(index):
686
+ msg = (
687
+ f"array length {lengths[0]} does not match index "
688
+ f"length {len(index)}"
689
+ )
690
+ raise ValueError(msg)
691
+ else:
692
+ index = default_index(lengths[0])
693
+
694
+ return ensure_index(index)
695
+
696
+
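The main rules enforced above, demonstrated via the public constructor (the error strings match the ones raised in this function):

import pandas as pd

try:
    pd.DataFrame({"a": 1, "b": 2})  # all scalars, no index
except ValueError as err:
    print(err)  # If using all scalar values, you must pass an index

try:
    pd.DataFrame({"a": [1, 2], "b": [1, 2, 3]})  # unequal raw arrays
except ValueError as err:
    print(err)  # All arrays must be of the same length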
697
+ def reorder_arrays(
698
+ arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int
699
+ ) -> tuple[list[ArrayLike], Index]:
700
+ """
701
+ Pre-emptively (cheaply) reindex arrays with new columns.
702
+ """
703
+ # reorder according to the columns
704
+ if columns is not None:
705
+ if not columns.equals(arr_columns):
706
+ # if they are equal, there is nothing to do
707
+ new_arrays: list[ArrayLike] = []
708
+ indexer = arr_columns.get_indexer(columns)
709
+ for i, k in enumerate(indexer):
710
+ if k == -1:
711
+ # by convention default is all-NaN object dtype
712
+ arr = np.empty(length, dtype=object)
713
+ arr.fill(np.nan)
714
+ else:
715
+ arr = arrays[k]
716
+ new_arrays.append(arr)
717
+
718
+ arrays = new_arrays
719
+ arr_columns = columns
720
+
721
+ return arrays, arr_columns
722
+
723
+
724
+ def _get_names_from_index(data) -> Index:
725
+ has_some_name = any(getattr(s, "name", None) is not None for s in data)
726
+ if not has_some_name:
727
+ return default_index(len(data))
728
+
729
+ index: list[Hashable] = list(range(len(data)))
730
+ count = 0
731
+ for i, s in enumerate(data):
732
+ n = getattr(s, "name", None)
733
+ if n is not None:
734
+ index[i] = n
735
+ else:
736
+ index[i] = f"Unnamed {count}"
737
+ count += 1
738
+
739
+ return Index(index)
740
+
741
+
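A short illustration (not pandas source) of the naming rule above, via a list of partially named Series used as rows:

import pandas as pd

rows = [pd.Series([1]), pd.Series([2], name="b")]
print(pd.DataFrame(rows).index)  # Index(['Unnamed 0', 'b'], dtype='object')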
742
+ def _get_axes(
743
+ N: int, K: int, index: Index | None, columns: Index | None
744
+ ) -> tuple[Index, Index]:
745
+ # helper to create the axes as indexes
746
+ # return axes or defaults
747
+
748
+ if index is None:
749
+ index = default_index(N)
750
+ else:
751
+ index = ensure_index(index)
752
+
753
+ if columns is None:
754
+ columns = default_index(K)
755
+ else:
756
+ columns = ensure_index(columns)
757
+ return index, columns
758
+
759
+
760
+ def dataclasses_to_dicts(data):
761
+ """
762
+ Converts a list of dataclass instances to a list of dictionaries.
763
+
764
+ Parameters
765
+ ----------
766
+ data : List[Type[dataclass]]
767
+
768
+ Returns
769
+ --------
770
+ list_dict : List[dict]
771
+
772
+ Examples
773
+ --------
774
+ >>> from dataclasses import dataclass
775
+ >>> @dataclass
776
+ ... class Point:
777
+ ... x: int
778
+ ... y: int
779
+
780
+ >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)])
781
+ [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]
782
+
783
+ """
784
+ from dataclasses import asdict
785
+
786
+ return list(map(asdict, data))
787
+
788
+
789
+ # ---------------------------------------------------------------------
790
+ # Conversion of Inputs to Arrays
791
+
792
+
793
+ def to_arrays(
794
+ data, columns: Index | None, dtype: DtypeObj | None = None
795
+ ) -> tuple[list[ArrayLike], Index]:
796
+ """
797
+ Return list of arrays, columns.
798
+
799
+ Returns
800
+ -------
801
+ list[ArrayLike]
802
+ These will become columns in a DataFrame.
803
+ Index
804
+ This will become frame.columns.
805
+
806
+ Notes
807
+ -----
808
+ Ensures that len(result_arrays) == len(result_index).
809
+ """
810
+
811
+ if not len(data):
812
+ if isinstance(data, np.ndarray):
813
+ if data.dtype.names is not None:
814
+ # i.e. numpy structured array
815
+ columns = ensure_index(data.dtype.names)
816
+ arrays = [data[name] for name in columns]
817
+
818
+ if len(data) == 0:
819
+ # GH#42456 the indexing above results in list of 2D ndarrays
820
+ # TODO: is that an issue with numpy?
821
+ for i, arr in enumerate(arrays):
822
+ if arr.ndim == 2:
823
+ arrays[i] = arr[:, 0]
824
+
825
+ return arrays, columns
826
+ return [], ensure_index([])
827
+
828
+ elif isinstance(data, np.ndarray) and data.dtype.names is not None:
829
+ # e.g. recarray
830
+ columns = Index(list(data.dtype.names))
831
+ arrays = [data[k] for k in columns]
832
+ return arrays, columns
833
+
834
+ if isinstance(data[0], (list, tuple)):
835
+ arr = _list_to_arrays(data)
836
+ elif isinstance(data[0], abc.Mapping):
837
+ arr, columns = _list_of_dict_to_arrays(data, columns)
838
+ elif isinstance(data[0], ABCSeries):
839
+ arr, columns = _list_of_series_to_arrays(data, columns)
840
+ else:
841
+ # last ditch effort
842
+ data = [tuple(x) for x in data]
843
+ arr = _list_to_arrays(data)
844
+
845
+ content, columns = _finalize_columns_and_data(arr, columns, dtype)
846
+ return content, columns
847
+
848
+
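The dispatch above, exercised through record-oriented public constructor calls (illustrative):

import pandas as pd

print(pd.DataFrame([(1, 2), (3, 4)]).shape)        # list of tuples -> (2, 2)
print(pd.DataFrame([{"a": 1}, {"a": 2}]).columns)  # list of dicts -> Index(['a'], dtype='object')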
849
+ def _list_to_arrays(data: list[tuple | list]) -> np.ndarray:
850
+ # Returned np.ndarray has ndim = 2
851
 +        # Note: we already check len(data) > 0 before getting here
852
+ if isinstance(data[0], tuple):
853
+ content = lib.to_object_array_tuples(data)
854
+ else:
855
+ # list of lists
856
+ content = lib.to_object_array(data)
857
+ return content
858
+
859
+
860
+ def _list_of_series_to_arrays(
861
+ data: list,
862
+ columns: Index | None,
863
+ ) -> tuple[np.ndarray, Index]:
864
+ # returned np.ndarray has ndim == 2
865
+
866
+ if columns is None:
867
+ # We know pass_data is non-empty because data[0] is a Series
868
+ pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
869
+ columns = get_objs_combined_axis(pass_data, sort=False)
870
+
871
+ indexer_cache: dict[int, np.ndarray] = {}
872
+
873
+ aligned_values = []
874
+ for s in data:
875
+ index = getattr(s, "index", None)
876
+ if index is None:
877
+ index = default_index(len(s))
878
+
879
+ if id(index) in indexer_cache:
880
+ indexer = indexer_cache[id(index)]
881
+ else:
882
+ indexer = indexer_cache[id(index)] = index.get_indexer(columns)
883
+
884
+ values = extract_array(s, extract_numpy=True)
885
+ aligned_values.append(algorithms.take_nd(values, indexer))
886
+
887
+ content = np.vstack(aligned_values)
888
+ return content, columns
889
+
890
+
891
+ def _list_of_dict_to_arrays(
892
+ data: list[dict],
893
+ columns: Index | None,
894
+ ) -> tuple[np.ndarray, Index]:
895
+ """
896
+ Convert list of dicts to numpy arrays
897
+
898
+ if `columns` is not passed, column names are inferred from the records
899
+ - for OrderedDict and dicts, the column names match
900
+ the key insertion-order from the first record to the last.
901
+ - For other kinds of dict-likes, the keys are lexically sorted.
902
+
903
+ Parameters
904
+ ----------
905
+ data : iterable
906
+ collection of records (OrderedDict, dict)
907
 +        columns : iterable or None
908
+
909
+ Returns
910
+ -------
911
+ content : np.ndarray[object, ndim=2]
912
+ columns : Index
913
+ """
914
+ if columns is None:
915
+ gen = (list(x.keys()) for x in data)
916
+ sort = not any(isinstance(d, dict) for d in data)
917
+ pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort)
918
+ columns = ensure_index(pre_cols)
919
+
920
+ # assure that they are of the base dict class and not of derived
921
+ # classes
922
+ data = [d if type(d) is dict else dict(d) for d in data] # noqa: E721
923
+
924
+ content = lib.dicts_to_array(data, list(columns))
925
+ return content, columns
926
+
927
+
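Column inference for dict records as described in the docstring above (a sketch; plain dicts preserve insertion order, so no sorting happens here):

import pandas as pd

records = [{"a": 1, "b": 2}, {"b": 3, "c": 4}]
print(pd.DataFrame(records).columns)  # Index(['a', 'b', 'c'], dtype='object')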
928
+ def _finalize_columns_and_data(
929
+ content: np.ndarray, # ndim == 2
930
+ columns: Index | None,
931
+ dtype: DtypeObj | None,
932
+ ) -> tuple[list[ArrayLike], Index]:
933
+ """
934
+ Ensure we have valid columns, cast object dtypes if possible.
935
+ """
936
+ contents = list(content.T)
937
+
938
+ try:
939
+ columns = _validate_or_indexify_columns(contents, columns)
940
+ except AssertionError as err:
941
+ # GH#26429 do not raise user-facing AssertionError
942
+ raise ValueError(err) from err
943
+
944
+ if len(contents) and contents[0].dtype == np.object_:
945
+ contents = convert_object_array(contents, dtype=dtype)
946
+
947
+ return contents, columns
948
+
949
+
950
+ def _validate_or_indexify_columns(
951
+ content: list[np.ndarray], columns: Index | None
952
+ ) -> Index:
953
+ """
954
+ If columns is None, make numbers as column names; Otherwise, validate that
955
+ columns have valid length.
956
+
957
+ Parameters
958
+ ----------
959
+ content : list of np.ndarrays
960
+ columns : Index or None
961
+
962
+ Returns
963
+ -------
964
+ Index
965
+ If columns is None, assign positional column index value as columns.
966
+
967
+ Raises
968
+ ------
969
 +    1. AssertionError when content is not composed of lists of lists, and
970
+ length of columns is not equal to length of content.
971
+ 2. ValueError when content is list of lists, but length of each sub-list
972
+ is not equal
973
+ 3. ValueError when content is list of lists, but length of sub-list is
974
+ not equal to length of content
975
+ """
976
+ if columns is None:
977
+ columns = default_index(len(content))
978
+ else:
979
+ # Add mask for data which is composed of list of lists
980
+ is_mi_list = isinstance(columns, list) and all(
981
+ isinstance(col, list) for col in columns
982
+ )
983
+
984
+ if not is_mi_list and len(columns) != len(content): # pragma: no cover
985
+ # caller's responsibility to check for this...
986
+ raise AssertionError(
987
+ f"{len(columns)} columns passed, passed data had "
988
+ f"{len(content)} columns"
989
+ )
990
+ if is_mi_list:
991
+ # check if nested list column, length of each sub-list should be equal
992
+ if len({len(col) for col in columns}) > 1:
993
+ raise ValueError(
994
+ "Length of columns passed for MultiIndex columns is different"
995
+ )
996
+
997
+ # if columns is not empty and length of sublist is not equal to content
998
+ if columns and len(columns[0]) != len(content):
999
+ raise ValueError(
1000
+ f"{len(columns[0])} columns passed, passed data had "
1001
+ f"{len(content)} columns"
1002
+ )
1003
+ return columns
1004
+
1005
+
1006
+ def convert_object_array(
1007
+ content: list[npt.NDArray[np.object_]],
1008
+ dtype: DtypeObj | None,
1009
+ dtype_backend: str = "numpy",
1010
+ coerce_float: bool = False,
1011
+ ) -> list[ArrayLike]:
1012
+ """
1013
+ Internal function to convert object array.
1014
+
1015
+ Parameters
1016
+ ----------
1017
+ content: List[np.ndarray]
1018
+ dtype: np.dtype or ExtensionDtype
1019
+ dtype_backend: Controls if nullable/pyarrow dtypes are returned.
1020
+ coerce_float: Cast floats that are integers to int.
1021
+
1022
+ Returns
1023
+ -------
1024
+ List[ArrayLike]
1025
+ """
1026
+ # provide soft conversion of object dtypes
1027
+
1028
+ def convert(arr):
1029
+ if dtype != np.dtype("O"):
1030
+ arr = lib.maybe_convert_objects(
1031
+ arr,
1032
+ try_float=coerce_float,
1033
+ convert_to_nullable_dtype=dtype_backend != "numpy",
1034
+ )
1035
+ # Notes on cases that get here 2023-02-15
1036
+ # 1) we DO get here when arr is all Timestamps and dtype=None
1037
+ # 2) disabling this doesn't break the world, so this must be
1038
+ # getting caught at a higher level
1039
 +        # 3) passing convert_non_numeric to maybe_convert_objects gets this right
1040
+ # 4) convert_non_numeric?
1041
+
1042
+ if dtype is None:
1043
+ if arr.dtype == np.dtype("O"):
1044
+ # i.e. maybe_convert_objects didn't convert
1045
+ arr = maybe_infer_to_datetimelike(arr)
1046
+ if dtype_backend != "numpy" and arr.dtype == np.dtype("O"):
1047
+ new_dtype = StringDtype()
1048
+ arr_cls = new_dtype.construct_array_type()
1049
+ arr = arr_cls._from_sequence(arr, dtype=new_dtype)
1050
+ elif dtype_backend != "numpy" and isinstance(arr, np.ndarray):
1051
+ if arr.dtype.kind in "iufb":
1052
+ arr = pd_array(arr, copy=False)
1053
+
1054
+ elif isinstance(dtype, ExtensionDtype):
1055
+ # TODO: test(s) that get here
1056
+ # TODO: try to de-duplicate this convert function with
1057
+ # core.construction functions
1058
+ cls = dtype.construct_array_type()
1059
+ arr = cls._from_sequence(arr, dtype=dtype, copy=False)
1060
+ elif dtype.kind in "mM":
1061
+ # This restriction is harmless bc these are the only cases
1062
+ # where maybe_cast_to_datetime is not a no-op.
1063
+ # Here we know:
1064
+ # 1) dtype.kind in "mM" and
1065
+ # 2) arr is either object or numeric dtype
1066
+ arr = maybe_cast_to_datetime(arr, dtype)
1067
+
1068
+ return arr
1069
+
1070
+ arrays = [convert(arr) for arr in content]
1071
+
1072
+ return arrays
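A hedged example of the soft conversion convert() performs, via the list-of-rows constructor path that reaches this function: datetime-like objects are inferred to datetime64, while plain strings stay object:

import pandas as pd

dt_rows = [[pd.Timestamp("2021-01-01")], [pd.Timestamp("2021-01-02")]]
print(pd.DataFrame(dt_rows).dtypes[0])         # datetime64[ns]
print(pd.DataFrame([["x"], ["y"]]).dtypes[0])  # object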
venv/lib/python3.10/site-packages/pandas/core/internals/managers.py ADDED
@@ -0,0 +1,2375 @@
1
+ from __future__ import annotations
2
+
3
+ from collections.abc import (
4
+ Hashable,
5
+ Sequence,
6
+ )
7
+ import itertools
8
+ from typing import (
9
+ TYPE_CHECKING,
10
+ Callable,
11
+ Literal,
12
+ cast,
13
+ )
14
+ import warnings
15
+
16
+ import numpy as np
17
+
18
+ from pandas._config import (
19
+ using_copy_on_write,
20
+ warn_copy_on_write,
21
+ )
22
+
23
+ from pandas._libs import (
24
+ internals as libinternals,
25
+ lib,
26
+ )
27
+ from pandas._libs.internals import (
28
+ BlockPlacement,
29
+ BlockValuesRefs,
30
+ )
31
+ from pandas._libs.tslibs import Timestamp
32
+ from pandas.errors import PerformanceWarning
33
+ from pandas.util._decorators import cache_readonly
34
+ from pandas.util._exceptions import find_stack_level
35
+
36
+ from pandas.core.dtypes.cast import infer_dtype_from_scalar
37
+ from pandas.core.dtypes.common import (
38
+ ensure_platform_int,
39
+ is_1d_only_ea_dtype,
40
+ is_list_like,
41
+ )
42
+ from pandas.core.dtypes.dtypes import (
43
+ DatetimeTZDtype,
44
+ ExtensionDtype,
45
+ )
46
+ from pandas.core.dtypes.generic import (
47
+ ABCDataFrame,
48
+ ABCSeries,
49
+ )
50
+ from pandas.core.dtypes.missing import (
51
+ array_equals,
52
+ isna,
53
+ )
54
+
55
+ import pandas.core.algorithms as algos
56
+ from pandas.core.arrays import (
57
+ ArrowExtensionArray,
58
+ ArrowStringArray,
59
+ DatetimeArray,
60
+ )
61
+ from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
62
+ from pandas.core.construction import (
63
+ ensure_wrapped_if_datetimelike,
64
+ extract_array,
65
+ )
66
+ from pandas.core.indexers import maybe_convert_indices
67
+ from pandas.core.indexes.api import (
68
+ Index,
69
+ ensure_index,
70
+ )
71
+ from pandas.core.internals.base import (
72
+ DataManager,
73
+ SingleDataManager,
74
+ ensure_np_dtype,
75
+ interleaved_dtype,
76
+ )
77
+ from pandas.core.internals.blocks import (
78
+ COW_WARNING_GENERAL_MSG,
79
+ COW_WARNING_SETITEM_MSG,
80
+ Block,
81
+ NumpyBlock,
82
+ ensure_block_shape,
83
+ extend_blocks,
84
+ get_block_type,
85
+ maybe_coerce_values,
86
+ new_block,
87
+ new_block_2d,
88
+ )
89
+ from pandas.core.internals.ops import (
90
+ blockwise_all,
91
+ operate_blockwise,
92
+ )
93
+
94
+ if TYPE_CHECKING:
95
+ from pandas._typing import (
96
+ ArrayLike,
97
+ AxisInt,
98
+ DtypeObj,
99
+ QuantileInterpolation,
100
+ Self,
101
+ Shape,
102
+ npt,
103
+ )
104
+
105
+ from pandas.api.extensions import ExtensionArray
106
+
107
+
108
+ class BaseBlockManager(DataManager):
109
+ """
110
+ Core internal data structure to implement DataFrame, Series, etc.
111
+
112
+ Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
113
+ lightweight blocked set of labeled data to be manipulated by the DataFrame
114
+ public API class
115
+
116
+ Attributes
117
+ ----------
118
+ shape
119
+ ndim
120
+ axes
121
+ values
122
+ items
123
+
124
+ Methods
125
+ -------
126
+ set_axis(axis, new_labels)
127
+ copy(deep=True)
128
+
129
+ get_dtypes
130
+
131
+ apply(func, axes, block_filter_fn)
132
+
133
+ get_bool_data
134
+ get_numeric_data
135
+
136
+ get_slice(slice_like, axis)
137
+ get(label)
138
+ iget(loc)
139
+
140
+ take(indexer, axis)
141
+ reindex_axis(new_labels, axis)
142
+ reindex_indexer(new_labels, indexer, axis)
143
+
144
+ delete(label)
145
+ insert(loc, label, value)
146
+ set(label, value)
147
+
148
+ Parameters
149
+ ----------
150
+ blocks: Sequence of Block
151
+ axes: Sequence of Index
152
+ verify_integrity: bool, default True
153
+
154
+ Notes
155
+ -----
156
+ This is *not* a public API class
157
+ """
158
+
159
+ __slots__ = ()
160
+
161
+ _blknos: npt.NDArray[np.intp]
162
+ _blklocs: npt.NDArray[np.intp]
163
+ blocks: tuple[Block, ...]
164
+ axes: list[Index]
165
+
166
+ @property
167
+ def ndim(self) -> int:
168
+ raise NotImplementedError
169
+
170
+ _known_consolidated: bool
171
+ _is_consolidated: bool
172
+
173
+ def __init__(self, blocks, axes, verify_integrity: bool = True) -> None:
174
+ raise NotImplementedError
175
+
176
+ @classmethod
177
+ def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self:
178
+ raise NotImplementedError
179
+
180
+ @property
181
+ def blknos(self) -> npt.NDArray[np.intp]:
182
+ """
183
+ Suppose we want to find the array corresponding to our i'th column.
184
+
185
+ blknos[i] identifies the block from self.blocks that contains this column.
186
+
187
+ blklocs[i] identifies the column of interest within
188
+ self.blocks[self.blknos[i]]
189
+ """
190
+ if self._blknos is None:
191
+ # Note: these can be altered by other BlockManager methods.
192
+ self._rebuild_blknos_and_blklocs()
193
+
194
+ return self._blknos
195
+
196
+ @property
197
+ def blklocs(self) -> npt.NDArray[np.intp]:
198
+ """
199
+ See blknos.__doc__
200
+ """
201
+ if self._blklocs is None:
202
+ # Note: these can be altered by other BlockManager methods.
203
+ self._rebuild_blknos_and_blklocs()
204
+
205
+ return self._blklocs
206
+
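# Illustrative sketch (editor's addition, not pandas source): for a manager
# whose blocks are [float64 block holding columns 0 and 2, int64 block
# holding column 1], the mapping is
#   blknos  == [0, 1, 0]   # which block each column lives in
#   blklocs == [0, 0, 1]   # position of the column inside that block
# so column 2 is found at self.blocks[0].values[1].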
207
+ def make_empty(self, axes=None) -> Self:
208
+ """return an empty BlockManager with the items axis of len 0"""
209
+ if axes is None:
210
+ axes = [Index([])] + self.axes[1:]
211
+
212
+ # preserve dtype if possible
213
+ if self.ndim == 1:
214
+ assert isinstance(self, SingleBlockManager) # for mypy
215
+ blk = self.blocks[0]
216
+ arr = blk.values[:0]
217
+ bp = BlockPlacement(slice(0, 0))
218
+ nb = blk.make_block_same_class(arr, placement=bp)
219
+ blocks = [nb]
220
+ else:
221
+ blocks = []
222
+ return type(self).from_blocks(blocks, axes)
223
+
224
+ def __nonzero__(self) -> bool:
225
+ return True
226
+
227
+ # Python3 compat
228
+ __bool__ = __nonzero__
229
+
230
+ def _normalize_axis(self, axis: AxisInt) -> int:
231
+ # switch axis to follow BlockManager logic
232
+ if self.ndim == 2:
233
+ axis = 1 if axis == 0 else 0
234
+ return axis
235
+
236
+ def set_axis(self, axis: AxisInt, new_labels: Index) -> None:
237
+ # Caller is responsible for ensuring we have an Index object.
238
+ self._validate_set_axis(axis, new_labels)
239
+ self.axes[axis] = new_labels
240
+
241
+ @property
242
+ def is_single_block(self) -> bool:
243
+ # Assumes we are 2D; overridden by SingleBlockManager
244
+ return len(self.blocks) == 1
245
+
246
+ @property
247
+ def items(self) -> Index:
248
+ return self.axes[0]
249
+
250
+ def _has_no_reference(self, i: int) -> bool:
251
+ """
252
+ Check for column `i` if it has references.
253
+ (whether it references another array or is itself being referenced)
254
+ Returns True if the column has no references.
255
+ """
256
+ blkno = self.blknos[i]
257
+ return self._has_no_reference_block(blkno)
258
+
259
+ def _has_no_reference_block(self, blkno: int) -> bool:
260
+ """
261
+ Check for block `i` if it has references.
262
+ (whether it references another array or is itself being referenced)
263
+ Returns True if the block has no references.
264
+ """
265
+ return not self.blocks[blkno].refs.has_reference()
266
+
267
+ def add_references(self, mgr: BaseBlockManager) -> None:
268
+ """
269
+ Adds the references from one manager to another. We assume that both
270
+ managers have the same block structure.
271
+ """
272
+ if len(self.blocks) != len(mgr.blocks):
273
+ # If block structure changes, then we made a copy
274
+ return
275
+ for i, blk in enumerate(self.blocks):
276
+ blk.refs = mgr.blocks[i].refs
277
+ blk.refs.add_reference(blk)
278
+
279
+ def references_same_values(self, mgr: BaseBlockManager, blkno: int) -> bool:
280
+ """
281
+ Checks if two blocks from two different block managers reference the
282
+ same underlying values.
283
+ """
284
+ blk = self.blocks[blkno]
285
+ return any(blk is ref() for ref in mgr.blocks[blkno].refs.referenced_blocks)
286
+
287
+ def get_dtypes(self) -> npt.NDArray[np.object_]:
288
+ dtypes = np.array([blk.dtype for blk in self.blocks], dtype=object)
289
+ return dtypes.take(self.blknos)
290
+
291
+ @property
292
+ def arrays(self) -> list[ArrayLike]:
293
+ """
294
+ Quick access to the backing arrays of the Blocks.
295
+
296
+ Only for compatibility with ArrayManager for testing convenience.
297
+ Not to be used in actual code, and return value is not the same as the
298
+ ArrayManager method (list of 1D arrays vs iterator of 2D ndarrays / 1D EAs).
299
+
300
+ Warning! The returned arrays don't handle Copy-on-Write, so this should
301
+ be used with caution (only in read-mode).
302
+ """
303
+ return [blk.values for blk in self.blocks]
304
+
305
+ def __repr__(self) -> str:
306
+ output = type(self).__name__
307
+ for i, ax in enumerate(self.axes):
308
+ if i == 0:
309
+ output += f"\nItems: {ax}"
310
+ else:
311
+ output += f"\nAxis {i}: {ax}"
312
+
313
+ for block in self.blocks:
314
+ output += f"\n{block}"
315
+ return output
316
+
317
+ def apply(
318
+ self,
319
+ f,
320
+ align_keys: list[str] | None = None,
321
+ **kwargs,
322
+ ) -> Self:
323
+ """
324
+ Iterate over the blocks, collect and create a new BlockManager.
325
+
326
+ Parameters
327
+ ----------
328
+ f : str or callable
329
+ Name of the Block method to apply.
330
+ align_keys: List[str] or None, default None
331
+ **kwargs
332
+ Keywords to pass to `f`
333
+
334
+ Returns
335
+ -------
336
+ BlockManager
337
+ """
338
+ assert "filter" not in kwargs
339
+
340
+ align_keys = align_keys or []
341
+ result_blocks: list[Block] = []
342
+ # fillna: Series/DataFrame is responsible for making sure value is aligned
343
+
344
+ aligned_args = {k: kwargs[k] for k in align_keys}
345
+
346
+ for b in self.blocks:
347
+ if aligned_args:
348
+ for k, obj in aligned_args.items():
349
+ if isinstance(obj, (ABCSeries, ABCDataFrame)):
350
+ # The caller is responsible for ensuring that
351
+ # obj.axes[-1].equals(self.items)
352
+ if obj.ndim == 1:
353
+ kwargs[k] = obj.iloc[b.mgr_locs.indexer]._values
354
+ else:
355
+ kwargs[k] = obj.iloc[:, b.mgr_locs.indexer]._values
356
+ else:
357
+ # otherwise we have an ndarray
358
+ kwargs[k] = obj[b.mgr_locs.indexer]
359
+
360
+ if callable(f):
361
+ applied = b.apply(f, **kwargs)
362
+ else:
363
+ applied = getattr(b, f)(**kwargs)
364
+ result_blocks = extend_blocks(applied, result_blocks)
365
+
366
+ out = type(self).from_blocks(result_blocks, self.axes)
367
+ return out
368
+
369
+ # Alias so we can share code with ArrayManager
370
+ apply_with_block = apply
371
+
372
+ def setitem(self, indexer, value, warn: bool = True) -> Self:
373
+ """
374
+ Set values with indexer.
375
+
376
+ For SingleBlockManager, this backs s[indexer] = value
377
+ """
378
+ if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim:
379
+ raise ValueError(f"Cannot set values with ndim > {self.ndim}")
380
+
381
+ if warn and warn_copy_on_write() and not self._has_no_reference(0):
382
+ warnings.warn(
383
+ COW_WARNING_GENERAL_MSG,
384
+ FutureWarning,
385
+ stacklevel=find_stack_level(),
386
+ )
387
+
388
+ elif using_copy_on_write() and not self._has_no_reference(0):
389
+ # this method is only called if there is a single block -> hardcoded 0
390
+ # Split blocks to only copy the columns we want to modify
391
+ if self.ndim == 2 and isinstance(indexer, tuple):
392
+ blk_loc = self.blklocs[indexer[1]]
393
+ if is_list_like(blk_loc) and blk_loc.ndim == 2:
394
+ blk_loc = np.squeeze(blk_loc, axis=0)
395
+ elif not is_list_like(blk_loc):
396
+ # Keep dimension and copy data later
397
+ blk_loc = [blk_loc] # type: ignore[assignment]
398
+ if len(blk_loc) == 0:
399
+ return self.copy(deep=False)
400
+
401
+ values = self.blocks[0].values
402
+ if values.ndim == 2:
403
+ values = values[blk_loc]
404
+ # "T" has no attribute "_iset_split_block"
405
+ self._iset_split_block( # type: ignore[attr-defined]
406
+ 0, blk_loc, values
407
+ )
408
+ # first block equals values
409
+ self.blocks[0].setitem((indexer[0], np.arange(len(blk_loc))), value)
410
+ return self
411
+ # No need to split if we either set all columns or on a single block
412
+ # manager
413
+ self = self.copy()
414
+
415
+ return self.apply("setitem", indexer=indexer, value=value)
416
+
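# Sketch (editor's addition) of the Copy-on-Write split above: when two
# frames share a block and only some columns are written, just the touched
# columns are copied, e.g.
#   df2 = df1[:]           # shares blocks with df1 under CoW
#   df2.iloc[0, 0] = 99    # copies only that column; df1 is unchanged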
417
+ def diff(self, n: int) -> Self:
418
+ # only reached with self.ndim == 2
419
+ return self.apply("diff", n=n)
420
+
421
+ def astype(self, dtype, copy: bool | None = False, errors: str = "raise") -> Self:
422
+ if copy is None:
423
+ if using_copy_on_write():
424
+ copy = False
425
+ else:
426
+ copy = True
427
+ elif using_copy_on_write():
428
+ copy = False
429
+
430
+ return self.apply(
431
+ "astype",
432
+ dtype=dtype,
433
+ copy=copy,
434
+ errors=errors,
435
+ using_cow=using_copy_on_write(),
436
+ )
437
+
438
+ def convert(self, copy: bool | None) -> Self:
439
+ if copy is None:
440
+ if using_copy_on_write():
441
+ copy = False
442
+ else:
443
+ copy = True
444
+ elif using_copy_on_write():
445
+ copy = False
446
+
447
+ return self.apply("convert", copy=copy, using_cow=using_copy_on_write())
448
+
449
+ def convert_dtypes(self, **kwargs):
450
+ if using_copy_on_write():
451
+ copy = False
452
+ else:
453
+ copy = True
454
+
455
+ return self.apply(
456
+ "convert_dtypes", copy=copy, using_cow=using_copy_on_write(), **kwargs
457
+ )
458
+
459
+ def get_values_for_csv(
460
+ self, *, float_format, date_format, decimal, na_rep: str = "nan", quoting=None
461
+ ) -> Self:
462
+ """
463
+ Convert values to native types (strings / python objects) that are used
464
+ in formatting (repr / csv).
465
+ """
466
+ return self.apply(
467
+ "get_values_for_csv",
468
+ na_rep=na_rep,
469
+ quoting=quoting,
470
+ float_format=float_format,
471
+ date_format=date_format,
472
+ decimal=decimal,
473
+ )
474
+
475
+ @property
476
+ def any_extension_types(self) -> bool:
477
+ """Whether any of the blocks in this manager are extension blocks"""
478
+ return any(block.is_extension for block in self.blocks)
479
+
480
+ @property
481
+ def is_view(self) -> bool:
482
+ """return a boolean if we are a single block and are a view"""
483
+ if len(self.blocks) == 1:
484
+ return self.blocks[0].is_view
485
+
486
+ # It is technically possible to figure out which blocks are views
487
+ # e.g. [ b.values.base is not None for b in self.blocks ]
488
+ # but then we have the case of possibly some blocks being a view
489
+ # and some blocks not. setting in theory is possible on the non-view
490
+ # blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
491
+ # complicated
492
+
493
+ return False
494
+
495
+ def _get_data_subset(self, predicate: Callable) -> Self:
496
+ blocks = [blk for blk in self.blocks if predicate(blk.values)]
497
+ return self._combine(blocks)
498
+
499
+ def get_bool_data(self) -> Self:
500
+ """
501
+ Select blocks that are bool-dtype and columns from object-dtype blocks
502
+ that are all-bool.
503
+ """
504
+
505
+ new_blocks = []
506
+
507
+ for blk in self.blocks:
508
+ if blk.dtype == bool:
509
+ new_blocks.append(blk)
510
+
511
+ elif blk.is_object:
512
+ nbs = blk._split()
513
+ new_blocks.extend(nb for nb in nbs if nb.is_bool)
514
+
515
+ return self._combine(new_blocks)
516
+
517
+ def get_numeric_data(self) -> Self:
518
+ numeric_blocks = [blk for blk in self.blocks if blk.is_numeric]
519
+ if len(numeric_blocks) == len(self.blocks):
520
+ # Avoid somewhat expensive _combine
521
+ return self
522
+ return self._combine(numeric_blocks)
523
+
524
+ def _combine(self, blocks: list[Block], index: Index | None = None) -> Self:
525
+ """return a new manager with the blocks"""
526
+ if len(blocks) == 0:
527
+ if self.ndim == 2:
528
+ # retain our own Index dtype
529
+ if index is not None:
530
+ axes = [self.items[:0], index]
531
+ else:
532
+ axes = [self.items[:0]] + self.axes[1:]
533
+ return self.make_empty(axes)
534
+ return self.make_empty()
535
+
536
+ # FIXME: optimization potential
537
+ indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
538
+ inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])
539
+
540
+ new_blocks: list[Block] = []
541
+ for b in blocks:
542
+ nb = b.copy(deep=False)
543
+ nb.mgr_locs = BlockPlacement(inv_indexer[nb.mgr_locs.indexer])
544
+ new_blocks.append(nb)
545
+
546
+ axes = list(self.axes)
547
+ if index is not None:
548
+ axes[-1] = index
549
+ axes[0] = self.items.take(indexer)
550
+
551
+ return type(self).from_blocks(new_blocks, axes)
552
+
553
+ @property
554
+ def nblocks(self) -> int:
555
+ return len(self.blocks)
556
+
557
+ def copy(self, deep: bool | None | Literal["all"] = True) -> Self:
558
+ """
559
+ Make deep or shallow copy of BlockManager
560
+
561
+ Parameters
562
+ ----------
563
+ deep : bool, string or None, default True
564
+ If False or None, return a shallow copy (do not copy data)
565
+ If 'all', copy data and a deep copy of the index
566
+
567
+ Returns
568
+ -------
569
+ BlockManager
570
+ """
571
+ if deep is None:
572
+ if using_copy_on_write():
573
+ # use shallow copy
574
+ deep = False
575
+ else:
576
+ # preserve deep copy for BlockManager with copy=None
577
+ deep = True
578
+
579
+ # this preserves the notion of view copying of axes
580
+ if deep:
581
+ # hit in e.g. tests.io.json.test_pandas
582
+
583
+ def copy_func(ax):
584
+ return ax.copy(deep=True) if deep == "all" else ax.view()
585
+
586
+ new_axes = [copy_func(ax) for ax in self.axes]
587
+ else:
588
+ if using_copy_on_write():
589
+ new_axes = [ax.view() for ax in self.axes]
590
+ else:
591
+ new_axes = list(self.axes)
592
+
593
+ res = self.apply("copy", deep=deep)
594
+ res.axes = new_axes
595
+
596
+ if self.ndim > 1:
597
+ # Avoid needing to re-compute these
598
+ blknos = self._blknos
599
+ if blknos is not None:
600
+ res._blknos = blknos.copy()
601
+ res._blklocs = self._blklocs.copy()
602
+
603
+ if deep:
604
+ res._consolidate_inplace()
605
+ return res
606
+
607
+ def consolidate(self) -> Self:
608
+ """
609
+ Join together blocks having same dtype
610
+
611
+ Returns
612
+ -------
613
+ y : BlockManager
614
+ """
615
+ if self.is_consolidated():
616
+ return self
617
+
618
+ bm = type(self)(self.blocks, self.axes, verify_integrity=False)
619
+ bm._is_consolidated = False
620
+ bm._consolidate_inplace()
621
+ return bm
622
+
623
+ def reindex_indexer(
624
+ self,
625
+ new_axis: Index,
626
+ indexer: npt.NDArray[np.intp] | None,
627
+ axis: AxisInt,
628
+ fill_value=None,
629
+ allow_dups: bool = False,
630
+ copy: bool | None = True,
631
+ only_slice: bool = False,
632
+ *,
633
+ use_na_proxy: bool = False,
634
+ ) -> Self:
635
+ """
636
+ Parameters
637
+ ----------
638
+ new_axis : Index
639
+ indexer : ndarray[intp] or None
640
+ axis : int
641
+ fill_value : object, default None
642
+ allow_dups : bool, default False
643
+ copy : bool or None, default True
644
+ If None, regard as False to get shallow copy.
645
+ only_slice : bool, default False
646
+ Whether to take views, not copies, along columns.
647
+ use_na_proxy : bool, default False
648
+ Whether to use a np.void ndarray for newly introduced columns.
649
+
650
 +        The indexer is a pandas-style indexer in which -1 marks new labels
 +        to be filled with fill_value.
651
+ """
652
+ if copy is None:
653
+ if using_copy_on_write():
654
+ # use shallow copy
655
+ copy = False
656
+ else:
657
+ # preserve deep copy for BlockManager with copy=None
658
+ copy = True
659
+
660
+ if indexer is None:
661
+ if new_axis is self.axes[axis] and not copy:
662
+ return self
663
+
664
+ result = self.copy(deep=copy)
665
+ result.axes = list(self.axes)
666
+ result.axes[axis] = new_axis
667
+ return result
668
+
669
+ # Should be intp, but in some cases we get int64 on 32bit builds
670
+ assert isinstance(indexer, np.ndarray)
671
+
672
+ # some axes don't allow reindexing with dups
673
+ if not allow_dups:
674
+ self.axes[axis]._validate_can_reindex(indexer)
675
+
676
+ if axis >= self.ndim:
677
+ raise IndexError("Requested axis not found in manager")
678
+
679
+ if axis == 0:
680
+ new_blocks = self._slice_take_blocks_ax0(
681
+ indexer,
682
+ fill_value=fill_value,
683
+ only_slice=only_slice,
684
+ use_na_proxy=use_na_proxy,
685
+ )
686
+ else:
687
+ new_blocks = [
688
+ blk.take_nd(
689
+ indexer,
690
+ axis=1,
691
+ fill_value=(
692
+ fill_value if fill_value is not None else blk.fill_value
693
+ ),
694
+ )
695
+ for blk in self.blocks
696
+ ]
697
+
698
+ new_axes = list(self.axes)
699
+ new_axes[axis] = new_axis
700
+
701
+ new_mgr = type(self).from_blocks(new_blocks, new_axes)
702
+ if axis == 1:
703
+ # We can avoid the need to rebuild these
704
+ new_mgr._blknos = self.blknos.copy()
705
+ new_mgr._blklocs = self.blklocs.copy()
706
+ return new_mgr
707
+
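# Sketch (editor's addition): this method backs e.g.
#   df.reindex(columns=["b", "z"])
# where the computed indexer maps "b" to its old position and "z" to -1;
# the -1 entries become all-NA blocks via _make_na_block below.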
708
+ def _slice_take_blocks_ax0(
709
+ self,
710
+ slice_or_indexer: slice | np.ndarray,
711
+ fill_value=lib.no_default,
712
+ only_slice: bool = False,
713
+ *,
714
+ use_na_proxy: bool = False,
715
+ ref_inplace_op: bool = False,
716
+ ) -> list[Block]:
717
+ """
718
+ Slice/take blocks along axis=0.
719
+
720
+ Overloaded for SingleBlock
721
+
722
+ Parameters
723
+ ----------
724
+ slice_or_indexer : slice or np.ndarray[int64]
725
+ fill_value : scalar, default lib.no_default
726
+ only_slice : bool, default False
727
+ If True, we always return views on existing arrays, never copies.
728
+ This is used when called from ops.blockwise.operate_blockwise.
729
+ use_na_proxy : bool, default False
730
+ Whether to use a np.void ndarray for newly introduced columns.
731
+ ref_inplace_op: bool, default False
732
+ Don't track refs if True because we operate inplace
733
+
734
+ Returns
735
+ -------
736
+ new_blocks : list of Block
737
+ """
738
+ allow_fill = fill_value is not lib.no_default
739
+
740
+ sl_type, slobj, sllen = _preprocess_slice_or_indexer(
741
+ slice_or_indexer, self.shape[0], allow_fill=allow_fill
742
+ )
743
+
744
+ if self.is_single_block:
745
+ blk = self.blocks[0]
746
+
747
+ if sl_type == "slice":
748
+ # GH#32959 EABlock would fail since we can't make 0-width
749
+ # TODO(EA2D): special casing unnecessary with 2D EAs
750
+ if sllen == 0:
751
+ return []
752
+ bp = BlockPlacement(slice(0, sllen))
753
+ return [blk.getitem_block_columns(slobj, new_mgr_locs=bp)]
754
+ elif not allow_fill or self.ndim == 1:
755
+ if allow_fill and fill_value is None:
756
+ fill_value = blk.fill_value
757
+
758
+ if not allow_fill and only_slice:
759
+ # GH#33597 slice instead of take, so we get
760
+ # views instead of copies
761
+ blocks = [
762
+ blk.getitem_block_columns(
763
+ slice(ml, ml + 1),
764
+ new_mgr_locs=BlockPlacement(i),
765
+ ref_inplace_op=ref_inplace_op,
766
+ )
767
+ for i, ml in enumerate(slobj)
768
+ ]
769
+ return blocks
770
+ else:
771
+ bp = BlockPlacement(slice(0, sllen))
772
+ return [
773
+ blk.take_nd(
774
+ slobj,
775
+ axis=0,
776
+ new_mgr_locs=bp,
777
+ fill_value=fill_value,
778
+ )
779
+ ]
780
+
781
+ if sl_type == "slice":
782
+ blknos = self.blknos[slobj]
783
+ blklocs = self.blklocs[slobj]
784
+ else:
785
+ blknos = algos.take_nd(
786
+ self.blknos, slobj, fill_value=-1, allow_fill=allow_fill
787
+ )
788
+ blklocs = algos.take_nd(
789
+ self.blklocs, slobj, fill_value=-1, allow_fill=allow_fill
790
+ )
791
+
792
+ # When filling blknos, make sure blknos is updated before appending to
793
+ # blocks list, that way new blkno is exactly len(blocks).
794
+ blocks = []
795
+ group = not only_slice
796
+ for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group):
797
+ if blkno == -1:
798
+ # If we've got here, fill_value was not lib.no_default
799
+
800
+ blocks.append(
801
+ self._make_na_block(
802
+ placement=mgr_locs,
803
+ fill_value=fill_value,
804
+ use_na_proxy=use_na_proxy,
805
+ )
806
+ )
807
+ else:
808
+ blk = self.blocks[blkno]
809
+
810
+ # Otherwise, slicing along items axis is necessary.
811
+ if not blk._can_consolidate and not blk._validate_ndim:
812
+ # i.e. we dont go through here for DatetimeTZBlock
813
+ # A non-consolidatable block, it's easy, because there's
814
+ # only one item and each mgr loc is a copy of that single
815
+ # item.
816
+ deep = not (only_slice or using_copy_on_write())
817
+ for mgr_loc in mgr_locs:
818
+ newblk = blk.copy(deep=deep)
819
+ newblk.mgr_locs = BlockPlacement(slice(mgr_loc, mgr_loc + 1))
820
+ blocks.append(newblk)
821
+
822
+ else:
823
+ # GH#32779 to avoid the performance penalty of copying,
824
+ # we may try to only slice
825
+ taker = blklocs[mgr_locs.indexer]
826
+ max_len = max(len(mgr_locs), taker.max() + 1)
827
+ if only_slice or using_copy_on_write():
828
+ taker = lib.maybe_indices_to_slice(taker, max_len)
829
+
830
+ if isinstance(taker, slice):
831
+ nb = blk.getitem_block_columns(taker, new_mgr_locs=mgr_locs)
832
+ blocks.append(nb)
833
+ elif only_slice:
834
+ # GH#33597 slice instead of take, so we get
835
+ # views instead of copies
836
+ for i, ml in zip(taker, mgr_locs):
837
+ slc = slice(i, i + 1)
838
+ bp = BlockPlacement(ml)
839
+ nb = blk.getitem_block_columns(slc, new_mgr_locs=bp)
840
+ # We have np.shares_memory(nb.values, blk.values)
841
+ blocks.append(nb)
842
+ else:
843
+ nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs)
844
+ blocks.append(nb)
845
+
846
+ return blocks
847
+
848
+ def _make_na_block(
849
+ self, placement: BlockPlacement, fill_value=None, use_na_proxy: bool = False
850
+ ) -> Block:
851
+ # Note: we only get here with self.ndim == 2
852
+
853
+ if use_na_proxy:
854
+ assert fill_value is None
855
+ shape = (len(placement), self.shape[1])
856
+ vals = np.empty(shape, dtype=np.void)
857
+ nb = NumpyBlock(vals, placement, ndim=2)
858
+ return nb
859
+
860
+ if fill_value is None:
861
+ fill_value = np.nan
862
+
863
+ shape = (len(placement), self.shape[1])
864
+
865
+ dtype, fill_value = infer_dtype_from_scalar(fill_value)
866
+ block_values = make_na_array(dtype, shape, fill_value)
867
+ return new_block_2d(block_values, placement=placement)
868
+
869
+ def take(
870
+ self,
871
+ indexer: npt.NDArray[np.intp],
872
+ axis: AxisInt = 1,
873
+ verify: bool = True,
874
+ ) -> Self:
875
+ """
876
+ Take items along any axis.
877
+
878
+ indexer : np.ndarray[np.intp]
879
+ axis : int, default 1
880
+ verify : bool, default True
881
+ Check that all entries are between 0 and len(self) - 1, inclusive.
882
+ Pass verify=False if this check has been done by the caller.
883
+
884
+ Returns
885
+ -------
886
+ BlockManager
887
+ """
888
+ # Caller is responsible for ensuring indexer annotation is accurate
889
+
890
+ n = self.shape[axis]
891
+ indexer = maybe_convert_indices(indexer, n, verify=verify)
892
+
893
+ new_labels = self.axes[axis].take(indexer)
894
+ return self.reindex_indexer(
895
+ new_axis=new_labels,
896
+ indexer=indexer,
897
+ axis=axis,
898
+ allow_dups=True,
899
+ copy=None,
900
+ )
901
+
902
+
903
+ class BlockManager(libinternals.BlockManager, BaseBlockManager):
904
+ """
905
+ BaseBlockManager that holds 2D blocks.
906
+ """
907
+
908
+ ndim = 2
909
+
910
+ # ----------------------------------------------------------------
911
+ # Constructors
912
+
913
+ def __init__(
914
+ self,
915
+ blocks: Sequence[Block],
916
+ axes: Sequence[Index],
917
+ verify_integrity: bool = True,
918
+ ) -> None:
919
+ if verify_integrity:
920
+ # Assertion disabled for performance
921
+ # assert all(isinstance(x, Index) for x in axes)
922
+
923
+ for block in blocks:
924
+ if self.ndim != block.ndim:
925
+ raise AssertionError(
926
+ f"Number of Block dimensions ({block.ndim}) must equal "
927
+ f"number of axes ({self.ndim})"
928
+ )
929
+ # As of 2.0, the caller is responsible for ensuring that
930
 +            # DatetimeTZBlock with block.ndim == 2 has block.values.ndim == 2;
931
+ # previously there was a special check for fastparquet compat.
932
+
933
+ self._verify_integrity()
934
+
935
+ def _verify_integrity(self) -> None:
936
+ mgr_shape = self.shape
937
+ tot_items = sum(len(x.mgr_locs) for x in self.blocks)
938
+ for block in self.blocks:
939
+ if block.shape[1:] != mgr_shape[1:]:
940
+ raise_construction_error(tot_items, block.shape[1:], self.axes)
941
+ if len(self.items) != tot_items:
942
+ raise AssertionError(
943
+ "Number of manager items must equal union of "
944
+ f"block items\n# manager items: {len(self.items)}, # "
945
+ f"tot_items: {tot_items}"
946
+ )
947
+
948
+ @classmethod
949
+ def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self:
950
+ """
951
+ Constructor for BlockManager and SingleBlockManager with same signature.
952
+ """
953
+ return cls(blocks, axes, verify_integrity=False)
954
+
955
+ # ----------------------------------------------------------------
956
+ # Indexing
957
+
958
+ def fast_xs(self, loc: int) -> SingleBlockManager:
959
+ """
960
+ Return the array corresponding to `frame.iloc[loc]`.
961
+
962
+ Parameters
963
+ ----------
964
+ loc : int
965
+
966
+ Returns
967
+ -------
968
+ np.ndarray or ExtensionArray
969
+ """
970
+ if len(self.blocks) == 1:
971
+ # TODO: this could be wrong if blk.mgr_locs is not slice(None)-like;
972
+ # is this ruled out in the general case?
973
+ result = self.blocks[0].iget((slice(None), loc))
974
+ # in the case of a single block, the new block is a view
975
+ bp = BlockPlacement(slice(0, len(result)))
976
+ block = new_block(
977
+ result,
978
+ placement=bp,
979
+ ndim=1,
980
+ refs=self.blocks[0].refs,
981
+ )
982
+ return SingleBlockManager(block, self.axes[0])
983
+
984
+ dtype = interleaved_dtype([blk.dtype for blk in self.blocks])
985
+
986
+ n = len(self)
987
+
988
+ if isinstance(dtype, ExtensionDtype):
989
+ # TODO: use object dtype as workaround for non-performant
990
+ # EA.__setitem__ methods. (primarily ArrowExtensionArray.__setitem__
991
+ # when iteratively setting individual values)
992
+ # https://github.com/pandas-dev/pandas/pull/54508#issuecomment-1675827918
993
+ result = np.empty(n, dtype=object)
994
+ else:
995
+ result = np.empty(n, dtype=dtype)
996
+ result = ensure_wrapped_if_datetimelike(result)
997
+
998
+ for blk in self.blocks:
999
+ # Such assignment may incorrectly coerce NaT to None
1000
+ # result[blk.mgr_locs] = blk._slice((slice(None), loc))
1001
+ for i, rl in enumerate(blk.mgr_locs):
1002
+ result[rl] = blk.iget((i, loc))
1003
+
1004
+ if isinstance(dtype, ExtensionDtype):
1005
+ cls = dtype.construct_array_type()
1006
+ result = cls._from_sequence(result, dtype=dtype)
1007
+
1008
+ bp = BlockPlacement(slice(0, len(result)))
1009
+ block = new_block(result, placement=bp, ndim=1)
1010
+ return SingleBlockManager(block, self.axes[0])
1011
+
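# Sketch (editor's addition): fast_xs backs frame.iloc[i] on mixed dtypes,
# interleaving per-block values into one row array, e.g.
#   df = pd.DataFrame({"a": [1, 2], "b": [1.5, 2.5]})
#   df.iloc[0]  # float64, the common dtype from interleaved_dtype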
1012
+ def iget(self, i: int, track_ref: bool = True) -> SingleBlockManager:
1013
+ """
1014
+ Return the data as a SingleBlockManager.
1015
+ """
1016
+ block = self.blocks[self.blknos[i]]
1017
+ values = block.iget(self.blklocs[i])
1018
+
1019
+ # shortcut for select a single-dim from a 2-dim BM
1020
+ bp = BlockPlacement(slice(0, len(values)))
1021
+ nb = type(block)(
1022
+ values, placement=bp, ndim=1, refs=block.refs if track_ref else None
1023
+ )
1024
+ return SingleBlockManager(nb, self.axes[1])
1025
+
1026
+ def iget_values(self, i: int) -> ArrayLike:
1027
+ """
1028
+ Return the data for column i as the values (ndarray or ExtensionArray).
1029
+
1030
+ Warning! The returned array is a view but doesn't handle Copy-on-Write,
1031
+ so this should be used with caution.
1032
+ """
1033
+ # TODO(CoW) making the arrays read-only might make this safer to use?
1034
+ block = self.blocks[self.blknos[i]]
1035
+ values = block.iget(self.blklocs[i])
1036
+ return values
1037
+
1038
+ @property
1039
+ def column_arrays(self) -> list[np.ndarray]:
1040
+ """
1041
+ Used in the JSON C code to access column arrays.
1042
 +        This optimizes over `iget_values` by converting each block's values
 +        only once, instead of once per column.
1043
+
1044
+ Warning! This doesn't handle Copy-on-Write, so should be used with
1045
+ caution (current use case of consuming this in the JSON code is fine).
1046
+ """
1047
+ # This is an optimized equivalent to
1048
+ # result = [self.iget_values(i) for i in range(len(self.items))]
1049
+ result: list[np.ndarray | None] = [None] * len(self.items)
1050
+
1051
+ for blk in self.blocks:
1052
+ mgr_locs = blk._mgr_locs
1053
+ values = blk.array_values._values_for_json()
1054
+ if values.ndim == 1:
1055
+ # TODO(EA2D): special casing not needed with 2D EAs
1056
+ result[mgr_locs[0]] = values
1057
+
1058
+ else:
1059
+ for i, loc in enumerate(mgr_locs):
1060
+ result[loc] = values[i]
1061
+
1062
+ # error: Incompatible return value type (got "List[None]",
1063
+ # expected "List[ndarray[Any, Any]]")
1064
+ return result # type: ignore[return-value]
1065
+
1066
+ def iset(
1067
+ self,
1068
+ loc: int | slice | np.ndarray,
1069
+ value: ArrayLike,
1070
+ inplace: bool = False,
1071
+ refs: BlockValuesRefs | None = None,
1072
+ ) -> None:
1073
+ """
1074
+ Set new item in-place. Does not consolidate. Adds new Block if not
1075
+ contained in the current set of items
1076
+ """
1077
+
1078
+ # FIXME: refactor, clearly separate broadcasting & zip-like assignment
1079
+ # can prob also fix the various if tests for sparse/categorical
1080
+ if self._blklocs is None and self.ndim > 1:
1081
+ self._rebuild_blknos_and_blklocs()
1082
+
1083
+ # Note: we exclude DTA/TDA here
1084
+ value_is_extension_type = is_1d_only_ea_dtype(value.dtype)
1085
+ if not value_is_extension_type:
1086
+ if value.ndim == 2:
1087
+ value = value.T
1088
+ else:
1089
+ value = ensure_block_shape(value, ndim=2)
1090
+
1091
+ if value.shape[1:] != self.shape[1:]:
1092
+ raise AssertionError(
1093
+ "Shape of new values must be compatible with manager shape"
1094
+ )
1095
+
1096
+ if lib.is_integer(loc):
1097
+ # We have 6 tests where loc is _not_ an int.
1098
+ # In this case, get_blkno_placements will yield only one tuple,
1099
+ # containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1)))
1100
+
1101
+ # Check if we can use _iset_single fastpath
1102
+ loc = cast(int, loc)
1103
+ blkno = self.blknos[loc]
1104
+ blk = self.blocks[blkno]
1105
+ if len(blk._mgr_locs) == 1: # TODO: fastest way to check this?
1106
+ return self._iset_single(
1107
+ loc,
1108
+ value,
1109
+ inplace=inplace,
1110
+ blkno=blkno,
1111
+ blk=blk,
1112
+ refs=refs,
1113
+ )
1114
+
1115
+ # error: Incompatible types in assignment (expression has type
1116
+ # "List[Union[int, slice, ndarray]]", variable has type "Union[int,
1117
+ # slice, ndarray]")
1118
+ loc = [loc] # type: ignore[assignment]
1119
+
1120
+ # categorical/sparse/datetimetz
1121
+ if value_is_extension_type:
1122
+
1123
+ def value_getitem(placement):
1124
+ return value
1125
+
1126
+ else:
1127
+
1128
+ def value_getitem(placement):
1129
+ return value[placement.indexer]
1130
+
1131
+ # Accessing public blknos ensures the public versions are initialized
1132
+ blknos = self.blknos[loc]
1133
+ blklocs = self.blklocs[loc].copy()
1134
+
1135
+ unfit_mgr_locs = []
1136
+ unfit_val_locs = []
1137
+ removed_blknos = []
1138
+ for blkno_l, val_locs in libinternals.get_blkno_placements(blknos, group=True):
1139
+ blk = self.blocks[blkno_l]
1140
+ blk_locs = blklocs[val_locs.indexer]
1141
+ if inplace and blk.should_store(value):
1142
+ # Updating inplace -> check if we need to do Copy-on-Write
1143
+ if using_copy_on_write() and not self._has_no_reference_block(blkno_l):
1144
+ self._iset_split_block(
1145
+ blkno_l, blk_locs, value_getitem(val_locs), refs=refs
1146
+ )
1147
+ else:
1148
+ blk.set_inplace(blk_locs, value_getitem(val_locs))
1149
+ continue
1150
+ else:
1151
+ unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
1152
+ unfit_val_locs.append(val_locs)
1153
+
1154
+ # If all block items are unfit, schedule the block for removal.
1155
+ if len(val_locs) == len(blk.mgr_locs):
1156
+ removed_blknos.append(blkno_l)
1157
+ continue
1158
+ else:
1159
+ # Defer setting the new values to enable consolidation
1160
+ self._iset_split_block(blkno_l, blk_locs, refs=refs)
1161
+
1162
+ if len(removed_blknos):
1163
+ # Remove blocks & update blknos accordingly
1164
+ is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
1165
+ is_deleted[removed_blknos] = True
1166
+
1167
+ new_blknos = np.empty(self.nblocks, dtype=np.intp)
1168
+ new_blknos.fill(-1)
1169
+ new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos))
1170
+ self._blknos = new_blknos[self._blknos]
1171
+ self.blocks = tuple(
1172
+ blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
1173
+ )
1174
+
1175
+ if unfit_val_locs:
1176
+ unfit_idxr = np.concatenate(unfit_mgr_locs)
1177
+ unfit_count = len(unfit_idxr)
1178
+
1179
+ new_blocks: list[Block] = []
1180
+ if value_is_extension_type:
1181
+ # This code (ab-)uses the fact that EA blocks contain only
1182
+ # one item.
1183
+ # TODO(EA2D): special casing unnecessary with 2D EAs
1184
+ new_blocks.extend(
1185
+ new_block_2d(
1186
+ values=value,
1187
+ placement=BlockPlacement(slice(mgr_loc, mgr_loc + 1)),
1188
+ refs=refs,
1189
+ )
1190
+ for mgr_loc in unfit_idxr
1191
+ )
1192
+
1193
+ self._blknos[unfit_idxr] = np.arange(unfit_count) + len(self.blocks)
1194
+ self._blklocs[unfit_idxr] = 0
1195
+
1196
+ else:
1197
+ # unfit_val_locs contains BlockPlacement objects
1198
+ unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])
1199
+
1200
+ new_blocks.append(
1201
+ new_block_2d(
1202
+ values=value_getitem(unfit_val_items),
1203
+ placement=BlockPlacement(unfit_idxr),
1204
+ refs=refs,
1205
+ )
1206
+ )
1207
+
1208
+ self._blknos[unfit_idxr] = len(self.blocks)
1209
+ self._blklocs[unfit_idxr] = np.arange(unfit_count)
1210
+
1211
+ self.blocks += tuple(new_blocks)
1212
+
1213
+ # Newly created block's dtype may already be present.
1214
+ self._known_consolidated = False
1215
+
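A sketch of `iset` at work through column assignment (values invented; `_mgr` peeked at only for illustration): replacing an existing column with an incompatible dtype carves it out of its block and appends a new, unconsolidated block.

import numpy as np
import pandas as pd

# "a" and "b" start out consolidated into a single int64 block.
df = pd.DataFrame({"a": [1, 2], "b": [3, 4]})

# Assigning float values to "a" funnels through mgr.iset: "a" is split out
# of the int64 block and a new float64 block is appended.
df["a"] = np.array([10.0, 20.0])
assert df._mgr.nblocks == 2
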
1216
+ def _iset_split_block(
1217
+ self,
1218
+ blkno_l: int,
1219
+ blk_locs: np.ndarray | list[int],
1220
+ value: ArrayLike | None = None,
1221
+ refs: BlockValuesRefs | None = None,
1222
+ ) -> None:
1223
+ """Removes columns from a block by splitting the block.
1224
+
1225
+ Avoids copying the whole block through slicing and updates the manager
1226
+ after determining the new block structure. Optionally adds a new block;
1227
+ otherwise, adding it is left to the caller.
1228
+
1229
+ Parameters
1230
+ ----------
1231
+ blkno_l: The block number to operate on, relevant for updating the manager
1232
+ blk_locs: The locations within the block that should be deleted.
1233
+ value: The value to set as a replacement.
1234
+ refs: The reference tracking object of the value to set.
1235
+ """
1236
+ blk = self.blocks[blkno_l]
1237
+
1238
+ if self._blklocs is None:
1239
+ self._rebuild_blknos_and_blklocs()
1240
+
1241
+ nbs_tup = tuple(blk.delete(blk_locs))
1242
+ if value is not None:
1243
+ locs = blk.mgr_locs.as_array[blk_locs]
1244
+ first_nb = new_block_2d(value, BlockPlacement(locs), refs=refs)
1245
+ else:
1246
+ first_nb = nbs_tup[0]
1247
+ nbs_tup = tuple(nbs_tup[1:])
1248
+
1249
+ nr_blocks = len(self.blocks)
1250
+ blocks_tup = (
1251
+ self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1 :] + nbs_tup
1252
+ )
1253
+ self.blocks = blocks_tup
1254
+
1255
+ if not nbs_tup and value is not None:
1256
+ # No need to update anything if split did not happen
1257
+ return
1258
+
1259
+ self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb))
1260
+
1261
+ for i, nb in enumerate(nbs_tup):
1262
+ self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb))
1263
+ self._blknos[nb.mgr_locs.indexer] = i + nr_blocks
1264
+
1265
+ def _iset_single(
1266
+ self,
1267
+ loc: int,
1268
+ value: ArrayLike,
1269
+ inplace: bool,
1270
+ blkno: int,
1271
+ blk: Block,
1272
+ refs: BlockValuesRefs | None = None,
1273
+ ) -> None:
1274
+ """
1275
+ Fastpath for iset when we are only setting a single position and
1276
+ the Block currently in that position is itself single-column.
1277
+
1278
+ In this case we can swap out the entire Block and blklocs and blknos
1279
+ are unaffected.
1280
+ """
1281
+ # Caller is responsible for verifying value.shape
1282
+
1283
+ if inplace and blk.should_store(value):
1284
+ copy = False
1285
+ if using_copy_on_write() and not self._has_no_reference_block(blkno):
1286
+ # perform Copy-on-Write and clear the reference
1287
+ copy = True
1288
+ iloc = self.blklocs[loc]
1289
+ blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy)
1290
+ return
1291
+
1292
+ nb = new_block_2d(value, placement=blk._mgr_locs, refs=refs)
1293
+ old_blocks = self.blocks
1294
+ new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1 :]
1295
+ self.blocks = new_blocks
1296
+ return
1297
+
1298
+ def column_setitem(
1299
+ self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False
1300
+ ) -> None:
1301
+ """
1302
+ Set values ("setitem") into a single column (not setting the full column).
1303
+
1304
+ This is a method on the BlockManager level, to avoid creating an
1305
+ intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`)
1306
+ """
1307
+ needs_to_warn = False
1308
+ if warn_copy_on_write() and not self._has_no_reference(loc):
1309
+ if not isinstance(
1310
+ self.blocks[self.blknos[loc]].values,
1311
+ (ArrowExtensionArray, ArrowStringArray),
1312
+ ):
1313
+ # We might raise if we are in an expansion case, so defer
1314
+ # warning till we actually updated
1315
+ needs_to_warn = True
1316
+
1317
+ elif using_copy_on_write() and not self._has_no_reference(loc):
1318
+ blkno = self.blknos[loc]
1319
+ # Split blocks to only copy the column we want to modify
1320
+ blk_loc = self.blklocs[loc]
1321
+ # Copy our values
1322
+ values = self.blocks[blkno].values
1323
+ if values.ndim == 1:
1324
+ values = values.copy()
1325
+ else:
1326
+ # Use [blk_loc] as indexer to keep ndim=2; this already results in a
1327
+ # copy
1328
+ values = values[[blk_loc]]
1329
+ self._iset_split_block(blkno, [blk_loc], values)
1330
+
1331
+ # this manager is only created temporarily to mutate the values in place
1332
+ # so don't track references, otherwise the `setitem` would perform CoW again
1333
+ col_mgr = self.iget(loc, track_ref=False)
1334
+ if inplace_only:
1335
+ col_mgr.setitem_inplace(idx, value)
1336
+ else:
1337
+ new_mgr = col_mgr.setitem((idx,), value)
1338
+ self.iset(loc, new_mgr._block.values, inplace=True)
1339
+
1340
+ if needs_to_warn:
1341
+ warnings.warn(
1342
+ COW_WARNING_GENERAL_MSG,
1343
+ FutureWarning,
1344
+ stacklevel=find_stack_level(),
1345
+ )
1346
+
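A sketch of the Copy-on-Write branch above (frame invented; assumes a pandas version exposing the `mode.copy_on_write` option): mutating one cell of a shared multi-column block splits out only the touched column rather than copying the whole block.

import numpy as np
import pandas as pd

pd.set_option("mode.copy_on_write", True)

df = pd.DataFrame(np.ones((3, 3)), columns=list("abc"))  # one 3x3 block
view = df[:]                 # shares the block with df

df.iloc[0, 1] = 5.0          # only column "b" is copied (the block splits)
assert view.iloc[0, 1] == 1.0
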
1347
+ def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None:
1348
+ """
1349
+ Insert item at selected position.
1350
+
1351
+ Parameters
1352
+ ----------
1353
+ loc : int
1354
+ item : hashable
1355
+ value : np.ndarray or ExtensionArray
1356
+ refs : The reference tracking object of the value to set.
1357
+ """
1358
+ with warnings.catch_warnings():
1359
+ # TODO: re-issue this with setitem-specific message?
1360
+ warnings.filterwarnings(
1361
+ "ignore",
1362
+ "The behavior of Index.insert with object-dtype is deprecated",
1363
+ category=FutureWarning,
1364
+ )
1365
+ new_axis = self.items.insert(loc, item)
1366
+
1367
+ if value.ndim == 2:
1368
+ value = value.T
1369
+ if len(value) > 1:
1370
+ raise ValueError(
1371
+ f"Expected a 1D array, got an array with shape {value.T.shape}"
1372
+ )
1373
+ else:
1374
+ value = ensure_block_shape(value, ndim=self.ndim)
1375
+
1376
+ bp = BlockPlacement(slice(loc, loc + 1))
1377
+ block = new_block_2d(values=value, placement=bp, refs=refs)
1378
+
1379
+ if not len(self.blocks):
1380
+ # Fastpath
1381
+ self._blklocs = np.array([0], dtype=np.intp)
1382
+ self._blknos = np.array([0], dtype=np.intp)
1383
+ else:
1384
+ self._insert_update_mgr_locs(loc)
1385
+ self._insert_update_blklocs_and_blknos(loc)
1386
+
1387
+ self.axes[0] = new_axis
1388
+ self.blocks += (block,)
1389
+
1390
+ self._known_consolidated = False
1391
+
1392
+ if sum(not block.is_extension for block in self.blocks) > 100:
1393
+ warnings.warn(
1394
+ "DataFrame is highly fragmented. This is usually the result "
1395
+ "of calling `frame.insert` many times, which has poor performance. "
1396
+ "Consider joining all columns at once using pd.concat(axis=1) "
1397
+ "instead. To get a de-fragmented frame, use `newframe = frame.copy()`",
1398
+ PerformanceWarning,
1399
+ stacklevel=find_stack_level(),
1400
+ )
1401
+
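The fragmentation warning above can be provoked deliberately; a sketch (column names invented, `_mgr` inspected only to show the effect):

import warnings

import numpy as np
import pandas as pd

df = pd.DataFrame(index=range(3))
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    for i in range(101):
        df[f"c{i}"] = np.arange(3)   # each new column appends one block

# The 101st numpy-backed block triggers the PerformanceWarning; a copy
# consolidates the frame back into a single block, as the message advises.
assert any("fragmented" in str(w.message) for w in caught)
df = df.copy()
assert df._mgr.nblocks == 1
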
1402
+ def _insert_update_mgr_locs(self, loc) -> None:
1403
+ """
1404
+ When inserting a new Block at location 'loc', we increment
1405
+ all of the mgr_locs of blocks above that by one.
1406
+ """
1407
+ for blkno, count in _fast_count_smallints(self.blknos[loc:]):
1408
+ # profiling note: .620 this way, .326 of which is in increment_above
1409
+ blk = self.blocks[blkno]
1410
+ blk._mgr_locs = blk._mgr_locs.increment_above(loc)
1411
+
1412
+ def _insert_update_blklocs_and_blknos(self, loc) -> None:
1413
+ """
1414
+ When inserting a new Block at location 'loc', we update our
1415
+ _blklocs and _blknos.
1416
+ """
1417
+
1418
+ # Accessing public blklocs ensures the public versions are initialized
1419
+ if loc == self.blklocs.shape[0]:
1420
+ # np.append is a lot faster, let's use it if we can.
1421
+ self._blklocs = np.append(self._blklocs, 0)
1422
+ self._blknos = np.append(self._blknos, len(self.blocks))
1423
+ elif loc == 0:
1424
+ # np.append is a lot faster, let's use it if we can.
1425
+ self._blklocs = np.append(self._blklocs[::-1], 0)[::-1]
1426
+ self._blknos = np.append(self._blknos[::-1], len(self.blocks))[::-1]
1427
+ else:
1428
+ new_blklocs, new_blknos = libinternals.update_blklocs_and_blknos(
1429
+ self.blklocs, self.blknos, loc, len(self.blocks)
1430
+ )
1431
+ self._blklocs = new_blklocs
1432
+ self._blknos = new_blknos
1433
+
1434
+ def idelete(self, indexer) -> BlockManager:
1435
+ """
1436
+ Delete selected locations, returning a new BlockManager.
1437
+ """
1438
+ is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
1439
+ is_deleted[indexer] = True
1440
+ taker = (~is_deleted).nonzero()[0]
1441
+
1442
+ nbs = self._slice_take_blocks_ax0(taker, only_slice=True, ref_inplace_op=True)
1443
+ new_columns = self.items[~is_deleted]
1444
+ axes = [new_columns, self.axes[1]]
1445
+ return type(self)(tuple(nbs), axes, verify_integrity=False)
1446
+
1447
+ # ----------------------------------------------------------------
1448
+ # Block-wise Operation
1449
+
1450
+ def grouped_reduce(self, func: Callable) -> Self:
1451
+ """
1452
+ Apply grouped reduction function blockwise, returning a new BlockManager.
1453
+
1454
+ Parameters
1455
+ ----------
1456
+ func : grouped reduction function
1457
+
1458
+ Returns
1459
+ -------
1460
+ BlockManager
1461
+ """
1462
+ result_blocks: list[Block] = []
1463
+
1464
+ for blk in self.blocks:
1465
+ if blk.is_object:
1466
+ # split on object-dtype blocks bc some columns may raise
1467
+ # while others do not.
1468
+ for sb in blk._split():
1469
+ applied = sb.apply(func)
1470
+ result_blocks = extend_blocks(applied, result_blocks)
1471
+ else:
1472
+ applied = blk.apply(func)
1473
+ result_blocks = extend_blocks(applied, result_blocks)
1474
+
1475
+ if len(result_blocks) == 0:
1476
+ nrows = 0
1477
+ else:
1478
+ nrows = result_blocks[0].values.shape[-1]
1479
+ index = Index(range(nrows))
1480
+
1481
+ return type(self).from_blocks(result_blocks, [self.axes[0], index])
1482
+
1483
+ def reduce(self, func: Callable) -> Self:
1484
+ """
1485
+ Apply reduction function blockwise, returning a single-row BlockManager.
1486
+
1487
+ Parameters
1488
+ ----------
1489
+ func : reduction function
1490
+
1491
+ Returns
1492
+ -------
1493
+ BlockManager
1494
+ """
1495
+ # If 2D, we assume that we're operating column-wise
1496
+ assert self.ndim == 2
1497
+
1498
+ res_blocks: list[Block] = []
1499
+ for blk in self.blocks:
1500
+ nbs = blk.reduce(func)
1501
+ res_blocks.extend(nbs)
1502
+
1503
+ index = Index([None]) # placeholder
1504
+ new_mgr = type(self).from_blocks(res_blocks, [self.items, index])
1505
+ return new_mgr
1506
+
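A sketch of how `reduce` surfaces (values invented): frame-level reductions are applied block-by-block and reassembled into a single-row manager.

import pandas as pd

df = pd.DataFrame({"a": [1, 2], "b": [3.0, 4.0]})

# DataFrame.sum() reduces each block independently via mgr.reduce(func);
# the single-row result manager becomes the returned Series.
total = df.sum()
assert total["a"] == 3 and total["b"] == 7.0
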
1507
+ def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager:
1508
+ """
1509
+ Apply array_op blockwise with another (aligned) BlockManager.
1510
+ """
1511
+ return operate_blockwise(self, other, array_op)
1512
+
1513
+ def _equal_values(self: BlockManager, other: BlockManager) -> bool:
1514
+ """
1515
+ Used in .equals defined in base class. Only check the column values
1516
+ assuming shape and indexes have already been checked.
1517
+ """
1518
+ return blockwise_all(self, other, array_equals)
1519
+
1520
+ def quantile(
1521
+ self,
1522
+ *,
1523
+ qs: Index, # with dtype float64
1524
+ interpolation: QuantileInterpolation = "linear",
1525
+ ) -> Self:
1526
+ """
1527
+ Iterate over blocks applying quantile reduction.
1528
+ This routine is intended for reduction type operations and
1529
+ will do inference on the generated blocks.
1530
+
1531
+ Parameters
1532
+ ----------
1533
+ qs : Index of the quantiles (float64) to be computed
1534
+ interpolation : type of interpolation, default 'linear'
1535
+
1536
+ Returns
1537
+ -------
1538
+ BlockManager
1539
+ """
1540
+ # Series dispatches to DataFrame for quantile, which allows us to
1541
+ # simplify some of the code here and in the blocks
1542
+ assert self.ndim >= 2
1543
+ assert is_list_like(qs) # caller is responsible for this
1544
+
1545
+ new_axes = list(self.axes)
1546
+ new_axes[1] = Index(qs, dtype=np.float64)
1547
+
1548
+ blocks = [
1549
+ blk.quantile(qs=qs, interpolation=interpolation) for blk in self.blocks
1550
+ ]
1551
+
1552
+ return type(self)(blocks, new_axes)
1553
+
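A sketch of the quantile path (values invented): the requested quantiles become the float64 axis of the result.

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3, 4], "b": [10.0, 20.0, 30.0, 40.0]})

# DataFrame.quantile with list-like qs lands here: each block computes
# the requested quantiles and qs becomes the result's index.
res = df.quantile([0.25, 0.75], interpolation="linear")
assert list(res.index) == [0.25, 0.75]
assert res.loc[0.75, "b"] == 32.5
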
1554
+ # ----------------------------------------------------------------
1555
+
1556
+ def unstack(self, unstacker, fill_value) -> BlockManager:
1557
+ """
1558
+ Return a BlockManager with all blocks unstacked.
1559
+
1560
+ Parameters
1561
+ ----------
1562
+ unstacker : reshape._Unstacker
1563
+ fill_value : Any
1564
+ fill_value for newly introduced missing values.
1565
+
1566
+ Returns
1567
+ -------
1568
+ unstacked : BlockManager
1569
+ """
1570
+ new_columns = unstacker.get_new_columns(self.items)
1571
+ new_index = unstacker.new_index
1572
+
1573
+ allow_fill = not unstacker.mask_all
1574
+ if allow_fill:
1575
+ # calculating the full mask once and passing it to Block._unstack is
1576
+ # faster than recalculating it in each repeated call
1577
+ new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape)
1578
+ needs_masking = new_mask2D.any(axis=0)
1579
+ else:
1580
+ needs_masking = np.zeros(unstacker.full_shape[1], dtype=bool)
1581
+
1582
+ new_blocks: list[Block] = []
1583
+ columns_mask: list[np.ndarray] = []
1584
+
1585
+ if len(self.items) == 0:
1586
+ factor = 1
1587
+ else:
1588
+ fac = len(new_columns) / len(self.items)
1589
+ assert fac == int(fac)
1590
+ factor = int(fac)
1591
+
1592
+ for blk in self.blocks:
1593
+ mgr_locs = blk.mgr_locs
1594
+ new_placement = mgr_locs.tile_for_unstack(factor)
1595
+
1596
+ blocks, mask = blk._unstack(
1597
+ unstacker,
1598
+ fill_value,
1599
+ new_placement=new_placement,
1600
+ needs_masking=needs_masking,
1601
+ )
1602
+
1603
+ new_blocks.extend(blocks)
1604
+ columns_mask.extend(mask)
1605
+
1606
+ # Block._unstack should ensure this holds,
1607
+ assert mask.sum() == sum(len(nb._mgr_locs) for nb in blocks)
1608
+ # In turn this ensures that in the BlockManager call below
1609
+ # we have len(new_columns) == sum(x.shape[0] for x in new_blocks)
1610
+ # which suffices to allow us to pass verify_integrity=False
1611
+
1612
+ new_columns = new_columns[columns_mask]
1613
+
1614
+ bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False)
1615
+ return bm
1616
+
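A sketch of the unstack path (index and values invented): the manager receives a `reshape._Unstacker` and plugs holes in the new grid with `fill_value`.

import pandas as pd

idx = pd.MultiIndex.from_product([["x", "y"], [1, 2]])
df = pd.DataFrame({"a": range(4)}, index=idx)

# DataFrame.unstack hands a reshape._Unstacker to the manager; every
# block is unstacked and fill_value fills cells with no source value.
wide = df.unstack(fill_value=0)
assert wide.shape == (2, 2)
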
1617
+ def to_dict(self) -> dict[str, Self]:
1618
+ """
1619
+ Return a dict of str(dtype) -> BlockManager
1620
+
1621
+ Returns
1622
+ -------
1623
+ values : a dict of dtype -> BlockManager
1624
+ """
1625
+
1626
+ bd: dict[str, list[Block]] = {}
1627
+ for b in self.blocks:
1628
+ bd.setdefault(str(b.dtype), []).append(b)
1629
+
1630
+ # TODO(EA2D): the combine will be unnecessary with 2D EAs
1631
+ return {dtype: self._combine(blocks) for dtype, blocks in bd.items()}
1632
+
1633
+ def as_array(
1634
+ self,
1635
+ dtype: np.dtype | None = None,
1636
+ copy: bool = False,
1637
+ na_value: object = lib.no_default,
1638
+ ) -> np.ndarray:
1639
+ """
1640
+ Convert the BlockManager data into a numpy array.
1641
+
1642
+ Parameters
1643
+ ----------
1644
+ dtype : np.dtype or None, default None
1645
+ Data type of the return array.
1646
+ copy : bool, default False
1647
+ If True then guarantee that a copy is returned. A value of
1648
+ False does not guarantee that the underlying data is not
1649
+ copied.
1650
+ na_value : object, default lib.no_default
1651
+ Value to be used as the missing value sentinel.
1652
+
1653
+ Returns
1654
+ -------
1655
+ arr : ndarray
1656
+ """
1657
+ passed_nan = lib.is_float(na_value) and isna(na_value)
1658
+
1659
+ if len(self.blocks) == 0:
1660
+ arr = np.empty(self.shape, dtype=float)
1661
+ return arr.transpose()
1662
+
1663
+ if self.is_single_block:
1664
+ blk = self.blocks[0]
1665
+
1666
+ if na_value is not lib.no_default:
1667
+ # We want to copy when na_value is provided to avoid
1668
+ # mutating the original object
1669
+ if lib.is_np_dtype(blk.dtype, "f") and passed_nan:
1670
+ # We are already numpy-float and na_value=np.nan
1671
+ pass
1672
+ else:
1673
+ copy = True
1674
+
1675
+ if blk.is_extension:
1676
+ # Avoid implicit conversion of extension blocks to object
1677
+
1678
+ # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no
1679
+ # attribute "to_numpy"
1680
+ arr = blk.values.to_numpy( # type: ignore[union-attr]
1681
+ dtype=dtype,
1682
+ na_value=na_value,
1683
+ copy=copy,
1684
+ ).reshape(blk.shape)
1685
+ elif not copy:
1686
+ arr = np.asarray(blk.values, dtype=dtype)
1687
+ else:
1688
+ arr = np.array(blk.values, dtype=dtype, copy=copy)
1689
+
1690
+ if using_copy_on_write() and not copy:
1691
+ arr = arr.view()
1692
+ arr.flags.writeable = False
1693
+ else:
1694
+ arr = self._interleave(dtype=dtype, na_value=na_value)
1695
+ # The underlying data was copied within _interleave, so no need
1696
+ # to further copy if copy=True or setting na_value
1697
+
1698
+ if na_value is lib.no_default:
1699
+ pass
1700
+ elif arr.dtype.kind == "f" and passed_nan:
1701
+ pass
1702
+ else:
1703
+ arr[isna(arr)] = na_value
1704
+
1705
+ return arr.transpose()
1706
+
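A sketch of `as_array` via its public entry point (values invented): mixed blocks interleave into one ndarray, and `na_value` replaces missing entries.

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": [1.0, None], "b": [2, 3]})  # float64 + int64 blocks

# DataFrame.to_numpy routes through mgr.as_array -> _interleave here.
arr = df.to_numpy(dtype=float, na_value=-1.0)
assert arr.dtype == np.float64
assert arr[1, 0] == -1.0
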
1707
+ def _interleave(
1708
+ self,
1709
+ dtype: np.dtype | None = None,
1710
+ na_value: object = lib.no_default,
1711
+ ) -> np.ndarray:
1712
+ """
1713
+ Return ndarray from blocks with specified item order.
1714
+ Items must be contained in the blocks.
1715
+ """
1716
+ if not dtype:
1717
+ # Incompatible types in assignment (expression has type
1718
+ # "Optional[Union[dtype[Any], ExtensionDtype]]", variable has
1719
+ # type "Optional[dtype[Any]]")
1720
+ dtype = interleaved_dtype( # type: ignore[assignment]
1721
+ [blk.dtype for blk in self.blocks]
1722
+ )
1723
+
1724
+ # error: Argument 1 to "ensure_np_dtype" has incompatible type
1725
+ # "Optional[dtype[Any]]"; expected "Union[dtype[Any], ExtensionDtype]"
1726
+ dtype = ensure_np_dtype(dtype) # type: ignore[arg-type]
1727
+ result = np.empty(self.shape, dtype=dtype)
1728
+
1729
+ itemmask = np.zeros(self.shape[0])
1730
+
1731
+ if dtype == np.dtype("object") and na_value is lib.no_default:
1732
+ # much more performant than using to_numpy below
1733
+ for blk in self.blocks:
1734
+ rl = blk.mgr_locs
1735
+ arr = blk.get_values(dtype)
1736
+ result[rl.indexer] = arr
1737
+ itemmask[rl.indexer] = 1
1738
+ return result
1739
+
1740
+ for blk in self.blocks:
1741
+ rl = blk.mgr_locs
1742
+ if blk.is_extension:
1743
+ # Avoid implicit conversion of extension blocks to object
1744
+
1745
+ # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no
1746
+ # attribute "to_numpy"
1747
+ arr = blk.values.to_numpy( # type: ignore[union-attr]
1748
+ dtype=dtype,
1749
+ na_value=na_value,
1750
+ )
1751
+ else:
1752
+ arr = blk.get_values(dtype)
1753
+ result[rl.indexer] = arr
1754
+ itemmask[rl.indexer] = 1
1755
+
1756
+ if not itemmask.all():
1757
+ raise AssertionError("Some items were not contained in blocks")
1758
+
1759
+ return result
1760
+
1761
+ # ----------------------------------------------------------------
1762
+ # Consolidation
1763
+
1764
+ def is_consolidated(self) -> bool:
1765
+ """
1766
+ Return True if no two consolidatable blocks share the same dtype.
1767
+ """
1768
+ if not self._known_consolidated:
1769
+ self._consolidate_check()
1770
+ return self._is_consolidated
1771
+
1772
+ def _consolidate_check(self) -> None:
1773
+ if len(self.blocks) == 1:
1774
+ # fastpath
1775
+ self._is_consolidated = True
1776
+ self._known_consolidated = True
1777
+ return
1778
+ dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate]
1779
+ self._is_consolidated = len(dtypes) == len(set(dtypes))
1780
+ self._known_consolidated = True
1781
+
1782
+ def _consolidate_inplace(self) -> None:
1783
+ # In general, _consolidate_inplace should only be called via
1784
+ # DataFrame._consolidate_inplace, otherwise we will fail to invalidate
1785
+ # the DataFrame's _item_cache. The exception is for newly-created
1786
+ # BlockManager objects not yet attached to a DataFrame.
1787
+ if not self.is_consolidated():
1788
+ self.blocks = _consolidate(self.blocks)
1789
+ self._is_consolidated = True
1790
+ self._known_consolidated = True
1791
+ self._rebuild_blknos_and_blklocs()
1792
+
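A sketch of consolidation (frame invented; `_consolidate_inplace` is called on the manager directly only for illustration, since in practice it is driven via DataFrame._consolidate_inplace as the comment above notes):

import numpy as np
import pandas as pd

df = pd.DataFrame({"a": np.arange(3)})
df["b"] = np.arange(3)          # a second, separate int64 block

mgr = df._mgr
assert not mgr.is_consolidated()
mgr._consolidate_inplace()      # merges the two int64 blocks
assert mgr.nblocks == 1
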
1793
+ # ----------------------------------------------------------------
1794
+ # Concatenation
1795
+
1796
+ @classmethod
1797
+ def concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self:
1798
+ """
1799
+ Concatenate uniformly-indexed BlockManagers horizontally.
1800
+ """
1801
+ offset = 0
1802
+ blocks: list[Block] = []
1803
+ for mgr in mgrs:
1804
+ for blk in mgr.blocks:
1805
+ # We need to do getitem_block here otherwise we would be altering
1806
+ # blk.mgr_locs in place, which would render it invalid. This is only
1807
+ # relevant in the copy=False case.
1808
+ nb = blk.slice_block_columns(slice(None))
1809
+ nb._mgr_locs = nb._mgr_locs.add(offset)
1810
+ blocks.append(nb)
1811
+
1812
+ offset += len(mgr.items)
1813
+
1814
+ new_mgr = cls(tuple(blocks), axes)
1815
+ return new_mgr
1816
+
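A sketch of where `concat_horizontal` applies (frames invented): concatenating identically-indexed frames along columns can reuse the input blocks, only shifting their placements.

import pandas as pd

left = pd.DataFrame({"a": [1, 2]})
right = pd.DataFrame({"b": [3.0, 4.0]})

# Identically-indexed inputs let pd.concat(axis=1) re-place the existing
# blocks at a column offset instead of copying their values.
out = pd.concat([left, right], axis=1)
assert out._mgr.nblocks == 2
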
1817
+ @classmethod
1818
+ def concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self:
1819
+ """
1820
+ Concatenate uniformly-indexed BlockManagers vertically.
1821
+ """
1822
+ raise NotImplementedError("This logic lives (for now) in internals.concat")
1823
+
1824
+
1825
+ class SingleBlockManager(BaseBlockManager, SingleDataManager):
1826
+ """manage a single block with"""
1827
+
1828
+ @property
1829
+ def ndim(self) -> Literal[1]:
1830
+ return 1
1831
+
1832
+ _is_consolidated = True
1833
+ _known_consolidated = True
1834
+ __slots__ = ()
1835
+ is_single_block = True
1836
+
1837
+ def __init__(
1838
+ self,
1839
+ block: Block,
1840
+ axis: Index,
1841
+ verify_integrity: bool = False,
1842
+ ) -> None:
1843
+ # Assertions disabled for performance
1844
+ # assert isinstance(block, Block), type(block)
1845
+ # assert isinstance(axis, Index), type(axis)
1846
+
1847
+ self.axes = [axis]
1848
+ self.blocks = (block,)
1849
+
1850
+ @classmethod
1851
+ def from_blocks(
1852
+ cls,
1853
+ blocks: list[Block],
1854
+ axes: list[Index],
1855
+ ) -> Self:
1856
+ """
1857
+ Constructor for BlockManager and SingleBlockManager with same signature.
1858
+ """
1859
+ assert len(blocks) == 1
1860
+ assert len(axes) == 1
1861
+ return cls(blocks[0], axes[0], verify_integrity=False)
1862
+
1863
+ @classmethod
1864
+ def from_array(
1865
+ cls, array: ArrayLike, index: Index, refs: BlockValuesRefs | None = None
1866
+ ) -> SingleBlockManager:
1867
+ """
1868
+ Constructor for if we have an array that is not yet a Block.
1869
+ """
1870
+ array = maybe_coerce_values(array)
1871
+ bp = BlockPlacement(slice(0, len(index)))
1872
+ block = new_block(array, placement=bp, ndim=1, refs=refs)
1873
+ return cls(block, index)
1874
+
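A sketch of `from_array` (values invented; `_mgr` inspected only to show the effect): constructing a Series from a plain ndarray takes this path, wrapping the array in one 1-D block spanning the whole index.

import numpy as np
import pandas as pd

s = pd.Series(np.array([1.0, 2.0, 3.0]))
assert s._mgr.nblocks == 1
assert s._mgr.index.equals(pd.RangeIndex(3))
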
1875
+ def to_2d_mgr(self, columns: Index) -> BlockManager:
1876
+ """
1877
+ Manager analogue of Series.to_frame
1878
+ """
1879
+ blk = self.blocks[0]
1880
+ arr = ensure_block_shape(blk.values, ndim=2)
1881
+ bp = BlockPlacement(0)
1882
+ new_blk = type(blk)(arr, placement=bp, ndim=2, refs=blk.refs)
1883
+ axes = [columns, self.axes[0]]
1884
+ return BlockManager([new_blk], axes=axes, verify_integrity=False)
1885
+
1886
+ def _has_no_reference(self, i: int = 0) -> bool:
1887
+ """
1888
+ Check whether column `i` has references.
1889
+ (whether it references another array or is itself being referenced)
1890
+ Returns True if the column has no references.
1891
+ """
1892
+ return not self.blocks[0].refs.has_reference()
1893
+
1894
+ def __getstate__(self):
1895
+ block_values = [b.values for b in self.blocks]
1896
+ block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
1897
+ axes_array = list(self.axes)
1898
+
1899
+ extra_state = {
1900
+ "0.14.1": {
1901
+ "axes": axes_array,
1902
+ "blocks": [
1903
+ {"values": b.values, "mgr_locs": b.mgr_locs.indexer}
1904
+ for b in self.blocks
1905
+ ],
1906
+ }
1907
+ }
1908
+
1909
+ # First three elements of the state are to maintain forward
1910
+ # compatibility with 0.13.1.
1911
+ return axes_array, block_values, block_items, extra_state
1912
+
1913
+ def __setstate__(self, state) -> None:
1914
+ def unpickle_block(values, mgr_locs, ndim: int) -> Block:
1915
+ # TODO(EA2D): ndim would be unnecessary with 2D EAs
1916
+ # older pickles may store e.g. DatetimeIndex instead of DatetimeArray
1917
+ values = extract_array(values, extract_numpy=True)
1918
+ if not isinstance(mgr_locs, BlockPlacement):
1919
+ mgr_locs = BlockPlacement(mgr_locs)
1920
+
1921
+ values = maybe_coerce_values(values)
1922
+ return new_block(values, placement=mgr_locs, ndim=ndim)
1923
+
1924
+ if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
1925
+ state = state[3]["0.14.1"]
1926
+ self.axes = [ensure_index(ax) for ax in state["axes"]]
1927
+ ndim = len(self.axes)
1928
+ self.blocks = tuple(
1929
+ unpickle_block(b["values"], b["mgr_locs"], ndim=ndim)
1930
+ for b in state["blocks"]
1931
+ )
1932
+ else:
1933
+ raise NotImplementedError("pre-0.14.1 pickles are no longer supported")
1934
+
1935
+ self._post_setstate()
1936
+
1937
+ def _post_setstate(self) -> None:
1938
+ pass
1939
+
1940
+ @cache_readonly
1941
+ def _block(self) -> Block:
1942
+ return self.blocks[0]
1943
+
1944
+ @property
1945
+ def _blknos(self):
1946
+ """compat with BlockManager"""
1947
+ return None
1948
+
1949
+ @property
1950
+ def _blklocs(self):
1951
+ """compat with BlockManager"""
1952
+ return None
1953
+
1954
+ def get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> Self:
1955
+ # similar to get_slice, but not restricted to slice indexer
1956
+ blk = self._block
1957
+ if using_copy_on_write() and len(indexer) > 0 and indexer.all():
1958
+ return type(self)(blk.copy(deep=False), self.index)
1959
+ array = blk.values[indexer]
1960
+
1961
+ if isinstance(indexer, np.ndarray) and indexer.dtype.kind == "b":
1962
+ # boolean indexing always gives a copy with numpy
1963
+ refs = None
1964
+ else:
1965
+ # TODO(CoW) in theory only need to track reference if new_array is a view
1966
+ refs = blk.refs
1967
+
1968
+ bp = BlockPlacement(slice(0, len(array)))
1969
+ block = type(blk)(array, placement=bp, ndim=1, refs=refs)
1970
+
1971
+ new_idx = self.index[indexer]
1972
+ return type(self)(block, new_idx)
1973
+
1974
+ def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleBlockManager:
1975
+ # Assertion disabled for performance
1976
+ # assert isinstance(slobj, slice), type(slobj)
1977
+ if axis >= self.ndim:
1978
+ raise IndexError("Requested axis not found in manager")
1979
+
1980
+ blk = self._block
1981
+ array = blk.values[slobj]
1982
+ bp = BlockPlacement(slice(0, len(array)))
1983
+ # TODO this method is only used in groupby SeriesSplitter at the moment,
1984
+ # so passing refs is not yet covered by the tests
1985
+ block = type(blk)(array, placement=bp, ndim=1, refs=blk.refs)
1986
+ new_index = self.index._getitem_slice(slobj)
1987
+ return type(self)(block, new_index)
1988
+
1989
+ @property
1990
+ def index(self) -> Index:
1991
+ return self.axes[0]
1992
+
1993
+ @property
1994
+ def dtype(self) -> DtypeObj:
1995
+ return self._block.dtype
1996
+
1997
+ def get_dtypes(self) -> npt.NDArray[np.object_]:
1998
+ return np.array([self._block.dtype], dtype=object)
1999
+
2000
+ def external_values(self):
2001
+ """The array that Series.values returns"""
2002
+ return self._block.external_values()
2003
+
2004
+ def internal_values(self):
2005
+ """The array that Series._values returns"""
2006
+ return self._block.values
2007
+
2008
+ def array_values(self) -> ExtensionArray:
2009
+ """The array that Series.array returns"""
2010
+ return self._block.array_values
2011
+
2012
+ def get_numeric_data(self) -> Self:
2013
+ if self._block.is_numeric:
2014
+ return self.copy(deep=False)
2015
+ return self.make_empty()
2016
+
2017
+ @property
2018
+ def _can_hold_na(self) -> bool:
2019
+ return self._block._can_hold_na
2020
+
2021
+ def setitem_inplace(self, indexer, value, warn: bool = True) -> None:
2022
+ """
2023
+ Set values with indexer.
2024
+
2025
+ For Single[Block/Array]Manager, this backs s[indexer] = value
2026
+
2027
+ This is an inplace version of `setitem()`, mutating the manager/values
2028
+ in place, not returning a new Manager (and Block), and thus never changing
2029
+ the dtype.
2030
+ """
2031
+ using_cow = using_copy_on_write()
2032
+ warn_cow = warn_copy_on_write()
2033
+ if (using_cow or warn_cow) and not self._has_no_reference(0):
2034
+ if using_cow:
2035
+ self.blocks = (self._block.copy(),)
2036
+ self._cache.clear()
2037
+ elif warn_cow and warn:
2038
+ warnings.warn(
2039
+ COW_WARNING_SETITEM_MSG,
2040
+ FutureWarning,
2041
+ stacklevel=find_stack_level(),
2042
+ )
2043
+
2044
+ super().setitem_inplace(indexer, value)
2045
+
2046
+ def idelete(self, indexer) -> SingleBlockManager:
2047
+ """
2048
+ Delete single location from SingleBlockManager.
2049
+
2050
+ Ensures that self.blocks doesn't become empty.
2051
+ """
2052
+ nb = self._block.delete(indexer)[0]
2053
+ self.blocks = (nb,)
2054
+ self.axes[0] = self.axes[0].delete(indexer)
2055
+ self._cache.clear()
2056
+ return self
2057
+
2058
+ def fast_xs(self, loc):
2059
+ """
2060
+ fast path for getting a cross-section
2061
+ return a view of the data
2062
+ """
2063
+ raise NotImplementedError("Use series._values[loc] instead")
2064
+
2065
+ def set_values(self, values: ArrayLike) -> None:
2066
+ """
2067
+ Set the values of the single block in place.
2068
+
2069
+ Use at your own risk! This does not check if the passed values are
2070
+ valid for the current Block/SingleBlockManager (length, dtype, etc),
2071
+ and this does not properly keep track of references.
2072
+ """
2073
+ # NOTE(CoW) Currently this is only used for FrameColumnApply.series_generator
2074
+ # which handles CoW by setting the refs manually if necessary
2075
+ self.blocks[0].values = values
2076
+ self.blocks[0]._mgr_locs = BlockPlacement(slice(len(values)))
2077
+
2078
+ def _equal_values(self, other: Self) -> bool:
2079
+ """
2080
+ Used in .equals defined in base class. Only check the column values
2081
+ assuming shape and indexes have already been checked.
2082
+ """
2083
+ # For SingleBlockManager (i.e. Series)
2084
+ if other.ndim != 1:
2085
+ return False
2086
+ left = self.blocks[0].values
2087
+ right = other.blocks[0].values
2088
+ return array_equals(left, right)
2089
+
2090
+
2091
+ # --------------------------------------------------------------------
2092
+ # Constructor Helpers
2093
+
2094
+
2095
+ def create_block_manager_from_blocks(
2096
+ blocks: list[Block],
2097
+ axes: list[Index],
2098
+ consolidate: bool = True,
2099
+ verify_integrity: bool = True,
2100
+ ) -> BlockManager:
2101
+ # If verify_integrity=False, then caller is responsible for checking
2102
+ # all(x.shape[-1] == len(axes[1]) for x in blocks)
2103
+ # sum(x.shape[0] for x in blocks) == len(axes[0])
2104
+ # set(x for blk in blocks for x in blk.mgr_locs) == set(range(len(axes[0])))
2105
+ # all(blk.ndim == 2 for blk in blocks)
2106
+ # This allows us to safely pass verify_integrity=False
2107
+
2108
+ try:
2109
+ mgr = BlockManager(blocks, axes, verify_integrity=verify_integrity)
2110
+
2111
+ except ValueError as err:
2112
+ arrays = [blk.values for blk in blocks]
2113
+ tot_items = sum(arr.shape[0] for arr in arrays)
2114
+ raise_construction_error(tot_items, arrays[0].shape[1:], axes, err)
2115
+
2116
+ if consolidate:
2117
+ mgr._consolidate_inplace()
2118
+ return mgr
2119
+
2120
+
2121
+ def create_block_manager_from_column_arrays(
2122
+ arrays: list[ArrayLike],
2123
+ axes: list[Index],
2124
+ consolidate: bool,
2125
+ refs: list,
2126
+ ) -> BlockManager:
2127
+ # Assertions disabled for performance (caller is responsible for verifying)
2128
+ # assert isinstance(axes, list)
2129
+ # assert all(isinstance(x, Index) for x in axes)
2130
+ # assert all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays)
2131
+ # assert all(type(x) is not NumpyExtensionArray for x in arrays)
2132
+ # assert all(x.ndim == 1 for x in arrays)
2133
+ # assert all(len(x) == len(axes[1]) for x in arrays)
2134
+ # assert len(arrays) == len(axes[0])
2135
+ # These last three are sufficient to allow us to safely pass
2136
+ # verify_integrity=False below.
2137
+
2138
+ try:
2139
+ blocks = _form_blocks(arrays, consolidate, refs)
2140
+ mgr = BlockManager(blocks, axes, verify_integrity=False)
2141
+ except ValueError as e:
2142
+ raise_construction_error(len(arrays), arrays[0].shape, axes, e)
2143
+ if consolidate:
2144
+ mgr._consolidate_inplace()
2145
+ return mgr
2146
+
2147
+
2148
+ def raise_construction_error(
2149
+ tot_items: int,
2150
+ block_shape: Shape,
2151
+ axes: list[Index],
2152
+ e: ValueError | None = None,
2153
+ ):
2154
+ """raise a helpful message about our construction"""
2155
+ passed = tuple(map(int, [tot_items] + list(block_shape)))
2156
+ # Correcting the user facing error message during dataframe construction
2157
+ if len(passed) <= 2:
2158
+ passed = passed[::-1]
2159
+
2160
+ implied = tuple(len(ax) for ax in axes)
2161
+ # Correcting the user facing error message during dataframe construction
2162
+ if len(implied) <= 2:
2163
+ implied = implied[::-1]
2164
+
2165
+ # We return the exception object instead of raising it so that we
2166
+ # can raise it in the caller; mypy plays better with that
2167
+ if passed == implied and e is not None:
2168
+ raise e
2169
+ if block_shape[0] == 0:
2170
+ raise ValueError("Empty data passed with indices specified.")
2171
+ raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
2172
+
2173
+
2174
+ # -----------------------------------------------------------------------
2175
+
2176
+
2177
+ def _grouping_func(tup: tuple[int, ArrayLike]) -> tuple[int, DtypeObj]:
2178
+ dtype = tup[1].dtype
2179
+
2180
+ if is_1d_only_ea_dtype(dtype):
2181
+ # We know these won't be consolidated, so don't need to group these.
2182
+ # This avoids expensive comparisons of CategoricalDtype objects
2183
+ sep = id(dtype)
2184
+ else:
2185
+ sep = 0
2186
+
2187
+ return sep, dtype
2188
+
2189
+
2190
+ def _form_blocks(arrays: list[ArrayLike], consolidate: bool, refs: list) -> list[Block]:
2191
+ tuples = list(enumerate(arrays))
2192
+
2193
+ if not consolidate:
2194
+ return _tuples_to_blocks_no_consolidate(tuples, refs)
2195
+
2196
+ # when consolidating, we can ignore refs (either stacking always copies,
2197
+ # or the EA is already copied in the calling dict_to_mgr)
2198
+
2199
+ # group by dtype
2200
+ grouper = itertools.groupby(tuples, _grouping_func)
2201
+
2202
+ nbs: list[Block] = []
2203
+ for (_, dtype), tup_block in grouper:
2204
+ block_type = get_block_type(dtype)
2205
+
2206
+ if isinstance(dtype, np.dtype):
2207
+ is_dtlike = dtype.kind in "mM"
2208
+
2209
+ if issubclass(dtype.type, (str, bytes)):
2210
+ dtype = np.dtype(object)
2211
+
2212
+ values, placement = _stack_arrays(list(tup_block), dtype)
2213
+ if is_dtlike:
2214
+ values = ensure_wrapped_if_datetimelike(values)
2215
+ blk = block_type(values, placement=BlockPlacement(placement), ndim=2)
2216
+ nbs.append(blk)
2217
+
2218
+ elif is_1d_only_ea_dtype(dtype):
2219
+ dtype_blocks = [
2220
+ block_type(x[1], placement=BlockPlacement(x[0]), ndim=2)
2221
+ for x in tup_block
2222
+ ]
2223
+ nbs.extend(dtype_blocks)
2224
+
2225
+ else:
2226
+ dtype_blocks = [
2227
+ block_type(
2228
+ ensure_block_shape(x[1], 2), placement=BlockPlacement(x[0]), ndim=2
2229
+ )
2230
+ for x in tup_block
2231
+ ]
2232
+ nbs.extend(dtype_blocks)
2233
+ return nbs
2234
+
2235
+
2236
+ def _tuples_to_blocks_no_consolidate(tuples, refs) -> list[Block]:
2237
+ # tuples produced within _form_blocks are of the form (placement, array)
2238
+ return [
2239
+ new_block_2d(
2240
+ ensure_block_shape(arr, ndim=2), placement=BlockPlacement(i), refs=ref
2241
+ )
2242
+ for ((i, arr), ref) in zip(tuples, refs)
2243
+ ]
2244
+
2245
+
2246
+ def _stack_arrays(tuples, dtype: np.dtype):
2247
+ placement, arrays = zip(*tuples)
2248
+
2249
+ first = arrays[0]
2250
+ shape = (len(arrays),) + first.shape
2251
+
2252
+ stacked = np.empty(shape, dtype=dtype)
2253
+ for i, arr in enumerate(arrays):
2254
+ stacked[i] = arr
2255
+
2256
+ return stacked, placement
2257
+
2258
+
2259
+ def _consolidate(blocks: tuple[Block, ...]) -> tuple[Block, ...]:
2260
+ """
2261
+ Merge blocks having same dtype, exclude non-consolidating blocks
2262
+ """
2263
+ # sort by _can_consolidate, dtype
2264
+ gkey = lambda x: x._consolidate_key
2265
+ grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
2266
+
2267
+ new_blocks: list[Block] = []
2268
+ for (_can_consolidate, dtype), group_blocks in grouper:
2269
+ merged_blocks, _ = _merge_blocks(
2270
+ list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate
2271
+ )
2272
+ new_blocks = extend_blocks(merged_blocks, new_blocks)
2273
+ return tuple(new_blocks)
2274
+
2275
+
2276
+ def _merge_blocks(
2277
+ blocks: list[Block], dtype: DtypeObj, can_consolidate: bool
2278
+ ) -> tuple[list[Block], bool]:
2279
+ if len(blocks) == 1:
2280
+ return blocks, False
2281
+
2282
+ if can_consolidate:
2283
+ # TODO: optimization potential in case all mgrs contain slices and
2284
+ # combination of those slices is a slice, too.
2285
+ new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
2286
+
2287
+ new_values: ArrayLike
2288
+
2289
+ if isinstance(blocks[0].dtype, np.dtype):
2290
+ # error: List comprehension has incompatible type List[Union[ndarray,
2291
+ # ExtensionArray]]; expected List[Union[complex, generic,
2292
+ # Sequence[Union[int, float, complex, str, bytes, generic]],
2293
+ # Sequence[Sequence[Any]], SupportsArray]]
2294
+ new_values = np.vstack([b.values for b in blocks]) # type: ignore[misc]
2295
+ else:
2296
+ bvals = [blk.values for blk in blocks]
2297
+ bvals2 = cast(Sequence[NDArrayBackedExtensionArray], bvals)
2298
+ new_values = bvals2[0]._concat_same_type(bvals2, axis=0)
2299
+
2300
+ argsort = np.argsort(new_mgr_locs)
2301
+ new_values = new_values[argsort]
2302
+ new_mgr_locs = new_mgr_locs[argsort]
2303
+
2304
+ bp = BlockPlacement(new_mgr_locs)
2305
+ return [new_block_2d(new_values, placement=bp)], True
2306
+
2307
+ # can't consolidate --> no merge
2308
+ return blocks, False
2309
+
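The consolidation core in miniature, as a plain-numpy sketch (arrays invented): stack same-dtype 2-D blocks, then restore the frame's column order by argsorting the concatenated mgr_locs.

import numpy as np

values = [np.array([[1, 2]]), np.array([[5, 6]])]
mgr_locs = [np.array([1]), np.array([0])]   # frame position of each block

new_locs = np.concatenate(mgr_locs)
stacked = np.vstack(values)
order = np.argsort(new_locs)
assert (stacked[order] == [[5, 6], [1, 2]]).all()
assert (new_locs[order] == [0, 1]).all()
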
2310
+
2311
+ def _fast_count_smallints(arr: npt.NDArray[np.intp]):
2312
+ """Faster version of set(arr) for sequences of small numbers."""
2313
+ counts = np.bincount(arr)
2314
+ nz = counts.nonzero()[0]
2315
+ # Note: list(zip(...)) outperforms list(np.c_[nz, counts[nz]]) here,
2316
+ # in one benchmark by a factor of 11
2317
+ return zip(nz, counts[nz])
2318
+
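The bincount trick above, spelled out (array invented):

import numpy as np

arr = np.array([0, 2, 2, 5], dtype=np.intp)
counts = np.bincount(arr)           # counts indexed by value
nz = counts.nonzero()[0]            # the values that actually occur
assert list(zip(nz, counts[nz])) == [(0, 1), (2, 2), (5, 1)]
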
2319
+
2320
+ def _preprocess_slice_or_indexer(
2321
+ slice_or_indexer: slice | np.ndarray, length: int, allow_fill: bool
2322
+ ):
2323
+ if isinstance(slice_or_indexer, slice):
2324
+ return (
2325
+ "slice",
2326
+ slice_or_indexer,
2327
+ libinternals.slice_len(slice_or_indexer, length),
2328
+ )
2329
+ else:
2330
+ if (
2331
+ not isinstance(slice_or_indexer, np.ndarray)
2332
+ or slice_or_indexer.dtype.kind != "i"
2333
+ ):
2334
+ dtype = getattr(slice_or_indexer, "dtype", None)
2335
+ raise TypeError(type(slice_or_indexer), dtype)
2336
+
2337
+ indexer = ensure_platform_int(slice_or_indexer)
2338
+ if not allow_fill:
2339
+ indexer = maybe_convert_indices(indexer, length)
2340
+ return "fancy", indexer, len(indexer)
2341
+
2342
+
2343
+ def make_na_array(dtype: DtypeObj, shape: Shape, fill_value) -> ArrayLike:
2344
+ if isinstance(dtype, DatetimeTZDtype):
2345
+ # NB: exclude e.g. pyarrow[dt64tz] dtypes
2346
+ ts = Timestamp(fill_value).as_unit(dtype.unit)
2347
+ i8values = np.full(shape, ts._value)
2348
+ dt64values = i8values.view(f"M8[{dtype.unit}]")
2349
+ return DatetimeArray._simple_new(dt64values, dtype=dtype)
2350
+
2351
+ elif is_1d_only_ea_dtype(dtype):
2352
+ dtype = cast(ExtensionDtype, dtype)
2353
+ cls = dtype.construct_array_type()
2354
+
2355
+ missing_arr = cls._from_sequence([], dtype=dtype)
2356
+ ncols, nrows = shape
2357
+ assert ncols == 1, ncols
2358
+ empty_arr = -1 * np.ones((nrows,), dtype=np.intp)
2359
+ return missing_arr.take(empty_arr, allow_fill=True, fill_value=fill_value)
2360
+ elif isinstance(dtype, ExtensionDtype):
2361
+ # TODO: no tests get here, a handful would if we disabled
2362
+ # the dt64tz special-case above (which is faster)
2363
+ cls = dtype.construct_array_type()
2364
+ missing_arr = cls._empty(shape=shape, dtype=dtype)
2365
+ missing_arr[:] = fill_value
2366
+ return missing_arr
2367
+ else:
2368
+ # NB: we should never get here with dtype integer or bool;
2369
+ # if we did, the missing_arr.fill would cast to gibberish
2370
+ missing_arr = np.empty(shape, dtype=dtype)
2371
+ missing_arr.fill(fill_value)
2372
+
2373
+ if dtype.kind in "mM":
2374
+ missing_arr = ensure_wrapped_if_datetimelike(missing_arr)
2375
+ return missing_arr
venv/lib/python3.10/site-packages/pandas/core/internals/ops.py ADDED
@@ -0,0 +1,154 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ NamedTuple,
6
+ )
7
+
8
+ from pandas.core.dtypes.common import is_1d_only_ea_dtype
9
+
10
+ if TYPE_CHECKING:
11
+ from collections.abc import Iterator
12
+
13
+ from pandas._libs.internals import BlockPlacement
14
+ from pandas._typing import ArrayLike
15
+
16
+ from pandas.core.internals.blocks import Block
17
+ from pandas.core.internals.managers import BlockManager
18
+
19
+
20
+ class BlockPairInfo(NamedTuple):
21
+ lvals: ArrayLike
22
+ rvals: ArrayLike
23
+ locs: BlockPlacement
24
+ left_ea: bool
25
+ right_ea: bool
26
+ rblk: Block
27
+
28
+
29
+ def _iter_block_pairs(
30
+ left: BlockManager, right: BlockManager
31
+ ) -> Iterator[BlockPairInfo]:
32
+ # At this point we have already checked the parent DataFrames for
33
+ # assert rframe._indexed_same(lframe)
34
+
35
+ for blk in left.blocks:
36
+ locs = blk.mgr_locs
37
+ blk_vals = blk.values
38
+
39
+ left_ea = blk_vals.ndim == 1
40
+
41
+ rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True)
42
+
43
+ # Assertions are disabled for performance, but should hold:
44
+ # if left_ea:
45
+ # assert len(locs) == 1, locs
46
+ # assert len(rblks) == 1, rblks
47
+ # assert rblks[0].shape[0] == 1, rblks[0].shape
48
+
49
+ for rblk in rblks:
50
+ right_ea = rblk.values.ndim == 1
51
+
52
+ lvals, rvals = _get_same_shape_values(blk, rblk, left_ea, right_ea)
53
+ info = BlockPairInfo(lvals, rvals, locs, left_ea, right_ea, rblk)
54
+ yield info
55
+
56
+
57
+ def operate_blockwise(
58
+ left: BlockManager, right: BlockManager, array_op
59
+ ) -> BlockManager:
60
+ # At this point we have already checked the parent DataFrames for
61
+ # assert rframe._indexed_same(lframe)
62
+
63
+ res_blks: list[Block] = []
64
+ for lvals, rvals, locs, left_ea, right_ea, rblk in _iter_block_pairs(left, right):
65
+ res_values = array_op(lvals, rvals)
66
+ if (
67
+ left_ea
68
+ and not right_ea
69
+ and hasattr(res_values, "reshape")
70
+ and not is_1d_only_ea_dtype(res_values.dtype)
71
+ ):
72
+ res_values = res_values.reshape(1, -1)
73
+ nbs = rblk._split_op_result(res_values)
74
+
75
+ # Assertions are disabled for performance, but should hold:
76
+ # if right_ea or left_ea:
77
+ # assert len(nbs) == 1
78
+ # else:
79
+ # assert res_values.shape == lvals.shape, (res_values.shape, lvals.shape)
80
+
81
+ _reset_block_mgr_locs(nbs, locs)
82
+
83
+ res_blks.extend(nbs)
84
+
85
+ # Assertions are disabled for performance, but should hold:
86
+ # slocs = {y for nb in res_blks for y in nb.mgr_locs.as_array}
87
+ # nlocs = sum(len(nb.mgr_locs.as_array) for nb in res_blks)
88
+ # assert nlocs == len(left.items), (nlocs, len(left.items))
89
+ # assert len(slocs) == nlocs, (len(slocs), nlocs)
90
+ # assert slocs == set(range(nlocs)), slocs
91
+
92
+ new_mgr = type(right)(tuple(res_blks), axes=right.axes, verify_integrity=False)
93
+ return new_mgr
94
+
95
+
96
+ def _reset_block_mgr_locs(nbs: list[Block], locs) -> None:
97
+ """
98
+ Reset mgr_locs to correspond to our original DataFrame.
99
+ """
100
+ for nb in nbs:
101
+ nblocs = locs[nb.mgr_locs.indexer]
102
+ nb.mgr_locs = nblocs
103
+ # Assertions are disabled for performance, but should hold:
104
+ # assert len(nblocs) == nb.shape[0], (len(nblocs), nb.shape)
105
+ # assert all(x in locs.as_array for x in nb.mgr_locs.as_array)
106
+
107
+
108
+ def _get_same_shape_values(
109
+ lblk: Block, rblk: Block, left_ea: bool, right_ea: bool
110
+ ) -> tuple[ArrayLike, ArrayLike]:
111
+ """
112
+ Slice lblk.values to align with rblk. Squeeze if we have EAs.
113
+ """
114
+ lvals = lblk.values
115
+ rvals = rblk.values
116
+
117
+ # Require that the indexing into lvals be slice-like
118
+ assert rblk.mgr_locs.is_slice_like, rblk.mgr_locs
119
+
120
+ # TODO(EA2D): with 2D EAs only this first clause would be needed
121
+ if not (left_ea or right_ea):
122
+ # error: No overload variant of "__getitem__" of "ExtensionArray" matches
123
+ # argument type "Tuple[Union[ndarray, slice], slice]"
124
+ lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[call-overload]
125
+ assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)
126
+ elif left_ea and right_ea:
127
+ assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)
128
+ elif right_ea:
129
+ # lvals are 2D, rvals are 1D
130
+
131
+ # error: No overload variant of "__getitem__" of "ExtensionArray" matches
132
+ # argument type "Tuple[Union[ndarray, slice], slice]"
133
+ lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[call-overload]
134
+ assert lvals.shape[0] == 1, lvals.shape
135
+ lvals = lvals[0, :]
136
+ else:
137
+ # lvals are 1D, rvals are 2D
138
+ assert rvals.shape[0] == 1, rvals.shape
139
+ # error: No overload variant of "__getitem__" of "ExtensionArray" matches
140
+ # argument type "Tuple[int, slice]"
141
+ rvals = rvals[0, :] # type: ignore[call-overload]
142
+
143
+ return lvals, rvals
144
+
145
+
146
+ def blockwise_all(left: BlockManager, right: BlockManager, op) -> bool:
147
+ """
148
+ Blockwise `all` reduction.
149
+ """
150
+ for info in _iter_block_pairs(left, right):
151
+ res = op(info.lvals, info.rvals)
152
+ if not res:
153
+ return False
154
+ return True
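A sketch of where these helpers surface (frames invented): aligned frame arithmetic runs block-by-block through `operate_blockwise`, while `DataFrame.equals` bottoms out in `blockwise_all` with an array-equality op.

import pandas as pd

left = pd.DataFrame({"a": [1, 2], "b": [3, 4]})
right = pd.DataFrame({"a": [10, 20], "b": [30, 40]})

out = left + right                    # operate_blockwise under the hood
assert out.equals(pd.DataFrame({"a": [11, 22], "b": [33, 44]}))
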
venv/lib/python3.10/site-packages/pandas/core/ops/__init__.py ADDED
@@ -0,0 +1,93 @@
1
+ """
2
+ Arithmetic operations for PandasObjects
3
+
4
+ This is not a public API.
5
+ """
6
+ from __future__ import annotations
7
+
8
+ from pandas.core.ops.array_ops import (
9
+ arithmetic_op,
10
+ comp_method_OBJECT_ARRAY,
11
+ comparison_op,
12
+ fill_binop,
13
+ get_array_op,
14
+ logical_op,
15
+ maybe_prepare_scalar_for_op,
16
+ )
17
+ from pandas.core.ops.common import (
18
+ get_op_result_name,
19
+ unpack_zerodim_and_defer,
20
+ )
21
+ from pandas.core.ops.docstrings import make_flex_doc
22
+ from pandas.core.ops.invalid import invalid_comparison
23
+ from pandas.core.ops.mask_ops import (
24
+ kleene_and,
25
+ kleene_or,
26
+ kleene_xor,
27
+ )
28
+ from pandas.core.roperator import (
29
+ radd,
30
+ rand_,
31
+ rdiv,
32
+ rdivmod,
33
+ rfloordiv,
34
+ rmod,
35
+ rmul,
36
+ ror_,
37
+ rpow,
38
+ rsub,
39
+ rtruediv,
40
+ rxor,
41
+ )
42
+
43
+ # -----------------------------------------------------------------------------
44
+ # constants
45
+ ARITHMETIC_BINOPS: set[str] = {
46
+ "add",
47
+ "sub",
48
+ "mul",
49
+ "pow",
50
+ "mod",
51
+ "floordiv",
52
+ "truediv",
53
+ "divmod",
54
+ "radd",
55
+ "rsub",
56
+ "rmul",
57
+ "rpow",
58
+ "rmod",
59
+ "rfloordiv",
60
+ "rtruediv",
61
+ "rdivmod",
62
+ }
63
+
64
+
65
+ __all__ = [
66
+ "ARITHMETIC_BINOPS",
67
+ "arithmetic_op",
68
+ "comparison_op",
69
+ "comp_method_OBJECT_ARRAY",
70
+ "invalid_comparison",
71
+ "fill_binop",
72
+ "kleene_and",
73
+ "kleene_or",
74
+ "kleene_xor",
75
+ "logical_op",
76
+ "make_flex_doc",
77
+ "radd",
78
+ "rand_",
79
+ "rdiv",
80
+ "rdivmod",
81
+ "rfloordiv",
82
+ "rmod",
83
+ "rmul",
84
+ "ror_",
85
+ "rpow",
86
+ "rsub",
87
+ "rtruediv",
88
+ "rxor",
89
+ "unpack_zerodim_and_defer",
90
+ "get_op_result_name",
91
+ "maybe_prepare_scalar_for_op",
92
+ "get_array_op",
93
+ ]
venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.42 kB).
 
venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc ADDED
Binary file (11.5 kB).
 
venv/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc ADDED
Binary file (3.41 kB).