applied-ai-018 committed
Commit 4e9ce68 · verified · 1 parent: 82573e5

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +1 -0
  2. llmeval-env/lib/python3.10/site-packages/numexpr/interpreter.cpython-310-x86_64-linux-gnu.so +3 -0
  3. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/__init__.cpython-310.pyc +0 -0
  4. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/accessor.cpython-310.pyc +0 -0
  5. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/algorithms.cpython-310.pyc +0 -0
  6. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/api.cpython-310.pyc +0 -0
  7. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/base.cpython-310.pyc +0 -0
  8. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/common.cpython-310.pyc +0 -0
  9. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/construction.cpython-310.pyc +0 -0
  10. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/flags.cpython-310.pyc +0 -0
  11. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/frame.cpython-310.pyc +0 -0
  12. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/generic.cpython-310.pyc +0 -0
  13. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/indexing.cpython-310.pyc +0 -0
  14. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/resample.cpython-310.pyc +0 -0
  15. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/roperator.cpython-310.pyc +0 -0
  16. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/sample.cpython-310.pyc +0 -0
  17. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/shared_docs.cpython-310.pyc +0 -0
  18. llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/sorting.cpython-310.pyc +0 -0
  19. llmeval-env/lib/python3.10/site-packages/pandas/core/indexers/__init__.py +31 -0
  20. llmeval-env/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/__init__.cpython-310.pyc +0 -0
  21. llmeval-env/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/objects.cpython-310.pyc +0 -0
  22. llmeval-env/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc +0 -0
  23. llmeval-env/lib/python3.10/site-packages/pandas/core/indexers/objects.py +453 -0
  24. llmeval-env/lib/python3.10/site-packages/pandas/core/indexers/utils.py +553 -0
  25. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__init__.py +85 -0
  26. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/__init__.cpython-310.pyc +0 -0
  27. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/api.cpython-310.pyc +0 -0
  28. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/array_manager.cpython-310.pyc +0 -0
  29. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/base.cpython-310.pyc +0 -0
  30. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/blocks.cpython-310.pyc +0 -0
  31. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/concat.cpython-310.pyc +0 -0
  32. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/construction.cpython-310.pyc +0 -0
  33. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/managers.cpython-310.pyc +0 -0
  34. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/ops.cpython-310.pyc +0 -0
  35. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/api.py +156 -0
  36. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/array_manager.py +1340 -0
  37. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/base.py +407 -0
  38. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/blocks.py +0 -0
  39. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/concat.py +598 -0
  40. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/construction.py +1072 -0
  41. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/managers.py +2375 -0
  42. llmeval-env/lib/python3.10/site-packages/pandas/core/internals/ops.py +154 -0
  43. llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__init__.py +93 -0
  44. llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/__init__.cpython-310.pyc +0 -0
  45. llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc +0 -0
  46. llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc +0 -0
  47. llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/dispatch.cpython-310.pyc +0 -0
  48. llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/docstrings.cpython-310.pyc +0 -0
  49. llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/invalid.cpython-310.pyc +0 -0
  50. llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/mask_ops.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -189,3 +189,4 @@ llmeval-env/lib/python3.10/site-packages/scipy/stats/_unuran/unuran_wrapper.cpyt
  llmeval-env/lib/python3.10/site-packages/nvidia/cudnn/lib/libcudnn_adv_infer.so.8 filter=lfs diff=lfs merge=lfs -text
  llmeval-env/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text
  llmeval-env/lib/python3.10/site-packages/nvidia/cusparse/lib/libcusparse.so.12 filter=lfs diff=lfs merge=lfs -text
+ llmeval-env/lib/python3.10/site-packages/numexpr/interpreter.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
llmeval-env/lib/python3.10/site-packages/numexpr/interpreter.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bcb0c6481fb539a0d205a98b8ad9860f3226a3b80ec3dd81200970415199cab5
+ size 1150680
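
Note: the three `+` lines above are the entire checked-in content of the shared library — a Git LFS pointer recording the spec version, the SHA-256 object id, and the byte size of the real binary, which matches the `.gitattributes` rule added in the previous hunk. As a hedged illustration only (not part of this commit), such a pointer can be split into its fields with a few lines of Python:

# Minimal sketch: parse a Git LFS pointer file into key/value fields.
# Assumes the standard "key value" line format shown in the diff above.
def parse_lfs_pointer(text: str) -> dict[str, str]:
    fields: dict[str, str] = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = parse_lfs_pointer(
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:bcb0c6481fb539a0d205a98b8ad9860f3226a3b80ec3dd81200970415199cab5\n"
    "size 1150680\n"
)
assert pointer["size"] == "1150680"  # byte size recorded in the pointer above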
llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (184 Bytes).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/accessor.cpython-310.pyc ADDED
Binary file (11.2 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/algorithms.cpython-310.pyc ADDED
Binary file (39.6 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/api.cpython-310.pyc ADDED
Binary file (2.62 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/base.cpython-310.pyc ADDED
Binary file (37.8 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/common.cpython-310.pyc ADDED
Binary file (17.1 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/construction.cpython-310.pyc ADDED
Binary file (19.7 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/flags.cpython-310.pyc ADDED
Binary file (4.35 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/frame.cpython-310.pyc ADDED
Binary file (363 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/generic.cpython-310.pyc ADDED
Binary file (386 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/indexing.cpython-310.pyc ADDED
Binary file (68.8 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/resample.cpython-310.pyc ADDED
Binary file (74.8 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/roperator.cpython-310.pyc ADDED
Binary file (1.81 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/sample.cpython-310.pyc ADDED
Binary file (3.97 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/shared_docs.cpython-310.pyc ADDED
Binary file (30.2 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/__pycache__/sorting.cpython-310.pyc ADDED
Binary file (19.9 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/indexers/__init__.py ADDED
@@ -0,0 +1,31 @@
+ from pandas.core.indexers.utils import (
+     check_array_indexer,
+     check_key_length,
+     check_setitem_lengths,
+     disallow_ndim_indexing,
+     is_empty_indexer,
+     is_list_like_indexer,
+     is_scalar_indexer,
+     is_valid_positional_slice,
+     length_of_indexer,
+     maybe_convert_indices,
+     unpack_1tuple,
+     unpack_tuple_and_ellipses,
+     validate_indices,
+ )
+
+ __all__ = [
+     "is_valid_positional_slice",
+     "is_list_like_indexer",
+     "is_scalar_indexer",
+     "is_empty_indexer",
+     "check_setitem_lengths",
+     "validate_indices",
+     "maybe_convert_indices",
+     "length_of_indexer",
+     "disallow_ndim_indexing",
+     "unpack_1tuple",
+     "check_key_length",
+     "check_array_indexer",
+     "unpack_tuple_and_ellipses",
+ ]
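
This package `__init__` only re-exports the helpers defined in `pandas.core.indexers.utils`. A hedged usage sketch (these are internal, non-public imports, shown for illustration only):

from pandas.core.indexers import length_of_indexer

# length_of_indexer computes the expected length of target[indexer];
# for slice(0, 10, 2) over a length-10 target this is (10 - 0 + 1) // 2 = 5.
print(length_of_indexer(slice(0, 10, 2), list(range(10))))  # 5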
llmeval-env/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (710 Bytes).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/objects.cpython-310.pyc ADDED
Binary file (11.6 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/indexers/__pycache__/utils.cpython-310.pyc ADDED
Binary file (13.5 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/indexers/objects.py ADDED
@@ -0,0 +1,453 @@
+ """Indexer objects for computing start/end window bounds for rolling operations"""
+ from __future__ import annotations
+
+ from datetime import timedelta
+
+ import numpy as np
+
+ from pandas._libs.tslibs import BaseOffset
+ from pandas._libs.window.indexers import calculate_variable_window_bounds
+ from pandas.util._decorators import Appender
+
+ from pandas.core.dtypes.common import ensure_platform_int
+
+ from pandas.core.indexes.datetimes import DatetimeIndex
+
+ from pandas.tseries.offsets import Nano
+
+ get_window_bounds_doc = """
+ Computes the bounds of a window.
+
+ Parameters
+ ----------
+ num_values : int, default 0
+     number of values that will be aggregated over
+ window_size : int, default 0
+     the number of rows in a window
+ min_periods : int, default None
+     min_periods passed from the top level rolling API
+ center : bool, default None
+     center passed from the top level rolling API
+ closed : str, default None
+     closed passed from the top level rolling API
+ step : int, default None
+     step passed from the top level rolling API
+     .. versionadded:: 1.5
+ win_type : str, default None
+     win_type passed from the top level rolling API
+
+ Returns
+ -------
+ A tuple of ndarray[int64]s, indicating the boundaries of each
+ window
+ """
+
+
+ class BaseIndexer:
+     """
+     Base class for window bounds calculations.
+
+     Examples
+     --------
+     >>> from pandas.api.indexers import BaseIndexer
+     >>> class CustomIndexer(BaseIndexer):
+     ...     def get_window_bounds(self, num_values, min_periods, center, closed, step):
+     ...         start = np.empty(num_values, dtype=np.int64)
+     ...         end = np.empty(num_values, dtype=np.int64)
+     ...         for i in range(num_values):
+     ...             start[i] = i
+     ...             end[i] = i + self.window_size
+     ...         return start, end
+     >>> df = pd.DataFrame({"values": range(5)})
+     >>> indexer = CustomIndexer(window_size=2)
+     >>> df.rolling(indexer).sum()
+        values
+     0     1.0
+     1     3.0
+     2     5.0
+     3     7.0
+     4     4.0
+     """
+
+     def __init__(
+         self, index_array: np.ndarray | None = None, window_size: int = 0, **kwargs
+     ) -> None:
+         self.index_array = index_array
+         self.window_size = window_size
+         # Set user defined kwargs as attributes that can be used in get_window_bounds
+         for key, value in kwargs.items():
+             setattr(self, key, value)
+
+     @Appender(get_window_bounds_doc)
+     def get_window_bounds(
+         self,
+         num_values: int = 0,
+         min_periods: int | None = None,
+         center: bool | None = None,
+         closed: str | None = None,
+         step: int | None = None,
+     ) -> tuple[np.ndarray, np.ndarray]:
+         raise NotImplementedError
+
+
+ class FixedWindowIndexer(BaseIndexer):
+     """Creates window boundaries that are of fixed length."""
+
+     @Appender(get_window_bounds_doc)
+     def get_window_bounds(
+         self,
+         num_values: int = 0,
+         min_periods: int | None = None,
+         center: bool | None = None,
+         closed: str | None = None,
+         step: int | None = None,
+     ) -> tuple[np.ndarray, np.ndarray]:
+         if center or self.window_size == 0:
+             offset = (self.window_size - 1) // 2
+         else:
+             offset = 0
+
+         end = np.arange(1 + offset, num_values + 1 + offset, step, dtype="int64")
+         start = end - self.window_size
+         if closed in ["left", "both"]:
+             start -= 1
+         if closed in ["left", "neither"]:
+             end -= 1
+
+         end = np.clip(end, 0, num_values)
+         start = np.clip(start, 0, num_values)
+
+         return start, end
+
+
+ class VariableWindowIndexer(BaseIndexer):
+     """Creates window boundaries that are of variable length, namely for time series."""
+
+     @Appender(get_window_bounds_doc)
+     def get_window_bounds(
+         self,
+         num_values: int = 0,
+         min_periods: int | None = None,
+         center: bool | None = None,
+         closed: str | None = None,
+         step: int | None = None,
+     ) -> tuple[np.ndarray, np.ndarray]:
+         # error: Argument 4 to "calculate_variable_window_bounds" has incompatible
+         # type "Optional[bool]"; expected "bool"
+         # error: Argument 6 to "calculate_variable_window_bounds" has incompatible
+         # type "Optional[ndarray]"; expected "ndarray"
+         return calculate_variable_window_bounds(
+             num_values,
+             self.window_size,
+             min_periods,
+             center,  # type: ignore[arg-type]
+             closed,
+             self.index_array,  # type: ignore[arg-type]
+         )
+
+
+ class VariableOffsetWindowIndexer(BaseIndexer):
+     """
+     Calculate window boundaries based on a non-fixed offset such as a BusinessDay.
+
+     Examples
+     --------
+     >>> from pandas.api.indexers import VariableOffsetWindowIndexer
+     >>> df = pd.DataFrame(range(10), index=pd.date_range("2020", periods=10))
+     >>> offset = pd.offsets.BDay(1)
+     >>> indexer = VariableOffsetWindowIndexer(index=df.index, offset=offset)
+     >>> df
+                 0
+     2020-01-01  0
+     2020-01-02  1
+     2020-01-03  2
+     2020-01-04  3
+     2020-01-05  4
+     2020-01-06  5
+     2020-01-07  6
+     2020-01-08  7
+     2020-01-09  8
+     2020-01-10  9
+     >>> df.rolling(indexer).sum()
+                    0
+     2020-01-01   0.0
+     2020-01-02   1.0
+     2020-01-03   2.0
+     2020-01-04   3.0
+     2020-01-05   7.0
+     2020-01-06  12.0
+     2020-01-07   6.0
+     2020-01-08   7.0
+     2020-01-09   8.0
+     2020-01-10   9.0
+     """
+
+     def __init__(
+         self,
+         index_array: np.ndarray | None = None,
+         window_size: int = 0,
+         index: DatetimeIndex | None = None,
+         offset: BaseOffset | None = None,
+         **kwargs,
+     ) -> None:
+         super().__init__(index_array, window_size, **kwargs)
+         if not isinstance(index, DatetimeIndex):
+             raise ValueError("index must be a DatetimeIndex.")
+         self.index = index
+         if not isinstance(offset, BaseOffset):
+             raise ValueError("offset must be a DateOffset-like object.")
+         self.offset = offset
+
+     @Appender(get_window_bounds_doc)
+     def get_window_bounds(
+         self,
+         num_values: int = 0,
+         min_periods: int | None = None,
+         center: bool | None = None,
+         closed: str | None = None,
+         step: int | None = None,
+     ) -> tuple[np.ndarray, np.ndarray]:
+         if step is not None:
+             raise NotImplementedError("step not implemented for variable offset window")
+         if num_values <= 0:
+             return np.empty(0, dtype="int64"), np.empty(0, dtype="int64")
+
+         # if windows is variable, default is 'right', otherwise default is 'both'
+         if closed is None:
+             closed = "right" if self.index is not None else "both"
+
+         right_closed = closed in ["right", "both"]
+         left_closed = closed in ["left", "both"]
+
+         if self.index[num_values - 1] < self.index[0]:
+             index_growth_sign = -1
+         else:
+             index_growth_sign = 1
+         offset_diff = index_growth_sign * self.offset
+
+         start = np.empty(num_values, dtype="int64")
+         start.fill(-1)
+         end = np.empty(num_values, dtype="int64")
+         end.fill(-1)
+
+         start[0] = 0
+
+         # right endpoint is closed
+         if right_closed:
+             end[0] = 1
+         # right endpoint is open
+         else:
+             end[0] = 0
+
+         zero = timedelta(0)
+         # start is start of slice interval (including)
+         # end is end of slice interval (not including)
+         for i in range(1, num_values):
+             end_bound = self.index[i]
+             start_bound = end_bound - offset_diff
+
+             # left endpoint is closed
+             if left_closed:
+                 start_bound -= Nano(1)
+
+             # advance the start bound until we are
+             # within the constraint
+             start[i] = i
+             for j in range(start[i - 1], i):
+                 start_diff = (self.index[j] - start_bound) * index_growth_sign
+                 if start_diff > zero:
+                     start[i] = j
+                     break
+
+             # end bound is previous end
+             # or current index
+             end_diff = (self.index[end[i - 1]] - end_bound) * index_growth_sign
+             if end_diff == zero and not right_closed:
+                 end[i] = end[i - 1] + 1
+             elif end_diff <= zero:
+                 end[i] = i + 1
+             else:
+                 end[i] = end[i - 1]
+
+             # right endpoint is open
+             if not right_closed:
+                 end[i] -= 1
+
+         return start, end
+
+
+ class ExpandingIndexer(BaseIndexer):
+     """Calculate expanding window bounds, mimicking df.expanding()"""
+
+     @Appender(get_window_bounds_doc)
+     def get_window_bounds(
+         self,
+         num_values: int = 0,
+         min_periods: int | None = None,
+         center: bool | None = None,
+         closed: str | None = None,
+         step: int | None = None,
+     ) -> tuple[np.ndarray, np.ndarray]:
+         return (
+             np.zeros(num_values, dtype=np.int64),
+             np.arange(1, num_values + 1, dtype=np.int64),
+         )
+
+
+ class FixedForwardWindowIndexer(BaseIndexer):
+     """
+     Creates window boundaries for fixed-length windows that include the current row.
+
+     Examples
+     --------
+     >>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
+     >>> df
+          B
+     0  0.0
+     1  1.0
+     2  2.0
+     3  NaN
+     4  4.0
+
+     >>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
+     >>> df.rolling(window=indexer, min_periods=1).sum()
+          B
+     0  1.0
+     1  3.0
+     2  2.0
+     3  4.0
+     4  4.0
+     """
+
+     @Appender(get_window_bounds_doc)
+     def get_window_bounds(
+         self,
+         num_values: int = 0,
+         min_periods: int | None = None,
+         center: bool | None = None,
+         closed: str | None = None,
+         step: int | None = None,
+     ) -> tuple[np.ndarray, np.ndarray]:
+         if center:
+             raise ValueError("Forward-looking windows can't have center=True")
+         if closed is not None:
+             raise ValueError(
+                 "Forward-looking windows don't support setting the closed argument"
+             )
+         if step is None:
+             step = 1
+
+         start = np.arange(0, num_values, step, dtype="int64")
+         end = start + self.window_size
+         if self.window_size:
+             end = np.clip(end, 0, num_values)
+
+         return start, end
+
+
+ class GroupbyIndexer(BaseIndexer):
+     """Calculate bounds to compute groupby rolling, mimicking df.groupby().rolling()"""
+
+     def __init__(
+         self,
+         index_array: np.ndarray | None = None,
+         window_size: int | BaseIndexer = 0,
+         groupby_indices: dict | None = None,
+         window_indexer: type[BaseIndexer] = BaseIndexer,
+         indexer_kwargs: dict | None = None,
+         **kwargs,
+     ) -> None:
+         """
+         Parameters
+         ----------
+         index_array : np.ndarray or None
+             np.ndarray of the index of the original object that we are performing
+             a chained groupby operation over. This index has been pre-sorted relative to
+             the groups
+         window_size : int or BaseIndexer
+             window size during the windowing operation
+         groupby_indices : dict or None
+             dict of {group label: [positional index of rows belonging to the group]}
+         window_indexer : BaseIndexer
+             BaseIndexer class determining the start and end bounds of each group
+         indexer_kwargs : dict or None
+             Custom kwargs to be passed to window_indexer
+         **kwargs :
+             keyword arguments that will be available when get_window_bounds is called
+         """
+         self.groupby_indices = groupby_indices or {}
+         self.window_indexer = window_indexer
+         self.indexer_kwargs = indexer_kwargs.copy() if indexer_kwargs else {}
+         super().__init__(
+             index_array=index_array,
+             window_size=self.indexer_kwargs.pop("window_size", window_size),
+             **kwargs,
+         )
+
+     @Appender(get_window_bounds_doc)
+     def get_window_bounds(
+         self,
+         num_values: int = 0,
+         min_periods: int | None = None,
+         center: bool | None = None,
+         closed: str | None = None,
+         step: int | None = None,
+     ) -> tuple[np.ndarray, np.ndarray]:
+         # 1) For each group, get the indices that belong to the group
+         # 2) Use the indices to calculate the start & end bounds of the window
+         # 3) Append the window bounds in group order
+         start_arrays = []
+         end_arrays = []
+         window_indices_start = 0
+         for key, indices in self.groupby_indices.items():
+             index_array: np.ndarray | None
+
+             if self.index_array is not None:
+                 index_array = self.index_array.take(ensure_platform_int(indices))
+             else:
+                 index_array = self.index_array
+             indexer = self.window_indexer(
+                 index_array=index_array,
+                 window_size=self.window_size,
+                 **self.indexer_kwargs,
+             )
+             start, end = indexer.get_window_bounds(
+                 len(indices), min_periods, center, closed, step
+             )
+             start = start.astype(np.int64)
+             end = end.astype(np.int64)
+             assert len(start) == len(
+                 end
+             ), "these should be equal in length from get_window_bounds"
+             # Cannot use groupby_indices as they might not be monotonic with the object
+             # we're rolling over
+             window_indices = np.arange(
+                 window_indices_start, window_indices_start + len(indices)
+             )
+             window_indices_start += len(indices)
+             # Extend as we'll be slicing window like [start, end)
+             window_indices = np.append(window_indices, [window_indices[-1] + 1]).astype(
+                 np.int64, copy=False
+             )
+             start_arrays.append(window_indices.take(ensure_platform_int(start)))
+             end_arrays.append(window_indices.take(ensure_platform_int(end)))
+         if len(start_arrays) == 0:
+             return np.array([], dtype=np.int64), np.array([], dtype=np.int64)
+         start = np.concatenate(start_arrays)
+         end = np.concatenate(end_arrays)
+         return start, end
+
+
+ class ExponentialMovingWindowIndexer(BaseIndexer):
+     """Calculate ewm window bounds (the entire window)"""
+
+     @Appender(get_window_bounds_doc)
+     def get_window_bounds(
+         self,
+         num_values: int = 0,
+         min_periods: int | None = None,
+         center: bool | None = None,
+         closed: str | None = None,
+         step: int | None = None,
+     ) -> tuple[np.ndarray, np.ndarray]:
+         return np.array([0], dtype=np.int64), np.array([num_values], dtype=np.int64)
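
The indexers above all share the same contract: `get_window_bounds` returns a pair of int64 arrays giving the half-open `[start, end)` bounds of each window. A hedged sketch of calling the public `FixedForwardWindowIndexer` directly, using only behavior visible in the file added above:

import numpy as np
from pandas.api.indexers import FixedForwardWindowIndexer

# Illustration: raw [start, end) bounds for a forward-looking window of
# size 2 over 5 values; end is clipped to num_values at the tail.
indexer = FixedForwardWindowIndexer(window_size=2)
start, end = indexer.get_window_bounds(num_values=5)
print(start)  # [0 1 2 3 4]
print(end)    # [2 3 4 5 5]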
llmeval-env/lib/python3.10/site-packages/pandas/core/indexers/utils.py ADDED
@@ -0,0 +1,553 @@
+ """
+ Low-dependency indexing utilities.
+ """
+ from __future__ import annotations
+
+ from typing import (
+     TYPE_CHECKING,
+     Any,
+ )
+
+ import numpy as np
+
+ from pandas._libs import lib
+
+ from pandas.core.dtypes.common import (
+     is_array_like,
+     is_bool_dtype,
+     is_integer,
+     is_integer_dtype,
+     is_list_like,
+ )
+ from pandas.core.dtypes.dtypes import ExtensionDtype
+ from pandas.core.dtypes.generic import (
+     ABCIndex,
+     ABCSeries,
+ )
+
+ if TYPE_CHECKING:
+     from pandas._typing import AnyArrayLike
+
+     from pandas.core.frame import DataFrame
+     from pandas.core.indexes.base import Index
+
+ # -----------------------------------------------------------
+ # Indexer Identification
+
+
+ def is_valid_positional_slice(slc: slice) -> bool:
+     """
+     Check if a slice object can be interpreted as a positional indexer.
+
+     Parameters
+     ----------
+     slc : slice
+
+     Returns
+     -------
+     bool
+
+     Notes
+     -----
+     A valid positional slice may also be interpreted as a label-based slice
+     depending on the index being sliced.
+     """
+     return (
+         lib.is_int_or_none(slc.start)
+         and lib.is_int_or_none(slc.stop)
+         and lib.is_int_or_none(slc.step)
+     )
+
+
+ def is_list_like_indexer(key) -> bool:
+     """
+     Check if we have a list-like indexer that is *not* a NamedTuple.
+
+     Parameters
+     ----------
+     key : object
+
+     Returns
+     -------
+     bool
+     """
+     # allow a list_like, but exclude NamedTuples which can be indexers
+     return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple)
+
+
+ def is_scalar_indexer(indexer, ndim: int) -> bool:
+     """
+     Return True if we are all scalar indexers.
+
+     Parameters
+     ----------
+     indexer : object
+     ndim : int
+         Number of dimensions in the object being indexed.
+
+     Returns
+     -------
+     bool
+     """
+     if ndim == 1 and is_integer(indexer):
+         # GH37748: allow indexer to be an integer for Series
+         return True
+     if isinstance(indexer, tuple) and len(indexer) == ndim:
+         return all(is_integer(x) for x in indexer)
+     return False
+
+
+ def is_empty_indexer(indexer) -> bool:
+     """
+     Check if we have an empty indexer.
+
+     Parameters
+     ----------
+     indexer : object
+
+     Returns
+     -------
+     bool
+     """
+     if is_list_like(indexer) and not len(indexer):
+         return True
+     if not isinstance(indexer, tuple):
+         indexer = (indexer,)
+     return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
+
+
+ # -----------------------------------------------------------
+ # Indexer Validation
+
+
+ def check_setitem_lengths(indexer, value, values) -> bool:
+     """
+     Validate that value and indexer are the same length.
+
+     A special-case is allowed for when the indexer is a boolean array
+     and the number of true values equals the length of ``value``. In
+     this case, no exception is raised.
+
+     Parameters
+     ----------
+     indexer : sequence
+         Key for the setitem.
+     value : array-like
+         Value for the setitem.
+     values : array-like
+         Values being set into.
+
+     Returns
+     -------
+     bool
+         Whether this is an empty listlike setting which is a no-op.
+
+     Raises
+     ------
+     ValueError
+         When the indexer is an ndarray or list and the lengths don't match.
+     """
+     no_op = False
+
+     if isinstance(indexer, (np.ndarray, list)):
+         # We can ignore other listlikes because they are either
+         # a) not necessarily 1-D indexers, e.g. tuple
+         # b) boolean indexers e.g. BoolArray
+         if is_list_like(value):
+             if len(indexer) != len(value) and values.ndim == 1:
+                 # boolean with truth values == len of the value is ok too
+                 if isinstance(indexer, list):
+                     indexer = np.array(indexer)
+                 if not (
+                     isinstance(indexer, np.ndarray)
+                     and indexer.dtype == np.bool_
+                     and indexer.sum() == len(value)
+                 ):
+                     raise ValueError(
+                         "cannot set using a list-like indexer "
+                         "with a different length than the value"
+                     )
+             if not len(indexer):
+                 no_op = True
+
+     elif isinstance(indexer, slice):
+         if is_list_like(value):
+             if len(value) != length_of_indexer(indexer, values) and values.ndim == 1:
+                 # In case of two dimensional value is used row-wise and broadcasted
+                 raise ValueError(
+                     "cannot set using a slice indexer with a "
+                     "different length than the value"
+                 )
+             if not len(value):
+                 no_op = True
+
+     return no_op
+
+
+ def validate_indices(indices: np.ndarray, n: int) -> None:
+     """
+     Perform bounds-checking for an indexer.
+
+     -1 is allowed for indicating missing values.
+
+     Parameters
+     ----------
+     indices : ndarray
+     n : int
+         Length of the array being indexed.
+
+     Raises
+     ------
+     ValueError
+
+     Examples
+     --------
+     >>> validate_indices(np.array([1, 2]), 3)  # OK
+
+     >>> validate_indices(np.array([1, -2]), 3)
+     Traceback (most recent call last):
+         ...
+     ValueError: negative dimensions are not allowed
+
+     >>> validate_indices(np.array([1, 2, 3]), 3)
+     Traceback (most recent call last):
+         ...
+     IndexError: indices are out-of-bounds
+
+     >>> validate_indices(np.array([-1, -1]), 0)  # OK
+
+     >>> validate_indices(np.array([0, 1]), 0)
+     Traceback (most recent call last):
+         ...
+     IndexError: indices are out-of-bounds
+     """
+     if len(indices):
+         min_idx = indices.min()
+         if min_idx < -1:
+             msg = f"'indices' contains values less than allowed ({min_idx} < -1)"
+             raise ValueError(msg)
+
+         max_idx = indices.max()
+         if max_idx >= n:
+             raise IndexError("indices are out-of-bounds")
+
+
+ # -----------------------------------------------------------
+ # Indexer Conversion
+
+
+ def maybe_convert_indices(indices, n: int, verify: bool = True) -> np.ndarray:
+     """
+     Attempt to convert indices into valid, positive indices.
+
+     If we have negative indices, translate to positive here.
+     If we have indices that are out-of-bounds, raise an IndexError.
+
+     Parameters
+     ----------
+     indices : array-like
+         Array of indices that we are to convert.
+     n : int
+         Number of elements in the array that we are indexing.
+     verify : bool, default True
+         Check that all entries are between 0 and n - 1, inclusive.
+
+     Returns
+     -------
+     array-like
+         An array-like of positive indices that correspond to the ones
+         that were passed in initially to this function.
+
+     Raises
+     ------
+     IndexError
+         One of the converted indices either exceeded the number of
+         elements (specified by `n`), or was still negative.
+     """
+     if isinstance(indices, list):
+         indices = np.array(indices)
+         if len(indices) == 0:
+             # If `indices` is empty, np.array will return a float,
+             # and will cause indexing errors.
+             return np.empty(0, dtype=np.intp)
+
+     mask = indices < 0
+     if mask.any():
+         indices = indices.copy()
+         indices[mask] += n
+
+     if verify:
+         mask = (indices >= n) | (indices < 0)
+         if mask.any():
+             raise IndexError("indices are out-of-bounds")
+     return indices
+
+
+ # -----------------------------------------------------------
+ # Unsorted
+
+
+ def length_of_indexer(indexer, target=None) -> int:
+     """
+     Return the expected length of target[indexer]
+
+     Returns
+     -------
+     int
+     """
+     if target is not None and isinstance(indexer, slice):
+         target_len = len(target)
+         start = indexer.start
+         stop = indexer.stop
+         step = indexer.step
+         if start is None:
+             start = 0
+         elif start < 0:
+             start += target_len
+         if stop is None or stop > target_len:
+             stop = target_len
+         elif stop < 0:
+             stop += target_len
+         if step is None:
+             step = 1
+         elif step < 0:
+             start, stop = stop + 1, start + 1
+             step = -step
+         return (stop - start + step - 1) // step
+     elif isinstance(indexer, (ABCSeries, ABCIndex, np.ndarray, list)):
+         if isinstance(indexer, list):
+             indexer = np.array(indexer)
+
+         if indexer.dtype == bool:
+             # GH#25774
+             return indexer.sum()
+         return len(indexer)
+     elif isinstance(indexer, range):
+         return (indexer.stop - indexer.start) // indexer.step
+     elif not is_list_like_indexer(indexer):
+         return 1
+     raise AssertionError("cannot find the length of the indexer")
+
+
+ def disallow_ndim_indexing(result) -> None:
+     """
+     Helper function to disallow multi-dimensional indexing on 1D Series/Index.
+
+     GH#27125 indexer like idx[:, None] expands dim, but we cannot do that
+     and keep an index, so we used to return ndarray, which was deprecated
+     in GH#30588.
+     """
+     if np.ndim(result) > 1:
+         raise ValueError(
+             "Multi-dimensional indexing (e.g. `obj[:, None]`) is no longer "
+             "supported. Convert to a numpy array before indexing instead."
+         )
+
+
+ def unpack_1tuple(tup):
+     """
+     If we have a length-1 tuple/list that contains a slice, unpack to just
+     the slice.
+
+     Notes
+     -----
+     The list case is deprecated.
+     """
+     if len(tup) == 1 and isinstance(tup[0], slice):
+         # if we don't have a MultiIndex, we may still be able to handle
+         # a 1-tuple.  see test_1tuple_without_multiindex
+
+         if isinstance(tup, list):
+             # GH#31299
+             raise ValueError(
+                 "Indexing with a single-item list containing a "
+                 "slice is not allowed. Pass a tuple instead.",
+             )
+
+         return tup[0]
+     return tup
+
+
+ def check_key_length(columns: Index, key, value: DataFrame) -> None:
+     """
+     Checks if a key used as an indexer has the same length as the columns it is
+     associated with.
+
+     Parameters
+     ----------
+     columns : Index
+         The columns of the DataFrame to index.
+     key : list-like
+         A list-like of keys to index with.
+     value : DataFrame
+         The value to set for the keys.
+
+     Raises
+     ------
+     ValueError
+         If the length of key is not equal to the number of columns in value
+         or if the number of columns referenced by key is not equal to number
+         of columns.
+     """
+     if columns.is_unique:
+         if len(value.columns) != len(key):
+             raise ValueError("Columns must be same length as key")
+     else:
+         # Missing keys in columns are represented as -1
+         if len(columns.get_indexer_non_unique(key)[0]) != len(value.columns):
+             raise ValueError("Columns must be same length as key")
+
+
+ def unpack_tuple_and_ellipses(item: tuple):
+     """
+     Possibly unpack arr[..., n] to arr[n]
+     """
+     if len(item) > 1:
+         # Note: we are assuming this indexing is being done on a 1D arraylike
+         if item[0] is Ellipsis:
+             item = item[1:]
+         elif item[-1] is Ellipsis:
+             item = item[:-1]
+
+     if len(item) > 1:
+         raise IndexError("too many indices for array.")
+
+     item = item[0]
+     return item
+
+
+ # -----------------------------------------------------------
+ # Public indexer validation
+
+
+ def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
+     """
+     Check if `indexer` is a valid array indexer for `array`.
+
+     For a boolean mask, `array` and `indexer` are checked to have the same
+     length. The dtype is validated, and if it is an integer or boolean
+     ExtensionArray, it is checked if there are missing values present, and
+     it is converted to the appropriate numpy array. Other dtypes will raise
+     an error.
+
+     Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed
+     through as is.
+
+     Parameters
+     ----------
+     array : array-like
+         The array that is being indexed (only used for the length).
+     indexer : array-like or list-like
+         The array-like that's used to index. List-like input that is not yet
+         a numpy array or an ExtensionArray is converted to one. Other input
+         types are passed through as is.
+
+     Returns
+     -------
+     numpy.ndarray
+         The validated indexer as a numpy array that can be used to index.
+
+     Raises
+     ------
+     IndexError
+         When the lengths don't match.
+     ValueError
+         When `indexer` cannot be converted to a numpy ndarray to index
+         (e.g. presence of missing values).
+
+     See Also
+     --------
+     api.types.is_bool_dtype : Check if `key` is of boolean dtype.
+
+     Examples
+     --------
+     When checking a boolean mask, a boolean ndarray is returned when the
+     arguments are all valid.
+
+     >>> mask = pd.array([True, False])
+     >>> arr = pd.array([1, 2])
+     >>> pd.api.indexers.check_array_indexer(arr, mask)
+     array([ True, False])
+
+     An IndexError is raised when the lengths don't match.
+
+     >>> mask = pd.array([True, False, True])
+     >>> pd.api.indexers.check_array_indexer(arr, mask)
+     Traceback (most recent call last):
+     ...
+     IndexError: Boolean index has wrong length: 3 instead of 2.
+
+     NA values in a boolean array are treated as False.
+
+     >>> mask = pd.array([True, pd.NA])
+     >>> pd.api.indexers.check_array_indexer(arr, mask)
+     array([ True, False])
+
+     A numpy boolean mask will get passed through (if the length is correct):
+
+     >>> mask = np.array([True, False])
+     >>> pd.api.indexers.check_array_indexer(arr, mask)
+     array([ True, False])
+
+     Similarly for integer indexers, an integer ndarray is returned when it is
+     a valid indexer, otherwise an error is raised (for integer indexers, a
+     matching length is not required):
+
+     >>> indexer = pd.array([0, 2], dtype="Int64")
+     >>> arr = pd.array([1, 2, 3])
+     >>> pd.api.indexers.check_array_indexer(arr, indexer)
+     array([0, 2])
+
+     >>> indexer = pd.array([0, pd.NA], dtype="Int64")
+     >>> pd.api.indexers.check_array_indexer(arr, indexer)
+     Traceback (most recent call last):
+     ...
+     ValueError: Cannot index with an integer indexer containing NA values
+
+     For non-integer/boolean dtypes, an appropriate error is raised:
+
+     >>> indexer = np.array([0., 2.], dtype="float64")
+     >>> pd.api.indexers.check_array_indexer(arr, indexer)
+     Traceback (most recent call last):
+     ...
+     IndexError: arrays used as indices must be of integer or boolean type
+     """
+     from pandas.core.construction import array as pd_array
+
+     # whatever is not an array-like is returned as-is (possible valid array
+     # indexers that are not array-like: integer, slice, Ellipsis, None)
+     # In this context, tuples are not considered as array-like, as they have
+     # a specific meaning in indexing (multi-dimensional indexing)
+     if is_list_like(indexer):
+         if isinstance(indexer, tuple):
+             return indexer
+     else:
+         return indexer
+
+     # convert list-likes to array
+     if not is_array_like(indexer):
+         indexer = pd_array(indexer)
+         if len(indexer) == 0:
+             # empty list is converted to float array by pd.array
+             indexer = np.array([], dtype=np.intp)
+
+     dtype = indexer.dtype
+     if is_bool_dtype(dtype):
+         if isinstance(dtype, ExtensionDtype):
+             indexer = indexer.to_numpy(dtype=bool, na_value=False)
+         else:
+             indexer = np.asarray(indexer, dtype=bool)
+
+         # GH26658
+         if len(indexer) != len(array):
+             raise IndexError(
+                 f"Boolean index has wrong length: "
+                 f"{len(indexer)} instead of {len(array)}"
+             )
+     elif is_integer_dtype(dtype):
+         try:
+             indexer = np.asarray(indexer, dtype=np.intp)
+         except ValueError as err:
+             raise ValueError(
+                 "Cannot index with an integer indexer containing NA values"
+             ) from err
+     else:
+         raise IndexError("arrays used as indices must be of integer or boolean type")
+
+     return indexer
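
Of the utilities above, only `check_array_indexer` is exposed publicly (as `pd.api.indexers.check_array_indexer`). A hedged sketch exercising the NA-handling behavior its docstring documents:

import pandas as pd

# Illustration: a boolean ExtensionArray mask containing pd.NA is converted
# to a plain numpy bool array with NA treated as False, per the docstring.
arr = pd.array([1, 2, 3])
mask = pd.array([True, pd.NA, False])
print(pd.api.indexers.check_array_indexer(arr, mask))  # [ True False False]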
llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__init__.py ADDED
@@ -0,0 +1,85 @@
+ from pandas.core.internals.api import make_block  # 2023-09-18 pyarrow uses this
+ from pandas.core.internals.array_manager import (
+     ArrayManager,
+     SingleArrayManager,
+ )
+ from pandas.core.internals.base import (
+     DataManager,
+     SingleDataManager,
+ )
+ from pandas.core.internals.concat import concatenate_managers
+ from pandas.core.internals.managers import (
+     BlockManager,
+     SingleBlockManager,
+ )
+
+ __all__ = [
+     "Block",  # pylint: disable=undefined-all-variable
+     "DatetimeTZBlock",  # pylint: disable=undefined-all-variable
+     "ExtensionBlock",  # pylint: disable=undefined-all-variable
+     "make_block",
+     "DataManager",
+     "ArrayManager",
+     "BlockManager",
+     "SingleDataManager",
+     "SingleBlockManager",
+     "SingleArrayManager",
+     "concatenate_managers",
+ ]
+
+
+ def __getattr__(name: str):
+     # GH#55139
+     import warnings
+
+     if name == "create_block_manager_from_blocks":
+         # GH#33892
+         warnings.warn(
+             f"{name} is deprecated and will be removed in a future version. "
+             "Use public APIs instead.",
+             DeprecationWarning,
+             # https://github.com/pandas-dev/pandas/pull/55139#pullrequestreview-1720690758
+             # on hard-coding stacklevel
+             stacklevel=2,
+         )
+         from pandas.core.internals.managers import create_block_manager_from_blocks
+
+         return create_block_manager_from_blocks
+
+     if name in [
+         "NumericBlock",
+         "ObjectBlock",
+         "Block",
+         "ExtensionBlock",
+         "DatetimeTZBlock",
+     ]:
+         warnings.warn(
+             f"{name} is deprecated and will be removed in a future version. "
+             "Use public APIs instead.",
+             DeprecationWarning,
+             # https://github.com/pandas-dev/pandas/pull/55139#pullrequestreview-1720690758
+             # on hard-coding stacklevel
+             stacklevel=2,
+         )
+         if name == "NumericBlock":
+             from pandas.core.internals.blocks import NumericBlock
+
+             return NumericBlock
+         elif name == "DatetimeTZBlock":
+             from pandas.core.internals.blocks import DatetimeTZBlock
+
+             return DatetimeTZBlock
+         elif name == "ExtensionBlock":
+             from pandas.core.internals.blocks import ExtensionBlock
+
+             return ExtensionBlock
+         elif name == "Block":
+             from pandas.core.internals.blocks import Block
+
+             return Block
+         else:
+             from pandas.core.internals.blocks import ObjectBlock
+
+             return ObjectBlock
+
+     raise AttributeError(f"module 'pandas.core.internals' has no attribute '{name}'")
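
The module-level `__getattr__` above is the PEP 562 hook: deprecated names keep working via a lazy import, but touching them emits a `DeprecationWarning`. A minimal standalone sketch of the same pattern, with hypothetical names (`OldName` and the stand-in return value are illustrative, not pandas APIs):

# Minimal PEP 562 sketch: accessing a deprecated module attribute warns,
# then resolves lazily. Place this at module level in some module.
import warnings
from types import SimpleNamespace

_DEPRECATED = {"OldName": "newmodule.NewName"}  # hypothetical mapping

def __getattr__(name: str):
    if name in _DEPRECATED:
        warnings.warn(
            f"{name} is deprecated; use {_DEPRECATED[name]} instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        return SimpleNamespace(name=name)  # stand-in for the real lazy import
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")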
llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.56 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/api.cpython-310.pyc ADDED
Binary file (3.18 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/array_manager.cpython-310.pyc ADDED
Binary file (39.1 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/base.cpython-310.pyc ADDED
Binary file (10.7 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/blocks.cpython-310.pyc ADDED
Binary file (59.5 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/concat.cpython-310.pyc ADDED
Binary file (14.5 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/construction.cpython-310.pyc ADDED
Binary file (23.4 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/managers.cpython-310.pyc ADDED
Binary file (60.2 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/internals/__pycache__/ops.cpython-310.pyc ADDED
Binary file (3.2 kB).
llmeval-env/lib/python3.10/site-packages/pandas/core/internals/api.py ADDED
@@ -0,0 +1,156 @@
+ """
+ This is a pseudo-public API for downstream libraries. We ask that downstream
+ authors
+
+ 1) Try to avoid using internals directly altogether, and failing that,
+ 2) Use only functions exposed here (or in core.internals)
+
+ """
+ from __future__ import annotations
+
+ from typing import TYPE_CHECKING
+
+ import numpy as np
+
+ from pandas._libs.internals import BlockPlacement
+
+ from pandas.core.dtypes.common import pandas_dtype
+ from pandas.core.dtypes.dtypes import (
+     DatetimeTZDtype,
+     PeriodDtype,
+ )
+
+ from pandas.core.arrays import DatetimeArray
+ from pandas.core.construction import extract_array
+ from pandas.core.internals.blocks import (
+     check_ndim,
+     ensure_block_shape,
+     extract_pandas_array,
+     get_block_type,
+     maybe_coerce_values,
+ )
+
+ if TYPE_CHECKING:
+     from pandas._typing import Dtype
+
+     from pandas.core.internals.blocks import Block
+
+
+ def make_block(
+     values, placement, klass=None, ndim=None, dtype: Dtype | None = None
+ ) -> Block:
+     """
+     This is a pseudo-public analogue to blocks.new_block.
+
+     We ask that downstream libraries use this rather than any fully-internal
+     APIs, including but not limited to:
+
+     - core.internals.blocks.make_block
+     - Block.make_block
+     - Block.make_block_same_class
+     - Block.__init__
+     """
+     if dtype is not None:
+         dtype = pandas_dtype(dtype)
+
+     values, dtype = extract_pandas_array(values, dtype, ndim)
+
+     from pandas.core.internals.blocks import (
+         DatetimeTZBlock,
+         ExtensionBlock,
+     )
+
+     if klass is ExtensionBlock and isinstance(values.dtype, PeriodDtype):
+         # GH-44681 changed PeriodArray to be stored in the 2D
+         # NDArrayBackedExtensionBlock instead of ExtensionBlock
+         # -> still allow ExtensionBlock to be passed in this case for back compat
+         klass = None
+
+     if klass is None:
+         dtype = dtype or values.dtype
+         klass = get_block_type(dtype)
+
+     elif klass is DatetimeTZBlock and not isinstance(values.dtype, DatetimeTZDtype):
+         # pyarrow calls get here
+         values = DatetimeArray._simple_new(
+             # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has
+             # incompatible type "Union[ExtensionDtype, dtype[Any], None]";
+             # expected "Union[dtype[datetime64], DatetimeTZDtype]"
+             values,
+             dtype=dtype,  # type: ignore[arg-type]
+         )
+
+     if not isinstance(placement, BlockPlacement):
+         placement = BlockPlacement(placement)
+
+     ndim = maybe_infer_ndim(values, placement, ndim)
+     if isinstance(values.dtype, (PeriodDtype, DatetimeTZDtype)):
+         # GH#41168 ensure we can pass 1D dt64tz values
+         # More generally, any EA dtype that isn't is_1d_only_ea_dtype
+         values = extract_array(values, extract_numpy=True)
+         values = ensure_block_shape(values, ndim)
+
+     check_ndim(values, placement, ndim)
+     values = maybe_coerce_values(values)
+     return klass(values, ndim=ndim, placement=placement)
+
+
+ def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int:
+     """
+     If `ndim` is not provided, infer it from placement and values.
+     """
+     if ndim is None:
+         # GH#38134 Block constructor now assumes ndim is not None
+         if not isinstance(values.dtype, np.dtype):
+             if len(placement) != 1:
+                 ndim = 1
+             else:
+                 ndim = 2
+         else:
+             ndim = values.ndim
+     return ndim
+
+
+ def __getattr__(name: str):
+     # GH#55139
+     import warnings
+
+     if name in [
+         "Block",
+         "ExtensionBlock",
+         "DatetimeTZBlock",
+         "create_block_manager_from_blocks",
+     ]:
+         # GH#33892
+         warnings.warn(
+             f"{name} is deprecated and will be removed in a future version. "
+             "Use public APIs instead.",
+             DeprecationWarning,
+             # https://github.com/pandas-dev/pandas/pull/55139#pullrequestreview-1720690758
+             # on hard-coding stacklevel
+             stacklevel=2,
+         )
+
+     if name == "create_block_manager_from_blocks":
+         from pandas.core.internals.managers import create_block_manager_from_blocks
+
+         return create_block_manager_from_blocks
+
+     elif name == "Block":
+         from pandas.core.internals.blocks import Block
+
+         return Block
+
+     elif name == "DatetimeTZBlock":
+         from pandas.core.internals.blocks import DatetimeTZBlock
+
+         return DatetimeTZBlock
+
+     elif name == "ExtensionBlock":
+         from pandas.core.internals.blocks import ExtensionBlock
+
+         return ExtensionBlock
+
+     raise AttributeError(
+         f"module 'pandas.core.internals.api' has no attribute '{name}'"
+     )
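
`make_block` above is the pseudo-public entry point: it normalizes `placement` to a `BlockPlacement`, infers `ndim`, and picks a Block subclass via `get_block_type`. A hedged sketch of a call (illustration only; the concrete subclass name printed depends on the pandas version):

import numpy as np
from pandas.core.internals.api import make_block

# Wrap a 2D float ndarray as a Block occupying column position 0.
# ndim is inferred as 2 from values.ndim; klass comes from get_block_type.
blk = make_block(np.array([[1.0, 2.0, 3.0]]), placement=[0])
print(type(blk).__name__, blk.shape)  # e.g. "NumpyBlock (1, 3)"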
llmeval-env/lib/python3.10/site-packages/pandas/core/internals/array_manager.py ADDED
@@ -0,0 +1,1340 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+"""
+Experimental manager based on storing a collection of 1D arrays
+"""
+from __future__ import annotations
+
+import itertools
+from typing import (
+    TYPE_CHECKING,
+    Callable,
+    Literal,
+)
+
+import numpy as np
+
+from pandas._libs import (
+    NaT,
+    lib,
+)
+
+from pandas.core.dtypes.astype import (
+    astype_array,
+    astype_array_safe,
+)
+from pandas.core.dtypes.cast import (
+    ensure_dtype_can_hold_na,
+    find_common_type,
+    infer_dtype_from_scalar,
+    np_find_common_type,
+)
+from pandas.core.dtypes.common import (
+    ensure_platform_int,
+    is_datetime64_ns_dtype,
+    is_integer,
+    is_numeric_dtype,
+    is_object_dtype,
+    is_timedelta64_ns_dtype,
+)
+from pandas.core.dtypes.dtypes import ExtensionDtype
+from pandas.core.dtypes.generic import (
+    ABCDataFrame,
+    ABCSeries,
+)
+from pandas.core.dtypes.missing import (
+    array_equals,
+    isna,
+    na_value_for_dtype,
+)
+
+import pandas.core.algorithms as algos
+from pandas.core.array_algos.quantile import quantile_compat
+from pandas.core.array_algos.take import take_1d
+from pandas.core.arrays import (
+    DatetimeArray,
+    ExtensionArray,
+    NumpyExtensionArray,
+    TimedeltaArray,
+)
+from pandas.core.construction import (
+    ensure_wrapped_if_datetimelike,
+    extract_array,
+    sanitize_array,
+)
+from pandas.core.indexers import (
+    maybe_convert_indices,
+    validate_indices,
+)
+from pandas.core.indexes.api import (
+    Index,
+    ensure_index,
+)
+from pandas.core.indexes.base import get_values_for_csv
+from pandas.core.internals.base import (
+    DataManager,
+    SingleDataManager,
+    ensure_np_dtype,
+    interleaved_dtype,
+)
+from pandas.core.internals.blocks import (
+    BlockPlacement,
+    ensure_block_shape,
+    external_values,
+    extract_pandas_array,
+    maybe_coerce_values,
+    new_block,
+)
+from pandas.core.internals.managers import make_na_array
+
+if TYPE_CHECKING:
+    from collections.abc import Hashable
+
+    from pandas._typing import (
+        ArrayLike,
+        AxisInt,
+        DtypeObj,
+        QuantileInterpolation,
+        Self,
+        npt,
+    )
+
+
+class BaseArrayManager(DataManager):
+    """
+    Core internal data structure to implement DataFrame and Series.
+
+    Alternative to the BlockManager, storing a list of 1D arrays instead of
+    Blocks.
+
+    This is *not* a public API class.
+
+    Parameters
+    ----------
+    arrays : Sequence of arrays
+    axes : Sequence of Index
+    verify_integrity : bool, default True
+
+    """
+
+    __slots__ = [
+        "_axes",  # private attribute, because 'axes' has different order, see below
+        "arrays",
+    ]
+
+    arrays: list[np.ndarray | ExtensionArray]
+    _axes: list[Index]
+
+    def __init__(
+        self,
+        arrays: list[np.ndarray | ExtensionArray],
+        axes: list[Index],
+        verify_integrity: bool = True,
+    ) -> None:
+        raise NotImplementedError
+
+    def make_empty(self, axes=None) -> Self:
+        """Return an empty ArrayManager with the items axis of len 0 (no columns)"""
+        if axes is None:
+            axes = [self.axes[1:], Index([])]
+
+        arrays: list[np.ndarray | ExtensionArray] = []
+        return type(self)(arrays, axes)
+
+    @property
+    def items(self) -> Index:
+        return self._axes[-1]
+
+    @property
+    # error: Signature of "axes" incompatible with supertype "DataManager"
+    def axes(self) -> list[Index]:  # type: ignore[override]
+        # mypy doesn't work to override attribute with property
+        # see https://github.com/python/mypy/issues/4125
+        """Axes is BlockManager-compatible order (columns, rows)"""
+        return [self._axes[1], self._axes[0]]
+
+    @property
+    def shape_proper(self) -> tuple[int, ...]:
+        # this returns (n_rows, n_columns)
+        return tuple(len(ax) for ax in self._axes)
+
+    @staticmethod
+    def _normalize_axis(axis: AxisInt) -> int:
+        # switch axis
+        axis = 1 if axis == 0 else 0
+        return axis
+
+    def set_axis(self, axis: AxisInt, new_labels: Index) -> None:
+        # Caller is responsible for ensuring we have an Index object.
+        self._validate_set_axis(axis, new_labels)
+        axis = self._normalize_axis(axis)
+        self._axes[axis] = new_labels
+
+    def get_dtypes(self) -> npt.NDArray[np.object_]:
+        return np.array([arr.dtype for arr in self.arrays], dtype="object")
+
+    def add_references(self, mgr: BaseArrayManager) -> None:
+        """
+        Only implemented on the BlockManager level
+        """
+        return
+
+    def __getstate__(self):
+        return self.arrays, self._axes
+
+    def __setstate__(self, state) -> None:
+        self.arrays = state[0]
+        self._axes = state[1]
+
+    def __repr__(self) -> str:
+        output = type(self).__name__
+        output += f"\nIndex: {self._axes[0]}"
+        if self.ndim == 2:
+            output += f"\nColumns: {self._axes[1]}"
+        output += f"\n{len(self.arrays)} arrays:"
+        for arr in self.arrays:
+            output += f"\n{arr.dtype}"
+        return output
+
+    def apply(
+        self,
+        f,
+        align_keys: list[str] | None = None,
+        **kwargs,
+    ) -> Self:
+        """
+        Iterate over the arrays, collect and create a new ArrayManager.
+
+        Parameters
+        ----------
+        f : str or callable
+            Name of the Array method to apply.
+        align_keys : List[str] or None, default None
+        **kwargs
+            Keywords to pass to `f`
+
+        Returns
+        -------
+        ArrayManager
+        """
+        assert "filter" not in kwargs
+
+        align_keys = align_keys or []
+        result_arrays: list[ArrayLike] = []
+        # fillna: Series/DataFrame is responsible for making sure value is aligned
+
+        aligned_args = {k: kwargs[k] for k in align_keys}
+
+        if f == "apply":
+            f = kwargs.pop("func")
+
+        for i, arr in enumerate(self.arrays):
+            if aligned_args:
+                for k, obj in aligned_args.items():
+                    if isinstance(obj, (ABCSeries, ABCDataFrame)):
+                        # The caller is responsible for ensuring that
+                        # obj.axes[-1].equals(self.items)
+                        if obj.ndim == 1:
+                            kwargs[k] = obj.iloc[i]
+                        else:
+                            kwargs[k] = obj.iloc[:, i]._values
+                    else:
+                        # otherwise we have an array-like
+                        kwargs[k] = obj[i]
+
+            if callable(f):
+                applied = f(arr, **kwargs)
+            else:
+                applied = getattr(arr, f)(**kwargs)
+
+            result_arrays.append(applied)
+
+        new_axes = self._axes
+        return type(self)(result_arrays, new_axes)
+
+    def apply_with_block(self, f, align_keys=None, **kwargs) -> Self:
+        # switch axis to follow BlockManager logic
+        swap_axis = True
+        if f == "interpolate":
+            swap_axis = False
+        if swap_axis and "axis" in kwargs and self.ndim == 2:
+            kwargs["axis"] = 1 if kwargs["axis"] == 0 else 0
+
+        align_keys = align_keys or []
+        aligned_args = {k: kwargs[k] for k in align_keys}
+
+        result_arrays = []
+
+        for i, arr in enumerate(self.arrays):
+            if aligned_args:
+                for k, obj in aligned_args.items():
+                    if isinstance(obj, (ABCSeries, ABCDataFrame)):
+                        # The caller is responsible for ensuring that
+                        # obj.axes[-1].equals(self.items)
+                        if obj.ndim == 1:
+                            if self.ndim == 2:
+                                kwargs[k] = obj.iloc[slice(i, i + 1)]._values
+                            else:
+                                kwargs[k] = obj.iloc[:]._values
+                        else:
+                            kwargs[k] = obj.iloc[:, [i]]._values
+                    else:
+                        # otherwise we have an ndarray
+                        if obj.ndim == 2:
+                            kwargs[k] = obj[[i]]
+
+            if isinstance(arr.dtype, np.dtype) and not isinstance(arr, np.ndarray):
+                # i.e. TimedeltaArray, DatetimeArray with tz=None. Need to
+                # convert for the Block constructors.
+                arr = np.asarray(arr)
+
+            arr = maybe_coerce_values(arr)
+            if self.ndim == 2:
+                arr = ensure_block_shape(arr, 2)
+                bp = BlockPlacement(slice(0, 1, 1))
+                block = new_block(arr, placement=bp, ndim=2)
+            else:
+                bp = BlockPlacement(slice(0, len(self), 1))
+                block = new_block(arr, placement=bp, ndim=1)
+
+            applied = getattr(block, f)(**kwargs)
+            if isinstance(applied, list):
+                applied = applied[0]
+            arr = applied.values
+            if self.ndim == 2 and arr.ndim == 2:
+                # 2D for np.ndarray or DatetimeArray/TimedeltaArray
+                assert len(arr) == 1
+                # error: No overload variant of "__getitem__" of "ExtensionArray"
+                # matches argument type "Tuple[int, slice]"
+                arr = arr[0, :]  # type: ignore[call-overload]
+            result_arrays.append(arr)
+
+        return type(self)(result_arrays, self._axes)
+
+    def setitem(self, indexer, value, warn: bool = True) -> Self:
+        return self.apply_with_block("setitem", indexer=indexer, value=value)
+
+    def diff(self, n: int) -> Self:
+        assert self.ndim == 2  # caller ensures
+        return self.apply(algos.diff, n=n)
+
+    def astype(self, dtype, copy: bool | None = False, errors: str = "raise") -> Self:
+        if copy is None:
+            copy = True
+
+        return self.apply(astype_array_safe, dtype=dtype, copy=copy, errors=errors)
+
+    def convert(self, copy: bool | None) -> Self:
+        if copy is None:
+            copy = True
+
+        def _convert(arr):
+            if is_object_dtype(arr.dtype):
+                # extract NumpyExtensionArray for tests that patch
+                # NumpyExtensionArray._typ
+                arr = np.asarray(arr)
+                result = lib.maybe_convert_objects(
+                    arr,
+                    convert_non_numeric=True,
+                )
+                if result is arr and copy:
+                    return arr.copy()
+                return result
+            else:
+                return arr.copy() if copy else arr
+
+        return self.apply(_convert)
+
+    def get_values_for_csv(
+        self, *, float_format, date_format, decimal, na_rep: str = "nan", quoting=None
+    ) -> Self:
+        return self.apply(
+            get_values_for_csv,
+            na_rep=na_rep,
+            quoting=quoting,
+            float_format=float_format,
+            date_format=date_format,
+            decimal=decimal,
+        )
+
+    @property
+    def any_extension_types(self) -> bool:
+        """Whether any of the blocks in this manager are extension blocks"""
+        return False  # any(block.is_extension for block in self.blocks)
+
+    @property
+    def is_view(self) -> bool:
+        """return a boolean if we are a single block and are a view"""
+        # TODO what is this used for?
+        return False
+
+    @property
+    def is_single_block(self) -> bool:
+        return len(self.arrays) == 1
+
+    def _get_data_subset(self, predicate: Callable) -> Self:
+        indices = [i for i, arr in enumerate(self.arrays) if predicate(arr)]
+        arrays = [self.arrays[i] for i in indices]
+        # TODO copy?
+        # Note: using Index.take ensures we can retain e.g. DatetimeIndex.freq,
+        # see test_describe_datetime_columns
+        taker = np.array(indices, dtype="intp")
+        new_cols = self._axes[1].take(taker)
+        new_axes = [self._axes[0], new_cols]
+        return type(self)(arrays, new_axes, verify_integrity=False)
+
+    def get_bool_data(self, copy: bool = False) -> Self:
+        """
+        Select columns that are bool-dtype and object-dtype columns that are all-bool.
+
+        Parameters
+        ----------
+        copy : bool, default False
+            Whether to copy the blocks
+        """
+        return self._get_data_subset(lambda x: x.dtype == np.dtype(bool))
+
+    def get_numeric_data(self, copy: bool = False) -> Self:
+        """
+        Select columns that have a numeric dtype.
+
+        Parameters
+        ----------
+        copy : bool, default False
+            Whether to copy the blocks
+        """
+        return self._get_data_subset(
+            lambda arr: is_numeric_dtype(arr.dtype)
+            or getattr(arr.dtype, "_is_numeric", False)
+        )
+
+    def copy(self, deep: bool | Literal["all"] | None = True) -> Self:
+        """
+        Make deep or shallow copy of ArrayManager
+
+        Parameters
+        ----------
+        deep : bool or string, default True
+            If False, return shallow copy (do not copy data)
+            If 'all', copy data and a deep copy of the index
+
+        Returns
+        -------
+        ArrayManager
+        """
+        if deep is None:
+            # ArrayManager does not yet support CoW, so deep=None always means
+            # deep=True for now
+            deep = True
+
+        # this preserves the notion of view copying of axes
+        if deep:
+            # hit in e.g. tests.io.json.test_pandas
+
+            def copy_func(ax):
+                return ax.copy(deep=True) if deep == "all" else ax.view()
+
+            new_axes = [copy_func(ax) for ax in self._axes]
+        else:
+            new_axes = list(self._axes)
+
+        if deep:
+            new_arrays = [arr.copy() for arr in self.arrays]
+        else:
+            new_arrays = list(self.arrays)
+        return type(self)(new_arrays, new_axes, verify_integrity=False)
+
+    def reindex_indexer(
+        self,
+        new_axis,
+        indexer,
+        axis: AxisInt,
+        fill_value=None,
+        allow_dups: bool = False,
+        copy: bool | None = True,
+        # ignored keywords
+        only_slice: bool = False,
+        # ArrayManager specific keywords
+        use_na_proxy: bool = False,
+    ) -> Self:
+        axis = self._normalize_axis(axis)
+        return self._reindex_indexer(
+            new_axis,
+            indexer,
+            axis,
+            fill_value,
+            allow_dups,
+            copy,
+            use_na_proxy,
+        )
+
+    def _reindex_indexer(
+        self,
+        new_axis,
+        indexer: npt.NDArray[np.intp] | None,
+        axis: AxisInt,
+        fill_value=None,
+        allow_dups: bool = False,
+        copy: bool | None = True,
+        use_na_proxy: bool = False,
+    ) -> Self:
+        """
+        Parameters
+        ----------
+        new_axis : Index
+        indexer : ndarray[intp] or None
+        axis : int
+        fill_value : object, default None
+        allow_dups : bool, default False
+        copy : bool, default True
+
+
+        pandas-indexer with -1's only.
+        """
+        if copy is None:
+            # ArrayManager does not yet support CoW, so deep=None always means
+            # deep=True for now
+            copy = True
+
+        if indexer is None:
+            if new_axis is self._axes[axis] and not copy:
+                return self
+
+            result = self.copy(deep=copy)
+            result._axes = list(self._axes)
+            result._axes[axis] = new_axis
+            return result
+
+        # some axes don't allow reindexing with dups
+        if not allow_dups:
+            self._axes[axis]._validate_can_reindex(indexer)
+
+        if axis >= self.ndim:
+            raise IndexError("Requested axis not found in manager")
+
+        if axis == 1:
+            new_arrays = []
+            for i in indexer:
+                if i == -1:
+                    arr = self._make_na_array(
+                        fill_value=fill_value, use_na_proxy=use_na_proxy
+                    )
+                else:
+                    arr = self.arrays[i]
+                    if copy:
+                        arr = arr.copy()
+                new_arrays.append(arr)
+
+        else:
+            validate_indices(indexer, len(self._axes[0]))
+            indexer = ensure_platform_int(indexer)
+            mask = indexer == -1
+            needs_masking = mask.any()
+            new_arrays = [
+                take_1d(
+                    arr,
+                    indexer,
+                    allow_fill=needs_masking,
+                    fill_value=fill_value,
+                    mask=mask,
+                    # if fill_value is not None else blk.fill_value
+                )
+                for arr in self.arrays
+            ]
+
+        new_axes = list(self._axes)
+        new_axes[axis] = new_axis
+
+        return type(self)(new_arrays, new_axes, verify_integrity=False)
+
+    def take(
+        self,
+        indexer: npt.NDArray[np.intp],
+        axis: AxisInt = 1,
+        verify: bool = True,
+    ) -> Self:
+        """
+        Take items along any axis.
+        """
+        assert isinstance(indexer, np.ndarray), type(indexer)
+        assert indexer.dtype == np.intp, indexer.dtype
+
+        axis = self._normalize_axis(axis)
+
+        if not indexer.ndim == 1:
+            raise ValueError("indexer should be 1-dimensional")
+
+        n = self.shape_proper[axis]
+        indexer = maybe_convert_indices(indexer, n, verify=verify)
+
+        new_labels = self._axes[axis].take(indexer)
+        return self._reindex_indexer(
+            new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True
+        )
+
+    def _make_na_array(self, fill_value=None, use_na_proxy: bool = False):
+        if use_na_proxy:
+            assert fill_value is None
+            return NullArrayProxy(self.shape_proper[0])
+
+        if fill_value is None:
+            fill_value = np.nan
+
+        dtype, fill_value = infer_dtype_from_scalar(fill_value)
+        array_values = make_na_array(dtype, self.shape_proper[:1], fill_value)
+        return array_values
+
+    def _equal_values(self, other) -> bool:
+        """
+        Used in .equals defined in base class. Only check the column values
+        assuming shape and indexes have already been checked.
+        """
+        for left, right in zip(self.arrays, other.arrays):
+            if not array_equals(left, right):
+                return False
+        return True
+
+    # TODO
+    # to_dict
+
+
+class ArrayManager(BaseArrayManager):
+    @property
+    def ndim(self) -> Literal[2]:
+        return 2
+
+    def __init__(
+        self,
+        arrays: list[np.ndarray | ExtensionArray],
+        axes: list[Index],
+        verify_integrity: bool = True,
+    ) -> None:
+        # Note: we are storing the axes in "_axes" in the (row, columns) order,
+        # which contrasts with how they are stored in the BlockManager
+        self._axes = axes
+        self.arrays = arrays
+
+        if verify_integrity:
+            self._axes = [ensure_index(ax) for ax in axes]
+            arrays = [extract_pandas_array(x, None, 1)[0] for x in arrays]
+            self.arrays = [maybe_coerce_values(arr) for arr in arrays]
+            self._verify_integrity()
+
+    def _verify_integrity(self) -> None:
+        n_rows, n_columns = self.shape_proper
+        if not len(self.arrays) == n_columns:
+            raise ValueError(
+                "Number of passed arrays must equal the size of the column Index: "
+                f"{len(self.arrays)} arrays vs {n_columns} columns."
+            )
+        for arr in self.arrays:
+            if not len(arr) == n_rows:
+                raise ValueError(
+                    "Passed arrays should have the same length as the rows Index: "
+                    f"{len(arr)} vs {n_rows} rows"
+                )
+            if not isinstance(arr, (np.ndarray, ExtensionArray)):
+                raise ValueError(
+                    "Passed arrays should be np.ndarray or ExtensionArray instances, "
+                    f"got {type(arr)} instead"
+                )
+            if not arr.ndim == 1:
+                raise ValueError(
+                    "Passed arrays should be 1-dimensional, got array with "
+                    f"{arr.ndim} dimensions instead."
+                )
+
+    # --------------------------------------------------------------------
+    # Indexing
+
+    def fast_xs(self, loc: int) -> SingleArrayManager:
+        """
+        Return the array corresponding to `frame.iloc[loc]`.
+
+        Parameters
+        ----------
+        loc : int
+
+        Returns
+        -------
+        SingleArrayManager
+        """
+        dtype = interleaved_dtype([arr.dtype for arr in self.arrays])
+
+        values = [arr[loc] for arr in self.arrays]
+        if isinstance(dtype, ExtensionDtype):
+            result = dtype.construct_array_type()._from_sequence(values, dtype=dtype)
+        # for datetime64/timedelta64, the np.ndarray constructor cannot handle pd.NaT
+        elif is_datetime64_ns_dtype(dtype):
+            result = DatetimeArray._from_sequence(values, dtype=dtype)._ndarray
+        elif is_timedelta64_ns_dtype(dtype):
+            result = TimedeltaArray._from_sequence(values, dtype=dtype)._ndarray
+        else:
+            result = np.array(values, dtype=dtype)
+        return SingleArrayManager([result], [self._axes[1]])
+
+    def get_slice(self, slobj: slice, axis: AxisInt = 0) -> ArrayManager:
+        axis = self._normalize_axis(axis)
+
+        if axis == 0:
+            arrays = [arr[slobj] for arr in self.arrays]
+        elif axis == 1:
+            arrays = self.arrays[slobj]
+
+        new_axes = list(self._axes)
+        new_axes[axis] = new_axes[axis]._getitem_slice(slobj)
+
+        return type(self)(arrays, new_axes, verify_integrity=False)
+
+    def iget(self, i: int) -> SingleArrayManager:
+        """
+        Return the data as a SingleArrayManager.
+        """
+        values = self.arrays[i]
+        return SingleArrayManager([values], [self._axes[0]])
+
+    def iget_values(self, i: int) -> ArrayLike:
+        """
+        Return the data for column i as the values (ndarray or ExtensionArray).
+        """
+        return self.arrays[i]
+
+    @property
+    def column_arrays(self) -> list[ArrayLike]:
+        """
+        Used in the JSON C code to access column arrays.
+        """
+
+        return [np.asarray(arr) for arr in self.arrays]
+
+    def iset(
+        self,
+        loc: int | slice | np.ndarray,
+        value: ArrayLike,
+        inplace: bool = False,
+        refs=None,
+    ) -> None:
+        """
+        Set new column(s).
+
+        This changes the ArrayManager in-place, but replaces (an) existing
+        column(s) (not changing column values in-place).
+
+        Parameters
+        ----------
+        loc : integer, slice or boolean mask
+            Positional location (already bounds checked)
+        value : np.ndarray or ExtensionArray
+        inplace : bool, default False
+            Whether to overwrite the existing array, as opposed to replacing it.
+        """
+        # single column -> single integer index
+        if lib.is_integer(loc):
+            # TODO can we avoid needing to unpack this here? That means converting
+            # DataFrame into 1D array when loc is an integer
+            if isinstance(value, np.ndarray) and value.ndim == 2:
+                assert value.shape[1] == 1
+                value = value[:, 0]
+
+            # TODO we receive a datetime/timedelta64 ndarray from DataFrame._iset_item
+            # but we should avoid that and pass directly the proper array
+            value = maybe_coerce_values(value)
+
+            assert isinstance(value, (np.ndarray, ExtensionArray))
+            assert value.ndim == 1
+            assert len(value) == len(self._axes[0])
+            self.arrays[loc] = value
+            return
+
+        # multiple columns -> convert slice or array to integer indices
+        elif isinstance(loc, slice):
+            indices: range | np.ndarray = range(
+                loc.start if loc.start is not None else 0,
+                loc.stop if loc.stop is not None else self.shape_proper[1],
+                loc.step if loc.step is not None else 1,
+            )
+        else:
+            assert isinstance(loc, np.ndarray)
+            assert loc.dtype == "bool"
+            indices = np.nonzero(loc)[0]
+
+        assert value.ndim == 2
+        assert value.shape[0] == len(self._axes[0])
+
+        for value_idx, mgr_idx in enumerate(indices):
+            # error: No overload variant of "__getitem__" of "ExtensionArray" matches
+            # argument type "Tuple[slice, int]"
+            value_arr = value[:, value_idx]  # type: ignore[call-overload]
+            self.arrays[mgr_idx] = value_arr
+        return
+
+    def column_setitem(
+        self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False
+    ) -> None:
+        """
+        Set values ("setitem") into a single column (not setting the full column).
+
+        This is a method on the ArrayManager level, to avoid creating an
+        intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`)
+        """
+        if not is_integer(loc):
+            raise TypeError("The column index should be an integer")
+        arr = self.arrays[loc]
+        mgr = SingleArrayManager([arr], [self._axes[0]])
+        if inplace_only:
+            mgr.setitem_inplace(idx, value)
+        else:
+            new_mgr = mgr.setitem((idx,), value)
+            # update existing ArrayManager in-place
+            self.arrays[loc] = new_mgr.arrays[0]
+
+    def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None:
+        """
+        Insert item at selected position.
+
+        Parameters
+        ----------
+        loc : int
+        item : hashable
+        value : np.ndarray or ExtensionArray
+        """
+        # insert to the axis; this could possibly raise a TypeError
+        new_axis = self.items.insert(loc, item)
+
+        value = extract_array(value, extract_numpy=True)
+        if value.ndim == 2:
+            if value.shape[0] == 1:
+                # error: No overload variant of "__getitem__" of "ExtensionArray"
+                # matches argument type "Tuple[int, slice]"
+                value = value[0, :]  # type: ignore[call-overload]
+            else:
+                raise ValueError(
+                    f"Expected a 1D array, got an array with shape {value.shape}"
+                )
+        value = maybe_coerce_values(value)
+
+        # TODO self.arrays can be empty
+        # assert len(value) == len(self.arrays[0])
+
+        # TODO is this copy needed?
+        arrays = self.arrays.copy()
+        arrays.insert(loc, value)
+
+        self.arrays = arrays
+        self._axes[1] = new_axis
+
+    def idelete(self, indexer) -> ArrayManager:
+        """
+        Delete selected locations in-place (new arrays, same ArrayManager)
+        """
+        to_keep = np.ones(self.shape[0], dtype=np.bool_)
+        to_keep[indexer] = False
+
+        self.arrays = [self.arrays[i] for i in np.nonzero(to_keep)[0]]
+        self._axes = [self._axes[0], self._axes[1][to_keep]]
+        return self
+
+    # --------------------------------------------------------------------
+    # Array-wise Operation
+
+    def grouped_reduce(self, func: Callable) -> Self:
+        """
+        Apply grouped reduction function columnwise, returning a new ArrayManager.
+
+        Parameters
+        ----------
+        func : grouped reduction function
+
+        Returns
+        -------
+        ArrayManager
+        """
+        result_arrays: list[np.ndarray] = []
+        result_indices: list[int] = []
+
+        for i, arr in enumerate(self.arrays):
+            # grouped_reduce functions all expect 2D arrays
+            arr = ensure_block_shape(arr, ndim=2)
+            res = func(arr)
+            if res.ndim == 2:
+                # reverse of ensure_block_shape
+                assert res.shape[0] == 1
+                res = res[0]
+
+            result_arrays.append(res)
+            result_indices.append(i)
+
+        if len(result_arrays) == 0:
+            nrows = 0
+        else:
+            nrows = result_arrays[0].shape[0]
+        index = Index(range(nrows))
+
+        columns = self.items
+
+        # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";
+        # expected "List[Union[ndarray, ExtensionArray]]"
+        return type(self)(result_arrays, [index, columns])  # type: ignore[arg-type]
+
+    def reduce(self, func: Callable) -> Self:
+        """
+        Apply reduction function column-wise, returning a single-row ArrayManager.
+
+        Parameters
+        ----------
+        func : reduction function
+
+        Returns
+        -------
+        ArrayManager
+        """
+        result_arrays: list[np.ndarray] = []
+        for i, arr in enumerate(self.arrays):
+            res = func(arr, axis=0)
+
+            # TODO NaT doesn't preserve dtype, so we need to ensure to create
+            # a timedelta result array if original was timedelta
+            # what if datetime results in timedelta? (eg std)
+            dtype = arr.dtype if res is NaT else None
+            result_arrays.append(
+                sanitize_array([res], None, dtype=dtype)  # type: ignore[arg-type]
+            )
+
+        index = Index._simple_new(np.array([None], dtype=object))  # placeholder
+        columns = self.items
+
+        # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]";
+        # expected "List[Union[ndarray, ExtensionArray]]"
+        new_mgr = type(self)(result_arrays, [index, columns])  # type: ignore[arg-type]
+        return new_mgr
+
+    def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager:
+        """
+        Apply array_op blockwise with another (aligned) ArrayManager.
+        """
+        # TODO what if `other` is BlockManager ?
+        left_arrays = self.arrays
+        right_arrays = other.arrays
+        result_arrays = [
+            array_op(left, right) for left, right in zip(left_arrays, right_arrays)
+        ]
+        return type(self)(result_arrays, self._axes)
+
+    def quantile(
+        self,
+        *,
+        qs: Index,  # with dtype float64
+        transposed: bool = False,
+        interpolation: QuantileInterpolation = "linear",
+    ) -> ArrayManager:
+        arrs = [ensure_block_shape(x, 2) for x in self.arrays]
+        new_arrs = [
+            quantile_compat(x, np.asarray(qs._values), interpolation) for x in arrs
+        ]
+        for i, arr in enumerate(new_arrs):
+            if arr.ndim == 2:
+                assert arr.shape[0] == 1, arr.shape
+                new_arrs[i] = arr[0]
+
+        axes = [qs, self._axes[1]]
+        return type(self)(new_arrs, axes)
+
+    # ----------------------------------------------------------------
+
+    def unstack(self, unstacker, fill_value) -> ArrayManager:
+        """
+        Return an ArrayManager with all arrays unstacked.
+
+        Parameters
+        ----------
+        unstacker : reshape._Unstacker
+        fill_value : Any
+            fill_value for newly introduced missing values.
+
+        Returns
+        -------
+        unstacked : ArrayManager
+        """
+        indexer, _ = unstacker._indexer_and_to_sort
+        if unstacker.mask.all():
+            new_indexer = indexer
+            allow_fill = False
+            new_mask2D = None
+            needs_masking = None
+        else:
+            new_indexer = np.full(unstacker.mask.shape, -1)
+            new_indexer[unstacker.mask] = indexer
+            allow_fill = True
+            # calculating the full mask once and passing it to take_1d is faster
+            # than letting take_1d calculate it in each repeated call
+            new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape)
+            needs_masking = new_mask2D.any(axis=0)
+        new_indexer2D = new_indexer.reshape(*unstacker.full_shape)
+        new_indexer2D = ensure_platform_int(new_indexer2D)
+
+        new_arrays = []
+        for arr in self.arrays:
+            for i in range(unstacker.full_shape[1]):
+                if allow_fill:
+                    # error: Value of type "Optional[Any]" is not indexable [index]
+                    new_arr = take_1d(
+                        arr,
+                        new_indexer2D[:, i],
+                        allow_fill=needs_masking[i],  # type: ignore[index]
+                        fill_value=fill_value,
+                        mask=new_mask2D[:, i],  # type: ignore[index]
+                    )
+                else:
+                    new_arr = take_1d(arr, new_indexer2D[:, i], allow_fill=False)
+                new_arrays.append(new_arr)
+
+        new_index = unstacker.new_index
+        new_columns = unstacker.get_new_columns(self._axes[1])
+        new_axes = [new_index, new_columns]
+
+        return type(self)(new_arrays, new_axes, verify_integrity=False)
+
+    def as_array(
+        self,
+        dtype=None,
+        copy: bool = False,
+        na_value: object = lib.no_default,
+    ) -> np.ndarray:
+        """
+        Convert the ArrayManager data into a numpy array.
+
+        Parameters
+        ----------
+        dtype : object, default None
+            Data type of the return array.
+        copy : bool, default False
+            If True then guarantee that a copy is returned. A value of
+            False does not guarantee that the underlying data is not
+            copied.
+        na_value : object, default lib.no_default
+            Value to be used as the missing value sentinel.
+
+        Returns
+        -------
+        arr : ndarray
+        """
+        if len(self.arrays) == 0:
+            empty_arr = np.empty(self.shape, dtype=float)
+            return empty_arr.transpose()
+
+        # We want to copy when na_value is provided to avoid
+        # mutating the original object
+        copy = copy or na_value is not lib.no_default
+
+        if not dtype:
+            dtype = interleaved_dtype([arr.dtype for arr in self.arrays])
+
+        dtype = ensure_np_dtype(dtype)
+
+        result = np.empty(self.shape_proper, dtype=dtype)
+
+        for i, arr in enumerate(self.arrays):
+            arr = arr.astype(dtype, copy=copy)
+            result[:, i] = arr
+
+        if na_value is not lib.no_default:
+            result[isna(result)] = na_value
+
+        return result
+
+    @classmethod
+    def concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self:
+        """
+        Concatenate uniformly-indexed ArrayManagers horizontally.
+        """
+        # concatting along the columns -> combine reindexed arrays in a single manager
+        arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs]))
+        new_mgr = cls(arrays, [axes[1], axes[0]], verify_integrity=False)
+        return new_mgr
+
+    @classmethod
+    def concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self:
+        """
+        Concatenate uniformly-indexed ArrayManagers vertically.
+        """
+        # concatting along the rows -> concat the reindexed arrays
+        # TODO(ArrayManager) doesn't yet preserve the correct dtype
+        arrays = [
+            concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))])
+            for j in range(len(mgrs[0].arrays))
+        ]
+        new_mgr = cls(arrays, [axes[1], axes[0]], verify_integrity=False)
+        return new_mgr
+
+
+class SingleArrayManager(BaseArrayManager, SingleDataManager):
+    __slots__ = [
+        "_axes",  # private attribute, because 'axes' has different order, see below
+        "arrays",
+    ]
+
+    arrays: list[np.ndarray | ExtensionArray]
+    _axes: list[Index]
+
+    @property
+    def ndim(self) -> Literal[1]:
+        return 1
+
+    def __init__(
+        self,
+        arrays: list[np.ndarray | ExtensionArray],
+        axes: list[Index],
+        verify_integrity: bool = True,
+    ) -> None:
+        self._axes = axes
+        self.arrays = arrays
+
+        if verify_integrity:
+            assert len(axes) == 1
+            assert len(arrays) == 1
+            self._axes = [ensure_index(ax) for ax in self._axes]
+            arr = arrays[0]
+            arr = maybe_coerce_values(arr)
+            arr = extract_pandas_array(arr, None, 1)[0]
+            self.arrays = [arr]
+            self._verify_integrity()
+
+    def _verify_integrity(self) -> None:
+        (n_rows,) = self.shape
+        assert len(self.arrays) == 1
+        arr = self.arrays[0]
+        assert len(arr) == n_rows
+        if not arr.ndim == 1:
+            raise ValueError(
+                "Passed array should be 1-dimensional, got array with "
+                f"{arr.ndim} dimensions instead."
+            )
+
+    @staticmethod
+    def _normalize_axis(axis):
+        return axis
+
+    def make_empty(self, axes=None) -> Self:
+        """Return an empty ArrayManager with index/array of length 0"""
+        if axes is None:
+            axes = [Index([], dtype=object)]
+        array: np.ndarray = np.array([], dtype=self.dtype)
+        return type(self)([array], axes)
+
+    @classmethod
+    def from_array(cls, array, index) -> SingleArrayManager:
+        return cls([array], [index])
+
+    # error: Cannot override writeable attribute with read-only property
+    @property
+    def axes(self) -> list[Index]:  # type: ignore[override]
+        return self._axes
+
+    @property
+    def index(self) -> Index:
+        return self._axes[0]
+
+    @property
+    def dtype(self):
+        return self.array.dtype
+
+    def external_values(self):
+        """The array that Series.values returns"""
+        return external_values(self.array)
+
+    def internal_values(self):
+        """The array that Series._values returns"""
+        return self.array
+
+    def array_values(self):
+        """The array that Series.array returns"""
+        arr = self.array
+        if isinstance(arr, np.ndarray):
+            arr = NumpyExtensionArray(arr)
+        return arr
+
+    @property
+    def _can_hold_na(self) -> bool:
+        if isinstance(self.array, np.ndarray):
+            return self.array.dtype.kind not in "iub"
+        else:
+            # ExtensionArray
+            return self.array._can_hold_na
+
+    @property
+    def is_single_block(self) -> bool:
+        return True
+
+    def fast_xs(self, loc: int) -> SingleArrayManager:
+        raise NotImplementedError("Use series._values[loc] instead")
+
+    def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleArrayManager:
+        if axis >= self.ndim:
+            raise IndexError("Requested axis not found in manager")
+
+        new_array = self.array[slobj]
+        new_index = self.index._getitem_slice(slobj)
+        return type(self)([new_array], [new_index], verify_integrity=False)
+
+    def get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> SingleArrayManager:
+        new_array = self.array[indexer]
+        new_index = self.index[indexer]
+        return type(self)([new_array], [new_index])
+
+    # error: Signature of "apply" incompatible with supertype "BaseArrayManager"
+    def apply(self, func, **kwargs) -> Self:  # type: ignore[override]
+        if callable(func):
+            new_array = func(self.array, **kwargs)
+        else:
+            new_array = getattr(self.array, func)(**kwargs)
+        return type(self)([new_array], self._axes)
+
+    def setitem(self, indexer, value, warn: bool = True) -> SingleArrayManager:
+        """
+        Set values with indexer.
+
+        For SingleArrayManager, this backs s[indexer] = value
+
+        See `setitem_inplace` for a version that works inplace and doesn't
+        return a new Manager.
+        """
+        if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim:
+            raise ValueError(f"Cannot set values with ndim > {self.ndim}")
+        return self.apply_with_block("setitem", indexer=indexer, value=value)
+
+    def idelete(self, indexer) -> SingleArrayManager:
+        """
+        Delete selected locations in-place (new array, same ArrayManager)
+        """
+        to_keep = np.ones(self.shape[0], dtype=np.bool_)
+        to_keep[indexer] = False
+
+        self.arrays = [self.arrays[0][to_keep]]
+        self._axes = [self._axes[0][to_keep]]
+        return self
+
+    def _get_data_subset(self, predicate: Callable) -> SingleArrayManager:
+        # used in get_numeric_data / get_bool_data
+        if predicate(self.array):
+            return type(self)(self.arrays, self._axes, verify_integrity=False)
+        else:
+            return self.make_empty()
+
+    def set_values(self, values: ArrayLike) -> None:
+        """
+        Set (replace) the values of the SingleArrayManager in place.
+
+        Use at your own risk! This does not check if the passed values are
+        valid for the current SingleArrayManager (length, dtype, etc).
+        """
+        self.arrays[0] = values
+
+    def to_2d_mgr(self, columns: Index) -> ArrayManager:
+        """
+        Manager analogue of Series.to_frame
+        """
+        arrays = [self.arrays[0]]
+        axes = [self.axes[0], columns]
+
+        return ArrayManager(arrays, axes, verify_integrity=False)
+
+
+class NullArrayProxy:
+    """
+    Proxy object for an all-NA array.
+
+    Only stores the length of the array, and not the dtype. The dtype
+    will only be known when actually concatenating (after determining the
+    common dtype, for which this proxy is ignored).
+    Using this object avoids internals/concat.py needing to determine
+    the proper dtype and array type.
+    """
+
+    ndim = 1
+
+    def __init__(self, n: int) -> None:
+        self.n = n
+
+    @property
+    def shape(self) -> tuple[int]:
+        return (self.n,)
+
+    def to_array(self, dtype: DtypeObj) -> ArrayLike:
+        """
+        Helper function to create the actual all-NA array from the NullArrayProxy
+        object.
+
+        Parameters
+        ----------
+        dtype : the dtype for the resulting array
+
+        Returns
+        -------
+        np.ndarray or ExtensionArray
+        """
+        if isinstance(dtype, ExtensionDtype):
+            empty = dtype.construct_array_type()._from_sequence([], dtype=dtype)
+            indexer = -np.ones(self.n, dtype=np.intp)
+            return empty.take(indexer, allow_fill=True)
+        else:
+            # when introducing missing values, int becomes float, bool becomes object
+            dtype = ensure_dtype_can_hold_na(dtype)
+            fill_value = na_value_for_dtype(dtype)
+            arr = np.empty(self.n, dtype=dtype)
+            arr.fill(fill_value)
+            return ensure_wrapped_if_datetimelike(arr)
+
+
+def concat_arrays(to_concat: list) -> ArrayLike:
+    """
+    Alternative for concat_compat but specialized for use in the ArrayManager.
+
+    Differences: only deals with 1D arrays (no axis keyword), assumes
+    ensure_wrapped_if_datetimelike and does not skip empty arrays to determine
+    the dtype.
+    In addition, it ensures that all NullArrayProxies get replaced with actual
+    arrays.
+
+    Parameters
+    ----------
+    to_concat : list of arrays
+
+    Returns
+    -------
+    np.ndarray or ExtensionArray
+    """
+    # ignore the all-NA proxies to determine the resulting dtype
+    to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)]
+
+    dtypes = {x.dtype for x in to_concat_no_proxy}
+    single_dtype = len(dtypes) == 1
+
+    if single_dtype:
+        target_dtype = to_concat_no_proxy[0].dtype
+    elif all(lib.is_np_dtype(x, "iub") for x in dtypes):
+        # GH#42092
+        target_dtype = np_find_common_type(*dtypes)
+    else:
+        target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy])
+
+    to_concat = [
+        arr.to_array(target_dtype)
+        if isinstance(arr, NullArrayProxy)
+        else astype_array(arr, target_dtype, copy=False)
+        for arr in to_concat
+    ]
+
+    if isinstance(to_concat[0], ExtensionArray):
+        cls = type(to_concat[0])
+        return cls._concat_same_type(to_concat)
+
+    result = np.concatenate(to_concat)
+
+    # TODO decide on exact behaviour (we shouldn't do this only for empty result)
+    # see https://github.com/pandas-dev/pandas/issues/39817
+    if len(result) == 0:
+        # all empties -> check for bool to not coerce to float
+        kinds = {obj.dtype.kind for obj in to_concat_no_proxy}
+        if len(kinds) != 1:
+            if "b" in kinds:
+                result = result.astype(object)
+    return result
llmeval-env/lib/python3.10/site-packages/pandas/core/internals/base.py ADDED
@@ -0,0 +1,407 @@
+"""
+Base class for the internal managers. Both BlockManager and ArrayManager
+inherit from this class.
+"""
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    Literal,
+    cast,
+    final,
+)
+
+import numpy as np
+
+from pandas._config import (
+    using_copy_on_write,
+    warn_copy_on_write,
+)
+
+from pandas._libs import (
+    algos as libalgos,
+    lib,
+)
+from pandas.errors import AbstractMethodError
+from pandas.util._validators import validate_bool_kwarg
+
+from pandas.core.dtypes.cast import (
+    find_common_type,
+    np_can_hold_element,
+)
+from pandas.core.dtypes.dtypes import (
+    ExtensionDtype,
+    SparseDtype,
+)
+
+from pandas.core.base import PandasObject
+from pandas.core.construction import extract_array
+from pandas.core.indexes.api import (
+    Index,
+    default_index,
+)
+
+if TYPE_CHECKING:
+    from pandas._typing import (
+        ArrayLike,
+        AxisInt,
+        DtypeObj,
+        Self,
+        Shape,
+    )
+
+
+class _AlreadyWarned:
+    def __init__(self):
+        # This class is passed from the manager level to the block level to
+        # ensure that we warn only once. The block method can update the
+        # warned_already option without returning a value to keep the
+        # interface consistent. This is only a temporary solution for
+        # CoW warnings.
+        self.warned_already = False
+
+
+class DataManager(PandasObject):
+    # TODO share more methods/attributes
+
+    axes: list[Index]
+
+    @property
+    def items(self) -> Index:
+        raise AbstractMethodError(self)
+
+    @final
+    def __len__(self) -> int:
+        return len(self.items)
+
+    @property
+    def ndim(self) -> int:
+        return len(self.axes)
+
+    @property
+    def shape(self) -> Shape:
+        return tuple(len(ax) for ax in self.axes)
+
+    @final
+    def _validate_set_axis(self, axis: AxisInt, new_labels: Index) -> None:
+        # Caller is responsible for ensuring we have an Index object.
+        old_len = len(self.axes[axis])
+        new_len = len(new_labels)
+
+        if axis == 1 and len(self.items) == 0:
+            # If we are setting the index on a DataFrame with no columns,
+            # it is OK to change the length.
+            pass
+
+        elif new_len != old_len:
+            raise ValueError(
+                f"Length mismatch: Expected axis has {old_len} elements, new "
+                f"values have {new_len} elements"
+            )
+
+    def reindex_indexer(
+        self,
+        new_axis,
+        indexer,
+        axis: AxisInt,
+        fill_value=None,
+        allow_dups: bool = False,
+        copy: bool = True,
+        only_slice: bool = False,
+    ) -> Self:
+        raise AbstractMethodError(self)
+
+    @final
+    def reindex_axis(
+        self,
+        new_index: Index,
+        axis: AxisInt,
+        fill_value=None,
+        only_slice: bool = False,
+    ) -> Self:
+        """
+        Conform data manager to new index.
+        """
+        new_index, indexer = self.axes[axis].reindex(new_index)
+
+        return self.reindex_indexer(
+            new_index,
+            indexer,
+            axis=axis,
+            fill_value=fill_value,
+            copy=False,
+            only_slice=only_slice,
+        )
+
+    def _equal_values(self, other: Self) -> bool:
+        """
+        To be implemented by the subclasses. Only check the column values
+        assuming shape and indexes have already been checked.
+        """
+        raise AbstractMethodError(self)
+
+    @final
+    def equals(self, other: object) -> bool:
+        """
+        Implementation for DataFrame.equals
+        """
+        if not isinstance(other, type(self)):
+            return False
+
+        self_axes, other_axes = self.axes, other.axes
+        if len(self_axes) != len(other_axes):
+            return False
+        if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)):
+            return False
+
+        return self._equal_values(other)
+
+    def apply(
+        self,
+        f,
+        align_keys: list[str] | None = None,
+        **kwargs,
+    ) -> Self:
+        raise AbstractMethodError(self)
+
+    def apply_with_block(
+        self,
+        f,
+        align_keys: list[str] | None = None,
+        **kwargs,
+    ) -> Self:
+        raise AbstractMethodError(self)
+
+    @final
+    def isna(self, func) -> Self:
+        return self.apply("apply", func=func)
+
+    @final
+    def fillna(self, value, limit: int | None, inplace: bool, downcast) -> Self:
+        if limit is not None:
+            # Do this validation even if we go through one of the no-op paths
+            limit = libalgos.validate_limit(None, limit=limit)
+
+        return self.apply_with_block(
+            "fillna",
+            value=value,
+            limit=limit,
+            inplace=inplace,
+            downcast=downcast,
+            using_cow=using_copy_on_write(),
+            already_warned=_AlreadyWarned(),
+        )
+
+    @final
+    def where(self, other, cond, align: bool) -> Self:
+        if align:
+            align_keys = ["other", "cond"]
+        else:
+            align_keys = ["cond"]
+            other = extract_array(other, extract_numpy=True)
+
+        return self.apply_with_block(
+            "where",
+            align_keys=align_keys,
+            other=other,
+            cond=cond,
+            using_cow=using_copy_on_write(),
+        )
+
+    @final
+    def putmask(self, mask, new, align: bool = True, warn: bool = True) -> Self:
+        if align:
+            align_keys = ["new", "mask"]
+        else:
+            align_keys = ["mask"]
+            new = extract_array(new, extract_numpy=True)
+
+        already_warned = None
+        if warn_copy_on_write():
+            already_warned = _AlreadyWarned()
+            if not warn:
+                already_warned.warned_already = True
+
+        return self.apply_with_block(
+            "putmask",
+            align_keys=align_keys,
+            mask=mask,
+            new=new,
+            using_cow=using_copy_on_write(),
+            already_warned=already_warned,
+        )
+
+    @final
+    def round(self, decimals: int, using_cow: bool = False) -> Self:
+        return self.apply_with_block(
+            "round",
+            decimals=decimals,
+            using_cow=using_cow,
+        )
+
+    @final
+    def replace(self, to_replace, value, inplace: bool) -> Self:
+        inplace = validate_bool_kwarg(inplace, "inplace")
+        # NDFrame.replace ensures the not-is_list_likes here
+        assert not lib.is_list_like(to_replace)
+        assert not lib.is_list_like(value)
+        return self.apply_with_block(
+            "replace",
+            to_replace=to_replace,
+            value=value,
+            inplace=inplace,
+            using_cow=using_copy_on_write(),
+            already_warned=_AlreadyWarned(),
+        )
+
+    @final
+    def replace_regex(self, **kwargs) -> Self:
+        return self.apply_with_block(
+            "_replace_regex",
+            **kwargs,
+            using_cow=using_copy_on_write(),
+            already_warned=_AlreadyWarned(),
+        )
+
+    @final
+    def replace_list(
+        self,
+        src_list: list[Any],
+        dest_list: list[Any],
+        inplace: bool = False,
+        regex: bool = False,
+    ) -> Self:
+        """do a list replace"""
+        inplace = validate_bool_kwarg(inplace, "inplace")
+
+        bm = self.apply_with_block(
+            "replace_list",
+            src_list=src_list,
+            dest_list=dest_list,
+            inplace=inplace,
+            regex=regex,
+            using_cow=using_copy_on_write(),
+            already_warned=_AlreadyWarned(),
+        )
+        bm._consolidate_inplace()
+        return bm
+
+    def interpolate(self, inplace: bool, **kwargs) -> Self:
+        return self.apply_with_block(
+            "interpolate",
+            inplace=inplace,
+            **kwargs,
+            using_cow=using_copy_on_write(),
+            already_warned=_AlreadyWarned(),
+        )
+
+    def pad_or_backfill(self, inplace: bool, **kwargs) -> Self:
+        return self.apply_with_block(
+            "pad_or_backfill",
+            inplace=inplace,
+            **kwargs,
+            using_cow=using_copy_on_write(),
+            already_warned=_AlreadyWarned(),
+        )
+
+    def shift(self, periods: int, fill_value) -> Self:
+        if fill_value is lib.no_default:
+            fill_value = None
+
+        return self.apply_with_block("shift", periods=periods, fill_value=fill_value)
+
+    # --------------------------------------------------------------------
+    # Consolidation: No-ops for all but BlockManager
+
+    def is_consolidated(self) -> bool:
+        return True
+
+    def consolidate(self) -> Self:
+        return self
+
+    def _consolidate_inplace(self) -> None:
+        return
+
+
+class SingleDataManager(DataManager):
+    @property
+    def ndim(self) -> Literal[1]:
+        return 1
+
+    @final
+    @property
+    def array(self) -> ArrayLike:
+        """
+        Quick access to the backing array of the Block or SingleArrayManager.
+        """
+        # error: "SingleDataManager" has no attribute "arrays"; maybe "array"
+        return self.arrays[0]  # type: ignore[attr-defined]
+
+    def setitem_inplace(self, indexer, value, warn: bool = True) -> None:
+        """
+        Set values with indexer.
+
+        For Single[Block/Array]Manager, this backs s[indexer] = value
+
+        This is an inplace version of `setitem()`, mutating the manager/values
+        in place, not returning a new Manager (and Block), and thus never changing
+        the dtype.
+        """
+        arr = self.array
+
+        # EAs will do this validation in their own __setitem__ methods.
+        if isinstance(arr, np.ndarray):
+            # Note: checking for ndarray instead of np.dtype means we exclude
+            # dt64/td64, which do their own validation.
+            value = np_can_hold_element(arr.dtype, value)
+
+        if isinstance(value, np.ndarray) and value.ndim == 1 and len(value) == 1:
+            # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615
+            value = value[0, ...]
+
+        arr[indexer] = value
+
+    def grouped_reduce(self, func):
+        arr = self.array
+        res = func(arr)
+        index = default_index(len(res))
+
+        mgr = type(self).from_array(res, index)
+        return mgr
+
+    @classmethod
+    def from_array(cls, arr: ArrayLike, index: Index):
+        raise AbstractMethodError(cls)
+
+
+def interleaved_dtype(dtypes: list[DtypeObj]) -> DtypeObj | None:
+    """
+    Find the common dtype for `dtypes`.
+
+    Parameters
+    ----------
+    dtypes : List[DtypeObj]
+
+    Returns
+    -------
+    dtype : np.dtype, ExtensionDtype, or None
+        None is returned when `dtypes` is empty.
+    """
+    if not len(dtypes):
+        return None
+
+    return find_common_type(dtypes)
+
+
+def ensure_np_dtype(dtype: DtypeObj) -> np.dtype:
+    # TODO: https://github.com/pandas-dev/pandas/issues/22791
+    # Give EAs some input on what happens here. Sparse needs this.
+    if isinstance(dtype, SparseDtype):
+        dtype = dtype.subtype
+        dtype = cast(np.dtype, dtype)
+    elif isinstance(dtype, ExtensionDtype):
+        dtype = np.dtype("object")
+    elif dtype == np.dtype(str):
+        dtype = np.dtype("object")
+    return dtype
llmeval-env/lib/python3.10/site-packages/pandas/core/internals/blocks.py ADDED
The diff for this file is too large to render.
 
llmeval-env/lib/python3.10/site-packages/pandas/core/internals/concat.py ADDED
@@ -0,0 +1,598 @@
1
+ from __future__ import annotations
2
+
3
+ from typing import (
4
+ TYPE_CHECKING,
5
+ cast,
6
+ )
7
+ import warnings
8
+
9
+ import numpy as np
10
+
11
+ from pandas._libs import (
12
+ NaT,
13
+ algos as libalgos,
14
+ internals as libinternals,
15
+ lib,
16
+ )
17
+ from pandas._libs.missing import NA
18
+ from pandas.util._decorators import cache_readonly
19
+ from pandas.util._exceptions import find_stack_level
20
+
21
+ from pandas.core.dtypes.cast import (
22
+ ensure_dtype_can_hold_na,
23
+ find_common_type,
24
+ )
25
+ from pandas.core.dtypes.common import (
26
+ is_1d_only_ea_dtype,
27
+ is_scalar,
28
+ needs_i8_conversion,
29
+ )
30
+ from pandas.core.dtypes.concat import concat_compat
31
+ from pandas.core.dtypes.dtypes import (
32
+ ExtensionDtype,
33
+ SparseDtype,
34
+ )
35
+ from pandas.core.dtypes.missing import (
36
+ is_valid_na_for_dtype,
37
+ isna,
38
+ isna_all,
39
+ )
40
+
41
+ from pandas.core.construction import ensure_wrapped_if_datetimelike
42
+ from pandas.core.internals.array_manager import ArrayManager
43
+ from pandas.core.internals.blocks import (
44
+ ensure_block_shape,
45
+ new_block_2d,
46
+ )
47
+ from pandas.core.internals.managers import (
48
+ BlockManager,
49
+ make_na_array,
50
+ )
51
+
52
+ if TYPE_CHECKING:
53
+ from collections.abc import Sequence
54
+
55
+ from pandas._typing import (
56
+ ArrayLike,
57
+ AxisInt,
58
+ DtypeObj,
59
+ Manager2D,
60
+ Shape,
61
+ )
62
+
63
+ from pandas import Index
64
+ from pandas.core.internals.blocks import (
65
+ Block,
66
+ BlockPlacement,
67
+ )
68
+
69
+
70
+ def _concatenate_array_managers(
71
+ mgrs: list[ArrayManager], axes: list[Index], concat_axis: AxisInt
72
+ ) -> Manager2D:
73
+ """
74
+ Concatenate array managers into one.
75
+
76
+ Parameters
77
+ ----------
78
+ mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples
79
+ axes : list of Index
80
+ concat_axis : int
81
+
82
+ Returns
83
+ -------
84
+ ArrayManager
85
+ """
86
+ if concat_axis == 1:
87
+ return mgrs[0].concat_vertical(mgrs, axes)
88
+ else:
89
+ # concatting along the columns -> combine reindexed arrays in a single manager
90
+ assert concat_axis == 0
91
+ return mgrs[0].concat_horizontal(mgrs, axes)
92
+
93
+
94
+ def concatenate_managers(
95
+ mgrs_indexers, axes: list[Index], concat_axis: AxisInt, copy: bool
96
+ ) -> Manager2D:
97
+ """
98
+ Concatenate block managers into one.
99
+
100
+ Parameters
101
+ ----------
102
+ mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples
103
+ axes : list of Index
104
+ concat_axis : int
105
+ copy : bool
106
+
107
+ Returns
108
+ -------
109
+ BlockManager
110
+ """
111
+
112
+ needs_copy = copy and concat_axis == 0
113
+
114
+ # TODO(ArrayManager) this assumes that all managers are of the same type
115
+ if isinstance(mgrs_indexers[0][0], ArrayManager):
116
+ mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy)
117
+ # error: Argument 1 to "_concatenate_array_managers" has incompatible
118
+ # type "List[BlockManager]"; expected "List[Union[ArrayManager,
119
+ # SingleArrayManager, BlockManager, SingleBlockManager]]"
120
+ return _concatenate_array_managers(
121
+ mgrs, axes, concat_axis # type: ignore[arg-type]
122
+ )
123
+
124
+ # Assertions disabled for performance
125
+ # for tup in mgrs_indexers:
126
+ # # caller is responsible for ensuring this
127
+ # indexers = tup[1]
128
+ # assert concat_axis not in indexers
129
+
130
+ if concat_axis == 0:
131
+ mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy)
132
+ return mgrs[0].concat_horizontal(mgrs, axes)
133
+
134
+ if len(mgrs_indexers) > 0 and mgrs_indexers[0][0].nblocks > 0:
135
+ first_dtype = mgrs_indexers[0][0].blocks[0].dtype
136
+ if first_dtype in [np.float64, np.float32]:
137
+ # TODO: support more dtypes here. This will be simpler once
138
+ # JoinUnit.is_na behavior is deprecated.
139
+ if (
140
+ all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in mgrs_indexers)
141
+ and len(mgrs_indexers) > 1
142
+ ):
143
+ # Fastpath!
144
+ # Length restriction is just to avoid having to worry about 'copy'
145
+ shape = tuple(len(x) for x in axes)
146
+ nb = _concat_homogeneous_fastpath(mgrs_indexers, shape, first_dtype)
147
+ return BlockManager((nb,), axes)
148
+
149
+ mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy)
150
+
151
+ if len(mgrs) == 1:
152
+ mgr = mgrs[0]
153
+ out = mgr.copy(deep=False)
154
+ out.axes = axes
155
+ return out
156
+
157
+ concat_plan = _get_combined_plan(mgrs)
158
+
159
+ blocks = []
160
+ values: ArrayLike
161
+
162
+ for placement, join_units in concat_plan:
163
+ unit = join_units[0]
164
+ blk = unit.block
165
+
166
+ if _is_uniform_join_units(join_units):
167
+ vals = [ju.block.values for ju in join_units]
168
+
169
+ if not blk.is_extension:
170
+ # _is_uniform_join_units ensures a single dtype, so
171
+ # we can use np.concatenate, which is more performant
172
+ # than concat_compat
173
+ # error: Argument 1 to "concatenate" has incompatible type
174
+ # "List[Union[ndarray[Any, Any], ExtensionArray]]";
175
+ # expected "Union[_SupportsArray[dtype[Any]],
176
+ # _NestedSequence[_SupportsArray[dtype[Any]]]]"
177
+ values = np.concatenate(vals, axis=1) # type: ignore[arg-type]
178
+ elif is_1d_only_ea_dtype(blk.dtype):
179
+ # TODO(EA2D): special-casing not needed with 2D EAs
180
+ values = concat_compat(vals, axis=0, ea_compat_axis=True)
181
+ values = ensure_block_shape(values, ndim=2)
182
+ else:
183
+ values = concat_compat(vals, axis=1)
184
+
185
+ values = ensure_wrapped_if_datetimelike(values)
186
+
187
+ fastpath = blk.values.dtype == values.dtype
188
+ else:
189
+ values = _concatenate_join_units(join_units, copy=copy)
190
+ fastpath = False
191
+
192
+ if fastpath:
193
+ b = blk.make_block_same_class(values, placement=placement)
194
+ else:
195
+ b = new_block_2d(values, placement=placement)
196
+
197
+ blocks.append(b)
198
+
199
+ return BlockManager(tuple(blocks), axes)
200
+
201
+
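As an illustration of the float fastpath above (an editor's sketch, not part of this commit; behavior assumed from the pandas version this diff vendors), row-wise concatenation of single-block float64 frames comes back as one preallocated block:

```python
import numpy as np
import pandas as pd

left = pd.DataFrame(np.ones((2, 3), dtype=np.float64))
right = pd.DataFrame(np.zeros((2, 3), dtype=np.float64))

# Row-wise pd.concat maps to concat_axis == 1 on the manager, where the
# homogeneous float64 fastpath applies: one output block, no consolidation.
out = pd.concat([left, right], ignore_index=True)
print(out._mgr.nblocks)  # 1
```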
202
+ def _maybe_reindex_columns_na_proxy(
203
+ axes: list[Index],
204
+ mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]],
205
+ needs_copy: bool,
206
+ ) -> list[BlockManager]:
207
+ """
208
+ Reindex along columns so that all of the BlockManagers being concatenated
209
+ have matching columns.
210
+
211
+ Columns added in this reindexing have dtype=np.void, indicating they
212
+ should be ignored when choosing a column's final dtype.
213
+ """
214
+ new_mgrs = []
215
+
216
+ for mgr, indexers in mgrs_indexers:
217
+ # For axis=0 (i.e. columns) we use_na_proxy and only_slice, so this
218
+ # is a cheap reindexing.
219
+ for i, indexer in indexers.items():
220
+ mgr = mgr.reindex_indexer(
221
+ axes[i],
222
+ indexers[i],
223
+ axis=i,
224
+ copy=False,
225
+ only_slice=True, # only relevant for i==0
226
+ allow_dups=True,
227
+ use_na_proxy=True, # only relevant for i==0
228
+ )
229
+ if needs_copy and not indexers:
230
+ mgr = mgr.copy()
231
+
232
+ new_mgrs.append(mgr)
233
+ return new_mgrs
234
+
235
+
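A short sketch of what the np.void proxy columns mean in practice (illustrative, not part of the diff): columns missing from one input are added as ignorable placeholders, so only the real entries drive the result dtype:

```python
import pandas as pd

df1 = pd.DataFrame({"a": [1, 2]})
df2 = pd.DataFrame({"b": [3.0, 4.0]})

# 'b' is reindexed into df1 (and 'a' into df2) as an all-NA void proxy;
# 'a' is then upcast to float64 only because it must hold the NaNs.
out = pd.concat([df1, df2], ignore_index=True)
print(out.dtypes)  # a    float64 / b    float64
```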
236
+ def _is_homogeneous_mgr(mgr: BlockManager, first_dtype: DtypeObj) -> bool:
237
+ """
238
+ Check if this Manager can be treated as a single ndarray.
239
+ """
240
+ if mgr.nblocks != 1:
241
+ return False
242
+ blk = mgr.blocks[0]
243
+ if not (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1):
244
+ return False
245
+
246
+ return blk.dtype == first_dtype
247
+
248
+
249
+ def _concat_homogeneous_fastpath(
250
+ mgrs_indexers, shape: Shape, first_dtype: np.dtype
251
+ ) -> Block:
252
+ """
253
+ With single-Block managers with homogeneous dtypes (that can already hold nan),
254
+ we avoid [...]
255
+ """
256
+ # assumes
257
+ # all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in mgrs_indexers)
258
+
259
+ if all(not indexers for _, indexers in mgrs_indexers):
260
+ # https://github.com/pandas-dev/pandas/pull/52685#issuecomment-1523287739
261
+ arrs = [mgr.blocks[0].values.T for mgr, _ in mgrs_indexers]
262
+ arr = np.concatenate(arrs).T
263
+ bp = libinternals.BlockPlacement(slice(shape[0]))
264
+ nb = new_block_2d(arr, bp)
265
+ return nb
266
+
267
+ arr = np.empty(shape, dtype=first_dtype)
268
+
269
+ if first_dtype == np.float64:
270
+ take_func = libalgos.take_2d_axis0_float64_float64
271
+ else:
272
+ take_func = libalgos.take_2d_axis0_float32_float32
273
+
274
+ start = 0
275
+ for mgr, indexers in mgrs_indexers:
276
+ mgr_len = mgr.shape[1]
277
+ end = start + mgr_len
278
+
279
+ if 0 in indexers:
280
+ take_func(
281
+ mgr.blocks[0].values,
282
+ indexers[0],
283
+ arr[:, start:end],
284
+ )
285
+ else:
286
+ # No reindexing necessary, we can copy values directly
287
+ arr[:, start:end] = mgr.blocks[0].values
288
+
289
+ start += mgr_len
290
+
291
+ bp = libinternals.BlockPlacement(slice(shape[0]))
292
+ nb = new_block_2d(arr, bp)
293
+ return nb
294
+
295
+
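The preallocate-and-fill pattern used above, reduced to plain NumPy (a sketch under the assumption that all inputs share one dtype):

```python
import numpy as np

parts = [np.ones((3, 2)), np.zeros((3, 4))]  # one dtype, varying widths
out = np.empty((3, sum(p.shape[1] for p in parts)), dtype=np.float64)

start = 0
for p in parts:
    end = start + p.shape[1]
    out[:, start:end] = p  # write each piece in place; no intermediate copies
    start = end
```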
296
+ def _get_combined_plan(
297
+ mgrs: list[BlockManager],
298
+ ) -> list[tuple[BlockPlacement, list[JoinUnit]]]:
299
+ plan = []
300
+
301
+ max_len = mgrs[0].shape[0]
302
+
303
+ blknos_list = [mgr.blknos for mgr in mgrs]
304
+ pairs = libinternals.get_concat_blkno_indexers(blknos_list)
305
+ for ind, (blknos, bp) in enumerate(pairs):
306
+ # assert bp.is_slice_like
307
+ # assert len(bp) > 0
308
+
309
+ units_for_bp = []
310
+ for k, mgr in enumerate(mgrs):
311
+ blkno = blknos[k]
312
+
313
+ nb = _get_block_for_concat_plan(mgr, bp, blkno, max_len=max_len)
314
+ unit = JoinUnit(nb)
315
+ units_for_bp.append(unit)
316
+
317
+ plan.append((bp, units_for_bp))
318
+
319
+ return plan
320
+
321
+
322
+ def _get_block_for_concat_plan(
323
+ mgr: BlockManager, bp: BlockPlacement, blkno: int, *, max_len: int
324
+ ) -> Block:
325
+ blk = mgr.blocks[blkno]
326
+ # Assertions disabled for performance:
327
+ # assert bp.is_slice_like
328
+ # assert blkno != -1
329
+ # assert (mgr.blknos[bp] == blkno).all()
330
+
331
+ if len(bp) == len(blk.mgr_locs) and (
332
+ blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1
333
+ ):
334
+ nb = blk
335
+ else:
336
+ ax0_blk_indexer = mgr.blklocs[bp.indexer]
337
+
338
+ slc = lib.maybe_indices_to_slice(ax0_blk_indexer, max_len)
339
+ # TODO: in all extant test cases 2023-04-08 we have a slice here.
340
+ # Will this always be the case?
341
+ if isinstance(slc, slice):
342
+ nb = blk.slice_block_columns(slc)
343
+ else:
344
+ nb = blk.take_block_columns(slc)
345
+
346
+ # assert nb.shape == (len(bp), mgr.shape[1])
347
+ return nb
348
+
349
+
350
+ class JoinUnit:
351
+ def __init__(self, block: Block) -> None:
352
+ self.block = block
353
+
354
+ def __repr__(self) -> str:
355
+ return f"{type(self).__name__}({repr(self.block)})"
356
+
357
+ def _is_valid_na_for(self, dtype: DtypeObj) -> bool:
358
+ """
359
+ Check that we are all-NA of a type/dtype that is compatible with this dtype.
360
+ Augments `self.is_na` with an additional check of the type of NA values.
361
+ """
362
+ if not self.is_na:
363
+ return False
364
+
365
+ blk = self.block
366
+ if blk.dtype.kind == "V":
367
+ return True
368
+
369
+ if blk.dtype == object:
370
+ values = blk.values
371
+ return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K"))
372
+
373
+ na_value = blk.fill_value
374
+ if na_value is NaT and blk.dtype != dtype:
375
+ # e.g. we are dt64 and other is td64
376
+ # fill_values match but we should not cast blk.values to dtype
377
+ # TODO: this will need updating if we ever have non-nano dt64/td64
378
+ return False
379
+
380
+ if na_value is NA and needs_i8_conversion(dtype):
381
+ # FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat
382
+ # e.g. blk.dtype == "Int64" and dtype is td64, we dont want
383
+ # to consider these as matching
384
+ return False
385
+
386
+ # TODO: better to use can_hold_element?
387
+ return is_valid_na_for_dtype(na_value, dtype)
388
+
389
+ @cache_readonly
390
+ def is_na(self) -> bool:
391
+ blk = self.block
392
+ if blk.dtype.kind == "V":
393
+ return True
394
+
395
+ if not blk._can_hold_na:
396
+ return False
397
+
398
+ values = blk.values
399
+ if values.size == 0:
400
+ # GH#39122 this case will return False once deprecation is enforced
401
+ return True
402
+
403
+ if isinstance(values.dtype, SparseDtype):
404
+ return False
405
+
406
+ if values.ndim == 1:
407
+ # TODO(EA2D): no need for special case with 2D EAs
408
+ val = values[0]
409
+ if not is_scalar(val) or not isna(val):
410
+ # ideally isna_all would do this short-circuiting
411
+ return False
412
+ return isna_all(values)
413
+ else:
414
+ val = values[0][0]
415
+ if not is_scalar(val) or not isna(val):
416
+ # ideally isna_all would do this short-circuiting
417
+ return False
418
+ return all(isna_all(row) for row in values)
419
+
420
+ @cache_readonly
421
+ def is_na_after_size_and_isna_all_deprecation(self) -> bool:
422
+ """
423
+ Will self.is_na be True after values.size == 0 deprecation and isna_all
424
+ deprecation are enforced?
425
+ """
426
+ blk = self.block
427
+ if blk.dtype.kind == "V":
428
+ return True
429
+ return False
430
+
431
+ def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike:
432
+ values: ArrayLike
433
+
434
+ if upcasted_na is None and self.block.dtype.kind != "V":
435
+ # No upcasting is necessary
436
+ return self.block.values
437
+ else:
438
+ fill_value = upcasted_na
439
+
440
+ if self._is_valid_na_for(empty_dtype):
441
+ # note: always holds when self.block.dtype.kind == "V"
442
+ blk_dtype = self.block.dtype
443
+
444
+ if blk_dtype == np.dtype("object"):
445
+ # we want to avoid filling with np.nan if we are
446
+ # using None; we already know that we are all
447
+ # nulls
448
+ values = cast(np.ndarray, self.block.values)
449
+ if values.size and values[0, 0] is None:
450
+ fill_value = None
451
+
452
+ return make_na_array(empty_dtype, self.block.shape, fill_value)
453
+
454
+ return self.block.values
455
+
456
+
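To see why `_is_valid_na_for` refuses the NaT-but-different-dtype case, consider concatenating an all-NaT datetime column with a timedelta column (an editor's example; the result is assumed from the dt64/td64 common-type rules):

```python
import pandas as pd

dt_na = pd.DataFrame({"a": pd.Series([pd.NaT], dtype="datetime64[ns]")})
td = pd.DataFrame({"a": pd.Series([pd.Timedelta("1 day")])})

# The NaT fill values match, but datetime64 is not treated as a valid
# all-NA stand-in for timedelta64, so the result falls back to object.
out = pd.concat([dt_na, td], ignore_index=True)
print(out["a"].dtype)  # object
```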
457
+ def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike:
458
+ """
459
+ Concatenate values from several join units along axis=1.
460
+ """
461
+ empty_dtype, empty_dtype_future = _get_empty_dtype(join_units)
462
+
463
+ has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)
464
+ upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks)
465
+
466
+ to_concat = [
467
+ ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na)
468
+ for ju in join_units
469
+ ]
470
+
471
+ if any(is_1d_only_ea_dtype(t.dtype) for t in to_concat):
472
+ # TODO(EA2D): special case not needed if all EAs used HybridBlocks
473
+
474
+ # error: No overload variant of "__getitem__" of "ExtensionArray" matches
475
+ # argument type "Tuple[int, slice]"
476
+ to_concat = [
477
+ t
478
+ if is_1d_only_ea_dtype(t.dtype)
479
+ else t[0, :] # type: ignore[call-overload]
480
+ for t in to_concat
481
+ ]
482
+ concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True)
483
+ concat_values = ensure_block_shape(concat_values, 2)
484
+
485
+ else:
486
+ concat_values = concat_compat(to_concat, axis=1)
487
+
488
+ if empty_dtype != empty_dtype_future:
489
+ if empty_dtype == concat_values.dtype:
490
+ # GH#39122, GH#40893
491
+ warnings.warn(
492
+ "The behavior of DataFrame concatenation with empty or all-NA "
493
+ "entries is deprecated. In a future version, this will no longer "
494
+ "exclude empty or all-NA columns when determining the result dtypes. "
495
+ "To retain the old behavior, exclude the relevant entries before "
496
+ "the concat operation.",
497
+ FutureWarning,
498
+ stacklevel=find_stack_level(),
499
+ )
500
+ return concat_values
501
+
502
+
503
+ def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool):
504
+ """
505
+ Find the NA value to go with this dtype.
506
+ """
507
+ if isinstance(dtype, ExtensionDtype):
508
+ return dtype.na_value
509
+ elif dtype.kind in "mM":
510
+ return dtype.type("NaT")
511
+ elif dtype.kind in "fc":
512
+ return dtype.type("NaN")
513
+ elif dtype.kind == "b":
514
+ # different from missing.na_value_for_dtype
515
+ return None
516
+ elif dtype.kind in "iu":
517
+ if not has_none_blocks:
518
+ # different from missing.na_value_for_dtype
519
+ return None
520
+ return np.nan
521
+ elif dtype.kind == "O":
522
+ return np.nan
523
+ raise NotImplementedError
524
+
525
+
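A few spot checks of the mapping (calling the private helper directly; this assumes the module path `pandas.core.internals.concat` that this diff adds):

```python
import numpy as np
from pandas.core.internals.concat import _dtype_to_na_value

print(_dtype_to_na_value(np.dtype("float64"), has_none_blocks=False))  # nan
print(_dtype_to_na_value(np.dtype("M8[ns]"), has_none_blocks=False))   # NaT
print(_dtype_to_na_value(np.dtype("int64"), has_none_blocks=False))    # None
print(_dtype_to_na_value(np.dtype("int64"), has_none_blocks=True))     # nan
```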
526
+ def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> tuple[DtypeObj, DtypeObj]:
527
+ """
528
+ Return the dtypes to use when concatenating the specified units.
529
+
530
+ The second dtype is what the result will be once the empty/all-NA deprecation is enforced.
531
+
532
+ Returns
533
+ -------
534
+ tuple of (dtype, dtype once the deprecation is enforced)
535
+ """
536
+ if lib.dtypes_all_equal([ju.block.dtype for ju in join_units]):
537
+ empty_dtype = join_units[0].block.dtype
538
+ return empty_dtype, empty_dtype
539
+
540
+ has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units)
541
+
542
+ dtypes = [unit.block.dtype for unit in join_units if not unit.is_na]
543
+ if not len(dtypes):
544
+ dtypes = [
545
+ unit.block.dtype for unit in join_units if unit.block.dtype.kind != "V"
546
+ ]
547
+
548
+ dtype = find_common_type(dtypes)
549
+ if has_none_blocks:
550
+ dtype = ensure_dtype_can_hold_na(dtype)
551
+
552
+ dtype_future = dtype
553
+ if len(dtypes) != len(join_units):
554
+ dtypes_future = [
555
+ unit.block.dtype
556
+ for unit in join_units
557
+ if not unit.is_na_after_size_and_isna_all_deprecation
558
+ ]
559
+ if not len(dtypes_future):
560
+ dtypes_future = [
561
+ unit.block.dtype for unit in join_units if unit.block.dtype.kind != "V"
562
+ ]
563
+
564
+ if len(dtypes) != len(dtypes_future):
565
+ dtype_future = find_common_type(dtypes_future)
566
+ if has_none_blocks:
567
+ dtype_future = ensure_dtype_can_hold_na(dtype_future)
568
+
569
+ return dtype, dtype_future
570
+
571
+
572
+ def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool:
573
+ """
574
+ Check if the join units consist of blocks of uniform type that can
575
+ be concatenated using Block.concat_same_type instead of the generic
576
+ _concatenate_join_units (which uses `concat_compat`).
577
+
578
+ """
579
+ first = join_units[0].block
580
+ if first.dtype.kind == "V":
581
+ return False
582
+ return (
583
+ # exclude cases where a) ju.block is None or b) we have e.g. Int64+int64
584
+ all(type(ju.block) is type(first) for ju in join_units)
585
+ and
586
+ # e.g. DatetimeLikeBlock can be dt64 or td64, but these are not uniform
587
+ all(
588
+ ju.block.dtype == first.dtype
589
+ # GH#42092 we only want the dtype_equal check for non-numeric blocks
590
+ # (for now, may change but that would need a deprecation)
591
+ or ju.block.dtype.kind in "iub"
592
+ for ju in join_units
593
+ )
594
+ and
595
+ # no blocks that would get missing values (can lead to type upcasts)
596
+ # unless we're an extension dtype.
597
+ all(not ju.is_na or ju.block.is_extension for ju in join_units)
598
+ )
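The deprecation warned about in `_concatenate_join_units` can be triggered like this (illustrative; assumes the 2.1-era behavior this file implements):

```python
import numpy as np
import pandas as pd

all_na = pd.DataFrame({"a": np.array([np.nan, np.nan], dtype=object)})
floats = pd.DataFrame({"a": [1.0, 2.0]})

# Today the all-NA object column is excluded when resolving the result
# dtype (float64) and a FutureWarning is raised; once enforced, the
# result will be object instead.
out = pd.concat([all_na, floats], ignore_index=True)
print(out["a"].dtype)  # float64, with a FutureWarning
```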
llmeval-env/lib/python3.10/site-packages/pandas/core/internals/construction.py ADDED
@@ -0,0 +1,1072 @@
1
+ """
2
+ Functions for preparing various inputs passed to the DataFrame or Series
3
+ constructors before passing them to a BlockManager.
4
+ """
5
+ from __future__ import annotations
6
+
7
+ from collections import abc
8
+ from typing import (
9
+ TYPE_CHECKING,
10
+ Any,
11
+ )
12
+
13
+ import numpy as np
14
+ from numpy import ma
15
+
16
+ from pandas._config import using_pyarrow_string_dtype
17
+
18
+ from pandas._libs import lib
19
+
20
+ from pandas.core.dtypes.astype import astype_is_view
21
+ from pandas.core.dtypes.cast import (
22
+ construct_1d_arraylike_from_scalar,
23
+ dict_compat,
24
+ maybe_cast_to_datetime,
25
+ maybe_convert_platform,
26
+ maybe_infer_to_datetimelike,
27
+ )
28
+ from pandas.core.dtypes.common import (
29
+ is_1d_only_ea_dtype,
30
+ is_integer_dtype,
31
+ is_list_like,
32
+ is_named_tuple,
33
+ is_object_dtype,
34
+ )
35
+ from pandas.core.dtypes.dtypes import ExtensionDtype
36
+ from pandas.core.dtypes.generic import (
37
+ ABCDataFrame,
38
+ ABCSeries,
39
+ )
40
+
41
+ from pandas.core import (
42
+ algorithms,
43
+ common as com,
44
+ )
45
+ from pandas.core.arrays import ExtensionArray
46
+ from pandas.core.arrays.string_ import StringDtype
47
+ from pandas.core.construction import (
48
+ array as pd_array,
49
+ ensure_wrapped_if_datetimelike,
50
+ extract_array,
51
+ range_to_ndarray,
52
+ sanitize_array,
53
+ )
54
+ from pandas.core.indexes.api import (
55
+ DatetimeIndex,
56
+ Index,
57
+ TimedeltaIndex,
58
+ default_index,
59
+ ensure_index,
60
+ get_objs_combined_axis,
61
+ union_indexes,
62
+ )
63
+ from pandas.core.internals.array_manager import (
64
+ ArrayManager,
65
+ SingleArrayManager,
66
+ )
67
+ from pandas.core.internals.blocks import (
68
+ BlockPlacement,
69
+ ensure_block_shape,
70
+ new_block,
71
+ new_block_2d,
72
+ )
73
+ from pandas.core.internals.managers import (
74
+ BlockManager,
75
+ SingleBlockManager,
76
+ create_block_manager_from_blocks,
77
+ create_block_manager_from_column_arrays,
78
+ )
79
+
80
+ if TYPE_CHECKING:
81
+ from collections.abc import (
82
+ Hashable,
83
+ Sequence,
84
+ )
85
+
86
+ from pandas._typing import (
87
+ ArrayLike,
88
+ DtypeObj,
89
+ Manager,
90
+ npt,
91
+ )
92
+ # ---------------------------------------------------------------------
93
+ # BlockManager Interface
94
+
95
+
96
+ def arrays_to_mgr(
97
+ arrays,
98
+ columns: Index,
99
+ index,
100
+ *,
101
+ dtype: DtypeObj | None = None,
102
+ verify_integrity: bool = True,
103
+ typ: str | None = None,
104
+ consolidate: bool = True,
105
+ ) -> Manager:
106
+ """
107
+ Segregate Series based on type and coerce into matrices.
108
+
109
+ Needs to handle a lot of exceptional cases.
110
+ """
111
+ if verify_integrity:
112
+ # figure out the index, if necessary
113
+ if index is None:
114
+ index = _extract_index(arrays)
115
+ else:
116
+ index = ensure_index(index)
117
+
118
+ # don't force copy because getting jammed in an ndarray anyway
119
+ arrays, refs = _homogenize(arrays, index, dtype)
120
+ # _homogenize ensures
121
+ # - all(len(x) == len(index) for x in arrays)
122
+ # - all(x.ndim == 1 for x in arrays)
123
+ # - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays)
124
+ # - all(type(x) is not NumpyExtensionArray for x in arrays)
125
+
126
+ else:
127
+ index = ensure_index(index)
128
+ arrays = [extract_array(x, extract_numpy=True) for x in arrays]
129
+ # with _from_arrays, the passed arrays should never be Series objects
130
+ refs = [None] * len(arrays)
131
+
132
+ # Reached via DataFrame._from_arrays; we do minimal validation here
133
+ for arr in arrays:
134
+ if (
135
+ not isinstance(arr, (np.ndarray, ExtensionArray))
136
+ or arr.ndim != 1
137
+ or len(arr) != len(index)
138
+ ):
139
+ raise ValueError(
140
+ "Arrays must be 1-dimensional np.ndarray or ExtensionArray "
141
+ "with length matching len(index)"
142
+ )
143
+
144
+ columns = ensure_index(columns)
145
+ if len(columns) != len(arrays):
146
+ raise ValueError("len(arrays) must match len(columns)")
147
+
148
+ # from BlockManager perspective
149
+ axes = [columns, index]
150
+
151
+ if typ == "block":
152
+ return create_block_manager_from_column_arrays(
153
+ arrays, axes, consolidate=consolidate, refs=refs
154
+ )
155
+ elif typ == "array":
156
+ return ArrayManager(arrays, [index, columns])
157
+ else:
158
+ raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
159
+
160
+
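`arrays_to_mgr` is the common funnel for column-wise construction; a plain dict of 1D arrays reaches it after index/column resolution (sketch, assuming the default block manager):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"x": np.arange(3), "y": np.linspace(0.0, 1.0, 3)})
# One consolidated block per dtype: int64 for 'x', float64 for 'y'.
print(df._mgr.nblocks)  # 2
```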
161
+ def rec_array_to_mgr(
162
+ data: np.rec.recarray | np.ndarray,
163
+ index,
164
+ columns,
165
+ dtype: DtypeObj | None,
166
+ copy: bool,
167
+ typ: str,
168
+ ) -> Manager:
169
+ """
170
+ Extract from a masked rec array and create the manager.
171
+ """
172
+ # essentially process a record array then fill it
173
+ fdata = ma.getdata(data)
174
+ if index is None:
175
+ index = default_index(len(fdata))
176
+ else:
177
+ index = ensure_index(index)
178
+
179
+ if columns is not None:
180
+ columns = ensure_index(columns)
181
+ arrays, arr_columns = to_arrays(fdata, columns)
182
+
183
+ # create the manager
184
+
185
+ arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, len(index))
186
+ if columns is None:
187
+ columns = arr_columns
188
+
189
+ mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ)
190
+
191
+ if copy:
192
+ mgr = mgr.copy()
193
+ return mgr
194
+
195
+
196
+ def mgr_to_mgr(mgr, typ: str, copy: bool = True) -> Manager:
197
+ """
198
+ Convert to specific type of Manager. Does not copy if the type is already
199
+ correct. Does not guarantee a copy otherwise. `copy` keyword only controls
200
+ whether conversion from Block->ArrayManager copies the 1D arrays.
201
+ """
202
+ new_mgr: Manager
203
+
204
+ if typ == "block":
205
+ if isinstance(mgr, BlockManager):
206
+ new_mgr = mgr
207
+ else:
208
+ if mgr.ndim == 2:
209
+ new_mgr = arrays_to_mgr(
210
+ mgr.arrays, mgr.axes[0], mgr.axes[1], typ="block"
211
+ )
212
+ else:
213
+ new_mgr = SingleBlockManager.from_array(mgr.arrays[0], mgr.index)
214
+ elif typ == "array":
215
+ if isinstance(mgr, ArrayManager):
216
+ new_mgr = mgr
217
+ else:
218
+ if mgr.ndim == 2:
219
+ arrays = [mgr.iget_values(i) for i in range(len(mgr.axes[0]))]
220
+ if copy:
221
+ arrays = [arr.copy() for arr in arrays]
222
+ new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]])
223
+ else:
224
+ array = mgr.internal_values()
225
+ if copy:
226
+ array = array.copy()
227
+ new_mgr = SingleArrayManager([array], [mgr.index])
228
+ else:
229
+ raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'")
230
+ return new_mgr
231
+
232
+
233
+ # ---------------------------------------------------------------------
234
+ # DataFrame Constructor Interface
235
+
236
+
237
+ def ndarray_to_mgr(
238
+ values, index, columns, dtype: DtypeObj | None, copy: bool, typ: str
239
+ ) -> Manager:
240
+ # used in DataFrame.__init__
241
+ # input must be a ndarray, list, Series, Index, ExtensionArray
242
+
243
+ if isinstance(values, ABCSeries):
244
+ if columns is None:
245
+ if values.name is not None:
246
+ columns = Index([values.name])
247
+ if index is None:
248
+ index = values.index
249
+ else:
250
+ values = values.reindex(index)
251
+
252
+ # zero len case (GH #2234)
253
+ if not len(values) and columns is not None and len(columns):
254
+ values = np.empty((0, 1), dtype=object)
255
+
256
+ # if the array preparation does a copy -> avoid this for ArrayManager,
257
+ # since the copy is done on conversion to 1D arrays
258
+ copy_on_sanitize = False if typ == "array" else copy
259
+
260
+ vdtype = getattr(values, "dtype", None)
261
+ refs = None
262
+ if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype):
263
+ # GH#19157
264
+
265
+ if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1:
266
+ # GH#12513 an EA dtype passed with a 2D array, split into
267
+ # multiple EAs that view the values
268
+ # error: No overload variant of "__getitem__" of "ExtensionArray"
269
+ # matches argument type "Tuple[slice, int]"
270
+ values = [
271
+ values[:, n] # type: ignore[call-overload]
272
+ for n in range(values.shape[1])
273
+ ]
274
+ else:
275
+ values = [values]
276
+
277
+ if columns is None:
278
+ columns = Index(range(len(values)))
279
+ else:
280
+ columns = ensure_index(columns)
281
+
282
+ return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ)
283
+
284
+ elif isinstance(vdtype, ExtensionDtype):
285
+ # i.e. Datetime64TZ, PeriodDtype; cases with is_1d_only_ea_dtype(vdtype)
286
+ # are already caught above
287
+ values = extract_array(values, extract_numpy=True)
288
+ if copy:
289
+ values = values.copy()
290
+ if values.ndim == 1:
291
+ values = values.reshape(-1, 1)
292
+
293
+ elif isinstance(values, (ABCSeries, Index)):
294
+ if not copy_on_sanitize and (
295
+ dtype is None or astype_is_view(values.dtype, dtype)
296
+ ):
297
+ refs = values._references
298
+
299
+ if copy_on_sanitize:
300
+ values = values._values.copy()
301
+ else:
302
+ values = values._values
303
+
304
+ values = _ensure_2d(values)
305
+
306
+ elif isinstance(values, (np.ndarray, ExtensionArray)):
307
+ # drop subclass info
308
+ _copy = (
309
+ copy_on_sanitize
310
+ if (dtype is None or astype_is_view(values.dtype, dtype))
311
+ else False
312
+ )
313
+ values = np.array(values, copy=_copy)
314
+ values = _ensure_2d(values)
315
+
316
+ else:
317
+ # by definition an array here
318
+ # the dtypes will be coerced to a single dtype
319
+ values = _prep_ndarraylike(values, copy=copy_on_sanitize)
320
+
321
+ if dtype is not None and values.dtype != dtype:
322
+ # GH#40110 see similar check inside sanitize_array
323
+ values = sanitize_array(
324
+ values,
325
+ None,
326
+ dtype=dtype,
327
+ copy=copy_on_sanitize,
328
+ allow_2d=True,
329
+ )
330
+
331
+ # _prep_ndarraylike ensures that values.ndim == 2 at this point
332
+ index, columns = _get_axes(
333
+ values.shape[0], values.shape[1], index=index, columns=columns
334
+ )
335
+
336
+ _check_values_indices_shape_match(values, index, columns)
337
+
338
+ if typ == "array":
339
+ if issubclass(values.dtype.type, str):
340
+ values = np.array(values, dtype=object)
341
+
342
+ if dtype is None and is_object_dtype(values.dtype):
343
+ arrays = [
344
+ ensure_wrapped_if_datetimelike(
345
+ maybe_infer_to_datetimelike(values[:, i])
346
+ )
347
+ for i in range(values.shape[1])
348
+ ]
349
+ else:
350
+ if lib.is_np_dtype(values.dtype, "mM"):
351
+ values = ensure_wrapped_if_datetimelike(values)
352
+ arrays = [values[:, i] for i in range(values.shape[1])]
353
+
354
+ if copy:
355
+ arrays = [arr.copy() for arr in arrays]
356
+
357
+ return ArrayManager(arrays, [index, columns], verify_integrity=False)
358
+
359
+ values = values.T
360
+
361
+ # if we don't have a dtype specified, then try to convert objects
362
+ # on the entire block; this is to convert if we have datetimelike's
363
+ # embedded in an object type
364
+ if dtype is None and is_object_dtype(values.dtype):
365
+ obj_columns = list(values)
366
+ maybe_datetime = [maybe_infer_to_datetimelike(x) for x in obj_columns]
367
+ # don't convert (and copy) the objects if no type inference occurs
368
+ if any(x is not y for x, y in zip(obj_columns, maybe_datetime)):
369
+ dvals_list = [ensure_block_shape(dval, 2) for dval in maybe_datetime]
370
+ block_values = [
371
+ new_block_2d(dvals_list[n], placement=BlockPlacement(n))
372
+ for n in range(len(dvals_list))
373
+ ]
374
+ else:
375
+ bp = BlockPlacement(slice(len(columns)))
376
+ nb = new_block_2d(values, placement=bp, refs=refs)
377
+ block_values = [nb]
378
+ elif dtype is None and values.dtype.kind == "U" and using_pyarrow_string_dtype():
379
+ dtype = StringDtype(storage="pyarrow_numpy")
380
+
381
+ obj_columns = list(values)
382
+ block_values = [
383
+ new_block(
384
+ dtype.construct_array_type()._from_sequence(data, dtype=dtype),
385
+ BlockPlacement(slice(i, i + 1)),
386
+ ndim=2,
387
+ )
388
+ for i, data in enumerate(obj_columns)
389
+ ]
390
+
391
+ else:
392
+ bp = BlockPlacement(slice(len(columns)))
393
+ nb = new_block_2d(values, placement=bp, refs=refs)
394
+ block_values = [nb]
395
+
396
+ if len(columns) == 0:
397
+ # TODO: check len(values) == 0?
398
+ block_values = []
399
+
400
+ return create_block_manager_from_blocks(
401
+ block_values, [columns, index], verify_integrity=False
402
+ )
403
+
404
+
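For a homogeneous 2D ndarray, the path above produces a single 2D block whose values are stored transposed (columns-first); a quick check:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.arange(6.0).reshape(3, 2), columns=["a", "b"])
print(df._mgr.nblocks)                 # 1
print(df._mgr.blocks[0].values.shape)  # (2, 3): (n_columns, n_rows)
```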
405
+ def _check_values_indices_shape_match(
406
+ values: np.ndarray, index: Index, columns: Index
407
+ ) -> None:
408
+ """
409
+ Check that the shape implied by our axes matches the actual shape of the
410
+ data.
411
+ """
412
+ if values.shape[1] != len(columns) or values.shape[0] != len(index):
413
+ # Could let this raise in Block constructor, but we get a more
414
+ # helpful exception message this way.
415
+ if values.shape[0] == 0 < len(index):
416
+ raise ValueError("Empty data passed with indices specified.")
417
+
418
+ passed = values.shape
419
+ implied = (len(index), len(columns))
420
+ raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
421
+
422
+
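The friendlier error message this check buys us:

```python
import numpy as np
import pandas as pd

try:
    pd.DataFrame(np.zeros((2, 2)), index=[0, 1, 2])
except ValueError as err:
    print(err)  # Shape of passed values is (2, 2), indices imply (3, 2)
```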
423
+ def dict_to_mgr(
424
+ data: dict,
425
+ index,
426
+ columns,
427
+ *,
428
+ dtype: DtypeObj | None = None,
429
+ typ: str = "block",
430
+ copy: bool = True,
431
+ ) -> Manager:
432
+ """
433
+ Segregate Series based on type and coerce into matrices.
434
+ Needs to handle a lot of exceptional cases.
435
+
436
+ Used in DataFrame.__init__
437
+ """
438
+ arrays: Sequence[Any] | Series
439
+
440
+ if columns is not None:
441
+ from pandas.core.series import Series
442
+
443
+ arrays = Series(data, index=columns, dtype=object)
444
+ missing = arrays.isna()
445
+ if index is None:
446
+ # GH10856
447
+ # raise ValueError if only scalars in dict
448
+ index = _extract_index(arrays[~missing])
449
+ else:
450
+ index = ensure_index(index)
451
+
452
+ # no obvious "empty" int column
453
+ if missing.any() and not is_integer_dtype(dtype):
454
+ nan_dtype: DtypeObj
455
+
456
+ if dtype is not None:
457
+ # calling sanitize_array ensures we don't mix-and-match
458
+ # NA dtypes
459
+ midxs = missing.values.nonzero()[0]
460
+ for i in midxs:
461
+ arr = sanitize_array(arrays.iat[i], index, dtype=dtype)
462
+ arrays.iat[i] = arr
463
+ else:
464
+ # GH#1783
465
+ nan_dtype = np.dtype("object")
466
+ val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype)
467
+ nmissing = missing.sum()
468
+ if copy:
469
+ rhs = [val] * nmissing
470
+ else:
471
+ # GH#45369
472
+ rhs = [val.copy() for _ in range(nmissing)]
473
+ arrays.loc[missing] = rhs
474
+
475
+ arrays = list(arrays)
476
+ columns = ensure_index(columns)
477
+
478
+ else:
479
+ keys = list(data.keys())
480
+ columns = Index(keys) if keys else default_index(0)
481
+ arrays = [com.maybe_iterable_to_list(data[k]) for k in keys]
482
+
483
+ if copy:
484
+ if typ == "block":
485
+ # We only need to copy arrays that will not get consolidated, i.e.
486
+ # only EA arrays
487
+ arrays = [
488
+ x.copy()
489
+ if isinstance(x, ExtensionArray)
490
+ else x.copy(deep=True)
491
+ if (
492
+ isinstance(x, Index)
493
+ or isinstance(x, ABCSeries)
494
+ and is_1d_only_ea_dtype(x.dtype)
495
+ )
496
+ else x
497
+ for x in arrays
498
+ ]
499
+ else:
500
+ # dtype check to exclude e.g. range objects, scalars
501
+ arrays = [x.copy() if hasattr(x, "dtype") else x for x in arrays]
502
+
503
+ return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy)
504
+
505
+
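The GH#1783 branch above is what makes a requested-but-missing column come back as all-NaN object rather than, say, float64 (editor's example):

```python
import pandas as pd

df = pd.DataFrame({"a": [1, 2]}, columns=["a", "b"])
print(df["b"].dtype)         # object (the nan_dtype branch)
print(df["b"].isna().all())  # True
```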
506
+ def nested_data_to_arrays(
507
+ data: Sequence,
508
+ columns: Index | None,
509
+ index: Index | None,
510
+ dtype: DtypeObj | None,
511
+ ) -> tuple[list[ArrayLike], Index, Index]:
512
+ """
513
+ Convert a single sequence of arrays to multiple arrays.
514
+ """
515
+ # By the time we get here we have already checked treat_as_nested(data)
516
+
517
+ if is_named_tuple(data[0]) and columns is None:
518
+ columns = ensure_index(data[0]._fields)
519
+
520
+ arrays, columns = to_arrays(data, columns, dtype=dtype)
521
+ columns = ensure_index(columns)
522
+
523
+ if index is None:
524
+ if isinstance(data[0], ABCSeries):
525
+ index = _get_names_from_index(data)
526
+ else:
527
+ index = default_index(len(data))
528
+
529
+ return arrays, columns, index
530
+
531
+
532
+ def treat_as_nested(data) -> bool:
533
+ """
534
+ Check if we should use nested_data_to_arrays.
535
+ """
536
+ return (
537
+ len(data) > 0
538
+ and is_list_like(data[0])
539
+ and getattr(data[0], "ndim", 1) == 1
540
+ and not (isinstance(data, ExtensionArray) and data.ndim == 2)
541
+ )
542
+
543
+
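What the nested check distinguishes, in constructor terms:

```python
import pandas as pd

print(pd.DataFrame([[1, 2], [3, 4]]).shape)  # (2, 2): nested rows
print(pd.DataFrame([1, 2, 3]).shape)         # (3, 1): flat, one column
```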
544
+ # ---------------------------------------------------------------------
545
+
546
+
547
+ def _prep_ndarraylike(values, copy: bool = True) -> np.ndarray:
548
+ # values is specifically _not_ ndarray, EA, Index, or Series
549
+ # We only get here with `not treat_as_nested(values)`
550
+
551
+ if len(values) == 0:
552
+ # TODO: check for length-zero range, in which case return int64 dtype?
553
+ # TODO: reuse anything in try_cast?
554
+ return np.empty((0, 0), dtype=object)
555
+ elif isinstance(values, range):
556
+ arr = range_to_ndarray(values)
557
+ return arr[..., np.newaxis]
558
+
559
+ def convert(v):
560
+ if not is_list_like(v) or isinstance(v, ABCDataFrame):
561
+ return v
562
+
563
+ v = extract_array(v, extract_numpy=True)
564
+ res = maybe_convert_platform(v)
565
+ # We don't do maybe_infer_to_datetimelike here bc we will end up doing
566
+ # it column-by-column in ndarray_to_mgr
567
+ return res
568
+
569
+ # we could have a 1-dim or 2-dim list here
570
+ # this is equiv of np.asarray, but does object conversion
571
+ # and platform dtype preservation
572
+ # does not convert e.g. [1, "a", True] to ["1", "a", "True"] like
573
+ # np.asarray would
574
+ if is_list_like(values[0]):
575
+ values = np.array([convert(v) for v in values])
576
+ elif isinstance(values[0], np.ndarray) and values[0].ndim == 0:
577
+ # GH#21861 see test_constructor_list_of_lists
578
+ values = np.array([convert(v) for v in values])
579
+ else:
580
+ values = convert(values)
581
+
582
+ return _ensure_2d(values)
583
+
584
+
585
+ def _ensure_2d(values: np.ndarray) -> np.ndarray:
586
+ """
587
+ Reshape 1D values to 2D; raise on anything other than 1D or 2D input.
588
+ """
589
+ if values.ndim == 1:
590
+ values = values.reshape((values.shape[0], 1))
591
+ elif values.ndim != 2:
592
+ raise ValueError(f"Must pass 2-d input. shape={values.shape}")
593
+ return values
594
+
595
+
596
+ def _homogenize(
597
+ data, index: Index, dtype: DtypeObj | None
598
+ ) -> tuple[list[ArrayLike], list[Any]]:
599
+ oindex = None
600
+ homogenized = []
601
+ # if the original array-like in `data` is a Series, keep track of this Series' refs
602
+ refs: list[Any] = []
603
+
604
+ for val in data:
605
+ if isinstance(val, (ABCSeries, Index)):
606
+ if dtype is not None:
607
+ val = val.astype(dtype, copy=False)
608
+ if isinstance(val, ABCSeries) and val.index is not index:
609
+ # Forces alignment. No need to copy data since we
610
+ # are putting it into an ndarray later
611
+ val = val.reindex(index, copy=False)
612
+ refs.append(val._references)
613
+ val = val._values
614
+ else:
615
+ if isinstance(val, dict):
616
+ # GH#41785 this _should_ be equivalent to (but faster than)
617
+ # val = Series(val, index=index)._values
618
+ if oindex is None:
619
+ oindex = index.astype("O")
620
+
621
+ if isinstance(index, (DatetimeIndex, TimedeltaIndex)):
622
+ # see test_constructor_dict_datetime64_index
623
+ val = dict_compat(val)
624
+ else:
625
+ # see test_constructor_subclass_dict
626
+ val = dict(val)
627
+ val = lib.fast_multiget(val, oindex._values, default=np.nan)
628
+
629
+ val = sanitize_array(val, index, dtype=dtype, copy=False)
630
+ com.require_length_match(val, index)
631
+ refs.append(None)
632
+
633
+ homogenized.append(val)
634
+
635
+ return homogenized, refs
636
+
637
+
638
+ def _extract_index(data) -> Index:
639
+ """
640
+ Try to infer an Index from the passed data, raise ValueError on failure.
641
+ """
642
+ index: Index
643
+ if len(data) == 0:
644
+ return default_index(0)
645
+
646
+ raw_lengths = []
647
+ indexes: list[list[Hashable] | Index] = []
648
+
649
+ have_raw_arrays = False
650
+ have_series = False
651
+ have_dicts = False
652
+
653
+ for val in data:
654
+ if isinstance(val, ABCSeries):
655
+ have_series = True
656
+ indexes.append(val.index)
657
+ elif isinstance(val, dict):
658
+ have_dicts = True
659
+ indexes.append(list(val.keys()))
660
+ elif is_list_like(val) and getattr(val, "ndim", 1) == 1:
661
+ have_raw_arrays = True
662
+ raw_lengths.append(len(val))
663
+ elif isinstance(val, np.ndarray) and val.ndim > 1:
664
+ raise ValueError("Per-column arrays must each be 1-dimensional")
665
+
666
+ if not indexes and not raw_lengths:
667
+ raise ValueError("If using all scalar values, you must pass an index")
668
+
669
+ if have_series:
670
+ index = union_indexes(indexes)
671
+ elif have_dicts:
672
+ index = union_indexes(indexes, sort=False)
673
+
674
+ if have_raw_arrays:
675
+ lengths = list(set(raw_lengths))
676
+ if len(lengths) > 1:
677
+ raise ValueError("All arrays must be of the same length")
678
+
679
+ if have_dicts:
680
+ raise ValueError(
681
+ "Mixing dicts with non-Series may lead to ambiguous ordering."
682
+ )
683
+
684
+ if have_series:
685
+ if lengths[0] != len(index):
686
+ msg = (
687
+ f"array length {lengths[0]} does not match index "
688
+ f"length {len(index)}"
689
+ )
690
+ raise ValueError(msg)
691
+ else:
692
+ index = default_index(lengths[0])
693
+
694
+ return ensure_index(index)
695
+
696
+
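The all-scalars error raised here, and the documented escape hatch:

```python
import pandas as pd

try:
    pd.DataFrame({"a": 1, "b": 2})
except ValueError as err:
    print(err)  # If using all scalar values, you must pass an index

print(pd.DataFrame({"a": 1, "b": 2}, index=[0]))  # scalars broadcast fine
```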
697
+ def reorder_arrays(
698
+ arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int
699
+ ) -> tuple[list[ArrayLike], Index]:
700
+ """
701
+ Pre-emptively (cheaply) reindex arrays with new columns.
702
+ """
703
+ # reorder according to the columns
704
+ if columns is not None:
705
+ if not columns.equals(arr_columns):
706
+ # if they are equal, there is nothing to do
707
+ new_arrays: list[ArrayLike] = []
708
+ indexer = arr_columns.get_indexer(columns)
709
+ for i, k in enumerate(indexer):
710
+ if k == -1:
711
+ # by convention default is all-NaN object dtype
712
+ arr = np.empty(length, dtype=object)
713
+ arr.fill(np.nan)
714
+ else:
715
+ arr = arrays[k]
716
+ new_arrays.append(arr)
717
+
718
+ arrays = new_arrays
719
+ arr_columns = columns
720
+
721
+ return arrays, arr_columns
722
+
723
+
724
+ def _get_names_from_index(data) -> Index:
725
+ has_some_name = any(getattr(s, "name", None) is not None for s in data)
726
+ if not has_some_name:
727
+ return default_index(len(data))
728
+
729
+ index: list[Hashable] = list(range(len(data)))
730
+ count = 0
731
+ for i, s in enumerate(data):
732
+ n = getattr(s, "name", None)
733
+ if n is not None:
734
+ index[i] = n
735
+ else:
736
+ index[i] = f"Unnamed {count}"
737
+ count += 1
738
+
739
+ return Index(index)
740
+
741
+
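A quick illustration of the naming fallback (editor's sketch):

```python
import pandas as pd

rows = [pd.Series([1], name="x"), pd.Series([2])]
# One named and one anonymous Series: the anonymous row gets "Unnamed 0".
print(pd.DataFrame(rows).index.tolist())  # ['x', 'Unnamed 0']
```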
742
+ def _get_axes(
743
+ N: int, K: int, index: Index | None, columns: Index | None
744
+ ) -> tuple[Index, Index]:
745
+ # helper to create the axes as indexes
746
+ # return axes or defaults
747
+
748
+ if index is None:
749
+ index = default_index(N)
750
+ else:
751
+ index = ensure_index(index)
752
+
753
+ if columns is None:
754
+ columns = default_index(K)
755
+ else:
756
+ columns = ensure_index(columns)
757
+ return index, columns
758
+
759
+
760
+ def dataclasses_to_dicts(data):
761
+ """
762
+ Converts a list of dataclass instances to a list of dictionaries.
763
+
764
+ Parameters
765
+ ----------
766
+ data : List[Type[dataclass]]
767
+
768
+ Returns
769
+ -------
770
+ list_dict : List[dict]
771
+
772
+ Examples
773
+ --------
774
+ >>> from dataclasses import dataclass
775
+ >>> @dataclass
776
+ ... class Point:
777
+ ... x: int
778
+ ... y: int
779
+
780
+ >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)])
781
+ [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}]
782
+
783
+ """
784
+ from dataclasses import asdict
785
+
786
+ return list(map(asdict, data))
787
+
788
+
789
+ # ---------------------------------------------------------------------
790
+ # Conversion of Inputs to Arrays
791
+
792
+
793
+ def to_arrays(
794
+ data, columns: Index | None, dtype: DtypeObj | None = None
795
+ ) -> tuple[list[ArrayLike], Index]:
796
+ """
797
+ Return list of arrays, columns.
798
+
799
+ Returns
800
+ -------
801
+ list[ArrayLike]
802
+ These will become columns in a DataFrame.
803
+ Index
804
+ This will become frame.columns.
805
+
806
+ Notes
807
+ -----
808
+ Ensures that len(result_arrays) == len(result_index).
809
+ """
810
+
811
+ if not len(data):
812
+ if isinstance(data, np.ndarray):
813
+ if data.dtype.names is not None:
814
+ # i.e. numpy structured array
815
+ columns = ensure_index(data.dtype.names)
816
+ arrays = [data[name] for name in columns]
817
+
818
+ if len(data) == 0:
819
+ # GH#42456 the indexing above results in list of 2D ndarrays
820
+ # TODO: is that an issue with numpy?
821
+ for i, arr in enumerate(arrays):
822
+ if arr.ndim == 2:
823
+ arrays[i] = arr[:, 0]
824
+
825
+ return arrays, columns
826
+ return [], ensure_index([])
827
+
828
+ elif isinstance(data, np.ndarray) and data.dtype.names is not None:
829
+ # e.g. recarray
830
+ columns = Index(list(data.dtype.names))
831
+ arrays = [data[k] for k in columns]
832
+ return arrays, columns
833
+
834
+ if isinstance(data[0], (list, tuple)):
835
+ arr = _list_to_arrays(data)
836
+ elif isinstance(data[0], abc.Mapping):
837
+ arr, columns = _list_of_dict_to_arrays(data, columns)
838
+ elif isinstance(data[0], ABCSeries):
839
+ arr, columns = _list_of_series_to_arrays(data, columns)
840
+ else:
841
+ # last ditch effort
842
+ data = [tuple(x) for x in data]
843
+ arr = _list_to_arrays(data)
844
+
845
+ content, columns = _finalize_columns_and_data(arr, columns, dtype)
846
+ return content, columns
847
+
848
+
849
+ def _list_to_arrays(data: list[tuple | list]) -> np.ndarray:
850
+ # Returned np.ndarray has ndim = 2
851
+ # Note: we already check len(data) > 0 before getting here
852
+ if isinstance(data[0], tuple):
853
+ content = lib.to_object_array_tuples(data)
854
+ else:
855
+ # list of lists
856
+ content = lib.to_object_array(data)
857
+ return content
858
+
859
+
860
+ def _list_of_series_to_arrays(
861
+ data: list,
862
+ columns: Index | None,
863
+ ) -> tuple[np.ndarray, Index]:
864
+ # returned np.ndarray has ndim == 2
865
+
866
+ if columns is None:
867
+ # We know pass_data is non-empty because data[0] is a Series
868
+ pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))]
869
+ columns = get_objs_combined_axis(pass_data, sort=False)
870
+
871
+ indexer_cache: dict[int, np.ndarray] = {}
872
+
873
+ aligned_values = []
874
+ for s in data:
875
+ index = getattr(s, "index", None)
876
+ if index is None:
877
+ index = default_index(len(s))
878
+
879
+ if id(index) in indexer_cache:
880
+ indexer = indexer_cache[id(index)]
881
+ else:
882
+ indexer = indexer_cache[id(index)] = index.get_indexer(columns)
883
+
884
+ values = extract_array(s, extract_numpy=True)
885
+ aligned_values.append(algorithms.take_nd(values, indexer))
886
+
887
+ content = np.vstack(aligned_values)
888
+ return content, columns
889
+
890
+
891
+ def _list_of_dict_to_arrays(
892
+ data: list[dict],
893
+ columns: Index | None,
894
+ ) -> tuple[np.ndarray, Index]:
895
+ """
896
+ Convert list of dicts to numpy arrays
897
+
898
+ if `columns` is not passed, column names are inferred from the records
899
+ - For OrderedDict and dicts, the column names match
900
+ the key insertion-order from the first record to the last.
901
+ - For other kinds of dict-likes, the keys are lexically sorted.
902
+
903
+ Parameters
904
+ ----------
905
+ data : iterable
906
+ collection of records (OrderedDict, dict)
907
+ columns : iterable or None
908
+
909
+ Returns
910
+ -------
911
+ content : np.ndarray[object, ndim=2]
912
+ columns : Index
913
+ """
914
+ if columns is None:
915
+ gen = (list(x.keys()) for x in data)
916
+ sort = not any(isinstance(d, dict) for d in data)
917
+ pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort)
918
+ columns = ensure_index(pre_cols)
919
+
920
+ # ensure that they are of the base dict class and not of derived
921
+ # classes
922
+ data = [d if type(d) is dict else dict(d) for d in data] # noqa: E721
923
+
924
+ content = lib.dicts_to_array(data, list(columns))
925
+ return content, columns
926
+
927
+
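Column inference from records follows first-seen key order for plain dicts, as the docstring says:

```python
import pandas as pd

rows = [{"b": 1, "a": 2}, {"a": 3, "c": 4}]
print(pd.DataFrame(rows).columns.tolist())  # ['b', 'a', 'c']
```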
928
+ def _finalize_columns_and_data(
929
+ content: np.ndarray, # ndim == 2
930
+ columns: Index | None,
931
+ dtype: DtypeObj | None,
932
+ ) -> tuple[list[ArrayLike], Index]:
933
+ """
934
+ Ensure we have valid columns, cast object dtypes if possible.
935
+ """
936
+ contents = list(content.T)
937
+
938
+ try:
939
+ columns = _validate_or_indexify_columns(contents, columns)
940
+ except AssertionError as err:
941
+ # GH#26429 do not raise user-facing AssertionError
942
+ raise ValueError(err) from err
943
+
944
+ if len(contents) and contents[0].dtype == np.object_:
945
+ contents = convert_object_array(contents, dtype=dtype)
946
+
947
+ return contents, columns
948
+
949
+
950
+ def _validate_or_indexify_columns(
951
+ content: list[np.ndarray], columns: Index | None
952
+ ) -> Index:
953
+ """
954
+ If columns is None, use positional numbers as column names; otherwise, validate that
955
+ columns have valid length.
956
+
957
+ Parameters
958
+ ----------
959
+ content : list of np.ndarrays
960
+ columns : Index or None
961
+
962
+ Returns
963
+ -------
964
+ Index
965
+ If columns is None, assign positional column index value as columns.
966
+
967
+ Raises
968
+ ------
969
+ 1. AssertionError when content is not composed of list of lists, and if
970
+ length of columns is not equal to length of content.
971
+ 2. ValueError when content is list of lists, but length of each sub-list
972
+ is not equal
973
+ 3. ValueError when content is list of lists, but length of sub-list is
974
+ not equal to length of content
975
+ """
976
+ if columns is None:
977
+ columns = default_index(len(content))
978
+ else:
979
+ # Add mask for data which is composed of list of lists
980
+ is_mi_list = isinstance(columns, list) and all(
981
+ isinstance(col, list) for col in columns
982
+ )
983
+
984
+ if not is_mi_list and len(columns) != len(content): # pragma: no cover
985
+ # caller's responsibility to check for this...
986
+ raise AssertionError(
987
+ f"{len(columns)} columns passed, passed data had "
988
+ f"{len(content)} columns"
989
+ )
990
+ if is_mi_list:
991
+ # check if nested list column, length of each sub-list should be equal
992
+ if len({len(col) for col in columns}) > 1:
993
+ raise ValueError(
994
+ "Length of columns passed for MultiIndex columns is different"
995
+ )
996
+
997
+ # if columns is not empty and length of sublist is not equal to content
998
+ if columns and len(columns[0]) != len(content):
999
+ raise ValueError(
1000
+ f"{len(columns[0])} columns passed, passed data had "
1001
+ f"{len(content)} columns"
1002
+ )
1003
+ return columns
1004
+
1005
+
1006
+ def convert_object_array(
1007
+ content: list[npt.NDArray[np.object_]],
1008
+ dtype: DtypeObj | None,
1009
+ dtype_backend: str = "numpy",
1010
+ coerce_float: bool = False,
1011
+ ) -> list[ArrayLike]:
1012
+ """
1013
+ Internal function to convert object array.
1014
+
1015
+ Parameters
1016
+ ----------
1017
+ content: List[np.ndarray]
1018
+ dtype: np.dtype or ExtensionDtype
1019
+ dtype_backend: Controls if nullable/pyarrow dtypes are returned.
1020
+ coerce_float: Cast floats that are integers to int.
1021
+
1022
+ Returns
1023
+ -------
1024
+ List[ArrayLike]
1025
+ """
1026
+ # provide soft conversion of object dtypes
1027
+
1028
+ def convert(arr):
1029
+ if dtype != np.dtype("O"):
1030
+ arr = lib.maybe_convert_objects(
1031
+ arr,
1032
+ try_float=coerce_float,
1033
+ convert_to_nullable_dtype=dtype_backend != "numpy",
1034
+ )
1035
+ # Notes on cases that get here 2023-02-15
1036
+ # 1) we DO get here when arr is all Timestamps and dtype=None
1037
+ # 2) disabling this doesn't break the world, so this must be
1038
+ # getting caught at a higher level
1039
+ # 3) passing convert_non_numeric to maybe_convert_objects get this right
1040
+ # 4) convert_non_numeric?
1041
+
1042
+ if dtype is None:
1043
+ if arr.dtype == np.dtype("O"):
1044
+ # i.e. maybe_convert_objects didn't convert
1045
+ arr = maybe_infer_to_datetimelike(arr)
1046
+ if dtype_backend != "numpy" and arr.dtype == np.dtype("O"):
1047
+ new_dtype = StringDtype()
1048
+ arr_cls = new_dtype.construct_array_type()
1049
+ arr = arr_cls._from_sequence(arr, dtype=new_dtype)
1050
+ elif dtype_backend != "numpy" and isinstance(arr, np.ndarray):
1051
+ if arr.dtype.kind in "iufb":
1052
+ arr = pd_array(arr, copy=False)
1053
+
1054
+ elif isinstance(dtype, ExtensionDtype):
1055
+ # TODO: test(s) that get here
1056
+ # TODO: try to de-duplicate this convert function with
1057
+ # core.construction functions
1058
+ cls = dtype.construct_array_type()
1059
+ arr = cls._from_sequence(arr, dtype=dtype, copy=False)
1060
+ elif dtype.kind in "mM":
1061
+ # This restriction is harmless bc these are the only cases
1062
+ # where maybe_cast_to_datetime is not a no-op.
1063
+ # Here we know:
1064
+ # 1) dtype.kind in "mM" and
1065
+ # 2) arr is either object or numeric dtype
1066
+ arr = maybe_cast_to_datetime(arr, dtype)
1067
+
1068
+ return arr
1069
+
1070
+ arrays = [convert(arr) for arr in content]
1071
+
1072
+ return arrays
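The soft conversion happens column by column, so mixed rows can still yield precise per-column dtypes (illustrative):

```python
import pandas as pd

df = pd.DataFrame([[1, "2021-01-01"], [2, "2021-01-02"]])
# Column 0 converts to int64; column 1 stays object (strings are not
# parsed to datetimes here).
print(df.dtypes.tolist())  # [dtype('int64'), dtype('O')]
```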
llmeval-env/lib/python3.10/site-packages/pandas/core/internals/managers.py ADDED
@@ -0,0 +1,2375 @@
from __future__ import annotations

from collections.abc import (
    Hashable,
    Sequence,
)
import itertools
from typing import (
    TYPE_CHECKING,
    Callable,
    Literal,
    cast,
)
import warnings

import numpy as np

from pandas._config import (
    using_copy_on_write,
    warn_copy_on_write,
)

from pandas._libs import (
    internals as libinternals,
    lib,
)
from pandas._libs.internals import (
    BlockPlacement,
    BlockValuesRefs,
)
from pandas._libs.tslibs import Timestamp
from pandas.errors import PerformanceWarning
from pandas.util._decorators import cache_readonly
from pandas.util._exceptions import find_stack_level

from pandas.core.dtypes.cast import infer_dtype_from_scalar
from pandas.core.dtypes.common import (
    ensure_platform_int,
    is_1d_only_ea_dtype,
    is_list_like,
)
from pandas.core.dtypes.dtypes import (
    DatetimeTZDtype,
    ExtensionDtype,
)
from pandas.core.dtypes.generic import (
    ABCDataFrame,
    ABCSeries,
)
from pandas.core.dtypes.missing import (
    array_equals,
    isna,
)

import pandas.core.algorithms as algos
from pandas.core.arrays import (
    ArrowExtensionArray,
    ArrowStringArray,
    DatetimeArray,
)
from pandas.core.arrays._mixins import NDArrayBackedExtensionArray
from pandas.core.construction import (
    ensure_wrapped_if_datetimelike,
    extract_array,
)
from pandas.core.indexers import maybe_convert_indices
from pandas.core.indexes.api import (
    Index,
    ensure_index,
)
from pandas.core.internals.base import (
    DataManager,
    SingleDataManager,
    ensure_np_dtype,
    interleaved_dtype,
)
from pandas.core.internals.blocks import (
    COW_WARNING_GENERAL_MSG,
    COW_WARNING_SETITEM_MSG,
    Block,
    NumpyBlock,
    ensure_block_shape,
    extend_blocks,
    get_block_type,
    maybe_coerce_values,
    new_block,
    new_block_2d,
)
from pandas.core.internals.ops import (
    blockwise_all,
    operate_blockwise,
)

if TYPE_CHECKING:
    from pandas._typing import (
        ArrayLike,
        AxisInt,
        DtypeObj,
        QuantileInterpolation,
        Self,
        Shape,
        npt,
    )

    from pandas.api.extensions import ExtensionArray


class BaseBlockManager(DataManager):
    """
    Core internal data structure to implement DataFrame, Series, etc.

    Manage a bunch of labeled 2D mixed-type ndarrays. Essentially it's a
    lightweight blocked set of labeled data to be manipulated by the DataFrame
    public API class

    Attributes
    ----------
    shape
    ndim
    axes
    values
    items

    Methods
    -------
    set_axis(axis, new_labels)
    copy(deep=True)

    get_dtypes

    apply(func, axes, block_filter_fn)

    get_bool_data
    get_numeric_data

    get_slice(slice_like, axis)
    get(label)
    iget(loc)

    take(indexer, axis)
    reindex_axis(new_labels, axis)
    reindex_indexer(new_labels, indexer, axis)

    delete(label)
    insert(loc, label, value)
    set(label, value)

    Parameters
    ----------
    blocks: Sequence of Block
    axes: Sequence of Index
    verify_integrity: bool, default True

    Notes
    -----
    This is *not* a public API class
    """

    __slots__ = ()

    _blknos: npt.NDArray[np.intp]
    _blklocs: npt.NDArray[np.intp]
    blocks: tuple[Block, ...]
    axes: list[Index]

    @property
    def ndim(self) -> int:
        raise NotImplementedError

    _known_consolidated: bool
    _is_consolidated: bool

    def __init__(self, blocks, axes, verify_integrity: bool = True) -> None:
        raise NotImplementedError

    @classmethod
    def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self:
        raise NotImplementedError

    @property
    def blknos(self) -> npt.NDArray[np.intp]:
        """
        Suppose we want to find the array corresponding to our i'th column.

        blknos[i] identifies the block from self.blocks that contains this column.

        blklocs[i] identifies the column of interest within
        self.blocks[self.blknos[i]]
        """
        if self._blknos is None:
            # Note: these can be altered by other BlockManager methods.
            self._rebuild_blknos_and_blklocs()

        return self._blknos

    @property
    def blklocs(self) -> npt.NDArray[np.intp]:
        """
        See blknos.__doc__
        """
        if self._blklocs is None:
            # Note: these can be altered by other BlockManager methods.
            self._rebuild_blknos_and_blklocs()

        return self._blklocs
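
    # Illustrative sketch (not part of pandas): for a frame built as
    # pd.DataFrame({"a": [1], "b": [1.5], "c": [2]}), consolidation yields one
    # int64 block holding columns ["a", "c"] and one float64 block holding
    # ["b"], so blknos could be [0, 1, 0] and blklocs [0, 0, 1] -- i.e. column
    # "c" lives at position 1 within block 0. A lookup mirroring iget:
    #
    #     blk = mgr.blocks[mgr.blknos[i]]   # block holding column i
    #     col = blk.values[mgr.blklocs[i]]  # row of that block's 2D values
    #
    # The order of self.blocks is an implementation detail, so the exact
    # blkno values above are an assumption for illustration only.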

    def make_empty(self, axes=None) -> Self:
        """return an empty BlockManager with the items axis of len 0"""
        if axes is None:
            axes = [Index([])] + self.axes[1:]

        # preserve dtype if possible
        if self.ndim == 1:
            assert isinstance(self, SingleBlockManager)  # for mypy
            blk = self.blocks[0]
            arr = blk.values[:0]
            bp = BlockPlacement(slice(0, 0))
            nb = blk.make_block_same_class(arr, placement=bp)
            blocks = [nb]
        else:
            blocks = []
        return type(self).from_blocks(blocks, axes)

    def __nonzero__(self) -> bool:
        return True

    # Python3 compat
    __bool__ = __nonzero__

    def _normalize_axis(self, axis: AxisInt) -> int:
        # switch axis to follow BlockManager logic
        if self.ndim == 2:
            axis = 1 if axis == 0 else 0
        return axis

    def set_axis(self, axis: AxisInt, new_labels: Index) -> None:
        # Caller is responsible for ensuring we have an Index object.
        self._validate_set_axis(axis, new_labels)
        self.axes[axis] = new_labels

    @property
    def is_single_block(self) -> bool:
        # Assumes we are 2D; overridden by SingleBlockManager
        return len(self.blocks) == 1

    @property
    def items(self) -> Index:
        return self.axes[0]

    def _has_no_reference(self, i: int) -> bool:
        """
        Check for column `i` if it has references.
        (whether it references another array or is itself being referenced)
        Returns True if the column has no references.
        """
        blkno = self.blknos[i]
        return self._has_no_reference_block(blkno)

    def _has_no_reference_block(self, blkno: int) -> bool:
        """
        Check for block `blkno` if it has references.
        (whether it references another array or is itself being referenced)
        Returns True if the block has no references.
        """
        return not self.blocks[blkno].refs.has_reference()

    def add_references(self, mgr: BaseBlockManager) -> None:
        """
        Adds the references from one manager to another. We assume that both
        managers have the same block structure.
        """
        if len(self.blocks) != len(mgr.blocks):
            # If block structure changes, then we made a copy
            return
        for i, blk in enumerate(self.blocks):
            blk.refs = mgr.blocks[i].refs
            blk.refs.add_reference(blk)

    def references_same_values(self, mgr: BaseBlockManager, blkno: int) -> bool:
        """
        Checks if two blocks from two different block managers reference the
        same underlying values.
        """
        blk = self.blocks[blkno]
        return any(blk is ref() for ref in mgr.blocks[blkno].refs.referenced_blocks)

    def get_dtypes(self) -> npt.NDArray[np.object_]:
        dtypes = np.array([blk.dtype for blk in self.blocks], dtype=object)
        return dtypes.take(self.blknos)

    @property
    def arrays(self) -> list[ArrayLike]:
        """
        Quick access to the backing arrays of the Blocks.

        Only for compatibility with ArrayManager for testing convenience.
        Not to be used in actual code, and return value is not the same as the
        ArrayManager method (list of 1D arrays vs iterator of 2D ndarrays / 1D EAs).

        Warning! The returned arrays don't handle Copy-on-Write, so this should
        be used with caution (only in read-mode).
        """
        return [blk.values for blk in self.blocks]

    def __repr__(self) -> str:
        output = type(self).__name__
        for i, ax in enumerate(self.axes):
            if i == 0:
                output += f"\nItems: {ax}"
            else:
                output += f"\nAxis {i}: {ax}"

        for block in self.blocks:
            output += f"\n{block}"
        return output

    def apply(
        self,
        f,
        align_keys: list[str] | None = None,
        **kwargs,
    ) -> Self:
        """
        Iterate over the blocks, collect and create a new BlockManager.

        Parameters
        ----------
        f : str or callable
            Name of the Block method to apply.
        align_keys: List[str] or None, default None
        **kwargs
            Keywords to pass to `f`

        Returns
        -------
        BlockManager
        """
        assert "filter" not in kwargs

        align_keys = align_keys or []
        result_blocks: list[Block] = []
        # fillna: Series/DataFrame is responsible for making sure value is aligned

        aligned_args = {k: kwargs[k] for k in align_keys}

        for b in self.blocks:
            if aligned_args:
                for k, obj in aligned_args.items():
                    if isinstance(obj, (ABCSeries, ABCDataFrame)):
                        # The caller is responsible for ensuring that
                        #  obj.axes[-1].equals(self.items)
                        if obj.ndim == 1:
                            kwargs[k] = obj.iloc[b.mgr_locs.indexer]._values
                        else:
                            kwargs[k] = obj.iloc[:, b.mgr_locs.indexer]._values
                    else:
                        # otherwise we have an ndarray
                        kwargs[k] = obj[b.mgr_locs.indexer]

            if callable(f):
                applied = b.apply(f, **kwargs)
            else:
                applied = getattr(b, f)(**kwargs)
            result_blocks = extend_blocks(applied, result_blocks)

        out = type(self).from_blocks(result_blocks, self.axes)
        return out

    # Alias so we can share code with ArrayManager
    apply_with_block = apply
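
    # Minimal usage sketch (hypothetical callers): a string names a Block
    # method to dispatch to, while a callable is applied to each block's
    # values via Block.apply.
    #
    #     new_mgr = mgr.apply("copy", deep=True)           # dispatch by name
    #     new_mgr = mgr.apply(lambda values: values * 2)   # callable on values
    #
    # align_keys is used by callers such as where/putmask so that a
    # Series/DataFrame argument is re-sliced to each block's mgr_locs before
    # being passed down.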

    def setitem(self, indexer, value, warn: bool = True) -> Self:
        """
        Set values with indexer.

        For SingleBlockManager, this backs s[indexer] = value
        """
        if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim:
            raise ValueError(f"Cannot set values with ndim > {self.ndim}")

        if warn and warn_copy_on_write() and not self._has_no_reference(0):
            warnings.warn(
                COW_WARNING_GENERAL_MSG,
                FutureWarning,
                stacklevel=find_stack_level(),
            )

        elif using_copy_on_write() and not self._has_no_reference(0):
            # this method is only called if there is a single block -> hardcoded 0
            # Split blocks to only copy the columns we want to modify
            if self.ndim == 2 and isinstance(indexer, tuple):
                blk_loc = self.blklocs[indexer[1]]
                if is_list_like(blk_loc) and blk_loc.ndim == 2:
                    blk_loc = np.squeeze(blk_loc, axis=0)
                elif not is_list_like(blk_loc):
                    # Keep dimension and copy data later
                    blk_loc = [blk_loc]  # type: ignore[assignment]
                if len(blk_loc) == 0:
                    return self.copy(deep=False)

                values = self.blocks[0].values
                if values.ndim == 2:
                    values = values[blk_loc]
                    # "T" has no attribute "_iset_split_block"
                    self._iset_split_block(  # type: ignore[attr-defined]
                        0, blk_loc, values
                    )
                    # first block equals values
                    self.blocks[0].setitem((indexer[0], np.arange(len(blk_loc))), value)
                    return self
            # No need to split if we either set all columns or on a single block
            #  manager
            self = self.copy()

        return self.apply("setitem", indexer=indexer, value=value)

    def diff(self, n: int) -> Self:
        # only reached with self.ndim == 2
        return self.apply("diff", n=n)

    def astype(self, dtype, copy: bool | None = False, errors: str = "raise") -> Self:
        if copy is None:
            if using_copy_on_write():
                copy = False
            else:
                copy = True
        elif using_copy_on_write():
            copy = False

        return self.apply(
            "astype",
            dtype=dtype,
            copy=copy,
            errors=errors,
            using_cow=using_copy_on_write(),
        )

    def convert(self, copy: bool | None) -> Self:
        if copy is None:
            if using_copy_on_write():
                copy = False
            else:
                copy = True
        elif using_copy_on_write():
            copy = False

        return self.apply("convert", copy=copy, using_cow=using_copy_on_write())

    def convert_dtypes(self, **kwargs):
        if using_copy_on_write():
            copy = False
        else:
            copy = True

        return self.apply(
            "convert_dtypes", copy=copy, using_cow=using_copy_on_write(), **kwargs
        )

    def get_values_for_csv(
        self, *, float_format, date_format, decimal, na_rep: str = "nan", quoting=None
    ) -> Self:
        """
        Convert values to native types (strings / python objects) that are used
        in formatting (repr / csv).
        """
        return self.apply(
            "get_values_for_csv",
            na_rep=na_rep,
            quoting=quoting,
            float_format=float_format,
            date_format=date_format,
            decimal=decimal,
        )

    @property
    def any_extension_types(self) -> bool:
        """Whether any of the blocks in this manager are extension blocks"""
        return any(block.is_extension for block in self.blocks)

    @property
    def is_view(self) -> bool:
        """return a boolean if we are a single block and are a view"""
        if len(self.blocks) == 1:
            return self.blocks[0].is_view

        # It is technically possible to figure out which blocks are views
        # e.g. [ b.values.base is not None for b in self.blocks ]
        # but then we have the case of possibly some blocks being a view
        # and some blocks not. setting in theory is possible on the non-view
        # blocks w/o causing a SettingWithCopy raise/warn. But this is a bit
        # complicated

        return False

    def _get_data_subset(self, predicate: Callable) -> Self:
        blocks = [blk for blk in self.blocks if predicate(blk.values)]
        return self._combine(blocks)

    def get_bool_data(self) -> Self:
        """
        Select blocks that are bool-dtype and columns from object-dtype blocks
        that are all-bool.
        """

        new_blocks = []

        for blk in self.blocks:
            if blk.dtype == bool:
                new_blocks.append(blk)

            elif blk.is_object:
                nbs = blk._split()
                new_blocks.extend(nb for nb in nbs if nb.is_bool)

        return self._combine(new_blocks)

    def get_numeric_data(self) -> Self:
        numeric_blocks = [blk for blk in self.blocks if blk.is_numeric]
        if len(numeric_blocks) == len(self.blocks):
            # Avoid somewhat expensive _combine
            return self
        return self._combine(numeric_blocks)

    def _combine(self, blocks: list[Block], index: Index | None = None) -> Self:
        """return a new manager with the blocks"""
        if len(blocks) == 0:
            if self.ndim == 2:
                # retain our own Index dtype
                if index is not None:
                    axes = [self.items[:0], index]
                else:
                    axes = [self.items[:0]] + self.axes[1:]
                return self.make_empty(axes)
            return self.make_empty()

        # FIXME: optimization potential
        indexer = np.sort(np.concatenate([b.mgr_locs.as_array for b in blocks]))
        inv_indexer = lib.get_reverse_indexer(indexer, self.shape[0])

        new_blocks: list[Block] = []
        for b in blocks:
            nb = b.copy(deep=False)
            nb.mgr_locs = BlockPlacement(inv_indexer[nb.mgr_locs.indexer])
            new_blocks.append(nb)

        axes = list(self.axes)
        if index is not None:
            axes[-1] = index
        axes[0] = self.items.take(indexer)

        return type(self).from_blocks(new_blocks, axes)

    @property
    def nblocks(self) -> int:
        return len(self.blocks)

    def copy(self, deep: bool | None | Literal["all"] = True) -> Self:
        """
        Make deep or shallow copy of BlockManager

        Parameters
        ----------
        deep : bool, string or None, default True
            If False or None, return a shallow copy (do not copy data)
            If 'all', copy data and a deep copy of the index

        Returns
        -------
        BlockManager
        """
        if deep is None:
            if using_copy_on_write():
                # use shallow copy
                deep = False
            else:
                # preserve deep copy for BlockManager with copy=None
                deep = True

        # this preserves the notion of view copying of axes
        if deep:
            # hit in e.g. tests.io.json.test_pandas

            def copy_func(ax):
                return ax.copy(deep=True) if deep == "all" else ax.view()

            new_axes = [copy_func(ax) for ax in self.axes]
        else:
            if using_copy_on_write():
                new_axes = [ax.view() for ax in self.axes]
            else:
                new_axes = list(self.axes)

        res = self.apply("copy", deep=deep)
        res.axes = new_axes

        if self.ndim > 1:
            # Avoid needing to re-compute these
            blknos = self._blknos
            if blknos is not None:
                res._blknos = blknos.copy()
                res._blklocs = self._blklocs.copy()

        if deep:
            res._consolidate_inplace()
        return res
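
    # Sketch of the three copy depths, assuming `mgr` is a 2D BlockManager:
    #
    #     shallow = mgr.copy(deep=False)   # same backing arrays, new manager
    #     data = mgr.copy(deep=True)       # copies data, axes stay views
    #     full = mgr.copy(deep="all")      # copies data and the Index objects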

    def consolidate(self) -> Self:
        """
        Join together blocks having same dtype

        Returns
        -------
        y : BlockManager
        """
        if self.is_consolidated():
            return self

        bm = type(self)(self.blocks, self.axes, verify_integrity=False)
        bm._is_consolidated = False
        bm._consolidate_inplace()
        return bm

    def reindex_indexer(
        self,
        new_axis: Index,
        indexer: npt.NDArray[np.intp] | None,
        axis: AxisInt,
        fill_value=None,
        allow_dups: bool = False,
        copy: bool | None = True,
        only_slice: bool = False,
        *,
        use_na_proxy: bool = False,
    ) -> Self:
        """
        Parameters
        ----------
        new_axis : Index
        indexer : ndarray[intp] or None
        axis : int
        fill_value : object, default None
        allow_dups : bool, default False
        copy : bool or None, default True
            If None, regard as False to get shallow copy.
        only_slice : bool, default False
            Whether to take views, not copies, along columns.
        use_na_proxy : bool, default False
            Whether to use a np.void ndarray for newly introduced columns.

        pandas-indexer with -1's only.
        """
        if copy is None:
            if using_copy_on_write():
                # use shallow copy
                copy = False
            else:
                # preserve deep copy for BlockManager with copy=None
                copy = True

        if indexer is None:
            if new_axis is self.axes[axis] and not copy:
                return self

            result = self.copy(deep=copy)
            result.axes = list(self.axes)
            result.axes[axis] = new_axis
            return result

        # Should be intp, but in some cases we get int64 on 32bit builds
        assert isinstance(indexer, np.ndarray)

        # some axes don't allow reindexing with dups
        if not allow_dups:
            self.axes[axis]._validate_can_reindex(indexer)

        if axis >= self.ndim:
            raise IndexError("Requested axis not found in manager")

        if axis == 0:
            new_blocks = self._slice_take_blocks_ax0(
                indexer,
                fill_value=fill_value,
                only_slice=only_slice,
                use_na_proxy=use_na_proxy,
            )
        else:
            new_blocks = [
                blk.take_nd(
                    indexer,
                    axis=1,
                    fill_value=(
                        fill_value if fill_value is not None else blk.fill_value
                    ),
                )
                for blk in self.blocks
            ]

        new_axes = list(self.axes)
        new_axes[axis] = new_axis

        new_mgr = type(self).from_blocks(new_blocks, new_axes)
        if axis == 1:
            # We can avoid the need to rebuild these
            new_mgr._blknos = self.blknos.copy()
            new_mgr._blklocs = self.blklocs.copy()
        return new_mgr
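
    # Hedged example of the indexer convention: reindexing to columns
    # ["b", "z"] against items ["a", "b"] gives indexer [1, -1]; the -1 entry
    # becomes a new all-NA column, built via _make_na_block from `fill_value`.
    #
    #     new_mgr = mgr.reindex_indexer(
    #         new_axis=pd.Index(["b", "z"]),
    #         indexer=np.array([1, -1], dtype=np.intp),
    #         axis=0,                     # axis 0 is the items axis here
    #         fill_value=np.nan,
    #     )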

    def _slice_take_blocks_ax0(
        self,
        slice_or_indexer: slice | np.ndarray,
        fill_value=lib.no_default,
        only_slice: bool = False,
        *,
        use_na_proxy: bool = False,
        ref_inplace_op: bool = False,
    ) -> list[Block]:
        """
        Slice/take blocks along axis=0.

        Overloaded for SingleBlock

        Parameters
        ----------
        slice_or_indexer : slice or np.ndarray[int64]
        fill_value : scalar, default lib.no_default
        only_slice : bool, default False
            If True, we always return views on existing arrays, never copies.
            This is used when called from ops.blockwise.operate_blockwise.
        use_na_proxy : bool, default False
            Whether to use a np.void ndarray for newly introduced columns.
        ref_inplace_op: bool, default False
            Don't track refs if True because we operate inplace

        Returns
        -------
        new_blocks : list of Block
        """
        allow_fill = fill_value is not lib.no_default

        sl_type, slobj, sllen = _preprocess_slice_or_indexer(
            slice_or_indexer, self.shape[0], allow_fill=allow_fill
        )

        if self.is_single_block:
            blk = self.blocks[0]

            if sl_type == "slice":
                # GH#32959 EABlock would fail since we can't make 0-width
                # TODO(EA2D): special casing unnecessary with 2D EAs
                if sllen == 0:
                    return []
                bp = BlockPlacement(slice(0, sllen))
                return [blk.getitem_block_columns(slobj, new_mgr_locs=bp)]
            elif not allow_fill or self.ndim == 1:
                if allow_fill and fill_value is None:
                    fill_value = blk.fill_value

                if not allow_fill and only_slice:
                    # GH#33597 slice instead of take, so we get
                    #  views instead of copies
                    blocks = [
                        blk.getitem_block_columns(
                            slice(ml, ml + 1),
                            new_mgr_locs=BlockPlacement(i),
                            ref_inplace_op=ref_inplace_op,
                        )
                        for i, ml in enumerate(slobj)
                    ]
                    return blocks
                else:
                    bp = BlockPlacement(slice(0, sllen))
                    return [
                        blk.take_nd(
                            slobj,
                            axis=0,
                            new_mgr_locs=bp,
                            fill_value=fill_value,
                        )
                    ]

        if sl_type == "slice":
            blknos = self.blknos[slobj]
            blklocs = self.blklocs[slobj]
        else:
            blknos = algos.take_nd(
                self.blknos, slobj, fill_value=-1, allow_fill=allow_fill
            )
            blklocs = algos.take_nd(
                self.blklocs, slobj, fill_value=-1, allow_fill=allow_fill
            )

        # When filling blknos, make sure blknos is updated before appending to
        #  blocks list, that way new blkno is exactly len(blocks).
        blocks = []
        group = not only_slice
        for blkno, mgr_locs in libinternals.get_blkno_placements(blknos, group=group):
            if blkno == -1:
                # If we've got here, fill_value was not lib.no_default

                blocks.append(
                    self._make_na_block(
                        placement=mgr_locs,
                        fill_value=fill_value,
                        use_na_proxy=use_na_proxy,
                    )
                )
            else:
                blk = self.blocks[blkno]

                # Otherwise, slicing along items axis is necessary.
                if not blk._can_consolidate and not blk._validate_ndim:
                    # i.e. we don't go through here for DatetimeTZBlock
                    # A non-consolidatable block, it's easy, because there's
                    # only one item and each mgr loc is a copy of that single
                    # item.
                    deep = not (only_slice or using_copy_on_write())
                    for mgr_loc in mgr_locs:
                        newblk = blk.copy(deep=deep)
                        newblk.mgr_locs = BlockPlacement(slice(mgr_loc, mgr_loc + 1))
                        blocks.append(newblk)

                else:
                    # GH#32779 to avoid the performance penalty of copying,
                    #  we may try to only slice
                    taker = blklocs[mgr_locs.indexer]
                    max_len = max(len(mgr_locs), taker.max() + 1)
                    if only_slice or using_copy_on_write():
                        taker = lib.maybe_indices_to_slice(taker, max_len)

                    if isinstance(taker, slice):
                        nb = blk.getitem_block_columns(taker, new_mgr_locs=mgr_locs)
                        blocks.append(nb)
                    elif only_slice:
                        # GH#33597 slice instead of take, so we get
                        #  views instead of copies
                        for i, ml in zip(taker, mgr_locs):
                            slc = slice(i, i + 1)
                            bp = BlockPlacement(ml)
                            nb = blk.getitem_block_columns(slc, new_mgr_locs=bp)
                            # We have np.shares_memory(nb.values, blk.values)
                            blocks.append(nb)
                    else:
                        nb = blk.take_nd(taker, axis=0, new_mgr_locs=mgr_locs)
                        blocks.append(nb)

        return blocks

    def _make_na_block(
        self, placement: BlockPlacement, fill_value=None, use_na_proxy: bool = False
    ) -> Block:
        # Note: we only get here with self.ndim == 2

        if use_na_proxy:
            assert fill_value is None
            shape = (len(placement), self.shape[1])
            vals = np.empty(shape, dtype=np.void)
            nb = NumpyBlock(vals, placement, ndim=2)
            return nb

        if fill_value is None:
            fill_value = np.nan

        shape = (len(placement), self.shape[1])

        dtype, fill_value = infer_dtype_from_scalar(fill_value)
        block_values = make_na_array(dtype, shape, fill_value)
        return new_block_2d(block_values, placement=placement)

    def take(
        self,
        indexer: npt.NDArray[np.intp],
        axis: AxisInt = 1,
        verify: bool = True,
    ) -> Self:
        """
        Take items along any axis.

        indexer : np.ndarray[np.intp]
        axis : int, default 1
        verify : bool, default True
            Check that all entries are between 0 and len(self) - 1, inclusive.
            Pass verify=False if this check has been done by the caller.

        Returns
        -------
        BlockManager
        """
        # Caller is responsible for ensuring indexer annotation is accurate

        n = self.shape[axis]
        indexer = maybe_convert_indices(indexer, n, verify=verify)

        new_labels = self.axes[axis].take(indexer)
        return self.reindex_indexer(
            new_axis=new_labels,
            indexer=indexer,
            axis=axis,
            allow_dups=True,
            copy=None,
        )
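
    # take() is reindex_indexer() with positions instead of labels; a hedged
    # equivalence, assuming `mgr` holds items ["a", "b", "c"]:
    #
    #     taken = mgr.take(np.array([2, 0], dtype=np.intp), axis=0)
    #     # same result as reindexing the items axis to pd.Index(["c", "a"])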


class BlockManager(libinternals.BlockManager, BaseBlockManager):
    """
    BaseBlockManager that holds 2D blocks.
    """

    ndim = 2

    # ----------------------------------------------------------------
    # Constructors

    def __init__(
        self,
        blocks: Sequence[Block],
        axes: Sequence[Index],
        verify_integrity: bool = True,
    ) -> None:
        if verify_integrity:
            # Assertion disabled for performance
            # assert all(isinstance(x, Index) for x in axes)

            for block in blocks:
                if self.ndim != block.ndim:
                    raise AssertionError(
                        f"Number of Block dimensions ({block.ndim}) must equal "
                        f"number of axes ({self.ndim})"
                    )
                # As of 2.0, the caller is responsible for ensuring that
                # DatetimeTZBlock with block.ndim == 2 has block.values.ndim == 2;
                # previously there was a special check for fastparquet compat.

            self._verify_integrity()

    def _verify_integrity(self) -> None:
        mgr_shape = self.shape
        tot_items = sum(len(x.mgr_locs) for x in self.blocks)
        for block in self.blocks:
            if block.shape[1:] != mgr_shape[1:]:
                raise_construction_error(tot_items, block.shape[1:], self.axes)
        if len(self.items) != tot_items:
            raise AssertionError(
                "Number of manager items must equal union of "
                f"block items\n# manager items: {len(self.items)}, # "
                f"tot_items: {tot_items}"
            )

    @classmethod
    def from_blocks(cls, blocks: list[Block], axes: list[Index]) -> Self:
        """
        Constructor for BlockManager and SingleBlockManager with same signature.
        """
        return cls(blocks, axes, verify_integrity=False)

    # ----------------------------------------------------------------
    # Indexing

    def fast_xs(self, loc: int) -> SingleBlockManager:
        """
        Return the array corresponding to `frame.iloc[loc]`.

        Parameters
        ----------
        loc : int

        Returns
        -------
        np.ndarray or ExtensionArray
        """
        if len(self.blocks) == 1:
            # TODO: this could be wrong if blk.mgr_locs is not slice(None)-like;
            #  is this ruled out in the general case?
            result = self.blocks[0].iget((slice(None), loc))
            # in the case of a single block, the new block is a view
            bp = BlockPlacement(slice(0, len(result)))
            block = new_block(
                result,
                placement=bp,
                ndim=1,
                refs=self.blocks[0].refs,
            )
            return SingleBlockManager(block, self.axes[0])

        dtype = interleaved_dtype([blk.dtype for blk in self.blocks])

        n = len(self)

        if isinstance(dtype, ExtensionDtype):
            # TODO: use object dtype as workaround for non-performant
            #  EA.__setitem__ methods. (primarily ArrowExtensionArray.__setitem__
            #  when iteratively setting individual values)
            #  https://github.com/pandas-dev/pandas/pull/54508#issuecomment-1675827918
            result = np.empty(n, dtype=object)
        else:
            result = np.empty(n, dtype=dtype)
            result = ensure_wrapped_if_datetimelike(result)

        for blk in self.blocks:
            # Such assignment may incorrectly coerce NaT to None
            # result[blk.mgr_locs] = blk._slice((slice(None), loc))
            for i, rl in enumerate(blk.mgr_locs):
                result[rl] = blk.iget((i, loc))

        if isinstance(dtype, ExtensionDtype):
            cls = dtype.construct_array_type()
            result = cls._from_sequence(result, dtype=dtype)

        bp = BlockPlacement(slice(0, len(result)))
        block = new_block(result, placement=bp, ndim=1)
        return SingleBlockManager(block, self.axes[0])
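
    # fast_xs backs `frame.iloc[loc]` row lookups: values from every block
    # are interleaved into one 1D array of the common (interleaved) dtype. A
    # hedged illustration: a frame with int64 and float64 blocks yields a
    # float64 row, while adding an object column widens the row to object.
    #
    #     row_mgr = df._mgr.fast_xs(0)   # SingleBlockManager for row 0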

    def iget(self, i: int, track_ref: bool = True) -> SingleBlockManager:
        """
        Return the data as a SingleBlockManager.
        """
        block = self.blocks[self.blknos[i]]
        values = block.iget(self.blklocs[i])

        # shortcut for selecting a single-dim from a 2-dim BM
        bp = BlockPlacement(slice(0, len(values)))
        nb = type(block)(
            values, placement=bp, ndim=1, refs=block.refs if track_ref else None
        )
        return SingleBlockManager(nb, self.axes[1])

    def iget_values(self, i: int) -> ArrayLike:
        """
        Return the data for column i as the values (ndarray or ExtensionArray).

        Warning! The returned array is a view but doesn't handle Copy-on-Write,
        so this should be used with caution.
        """
        # TODO(CoW) making the arrays read-only might make this safer to use?
        block = self.blocks[self.blknos[i]]
        values = block.iget(self.blklocs[i])
        return values

    @property
    def column_arrays(self) -> list[np.ndarray]:
        """
        Used in the JSON C code to access column arrays.
        This optimizes compared to using `iget_values` by converting each
        block.values to a np.ndarray only once up front.

        Warning! This doesn't handle Copy-on-Write, so should be used with
        caution (current use case of consuming this in the JSON code is fine).
        """
        # This is an optimized equivalent to
        #  result = [self.iget_values(i) for i in range(len(self.items))]
        result: list[np.ndarray | None] = [None] * len(self.items)

        for blk in self.blocks:
            mgr_locs = blk._mgr_locs
            values = blk.array_values._values_for_json()
            if values.ndim == 1:
                # TODO(EA2D): special casing not needed with 2D EAs
                result[mgr_locs[0]] = values

            else:
                for i, loc in enumerate(mgr_locs):
                    result[loc] = values[i]

        # error: Incompatible return value type (got "List[None]",
        # expected "List[ndarray[Any, Any]]")
        return result  # type: ignore[return-value]

    def iset(
        self,
        loc: int | slice | np.ndarray,
        value: ArrayLike,
        inplace: bool = False,
        refs: BlockValuesRefs | None = None,
    ) -> None:
        """
        Set new item in-place. Does not consolidate. Adds new Block if not
        contained in the current set of items
        """

        # FIXME: refactor, clearly separate broadcasting & zip-like assignment
        #  can prob also fix the various if tests for sparse/categorical
        if self._blklocs is None and self.ndim > 1:
            self._rebuild_blknos_and_blklocs()

        # Note: we exclude DTA/TDA here
        value_is_extension_type = is_1d_only_ea_dtype(value.dtype)
        if not value_is_extension_type:
            if value.ndim == 2:
                value = value.T
            else:
                value = ensure_block_shape(value, ndim=2)

            if value.shape[1:] != self.shape[1:]:
                raise AssertionError(
                    "Shape of new values must be compatible with manager shape"
                )

        if lib.is_integer(loc):
            # We have 6 tests where loc is _not_ an int.
            # In this case, get_blkno_placements will yield only one tuple,
            #  containing (self._blknos[loc], BlockPlacement(slice(0, 1, 1)))

            # Check if we can use _iset_single fastpath
            loc = cast(int, loc)
            blkno = self.blknos[loc]
            blk = self.blocks[blkno]
            if len(blk._mgr_locs) == 1:  # TODO: fastest way to check this?
                return self._iset_single(
                    loc,
                    value,
                    inplace=inplace,
                    blkno=blkno,
                    blk=blk,
                    refs=refs,
                )

            # error: Incompatible types in assignment (expression has type
            # "List[Union[int, slice, ndarray]]", variable has type "Union[int,
            # slice, ndarray]")
            loc = [loc]  # type: ignore[assignment]

        # categorical/sparse/datetimetz
        if value_is_extension_type:

            def value_getitem(placement):
                return value

        else:

            def value_getitem(placement):
                return value[placement.indexer]

        # Accessing public blknos ensures the public versions are initialized
        blknos = self.blknos[loc]
        blklocs = self.blklocs[loc].copy()

        unfit_mgr_locs = []
        unfit_val_locs = []
        removed_blknos = []
        for blkno_l, val_locs in libinternals.get_blkno_placements(blknos, group=True):
            blk = self.blocks[blkno_l]
            blk_locs = blklocs[val_locs.indexer]
            if inplace and blk.should_store(value):
                # Updating inplace -> check if we need to do Copy-on-Write
                if using_copy_on_write() and not self._has_no_reference_block(blkno_l):
                    self._iset_split_block(
                        blkno_l, blk_locs, value_getitem(val_locs), refs=refs
                    )
                else:
                    blk.set_inplace(blk_locs, value_getitem(val_locs))
                    continue
            else:
                unfit_mgr_locs.append(blk.mgr_locs.as_array[blk_locs])
                unfit_val_locs.append(val_locs)

                # If all block items are unfit, schedule the block for removal.
                if len(val_locs) == len(blk.mgr_locs):
                    removed_blknos.append(blkno_l)
                    continue
                else:
                    # Defer setting the new values to enable consolidation
                    self._iset_split_block(blkno_l, blk_locs, refs=refs)

        if len(removed_blknos):
            # Remove blocks & update blknos accordingly
            is_deleted = np.zeros(self.nblocks, dtype=np.bool_)
            is_deleted[removed_blknos] = True

            new_blknos = np.empty(self.nblocks, dtype=np.intp)
            new_blknos.fill(-1)
            new_blknos[~is_deleted] = np.arange(self.nblocks - len(removed_blknos))
            self._blknos = new_blknos[self._blknos]
            self.blocks = tuple(
                blk for i, blk in enumerate(self.blocks) if i not in set(removed_blknos)
            )

        if unfit_val_locs:
            unfit_idxr = np.concatenate(unfit_mgr_locs)
            unfit_count = len(unfit_idxr)

            new_blocks: list[Block] = []
            if value_is_extension_type:
                # This code (ab-)uses the fact that EA blocks contain only
                # one item.
                # TODO(EA2D): special casing unnecessary with 2D EAs
                new_blocks.extend(
                    new_block_2d(
                        values=value,
                        placement=BlockPlacement(slice(mgr_loc, mgr_loc + 1)),
                        refs=refs,
                    )
                    for mgr_loc in unfit_idxr
                )

                self._blknos[unfit_idxr] = np.arange(unfit_count) + len(self.blocks)
                self._blklocs[unfit_idxr] = 0

            else:
                # unfit_val_locs contains BlockPlacement objects
                unfit_val_items = unfit_val_locs[0].append(unfit_val_locs[1:])

                new_blocks.append(
                    new_block_2d(
                        values=value_getitem(unfit_val_items),
                        placement=BlockPlacement(unfit_idxr),
                        refs=refs,
                    )
                )

                self._blknos[unfit_idxr] = len(self.blocks)
                self._blklocs[unfit_idxr] = np.arange(unfit_count)

            self.blocks += tuple(new_blocks)

            # Newly created block's dtype may already be present.
            self._known_consolidated = False

    def _iset_split_block(
        self,
        blkno_l: int,
        blk_locs: np.ndarray | list[int],
        value: ArrayLike | None = None,
        refs: BlockValuesRefs | None = None,
    ) -> None:
        """Removes columns from a block by splitting the block.

        Avoids copying the whole block through slicing and updates the manager
        after determining the new block structure. Optionally adds a new block,
        otherwise has to be done by the caller.

        Parameters
        ----------
        blkno_l: The block number to operate on, relevant for updating the manager
        blk_locs: The locations of our block that should be deleted.
        value: The value to set as a replacement.
        refs: The reference tracking object of the value to set.
        """
        blk = self.blocks[blkno_l]

        if self._blklocs is None:
            self._rebuild_blknos_and_blklocs()

        nbs_tup = tuple(blk.delete(blk_locs))
        if value is not None:
            locs = blk.mgr_locs.as_array[blk_locs]
            first_nb = new_block_2d(value, BlockPlacement(locs), refs=refs)
        else:
            first_nb = nbs_tup[0]
            nbs_tup = tuple(nbs_tup[1:])

        nr_blocks = len(self.blocks)
        blocks_tup = (
            self.blocks[:blkno_l] + (first_nb,) + self.blocks[blkno_l + 1 :] + nbs_tup
        )
        self.blocks = blocks_tup

        if not nbs_tup and value is not None:
            # No need to update anything if split did not happen
            return

        self._blklocs[first_nb.mgr_locs.indexer] = np.arange(len(first_nb))

        for i, nb in enumerate(nbs_tup):
            self._blklocs[nb.mgr_locs.indexer] = np.arange(len(nb))
            self._blknos[nb.mgr_locs.indexer] = i + nr_blocks

    def _iset_single(
        self,
        loc: int,
        value: ArrayLike,
        inplace: bool,
        blkno: int,
        blk: Block,
        refs: BlockValuesRefs | None = None,
    ) -> None:
        """
        Fastpath for iset when we are only setting a single position and
        the Block currently in that position is itself single-column.

        In this case we can swap out the entire Block and blklocs and blknos
        are unaffected.
        """
        # Caller is responsible for verifying value.shape

        if inplace and blk.should_store(value):
            copy = False
            if using_copy_on_write() and not self._has_no_reference_block(blkno):
                # perform Copy-on-Write and clear the reference
                copy = True
            iloc = self.blklocs[loc]
            blk.set_inplace(slice(iloc, iloc + 1), value, copy=copy)
            return

        nb = new_block_2d(value, placement=blk._mgr_locs, refs=refs)
        old_blocks = self.blocks
        new_blocks = old_blocks[:blkno] + (nb,) + old_blocks[blkno + 1 :]
        self.blocks = new_blocks
        return

    def column_setitem(
        self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False
    ) -> None:
        """
        Set values ("setitem") into a single column (not setting the full column).

        This is a method on the BlockManager level, to avoid creating an
        intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`)
        """
        needs_to_warn = False
        if warn_copy_on_write() and not self._has_no_reference(loc):
            if not isinstance(
                self.blocks[self.blknos[loc]].values,
                (ArrowExtensionArray, ArrowStringArray),
            ):
                # We might raise if we are in an expansion case, so defer
                #  warning till we actually updated
                needs_to_warn = True

        elif using_copy_on_write() and not self._has_no_reference(loc):
            blkno = self.blknos[loc]
            # Split blocks to only copy the column we want to modify
            blk_loc = self.blklocs[loc]
            # Copy our values
            values = self.blocks[blkno].values
            if values.ndim == 1:
                values = values.copy()
            else:
                # Use [blk_loc] as indexer to keep ndim=2, this already results in a
                # copy
                values = values[[blk_loc]]
            self._iset_split_block(blkno, [blk_loc], values)

        # this manager is only created temporarily to mutate the values in place
        # so don't track references, otherwise the `setitem` would perform CoW again
        col_mgr = self.iget(loc, track_ref=False)
        if inplace_only:
            col_mgr.setitem_inplace(idx, value)
        else:
            new_mgr = col_mgr.setitem((idx,), value)
            self.iset(loc, new_mgr._block.values, inplace=True)

        if needs_to_warn:
            warnings.warn(
                COW_WARNING_GENERAL_MSG,
                FutureWarning,
                stacklevel=find_stack_level(),
            )
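
    # Hedged usage sketch: setting one cell without materializing an
    # intermediate Series, roughly what `df.iloc[1, 0] = 3.14` ends up doing.
    #
    #     df._mgr.column_setitem(loc=0, idx=1, value=3.14)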

    def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None:
        """
        Insert item at selected position.

        Parameters
        ----------
        loc : int
        item : hashable
        value : np.ndarray or ExtensionArray
        refs : The reference tracking object of the value to set.
        """
        with warnings.catch_warnings():
            # TODO: re-issue this with setitem-specific message?
            warnings.filterwarnings(
                "ignore",
                "The behavior of Index.insert with object-dtype is deprecated",
                category=FutureWarning,
            )
            new_axis = self.items.insert(loc, item)

        if value.ndim == 2:
            value = value.T
            if len(value) > 1:
                raise ValueError(
                    f"Expected a 1D array, got an array with shape {value.T.shape}"
                )
        else:
            value = ensure_block_shape(value, ndim=self.ndim)

        bp = BlockPlacement(slice(loc, loc + 1))
        block = new_block_2d(values=value, placement=bp, refs=refs)

        if not len(self.blocks):
            # Fastpath
            self._blklocs = np.array([0], dtype=np.intp)
            self._blknos = np.array([0], dtype=np.intp)
        else:
            self._insert_update_mgr_locs(loc)
            self._insert_update_blklocs_and_blknos(loc)

        self.axes[0] = new_axis
        self.blocks += (block,)

        self._known_consolidated = False

        if sum(not block.is_extension for block in self.blocks) > 100:
            warnings.warn(
                "DataFrame is highly fragmented. This is usually the result "
                "of calling `frame.insert` many times, which has poor performance. "
                "Consider joining all columns at once using pd.concat(axis=1) "
                "instead. To get a de-fragmented frame, use `newframe = frame.copy()`",
                PerformanceWarning,
                stacklevel=find_stack_level(),
            )
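
    # Sketch of the fragmentation the warning describes: every insert appends
    # one new, unconsolidated block, so inserting column by column leaves
    # hundreds of blocks. Building the frame in a single concat (or taking a
    # consolidating copy afterwards) sidesteps this path. Names below are
    # hypothetical:
    #
    #     parts = {i: pd.Series(range(3)) for i in range(150)}
    #     wide = pd.concat(parts, axis=1)     # one consolidated manager
    #     defrag = fragmented.copy()          # copy() consolidates, per above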

    def _insert_update_mgr_locs(self, loc) -> None:
        """
        When inserting a new Block at location 'loc', we increment
        all of the mgr_locs of blocks above that by one.
        """
        for blkno, count in _fast_count_smallints(self.blknos[loc:]):
            # .620 this way, .326 of which is in increment_above
            blk = self.blocks[blkno]
            blk._mgr_locs = blk._mgr_locs.increment_above(loc)

    def _insert_update_blklocs_and_blknos(self, loc) -> None:
        """
        When inserting a new Block at location 'loc', we update our
        _blklocs and _blknos.
        """

        # Accessing public blklocs ensures the public versions are initialized
        if loc == self.blklocs.shape[0]:
            # np.append is a lot faster, let's use it if we can.
            self._blklocs = np.append(self._blklocs, 0)
            self._blknos = np.append(self._blknos, len(self.blocks))
        elif loc == 0:
            # np.append is a lot faster, let's use it if we can.
            self._blklocs = np.append(self._blklocs[::-1], 0)[::-1]
            self._blknos = np.append(self._blknos[::-1], len(self.blocks))[::-1]
        else:
            new_blklocs, new_blknos = libinternals.update_blklocs_and_blknos(
                self.blklocs, self.blknos, loc, len(self.blocks)
            )
            self._blklocs = new_blklocs
            self._blknos = new_blknos

    def idelete(self, indexer) -> BlockManager:
        """
        Delete selected locations, returning a new BlockManager.
        """
        is_deleted = np.zeros(self.shape[0], dtype=np.bool_)
        is_deleted[indexer] = True
        taker = (~is_deleted).nonzero()[0]

        nbs = self._slice_take_blocks_ax0(taker, only_slice=True, ref_inplace_op=True)
        new_columns = self.items[~is_deleted]
        axes = [new_columns, self.axes[1]]
        return type(self)(tuple(nbs), axes, verify_integrity=False)

    # ----------------------------------------------------------------
    # Block-wise Operation

    def grouped_reduce(self, func: Callable) -> Self:
        """
        Apply grouped reduction function blockwise, returning a new BlockManager.

        Parameters
        ----------
        func : grouped reduction function

        Returns
        -------
        BlockManager
        """
        result_blocks: list[Block] = []

        for blk in self.blocks:
            if blk.is_object:
                # split on object-dtype blocks because some columns may raise
                #  while others do not.
                for sb in blk._split():
                    applied = sb.apply(func)
                    result_blocks = extend_blocks(applied, result_blocks)
            else:
                applied = blk.apply(func)
                result_blocks = extend_blocks(applied, result_blocks)

        if len(result_blocks) == 0:
            nrows = 0
        else:
            nrows = result_blocks[0].values.shape[-1]
        index = Index(range(nrows))

        return type(self).from_blocks(result_blocks, [self.axes[0], index])

    def reduce(self, func: Callable) -> Self:
        """
        Apply reduction function blockwise, returning a single-row BlockManager.

        Parameters
        ----------
        func : reduction function

        Returns
        -------
        BlockManager
        """
        # If 2D, we assume that we're operating column-wise
        assert self.ndim == 2

        res_blocks: list[Block] = []
        for blk in self.blocks:
            nbs = blk.reduce(func)
            res_blocks.extend(nbs)

        index = Index([None])  # placeholder
        new_mgr = type(self).from_blocks(res_blocks, [self.items, index])
        return new_mgr

    def operate_blockwise(self, other: BlockManager, array_op) -> BlockManager:
        """
        Apply array_op blockwise with another (aligned) BlockManager.
        """
        return operate_blockwise(self, other, array_op)

    def _equal_values(self: BlockManager, other: BlockManager) -> bool:
        """
        Used in .equals defined in base class. Only check the column values
        assuming shape and indexes have already been checked.
        """
        return blockwise_all(self, other, array_equals)

    def quantile(
        self,
        *,
        qs: Index,  # with dtype float64
        interpolation: QuantileInterpolation = "linear",
    ) -> Self:
        """
        Iterate over blocks applying quantile reduction.
        This routine is intended for reduction type operations and
        will do inference on the generated blocks.

        Parameters
        ----------
        interpolation : type of interpolation, default 'linear'
        qs : list of the quantiles to be computed

        Returns
        -------
        BlockManager
        """
        # Series dispatches to DataFrame for quantile, which allows us to
        #  simplify some of the code here and in the blocks
        assert self.ndim >= 2
        assert is_list_like(qs)  # caller is responsible for this

        new_axes = list(self.axes)
        new_axes[1] = Index(qs, dtype=np.float64)

        blocks = [
            blk.quantile(qs=qs, interpolation=interpolation) for blk in self.blocks
        ]

        return type(self)(blocks, new_axes)

    # ----------------------------------------------------------------

    def unstack(self, unstacker, fill_value) -> BlockManager:
        """
        Return a BlockManager with all blocks unstacked.

        Parameters
        ----------
        unstacker : reshape._Unstacker
        fill_value : Any
            fill_value for newly introduced missing values.

        Returns
        -------
        unstacked : BlockManager
        """
        new_columns = unstacker.get_new_columns(self.items)
        new_index = unstacker.new_index

        allow_fill = not unstacker.mask_all
        if allow_fill:
            # calculating the full mask once and passing it to Block._unstack is
            #  faster than recalculating it in each repeated call
            new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape)
            needs_masking = new_mask2D.any(axis=0)
        else:
            needs_masking = np.zeros(unstacker.full_shape[1], dtype=bool)

        new_blocks: list[Block] = []
        columns_mask: list[np.ndarray] = []

        if len(self.items) == 0:
            factor = 1
        else:
            fac = len(new_columns) / len(self.items)
            assert fac == int(fac)
            factor = int(fac)

        for blk in self.blocks:
            mgr_locs = blk.mgr_locs
            new_placement = mgr_locs.tile_for_unstack(factor)

            blocks, mask = blk._unstack(
                unstacker,
                fill_value,
                new_placement=new_placement,
                needs_masking=needs_masking,
            )

            new_blocks.extend(blocks)
            columns_mask.extend(mask)

            # Block._unstack should ensure this holds,
            assert mask.sum() == sum(len(nb._mgr_locs) for nb in blocks)
            # In turn this ensures that in the BlockManager call below
            #  we have len(new_columns) == sum(x.shape[0] for x in new_blocks)
            #  which suffices to allow us to pass verify_integrity=False

        new_columns = new_columns[columns_mask]

        bm = BlockManager(new_blocks, [new_columns, new_index], verify_integrity=False)
        return bm

    def to_dict(self) -> dict[str, Self]:
        """
        Return a dict of str(dtype) -> BlockManager

        Returns
        -------
        values : a dict of dtype -> BlockManager
        """

        bd: dict[str, list[Block]] = {}
        for b in self.blocks:
            bd.setdefault(str(b.dtype), []).append(b)

        # TODO(EA2D): the combine will be unnecessary with 2D EAs
        return {dtype: self._combine(blocks) for dtype, blocks in bd.items()}

    def as_array(
        self,
        dtype: np.dtype | None = None,
        copy: bool = False,
        na_value: object = lib.no_default,
    ) -> np.ndarray:
        """
        Convert the blockmanager data into a numpy array.

        Parameters
        ----------
        dtype : np.dtype or None, default None
            Data type of the return array.
        copy : bool, default False
            If True then guarantee that a copy is returned. A value of
            False does not guarantee that the underlying data is not
            copied.
        na_value : object, default lib.no_default
            Value to be used as the missing value sentinel.

        Returns
        -------
        arr : ndarray
        """
        passed_nan = lib.is_float(na_value) and isna(na_value)

        if len(self.blocks) == 0:
            arr = np.empty(self.shape, dtype=float)
            return arr.transpose()

        if self.is_single_block:
            blk = self.blocks[0]

            if na_value is not lib.no_default:
                # We want to copy when na_value is provided to avoid
                # mutating the original object
                if lib.is_np_dtype(blk.dtype, "f") and passed_nan:
                    # We are already numpy-float and na_value=np.nan
                    pass
                else:
                    copy = True

            if blk.is_extension:
                # Avoid implicit conversion of extension blocks to object

                # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no
                # attribute "to_numpy"
                arr = blk.values.to_numpy(  # type: ignore[union-attr]
                    dtype=dtype,
                    na_value=na_value,
                    copy=copy,
                ).reshape(blk.shape)
            elif not copy:
                arr = np.asarray(blk.values, dtype=dtype)
            else:
                arr = np.array(blk.values, dtype=dtype, copy=copy)

            if using_copy_on_write() and not copy:
                arr = arr.view()
                arr.flags.writeable = False
        else:
            arr = self._interleave(dtype=dtype, na_value=na_value)
            # The underlying data was copied within _interleave, so no need
            # to further copy if copy=True or setting na_value

        if na_value is lib.no_default:
            pass
        elif arr.dtype.kind == "f" and passed_nan:
            pass
        else:
            arr[isna(arr)] = na_value

        return arr.transpose()
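
    # Hedged example: DataFrame.to_numpy is a thin wrapper over this method.
    #
    #     arr = df._mgr.as_array(dtype=np.dtype("float64"), na_value=np.nan)
    #
    # With a single block, copy=False, and Copy-on-Write enabled, the result
    # is a read-only view (writeable=False) rather than a fresh copy.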

    def _interleave(
        self,
        dtype: np.dtype | None = None,
        na_value: object = lib.no_default,
    ) -> np.ndarray:
        """
        Return ndarray from blocks with specified item order
        Items must be contained in the blocks
        """
        if not dtype:
            # Incompatible types in assignment (expression has type
            # "Optional[Union[dtype[Any], ExtensionDtype]]", variable has
            # type "Optional[dtype[Any]]")
            dtype = interleaved_dtype(  # type: ignore[assignment]
                [blk.dtype for blk in self.blocks]
            )

        # error: Argument 1 to "ensure_np_dtype" has incompatible type
        # "Optional[dtype[Any]]"; expected "Union[dtype[Any], ExtensionDtype]"
        dtype = ensure_np_dtype(dtype)  # type: ignore[arg-type]
        result = np.empty(self.shape, dtype=dtype)

        itemmask = np.zeros(self.shape[0])

        if dtype == np.dtype("object") and na_value is lib.no_default:
            # much more performant than using to_numpy below
            for blk in self.blocks:
                rl = blk.mgr_locs
                arr = blk.get_values(dtype)
                result[rl.indexer] = arr
                itemmask[rl.indexer] = 1
            return result

        for blk in self.blocks:
            rl = blk.mgr_locs
            if blk.is_extension:
                # Avoid implicit conversion of extension blocks to object

                # error: Item "ndarray" of "Union[ndarray, ExtensionArray]" has no
                # attribute "to_numpy"
                arr = blk.values.to_numpy(  # type: ignore[union-attr]
                    dtype=dtype,
                    na_value=na_value,
                )
            else:
                arr = blk.get_values(dtype)
            result[rl.indexer] = arr
            itemmask[rl.indexer] = 1

        if not itemmask.all():
            raise AssertionError("Some items were not contained in blocks")

        return result

    # ----------------------------------------------------------------
    # Consolidation

    def is_consolidated(self) -> bool:
        """
        Return True if the blocks are consolidated, i.e. no two
        consolidatable blocks share the same dtype.
        """
        if not self._known_consolidated:
            self._consolidate_check()
        return self._is_consolidated

    def _consolidate_check(self) -> None:
        if len(self.blocks) == 1:
            # fastpath
            self._is_consolidated = True
            self._known_consolidated = True
            return
        dtypes = [blk.dtype for blk in self.blocks if blk._can_consolidate]
        self._is_consolidated = len(dtypes) == len(set(dtypes))
        self._known_consolidated = True

    def _consolidate_inplace(self) -> None:
        # In general, _consolidate_inplace should only be called via
        #  DataFrame._consolidate_inplace, otherwise we will fail to invalidate
        #  the DataFrame's _item_cache. The exception is for newly-created
        #  BlockManager objects not yet attached to a DataFrame.
        if not self.is_consolidated():
            self.blocks = _consolidate(self.blocks)
            self._is_consolidated = True
            self._known_consolidated = True
            self._rebuild_blknos_and_blklocs()
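
    # Consolidation sketch: two separate int64 blocks (e.g. left behind by
    # two inserts) merge into a single block holding both columns, after
    # which is_consolidated() reports True again.
    #
    #     mgr._consolidate_inplace()
    #     assert mgr.is_consolidated()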
1792
+
1793
+ # ----------------------------------------------------------------
1794
+ # Concatenation
1795
+
1796
+ @classmethod
1797
+ def concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self:
1798
+ """
1799
+ Concatenate uniformly-indexed BlockManagers horizontally.
1800
+ """
1801
+ offset = 0
1802
+ blocks: list[Block] = []
1803
+ for mgr in mgrs:
1804
+ for blk in mgr.blocks:
1805
+ # We need to do getitem_block here otherwise we would be altering
1806
+ # blk.mgr_locs in place, which would render it invalid. This is only
1807
+ # relevant in the copy=False case.
1808
+ nb = blk.slice_block_columns(slice(None))
1809
+ nb._mgr_locs = nb._mgr_locs.add(offset)
1810
+ blocks.append(nb)
1811
+
1812
+ offset += len(mgr.items)
1813
+
1814
+ new_mgr = cls(tuple(blocks), axes)
1815
+ return new_mgr
1816
+
+    @classmethod
+    def concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self:
+        """
+        Concatenate uniformly-indexed BlockManagers vertically.
+        """
+        raise NotImplementedError("This logic lives (for now) in internals.concat")
+
+
+class SingleBlockManager(BaseBlockManager, SingleDataManager):
+    """Manage a single block with a single axis (i.e. backing a Series)."""
+
+    @property
+    def ndim(self) -> Literal[1]:
+        return 1
+
+    _is_consolidated = True
+    _known_consolidated = True
+    __slots__ = ()
+    is_single_block = True
+
+    def __init__(
+        self,
+        block: Block,
+        axis: Index,
+        verify_integrity: bool = False,
+    ) -> None:
+        # Assertions disabled for performance
+        # assert isinstance(block, Block), type(block)
+        # assert isinstance(axis, Index), type(axis)
+
+        self.axes = [axis]
+        self.blocks = (block,)
+
+    @classmethod
+    def from_blocks(
+        cls,
+        blocks: list[Block],
+        axes: list[Index],
+    ) -> Self:
+        """
+        Constructor for BlockManager and SingleBlockManager with same signature.
+        """
+        assert len(blocks) == 1
+        assert len(axes) == 1
+        return cls(blocks[0], axes[0], verify_integrity=False)
+
+    @classmethod
+    def from_array(
+        cls, array: ArrayLike, index: Index, refs: BlockValuesRefs | None = None
+    ) -> SingleBlockManager:
+        """
+        Constructor for the case where we have an array that is not yet a Block.
+        """
+        array = maybe_coerce_values(array)
+        bp = BlockPlacement(slice(0, len(index)))
+        block = new_block(array, placement=bp, ndim=1, refs=refs)
+        return cls(block, index)
+
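Every Series is backed by a SingleBlockManager constructed along these lines; a small sketch poking at private attributes (subject to change across versions):

import pandas as pd

ser = pd.Series([1, 2, 3], name="x")
mgr = ser._mgr
print(type(mgr).__name__)      # SingleBlockManager
print(mgr.blocks[0].mgr_locs)  # a BlockPlacement covering slice(0, 3)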
+    def to_2d_mgr(self, columns: Index) -> BlockManager:
+        """
+        Manager analogue of Series.to_frame
+        """
+        blk = self.blocks[0]
+        arr = ensure_block_shape(blk.values, ndim=2)
+        bp = BlockPlacement(0)
+        new_blk = type(blk)(arr, placement=bp, ndim=2, refs=blk.refs)
+        axes = [columns, self.axes[0]]
+        return BlockManager([new_blk], axes=axes, verify_integrity=False)
+
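This is the manager-level counterpart of Series.to_frame: the 1D block values are reshaped to (1, n) and a columns axis is prepended. A quick sketch:

import pandas as pd

ser = pd.Series([1, 2, 3], name="x")
df = ser.to_frame()     # builds a 2D manager from the 1D one
print(df._mgr.shape)    # (1, 3): one column, three rows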
+    def _has_no_reference(self, i: int = 0) -> bool:
+        """
+        Check whether column `i` has references
+        (i.e. whether it references another array or is itself being referenced).
+        Returns True if the column has no references.
+        """
+        return not self.blocks[0].refs.has_reference()
+
+    def __getstate__(self):
+        block_values = [b.values for b in self.blocks]
+        block_items = [self.items[b.mgr_locs.indexer] for b in self.blocks]
+        axes_array = list(self.axes)
+
+        extra_state = {
+            "0.14.1": {
+                "axes": axes_array,
+                "blocks": [
+                    {"values": b.values, "mgr_locs": b.mgr_locs.indexer}
+                    for b in self.blocks
+                ],
+            }
+        }
+
+        # First three elements of the state are to maintain forward
+        # compatibility with 0.13.1.
+        return axes_array, block_values, block_items, extra_state
+
+    def __setstate__(self, state) -> None:
+        def unpickle_block(values, mgr_locs, ndim: int) -> Block:
+            # TODO(EA2D): ndim would be unnecessary with 2D EAs
+            # older pickles may store e.g. DatetimeIndex instead of DatetimeArray
+            values = extract_array(values, extract_numpy=True)
+            if not isinstance(mgr_locs, BlockPlacement):
+                mgr_locs = BlockPlacement(mgr_locs)
+
+            values = maybe_coerce_values(values)
+            return new_block(values, placement=mgr_locs, ndim=ndim)
+
+        if isinstance(state, tuple) and len(state) >= 4 and "0.14.1" in state[3]:
+            state = state[3]["0.14.1"]
+            self.axes = [ensure_index(ax) for ax in state["axes"]]
+            ndim = len(self.axes)
+            self.blocks = tuple(
+                unpickle_block(b["values"], b["mgr_locs"], ndim=ndim)
+                for b in state["blocks"]
+            )
+        else:
+            raise NotImplementedError("pre-0.14.1 pickles are no longer supported")
+
+        self._post_setstate()
+
+    def _post_setstate(self) -> None:
+        pass
+
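Only the "0.14.1" entry of the state tuple is consumed on load; the first three elements exist for very old readers. A round-trip sanity check:

import pickle

import pandas as pd

ser = pd.Series([1.0, 2.0], index=["a", "b"])
restored = pickle.loads(pickle.dumps(ser))
print(restored.equals(ser))   # True: axes and block values survive the trip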
+    @cache_readonly
+    def _block(self) -> Block:
+        return self.blocks[0]
+
+    @property
+    def _blknos(self):
+        """compat with BlockManager"""
+        return None
+
+    @property
+    def _blklocs(self):
+        """compat with BlockManager"""
+        return None
+
+    def get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> Self:
+        # similar to get_slice, but not restricted to slice indexer
+        blk = self._block
+        if using_copy_on_write() and len(indexer) > 0 and indexer.all():
+            return type(self)(blk.copy(deep=False), self.index)
+        array = blk.values[indexer]
+
+        if isinstance(indexer, np.ndarray) and indexer.dtype.kind == "b":
+            # boolean indexing always gives a copy with numpy
+            refs = None
+        else:
+            # TODO(CoW) in theory only need to track reference if new_array is a view
+            refs = blk.refs
+
+        bp = BlockPlacement(slice(0, len(array)))
+        block = type(blk)(array, placement=bp, ndim=1, refs=refs)
+
+        new_idx = self.index[indexer]
+        return type(self)(block, new_idx)
+
+    def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleBlockManager:
+        # Assertion disabled for performance
+        # assert isinstance(slobj, slice), type(slobj)
+        if axis >= self.ndim:
+            raise IndexError("Requested axis not found in manager")
+
+        blk = self._block
+        array = blk.values[slobj]
+        bp = BlockPlacement(slice(0, len(array)))
+        # TODO this method is only used in groupby SeriesSplitter at the moment,
+        # so passing refs is not yet covered by the tests
+        block = type(blk)(array, placement=bp, ndim=1, refs=blk.refs)
+        new_index = self.index._getitem_slice(slobj)
+        return type(self)(block, new_index)
+
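Positional slicing of a Series funnels into slice handling like the above, where numpy slicing yields views (hence the ref propagation). A sketch of the visible behavior:

import pandas as pd

ser = pd.Series([10, 20, 30, 40])
head = ser.iloc[1:3]         # slices both the block values and the index
print(head.index.tolist())   # [1, 2]
print(head.tolist())         # [20, 30]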
+    @property
+    def index(self) -> Index:
+        return self.axes[0]
+
+    @property
+    def dtype(self) -> DtypeObj:
+        return self._block.dtype
+
+    def get_dtypes(self) -> npt.NDArray[np.object_]:
+        return np.array([self._block.dtype], dtype=object)
+
+    def external_values(self):
+        """The array that Series.values returns"""
+        return self._block.external_values()
+
+    def internal_values(self):
+        """The array that Series._values returns"""
+        return self._block.values
+
+    def array_values(self) -> ExtensionArray:
+        """The array that Series.array returns"""
+        return self._block.array_values
+
+    def get_numeric_data(self) -> Self:
+        if self._block.is_numeric:
+            return self.copy(deep=False)
+        return self.make_empty()
+
+    @property
+    def _can_hold_na(self) -> bool:
+        return self._block._can_hold_na
+
+    def setitem_inplace(self, indexer, value, warn: bool = True) -> None:
+        """
+        Set values with indexer.
+
+        For Single[Block/Array]Manager, this backs s[indexer] = value
+
+        This is an inplace version of `setitem()`, mutating the manager/values
+        in place, not returning a new Manager (and Block), and thus never changing
+        the dtype.
+        """
+        using_cow = using_copy_on_write()
+        warn_cow = warn_copy_on_write()
+        if (using_cow or warn_cow) and not self._has_no_reference(0):
+            if using_cow:
+                self.blocks = (self._block.copy(),)
+                self._cache.clear()
+            elif warn_cow and warn:
+                warnings.warn(
+                    COW_WARNING_SETITEM_MSG,
+                    FutureWarning,
+                    stacklevel=find_stack_level(),
+                )
+
+        super().setitem_inplace(indexer, value)
+
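Under Copy-on-Write the block is copied before mutating whenever another object still references it. A sketch of the user-visible effect, assuming a pandas version where the mode.copy_on_write option exists:

import pandas as pd

pd.set_option("mode.copy_on_write", True)

ser = pd.Series([1, 2, 3])
view = ser[:]    # a second object referencing the same block
ser[0] = 99      # setitem_inplace copies the block first
print(view[0])   # 1: the other reference is unaffected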
+    def idelete(self, indexer) -> SingleBlockManager:
+        """
+        Delete single location from SingleBlockManager.
+
+        Ensures that self.blocks doesn't become empty.
+        """
+        nb = self._block.delete(indexer)[0]
+        self.blocks = (nb,)
+        self.axes[0] = self.axes[0].delete(indexer)
+        self._cache.clear()
+        return self
+
+    def fast_xs(self, loc):
+        """
+        fast path for getting a cross-section
+        return a view of the data
+        """
+        raise NotImplementedError("Use series._values[loc] instead")
+
+    def set_values(self, values: ArrayLike) -> None:
+        """
+        Set the values of the single block in place.
+
+        Use at your own risk! This does not check if the passed values are
+        valid for the current Block/SingleBlockManager (length, dtype, etc),
+        and this does not properly keep track of references.
+        """
+        # NOTE(CoW) Currently this is only used for FrameColumnApply.series_generator
+        # which handles CoW by setting the refs manually if necessary
+        self.blocks[0].values = values
+        self.blocks[0]._mgr_locs = BlockPlacement(slice(len(values)))
+
+    def _equal_values(self, other: Self) -> bool:
+        """
+        Used in .equals defined in base class. Only check the column values,
+        assuming shape and indexes have already been checked.
+        """
+        # For SingleBlockManager (i.e. Series)
+        if other.ndim != 1:
+            return False
+        left = self.blocks[0].values
+        right = other.blocks[0].values
+        return array_equals(left, right)
+
+
+# --------------------------------------------------------------------
+# Constructor Helpers
+
+
+def create_block_manager_from_blocks(
+    blocks: list[Block],
+    axes: list[Index],
+    consolidate: bool = True,
+    verify_integrity: bool = True,
+) -> BlockManager:
+    # If verify_integrity=False, then caller is responsible for checking
+    # all(x.shape[-1] == len(axes[1]) for x in blocks)
+    # sum(x.shape[0] for x in blocks) == len(axes[0])
+    # set(x for blk in blocks for x in blk.mgr_locs) == set(range(len(axes[0])))
+    # all(blk.ndim == 2 for blk in blocks)
+    # This allows us to safely pass verify_integrity=False
+
+    try:
+        mgr = BlockManager(blocks, axes, verify_integrity=verify_integrity)
+
+    except ValueError as err:
+        arrays = [blk.values for blk in blocks]
+        tot_items = sum(arr.shape[0] for arr in arrays)
+        raise_construction_error(tot_items, arrays[0].shape[1:], axes, err)
+
+    if consolidate:
+        mgr._consolidate_inplace()
+    return mgr
+
+
+def create_block_manager_from_column_arrays(
+    arrays: list[ArrayLike],
+    axes: list[Index],
+    consolidate: bool,
+    refs: list,
+) -> BlockManager:
+    # Assertions disabled for performance (caller is responsible for verifying)
+    # assert isinstance(axes, list)
+    # assert all(isinstance(x, Index) for x in axes)
+    # assert all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays)
+    # assert all(type(x) is not NumpyExtensionArray for x in arrays)
+    # assert all(x.ndim == 1 for x in arrays)
+    # assert all(len(x) == len(axes[1]) for x in arrays)
+    # assert len(arrays) == len(axes[0])
+    # These last three are sufficient to allow us to safely pass
+    # verify_integrity=False below.
+
+    try:
+        blocks = _form_blocks(arrays, consolidate, refs)
+        mgr = BlockManager(blocks, axes, verify_integrity=False)
+    except ValueError as e:
+        raise_construction_error(len(arrays), arrays[0].shape, axes, e)
+    if consolidate:
+        mgr._consolidate_inplace()
+    return mgr
+
+
+def raise_construction_error(
+    tot_items: int,
+    block_shape: Shape,
+    axes: list[Index],
+    e: ValueError | None = None,
+):
+    """raise a helpful message about our construction"""
+    passed = tuple(map(int, [tot_items] + list(block_shape)))
+    # Correcting the user facing error message during dataframe construction
+    if len(passed) <= 2:
+        passed = passed[::-1]
+
+    implied = tuple(len(ax) for ax in axes)
+    # Correcting the user facing error message during dataframe construction
+    if len(implied) <= 2:
+        implied = implied[::-1]
+
+    # If the passed and implied shapes actually agree, the original exception
+    # is the more informative one, so re-raise it as-is.
+    if passed == implied and e is not None:
+        raise e
+    if block_shape[0] == 0:
+        raise ValueError("Empty data passed with indices specified.")
+    raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}")
+
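The passed/implied reversal above makes the message read in (rows, columns) order for the common 2D case. For example:

import numpy as np
import pandas as pd

try:
    pd.DataFrame(np.ones((3, 2)), columns=["a", "b", "c"])
except ValueError as err:
    print(err)   # Shape of passed values is (3, 2), indices imply (3, 3)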
+
+# -----------------------------------------------------------------------
+
+
+def _grouping_func(tup: tuple[int, ArrayLike]) -> tuple[int, DtypeObj]:
+    dtype = tup[1].dtype
+
+    if is_1d_only_ea_dtype(dtype):
+        # We know these won't be consolidated, so don't need to group these.
+        # This avoids expensive comparisons of CategoricalDtype objects
+        sep = id(dtype)
+    else:
+        sep = 0
+
+    return sep, dtype
+
+
+def _form_blocks(arrays: list[ArrayLike], consolidate: bool, refs: list) -> list[Block]:
+    tuples = list(enumerate(arrays))
+
+    if not consolidate:
+        return _tuples_to_blocks_no_consolidate(tuples, refs)
+
+    # when consolidating, we can ignore refs (either stacking always copies,
+    # or the EA is already copied in the calling dict_to_mgr)
+
+    # group by dtype
+    grouper = itertools.groupby(tuples, _grouping_func)
+
+    nbs: list[Block] = []
+    for (_, dtype), tup_block in grouper:
+        block_type = get_block_type(dtype)
+
+        if isinstance(dtype, np.dtype):
+            is_dtlike = dtype.kind in "mM"
+
+            if issubclass(dtype.type, (str, bytes)):
+                dtype = np.dtype(object)
+
+            values, placement = _stack_arrays(list(tup_block), dtype)
+            if is_dtlike:
+                values = ensure_wrapped_if_datetimelike(values)
+            blk = block_type(values, placement=BlockPlacement(placement), ndim=2)
+            nbs.append(blk)
+
+        elif is_1d_only_ea_dtype(dtype):
+            dtype_blocks = [
+                block_type(x[1], placement=BlockPlacement(x[0]), ndim=2)
+                for x in tup_block
+            ]
+            nbs.extend(dtype_blocks)
+
+        else:
+            dtype_blocks = [
+                block_type(
+                    ensure_block_shape(x[1], 2), placement=BlockPlacement(x[0]), ndim=2
+                )
+                for x in tup_block
+            ]
+            nbs.extend(dtype_blocks)
+    return nbs
+
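Net effect of the grouping: consolidatable numpy dtypes are stacked into one 2D block per dtype, while 1D-only extension dtypes stay one block per column. A sketch (block counts are an implementation detail):

import numpy as np
import pandas as pd

df = pd.DataFrame(
    {
        "i": np.array([1, 2], dtype="int64"),
        "f": np.array([1.5, 2.5]),
        "g": np.array([3.5, 4.5]),
        "c": pd.Categorical(["x", "y"]),
    }
)
print(df._mgr.nblocks)   # typically 3: int64, float64 ("f" and "g" stacked), Categorical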
+
+def _tuples_to_blocks_no_consolidate(tuples, refs) -> list[Block]:
+    # tuples produced within _form_blocks are of the form (placement, array)
+    return [
+        new_block_2d(
+            ensure_block_shape(arr, ndim=2), placement=BlockPlacement(i), refs=ref
+        )
+        for ((i, arr), ref) in zip(tuples, refs)
+    ]
+
+
+def _stack_arrays(tuples, dtype: np.dtype):
+    placement, arrays = zip(*tuples)
+
+    first = arrays[0]
+    shape = (len(arrays),) + first.shape
+
+    stacked = np.empty(shape, dtype=dtype)
+    for i, arr in enumerate(arrays):
+        stacked[i] = arr
+
+    return stacked, placement
+
+
+def _consolidate(blocks: tuple[Block, ...]) -> tuple[Block, ...]:
+    """
+    Merge blocks having same dtype, exclude non-consolidating blocks
+    """
+    # sort by _can_consolidate, dtype
+    gkey = lambda x: x._consolidate_key
+    grouper = itertools.groupby(sorted(blocks, key=gkey), gkey)
+
+    new_blocks: list[Block] = []
+    for (_can_consolidate, dtype), group_blocks in grouper:
+        merged_blocks, _ = _merge_blocks(
+            list(group_blocks), dtype=dtype, can_consolidate=_can_consolidate
+        )
+        new_blocks = extend_blocks(merged_blocks, new_blocks)
+    return tuple(new_blocks)
+
+
+def _merge_blocks(
+    blocks: list[Block], dtype: DtypeObj, can_consolidate: bool
+) -> tuple[list[Block], bool]:
+    if len(blocks) == 1:
+        return blocks, False
+
+    if can_consolidate:
+        # TODO: optimization potential in case all mgrs contain slices and
+        # combination of those slices is a slice, too.
+        new_mgr_locs = np.concatenate([b.mgr_locs.as_array for b in blocks])
+
+        new_values: ArrayLike
+
+        if isinstance(blocks[0].dtype, np.dtype):
+            # error: List comprehension has incompatible type List[Union[ndarray,
+            # ExtensionArray]]; expected List[Union[complex, generic,
+            # Sequence[Union[int, float, complex, str, bytes, generic]],
+            # Sequence[Sequence[Any]], SupportsArray]]
+            new_values = np.vstack([b.values for b in blocks])  # type: ignore[misc]
+        else:
+            bvals = [blk.values for blk in blocks]
+            bvals2 = cast(Sequence[NDArrayBackedExtensionArray], bvals)
+            new_values = bvals2[0]._concat_same_type(bvals2, axis=0)
+
+        argsort = np.argsort(new_mgr_locs)
+        new_values = new_values[argsort]
+        new_mgr_locs = new_mgr_locs[argsort]
+
+        bp = BlockPlacement(new_mgr_locs)
+        return [new_block_2d(new_values, placement=bp)], True
+
+    # can't consolidate --> no merge
+    return blocks, False
+
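The argsort step is what keeps column order stable: after stacking, rows of the merged block are reordered so mgr_locs comes out monotone. A toy numpy-only re-enactment of just that step, with hypothetical values:

import numpy as np

values = np.array([[10.0, 11.0], [0.0, 1.0]])   # rows stacked in block order
mgr_locs = np.array([2, 0])                     # original column positions
order = np.argsort(mgr_locs)
print(values[order])    # rows reordered to column order: [[0., 1.], [10., 11.]]
print(mgr_locs[order])  # [0 2]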
+
+def _fast_count_smallints(arr: npt.NDArray[np.intp]):
+    """Count each distinct value in a sequence of small non-negative ints,
+    yielding (value, count) pairs."""
+    counts = np.bincount(arr)
+    nz = counts.nonzero()[0]
+    # Note: list(zip(...)) outperforms list(np.c_[nz, counts[nz]]) here,
+    # in one benchmark by a factor of 11
+    return zip(nz, counts[nz])
+
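A quick sketch of what the helper yields, assuming small non-negative inputs as np.bincount requires:

import numpy as np

arr = np.array([0, 1, 1, 3, 3, 3], dtype=np.intp)
counts = np.bincount(arr)      # array([1, 2, 0, 3])
nz = counts.nonzero()[0]
for value, count in zip(nz, counts[nz]):
    print(value, count)        # (0, 1), (1, 2), (3, 3)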
+
+def _preprocess_slice_or_indexer(
+    slice_or_indexer: slice | np.ndarray, length: int, allow_fill: bool
+):
+    if isinstance(slice_or_indexer, slice):
+        return (
+            "slice",
+            slice_or_indexer,
+            libinternals.slice_len(slice_or_indexer, length),
+        )
+    else:
+        if (
+            not isinstance(slice_or_indexer, np.ndarray)
+            or slice_or_indexer.dtype.kind != "i"
+        ):
+            dtype = getattr(slice_or_indexer, "dtype", None)
+            raise TypeError(type(slice_or_indexer), dtype)
+
+        indexer = ensure_platform_int(slice_or_indexer)
+        if not allow_fill:
+            indexer = maybe_convert_indices(indexer, length)
+        return "fancy", indexer, len(indexer)
+
+
+def make_na_array(dtype: DtypeObj, shape: Shape, fill_value) -> ArrayLike:
+    if isinstance(dtype, DatetimeTZDtype):
+        # NB: exclude e.g. pyarrow[dt64tz] dtypes
+        ts = Timestamp(fill_value).as_unit(dtype.unit)
+        i8values = np.full(shape, ts._value)
+        dt64values = i8values.view(f"M8[{dtype.unit}]")
+        return DatetimeArray._simple_new(dt64values, dtype=dtype)
+
+    elif is_1d_only_ea_dtype(dtype):
+        dtype = cast(ExtensionDtype, dtype)
+        cls = dtype.construct_array_type()
+
+        missing_arr = cls._from_sequence([], dtype=dtype)
+        ncols, nrows = shape
+        assert ncols == 1, ncols
+        empty_arr = -1 * np.ones((nrows,), dtype=np.intp)
+        return missing_arr.take(empty_arr, allow_fill=True, fill_value=fill_value)
+    elif isinstance(dtype, ExtensionDtype):
+        # TODO: no tests get here, a handful would if we disabled
+        # the dt64tz special-case above (which is faster)
+        cls = dtype.construct_array_type()
+        missing_arr = cls._empty(shape=shape, dtype=dtype)
+        missing_arr[:] = fill_value
+        return missing_arr
+    else:
+        # NB: we should never get here with dtype integer or bool;
+        # if we did, the missing_arr.fill would cast to gibberish
+        missing_arr = np.empty(shape, dtype=dtype)
+        missing_arr.fill(fill_value)
+
+        if dtype.kind in "mM":
+            missing_arr = ensure_wrapped_if_datetimelike(missing_arr)
+        return missing_arr
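Reindexing against missing labels is one public path that needs such all-NA columns:

import pandas as pd

df = pd.DataFrame({"a": [1.0, 2.0]})
out = df.reindex(columns=["a", "b"])   # "b" is materialized as an all-NA column
print(out["b"].isna().all())           # True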
llmeval-env/lib/python3.10/site-packages/pandas/core/internals/ops.py ADDED
@@ -0,0 +1,154 @@
+from __future__ import annotations
+
+from typing import (
+    TYPE_CHECKING,
+    NamedTuple,
+)
+
+from pandas.core.dtypes.common import is_1d_only_ea_dtype
+
+if TYPE_CHECKING:
+    from collections.abc import Iterator
+
+    from pandas._libs.internals import BlockPlacement
+    from pandas._typing import ArrayLike
+
+    from pandas.core.internals.blocks import Block
+    from pandas.core.internals.managers import BlockManager
+
+
+class BlockPairInfo(NamedTuple):
+    lvals: ArrayLike
+    rvals: ArrayLike
+    locs: BlockPlacement
+    left_ea: bool
+    right_ea: bool
+    rblk: Block
+
+
+def _iter_block_pairs(
+    left: BlockManager, right: BlockManager
+) -> Iterator[BlockPairInfo]:
+    # At this point we have already checked the parent DataFrames for
+    # assert rframe._indexed_same(lframe)
+
+    for blk in left.blocks:
+        locs = blk.mgr_locs
+        blk_vals = blk.values
+
+        left_ea = blk_vals.ndim == 1
+
+        rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True)
+
+        # Assertions are disabled for performance, but should hold:
+        # if left_ea:
+        #     assert len(locs) == 1, locs
+        #     assert len(rblks) == 1, rblks
+        #     assert rblks[0].shape[0] == 1, rblks[0].shape
+
+        for rblk in rblks:
+            right_ea = rblk.values.ndim == 1
+
+            lvals, rvals = _get_same_shape_values(blk, rblk, left_ea, right_ea)
+            info = BlockPairInfo(lvals, rvals, locs, left_ea, right_ea, rblk)
+            yield info
+
+
+def operate_blockwise(
+    left: BlockManager, right: BlockManager, array_op
+) -> BlockManager:
+    # At this point we have already checked the parent DataFrames for
+    # assert rframe._indexed_same(lframe)
+
+    res_blks: list[Block] = []
+    for lvals, rvals, locs, left_ea, right_ea, rblk in _iter_block_pairs(left, right):
+        res_values = array_op(lvals, rvals)
+        if (
+            left_ea
+            and not right_ea
+            and hasattr(res_values, "reshape")
+            and not is_1d_only_ea_dtype(res_values.dtype)
+        ):
+            res_values = res_values.reshape(1, -1)
+        nbs = rblk._split_op_result(res_values)
+
+        # Assertions are disabled for performance, but should hold:
+        # if right_ea or left_ea:
+        #     assert len(nbs) == 1
+        # else:
+        #     assert res_values.shape == lvals.shape, (res_values.shape, lvals.shape)
+
+        _reset_block_mgr_locs(nbs, locs)
+
+        res_blks.extend(nbs)
+
+    # Assertions are disabled for performance, but should hold:
+    # slocs = {y for nb in res_blks for y in nb.mgr_locs.as_array}
+    # nlocs = sum(len(nb.mgr_locs.as_array) for nb in res_blks)
+    # assert nlocs == len(left.items), (nlocs, len(left.items))
+    # assert len(slocs) == nlocs, (len(slocs), nlocs)
+    # assert slocs == set(range(nlocs)), slocs
+
+    new_mgr = type(right)(tuple(res_blks), axes=right.axes, verify_integrity=False)
+    return new_mgr
+
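Frame-with-frame arithmetic on already-aligned inputs dispatches through this blockwise loop, pairing each left block with the matching right-hand slices. A sketch from the public API:

import pandas as pd

a = pd.DataFrame({"x": [1, 2], "y": [3.0, 4.0]})
b = pd.DataFrame({"x": [10, 20], "y": [0.5, 0.5]})

out = a + b                  # array_op applied block pair by block pair
print(out.dtypes.tolist())   # [dtype('int64'), dtype('float64')]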
+
+def _reset_block_mgr_locs(nbs: list[Block], locs) -> None:
+    """
+    Reset mgr_locs to correspond to our original DataFrame.
+    """
+    for nb in nbs:
+        nblocs = locs[nb.mgr_locs.indexer]
+        nb.mgr_locs = nblocs
+        # Assertions are disabled for performance, but should hold:
+        # assert len(nblocs) == nb.shape[0], (len(nblocs), nb.shape)
+        # assert all(x in locs.as_array for x in nb.mgr_locs.as_array)
+
+
+def _get_same_shape_values(
+    lblk: Block, rblk: Block, left_ea: bool, right_ea: bool
+) -> tuple[ArrayLike, ArrayLike]:
+    """
+    Slice lblk.values to align with rblk.  Squeeze if we have EAs.
+    """
+    lvals = lblk.values
+    rvals = rblk.values
+
+    # Require that the indexing into lvals be slice-like
+    assert rblk.mgr_locs.is_slice_like, rblk.mgr_locs
+
+    # TODO(EA2D): with 2D EAs only this first clause would be needed
+    if not (left_ea or right_ea):
+        # error: No overload variant of "__getitem__" of "ExtensionArray" matches
+        # argument type "Tuple[Union[ndarray, slice], slice]"
+        lvals = lvals[rblk.mgr_locs.indexer, :]  # type: ignore[call-overload]
+        assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)
+    elif left_ea and right_ea:
+        assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape)
+    elif right_ea:
+        # lvals are 2D, rvals are 1D
+
+        # error: No overload variant of "__getitem__" of "ExtensionArray" matches
+        # argument type "Tuple[Union[ndarray, slice], slice]"
+        lvals = lvals[rblk.mgr_locs.indexer, :]  # type: ignore[call-overload]
+        assert lvals.shape[0] == 1, lvals.shape
+        lvals = lvals[0, :]
+    else:
+        # lvals are 1D, rvals are 2D
+        assert rvals.shape[0] == 1, rvals.shape
+        # error: No overload variant of "__getitem__" of "ExtensionArray" matches
+        # argument type "Tuple[int, slice]"
+        rvals = rvals[0, :]  # type: ignore[call-overload]
+
+    return lvals, rvals
+
+
+def blockwise_all(left: BlockManager, right: BlockManager, op) -> bool:
+    """
+    Blockwise `all` reduction.
+    """
+    for info in _iter_block_pairs(left, right):
+        res = op(info.lvals, info.rvals)
+        if not res:
+            return False
+    return True
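DataFrame.equals on identically-labeled frames reduces to this blockwise `all` with an array-equality op, short-circuiting on the first mismatching block pair:

import pandas as pd

a = pd.DataFrame({"x": [1, 2], "y": [3.0, 4.0]})
b = a.copy()
print(a.equals(b))                 # True
print(a.equals(b.assign(y=0.0)))   # False, without comparing further pairs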
llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__init__.py ADDED
@@ -0,0 +1,93 @@
+"""
+Arithmetic operations for PandasObjects
+
+This is not a public API.
+"""
+from __future__ import annotations
+
+from pandas.core.ops.array_ops import (
+    arithmetic_op,
+    comp_method_OBJECT_ARRAY,
+    comparison_op,
+    fill_binop,
+    get_array_op,
+    logical_op,
+    maybe_prepare_scalar_for_op,
+)
+from pandas.core.ops.common import (
+    get_op_result_name,
+    unpack_zerodim_and_defer,
+)
+from pandas.core.ops.docstrings import make_flex_doc
+from pandas.core.ops.invalid import invalid_comparison
+from pandas.core.ops.mask_ops import (
+    kleene_and,
+    kleene_or,
+    kleene_xor,
+)
+from pandas.core.roperator import (
+    radd,
+    rand_,
+    rdiv,
+    rdivmod,
+    rfloordiv,
+    rmod,
+    rmul,
+    ror_,
+    rpow,
+    rsub,
+    rtruediv,
+    rxor,
+)
+
+# -----------------------------------------------------------------------------
+# constants
+ARITHMETIC_BINOPS: set[str] = {
+    "add",
+    "sub",
+    "mul",
+    "pow",
+    "mod",
+    "floordiv",
+    "truediv",
+    "divmod",
+    "radd",
+    "rsub",
+    "rmul",
+    "rpow",
+    "rmod",
+    "rfloordiv",
+    "rtruediv",
+    "rdivmod",
+}
+
+
+__all__ = [
+    "ARITHMETIC_BINOPS",
+    "arithmetic_op",
+    "comparison_op",
+    "comp_method_OBJECT_ARRAY",
+    "invalid_comparison",
+    "fill_binop",
+    "kleene_and",
+    "kleene_or",
+    "kleene_xor",
+    "logical_op",
+    "make_flex_doc",
+    "radd",
+    "rand_",
+    "rdiv",
+    "rdivmod",
+    "rfloordiv",
+    "rmod",
+    "rmul",
+    "ror_",
+    "rpow",
+    "rsub",
+    "rtruediv",
+    "rxor",
+    "unpack_zerodim_and_defer",
+    "get_op_result_name",
+    "maybe_prepare_scalar_for_op",
+    "get_array_op",
+]
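The r-prefixed helpers imported above implement the reversed binary ops by swapping operands; the flex methods on Series/DataFrame are built from them (with docstrings from make_flex_doc). A small sketch:

import pandas as pd
from pandas.core.roperator import radd

ser = pd.Series([1, 2, 3])
print(radd(ser, 10).tolist())   # same as 10 + ser -> [11, 12, 13]
print(ser.radd(10).tolist())    # the public flex method wraps the same operation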
llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.43 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc ADDED
Binary file (11.5 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc ADDED
Binary file (3.41 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/dispatch.cpython-310.pyc ADDED
Binary file (915 Bytes).

llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/docstrings.cpython-310.pyc ADDED
Binary file (15.2 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/invalid.cpython-310.pyc ADDED
Binary file (1.82 kB).

llmeval-env/lib/python3.10/site-packages/pandas/core/ops/__pycache__/mask_ops.cpython-310.pyc ADDED
Binary file (3.87 kB).