applied-ai-018 committed
Commit b295afd · verified · 1 parent: a2f81d3

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
Files changed (50):
  1. ckpts/universal/global_step60/zero/19.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  2. ckpts/universal/global_step60/zero/19.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  3. ckpts/universal/global_step60/zero/29.vocab_parallel_projection.weight/fp32.pt +3 -0
  4. venv/lib/python3.10/site-packages/numpy/lib/arraypad.py +882 -0
  5. venv/lib/python3.10/site-packages/numpy/lib/arraysetops.py +981 -0
  6. venv/lib/python3.10/site-packages/numpy/lib/arrayterator.pyi +49 -0
  7. venv/lib/python3.10/site-packages/numpy/lib/format.pyi +22 -0
  8. venv/lib/python3.10/site-packages/numpy/lib/function_base.py +0 -0
  9. venv/lib/python3.10/site-packages/numpy/lib/histograms.py +1072 -0
  10. venv/lib/python3.10/site-packages/numpy/lib/index_tricks.pyi +162 -0
  11. venv/lib/python3.10/site-packages/numpy/lib/mixins.py +177 -0
  12. venv/lib/python3.10/site-packages/numpy/lib/nanfunctions.pyi +38 -0
  13. venv/lib/python3.10/site-packages/numpy/lib/recfunctions.py +1673 -0
  14. venv/lib/python3.10/site-packages/numpy/lib/shape_base.py +1274 -0
  15. venv/lib/python3.10/site-packages/numpy/lib/utils.py +1211 -0
  16. venv/lib/python3.10/site-packages/numpy/testing/__init__.py +22 -0
  17. venv/lib/python3.10/site-packages/numpy/testing/__init__.pyi +50 -0
  18. venv/lib/python3.10/site-packages/numpy/testing/__pycache__/__init__.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/numpy/testing/__pycache__/overrides.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/numpy/testing/__pycache__/print_coercion_tables.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/numpy/testing/__pycache__/setup.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/numpy/testing/_private/__init__.py +0 -0
  23. venv/lib/python3.10/site-packages/numpy/testing/_private/__pycache__/__init__.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/numpy/testing/_private/__pycache__/extbuild.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/numpy/testing/_private/__pycache__/utils.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/numpy/testing/_private/extbuild.py +248 -0
  27. venv/lib/python3.10/site-packages/numpy/testing/_private/utils.py +2509 -0
  28. venv/lib/python3.10/site-packages/numpy/testing/_private/utils.pyi +402 -0
  29. venv/lib/python3.10/site-packages/numpy/testing/overrides.py +83 -0
  30. venv/lib/python3.10/site-packages/numpy/testing/print_coercion_tables.py +200 -0
  31. venv/lib/python3.10/site-packages/numpy/testing/setup.py +21 -0
  32. venv/lib/python3.10/site-packages/numpy/testing/tests/__init__.py +0 -0
  33. venv/lib/python3.10/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/numpy/testing/tests/__pycache__/test_utils.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/numpy/testing/tests/test_utils.py +1626 -0
  36. venv/lib/python3.10/site-packages/numpy/typing/__pycache__/__init__.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/numpy/typing/__pycache__/mypy_plugin.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/numpy/typing/__pycache__/setup.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/numpy/typing/tests/data/misc/extended_precision.pyi +25 -0
  40. venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/arithmetic.pyi +516 -0
  41. venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/array_constructors.pyi +221 -0
  42. venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/arraypad.pyi +28 -0
  43. venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/arraysetops.pyi +68 -0
  44. venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/arrayterator.pyi +33 -0
  45. venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/bitwise_ops.pyi +135 -0
  46. venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/chararray.pyi +140 -0
  47. venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/comparisons.pyi +270 -0
  48. venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/dtype.pyi +85 -0
  49. venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/einsumfunc.pyi +45 -0
  50. venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/flatiter.pyi +31 -0
ckpts/universal/global_step60/zero/19.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cb77f9077a8a5d2a3838d546b6a42040f062b8b4f864cc3c6e0959d88fa22e34
size 33555612
ckpts/universal/global_step60/zero/19.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0743e9c2dd1684f844eb6ebaf24b788422d644099a0db3dd2c88088697165915
size 33555627
ckpts/universal/global_step60/zero/29.vocab_parallel_projection.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c4331b38c8bd16658359526c6911d055c81827507aef930f4aeab92f2b5fee9d
size 415237197
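The three checkpoint entries above are Git LFS pointer files rather than the tensors themselves: each records the pointer-spec version, the SHA-256 of the real blob, and its size in bytes. As a minimal sketch of how such a pointer can be read (the helper name is illustrative, not part of this commit; the path is one of the files above):

    # Parse a Git LFS pointer file into its key/value fields.
    def parse_lfs_pointer(path):
        fields = {}
        with open(path, "r", encoding="utf-8") as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                if key:
                    fields[key] = value
        return fields

    ptr = parse_lfs_pointer(
        "ckpts/universal/global_step60/zero/"
        "19.mlp.dense_h_to_4h.weight/exp_avg.pt")
    print(ptr["oid"])   # e.g. sha256:cb77f907...
    print(ptr["size"])  # e.g. 33555612 -- size of the actual tensor blob

Loading such a file with torch.load would fail until the LFS blobs are fetched (e.g. via `git lfs pull`), since only the pointer text is stored in the commit.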
venv/lib/python3.10/site-packages/numpy/lib/arraypad.py ADDED
@@ -0,0 +1,882 @@
"""
The arraypad module contains a group of functions to pad values onto the edges
of an n-dimensional array.

"""
import numpy as np
from numpy.core.overrides import array_function_dispatch
from numpy.lib.index_tricks import ndindex


__all__ = ['pad']


###############################################################################
# Private utility functions.


def _round_if_needed(arr, dtype):
    """
    Rounds arr inplace if destination dtype is integer.

    Parameters
    ----------
    arr : ndarray
        Input array.
    dtype : dtype
        The dtype of the destination array.
    """
    if np.issubdtype(dtype, np.integer):
        arr.round(out=arr)


def _slice_at_axis(sl, axis):
    """
    Construct tuple of slices to slice an array in the given dimension.

    Parameters
    ----------
    sl : slice
        The slice for the given dimension.
    axis : int
        The axis to which `sl` is applied. All other dimensions are left
        "unsliced".

    Returns
    -------
    sl : tuple of slices
        A tuple with slices matching `shape` in length.

    Examples
    --------
    >>> _slice_at_axis(slice(None, 3, -1), 1)
    (slice(None, None, None), slice(None, 3, -1), (...,))
    """
    return (slice(None),) * axis + (sl,) + (...,)


def _view_roi(array, original_area_slice, axis):
    """
    Get a view of the current region of interest during iterative padding.

    When padding multiple dimensions iteratively corner values are
    unnecessarily overwritten multiple times. This function reduces the
    working area for the first dimensions so that corners are excluded.

    Parameters
    ----------
    array : ndarray
        The array with the region of interest.
    original_area_slice : tuple of slices
        Denotes the area with original values of the unpadded array.
    axis : int
        The currently padded dimension assuming that `axis` is padded before
        `axis` + 1.

    Returns
    -------
    roi : ndarray
        The region of interest of the original `array`.
    """
    axis += 1
    sl = (slice(None),) * axis + original_area_slice[axis:]
    return array[sl]


def _pad_simple(array, pad_width, fill_value=None):
    """
    Pad array on all sides with either a single value or undefined values.

    Parameters
    ----------
    array : ndarray
        Array to grow.
    pad_width : sequence of tuple[int, int]
        Pad width on both sides for each dimension in `arr`.
    fill_value : scalar, optional
        If provided the padded area is filled with this value, otherwise
        the pad area is left undefined.

    Returns
    -------
    padded : ndarray
        The padded array with the same dtype as `array`. Its order will
        default to C-style if `array` is not F-contiguous.
    original_area_slice : tuple
        A tuple of slices pointing to the area of the original array.
    """
    # Allocate grown array
    new_shape = tuple(
        left + size + right
        for size, (left, right) in zip(array.shape, pad_width)
    )
    order = 'F' if array.flags.fnc else 'C'  # Fortran and not also C-order
    padded = np.empty(new_shape, dtype=array.dtype, order=order)

    if fill_value is not None:
        padded.fill(fill_value)

    # Copy old array into correct space
    original_area_slice = tuple(
        slice(left, left + size)
        for size, (left, right) in zip(array.shape, pad_width)
    )
    padded[original_area_slice] = array

    return padded, original_area_slice


def _set_pad_area(padded, axis, width_pair, value_pair):
    """
    Set empty-padded area in given dimension.

    Parameters
    ----------
    padded : ndarray
        Array with the pad area which is modified inplace.
    axis : int
        Dimension with the pad area to set.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    value_pair : tuple of scalars or ndarrays
        Values inserted into the pad area on each side. It must match or be
        broadcastable to the shape of `arr`.
    """
    left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
    padded[left_slice] = value_pair[0]

    right_slice = _slice_at_axis(
        slice(padded.shape[axis] - width_pair[1], None), axis)
    padded[right_slice] = value_pair[1]


def _get_edges(padded, axis, width_pair):
    """
    Retrieve edge values from empty-padded array in given dimension.

    Parameters
    ----------
    padded : ndarray
        Empty-padded array.
    axis : int
        Dimension in which the edges are considered.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.

    Returns
    -------
    left_edge, right_edge : ndarray
        Edge values of the valid area in `padded` in the given dimension. Its
        shape will always match `padded` except for the dimension given by
        `axis` which will have a length of 1.
    """
    left_index = width_pair[0]
    left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)
    left_edge = padded[left_slice]

    right_index = padded.shape[axis] - width_pair[1]
    right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)
    right_edge = padded[right_slice]

    return left_edge, right_edge


def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
    """
    Construct linear ramps for empty-padded array in given dimension.

    Parameters
    ----------
    padded : ndarray
        Empty-padded array.
    axis : int
        Dimension in which the ramps are constructed.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    end_value_pair : (scalar, scalar)
        End values for the linear ramps which form the edge of the fully padded
        array. These values are included in the linear ramps.

    Returns
    -------
    left_ramp, right_ramp : ndarray
        Linear ramps to set on both sides of `padded`.
    """
    edge_pair = _get_edges(padded, axis, width_pair)

    left_ramp, right_ramp = (
        np.linspace(
            start=end_value,
            stop=edge.squeeze(axis),  # Dimension is replaced by linspace
            num=width,
            endpoint=False,
            dtype=padded.dtype,
            axis=axis
        )
        for end_value, edge, width in zip(
            end_value_pair, edge_pair, width_pair
        )
    )

    # Reverse linear space in appropriate dimension
    right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]

    return left_ramp, right_ramp


def _get_stats(padded, axis, width_pair, length_pair, stat_func):
    """
    Calculate statistic for the empty-padded array in given dimension.

    Parameters
    ----------
    padded : ndarray
        Empty-padded array.
    axis : int
        Dimension in which the statistic is calculated.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    length_pair : 2-element sequence of None or int
        Gives the number of values in valid area from each side that is
        taken into account when calculating the statistic. If None the entire
        valid area in `padded` is considered.
    stat_func : function
        Function to compute statistic. The expected signature is
        ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.

    Returns
    -------
    left_stat, right_stat : ndarray
        Calculated statistic for both sides of `padded`.
    """
    # Calculate indices of the edges of the area with original values
    left_index = width_pair[0]
    right_index = padded.shape[axis] - width_pair[1]
    # as well as its length
    max_length = right_index - left_index

    # Limit stat_lengths to max_length
    left_length, right_length = length_pair
    if left_length is None or max_length < left_length:
        left_length = max_length
    if right_length is None or max_length < right_length:
        right_length = max_length

    if (left_length == 0 or right_length == 0) \
            and stat_func in {np.amax, np.amin}:
        # amax and amin can't operate on an empty array,
        # raise a more descriptive warning here instead of the default one
        raise ValueError("stat_length of 0 yields no value for padding")

    # Calculate statistic for the left side
    left_slice = _slice_at_axis(
        slice(left_index, left_index + left_length), axis)
    left_chunk = padded[left_slice]
    left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
    _round_if_needed(left_stat, padded.dtype)

    if left_length == right_length == max_length:
        # return early as right_stat must be identical to left_stat
        return left_stat, left_stat

    # Calculate statistic for the right side
    right_slice = _slice_at_axis(
        slice(right_index - right_length, right_index), axis)
    right_chunk = padded[right_slice]
    right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
    _round_if_needed(right_stat, padded.dtype)

    return left_stat, right_stat


def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):
    """
    Pad `axis` of `arr` with reflection.

    Parameters
    ----------
    padded : ndarray
        Input array of arbitrary shape.
    axis : int
        Axis along which to pad `arr`.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    method : str
        Controls method of reflection; options are 'even' or 'odd'.
    include_edge : bool
        If true, edge value is included in reflection, otherwise the edge
        value forms the symmetric axis to the reflection.

    Returns
    -------
    pad_amt : tuple of ints, length 2
        New index positions of padding to do along the `axis`. If these are
        both 0, padding is done in this dimension.
    """
    left_pad, right_pad = width_pair
    old_length = padded.shape[axis] - right_pad - left_pad

    if include_edge:
        # Edge is included, we need to offset the pad amount by 1
        edge_offset = 1
    else:
        edge_offset = 0  # Edge is not included, no need to offset pad amount
        old_length -= 1  # but must be omitted from the chunk

    if left_pad > 0:
        # Pad with reflected values on left side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, left_pad)
        # Slice right to left, stop on or next to edge, start relative to stop
        stop = left_pad - edge_offset
        start = stop + chunk_length
        left_slice = _slice_at_axis(slice(start, stop, -1), axis)
        left_chunk = padded[left_slice]

        if method == "odd":
            # Negate chunk and align with edge
            edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
            left_chunk = 2 * padded[edge_slice] - left_chunk

        # Insert chunk into padded area
        start = left_pad - chunk_length
        stop = left_pad
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = left_chunk
        # Adjust pointer to left edge for next iteration
        left_pad -= chunk_length

    if right_pad > 0:
        # Pad with reflected values on right side:
        # First limit chunk size which can't be larger than pad area
        chunk_length = min(old_length, right_pad)
        # Slice right to left, start on or next to edge, stop relative to start
        start = -right_pad + edge_offset - 2
        stop = start - chunk_length
        right_slice = _slice_at_axis(slice(start, stop, -1), axis)
        right_chunk = padded[right_slice]

        if method == "odd":
            # Negate chunk and align with edge
            edge_slice = _slice_at_axis(
                slice(-right_pad - 1, -right_pad), axis)
            right_chunk = 2 * padded[edge_slice] - right_chunk

        # Insert chunk into padded area
        start = padded.shape[axis] - right_pad
        stop = start + chunk_length
        pad_area = _slice_at_axis(slice(start, stop), axis)
        padded[pad_area] = right_chunk
        # Adjust pointer to right edge for next iteration
        right_pad -= chunk_length

    return left_pad, right_pad


def _set_wrap_both(padded, axis, width_pair, original_period):
    """
    Pad `axis` of `arr` with wrapped values.

    Parameters
    ----------
    padded : ndarray
        Input array of arbitrary shape.
    axis : int
        Axis along which to pad `arr`.
    width_pair : (int, int)
        Pair of widths that mark the pad area on both sides in the given
        dimension.
    original_period : int
        Original length of data on `axis` of `arr`.

    Returns
    -------
    pad_amt : tuple of ints, length 2
        New index positions of padding to do along the `axis`. If these are
        both 0, padding is done in this dimension.
    """
    left_pad, right_pad = width_pair
    period = padded.shape[axis] - right_pad - left_pad
    # Avoid wrapping with only a subset of the original area by ensuring period
    # can only be a multiple of the original area's length.
    period = period // original_period * original_period

    # If the current dimension of `arr` doesn't contain enough valid values
    # (not part of the undefined pad area) we need to pad multiple times.
    # Each time the pad area shrinks on both sides which is communicated with
    # these variables.
    new_left_pad = 0
    new_right_pad = 0

    if left_pad > 0:
        # Pad with wrapped values on left side
        # First slice chunk from left side of the non-pad area.
        # Use min(period, left_pad) to ensure that chunk is not larger than
        # pad area.
        slice_end = left_pad + period
        slice_start = slice_end - min(period, left_pad)
        right_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
        right_chunk = padded[right_slice]

        if left_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
            new_left_pad = left_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(None, left_pad), axis)
        padded[pad_area] = right_chunk

    if right_pad > 0:
        # Pad with wrapped values on right side
        # First slice chunk from right side of the non-pad area.
        # Use min(period, right_pad) to ensure that chunk is not larger than
        # pad area.
        slice_start = -right_pad - period
        slice_end = slice_start + min(period, right_pad)
        left_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
        left_chunk = padded[left_slice]

        if right_pad > period:
            # Chunk is smaller than pad area
            pad_area = _slice_at_axis(
                slice(-right_pad, -right_pad + period), axis)
            new_right_pad = right_pad - period
        else:
            # Chunk matches pad area
            pad_area = _slice_at_axis(slice(-right_pad, None), axis)
        padded[pad_area] = left_chunk

    return new_left_pad, new_right_pad


def _as_pairs(x, ndim, as_index=False):
    """
    Broadcast `x` to an array with the shape (`ndim`, 2).

    A helper function for `pad` that prepares and validates arguments like
    `pad_width` for iteration in pairs.

    Parameters
    ----------
    x : {None, scalar, array-like}
        The object to broadcast to the shape (`ndim`, 2).
    ndim : int
        Number of pairs the broadcasted `x` will have.
    as_index : bool, optional
        If `x` is not None, try to round each element of `x` to an integer
        (dtype `np.intp`) and ensure every element is positive.

    Returns
    -------
    pairs : nested iterables, shape (`ndim`, 2)
        The broadcasted version of `x`.

    Raises
    ------
    ValueError
        If `as_index` is True and `x` contains negative elements.
        Or if `x` is not broadcastable to the shape (`ndim`, 2).
    """
    if x is None:
        # Pass through None as a special case, otherwise np.round(x) fails
        # with an AttributeError
        return ((None, None),) * ndim

    x = np.array(x)
    if as_index:
        x = np.round(x).astype(np.intp, copy=False)

    if x.ndim < 3:
        # Optimization: Possibly use faster paths for cases where `x` has
        # only 1 or 2 elements. `np.broadcast_to` could handle these as well
        # but is currently slower

        if x.size == 1:
            # x was supplied as a single value
            x = x.ravel()  # Ensure x[0] works for x.ndim == 0, 1, 2
            if as_index and x < 0:
                raise ValueError("index can't contain negative values")
            return ((x[0], x[0]),) * ndim

        if x.size == 2 and x.shape != (2, 1):
            # x was supplied with a single value for each side
            # but except case when each dimension has a single value
            # which should be broadcasted to a pair,
            # e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
            x = x.ravel()  # Ensure x[0], x[1] works
            if as_index and (x[0] < 0 or x[1] < 0):
                raise ValueError("index can't contain negative values")
            return ((x[0], x[1]),) * ndim

    if as_index and x.min() < 0:
        raise ValueError("index can't contain negative values")

    # Converting the array with `tolist` seems to improve performance
    # when iterating and indexing the result (see usage in `pad`)
    return np.broadcast_to(x, (ndim, 2)).tolist()


def _pad_dispatcher(array, pad_width, mode=None, **kwargs):
    return (array,)


###############################################################################
# Public functions


@array_function_dispatch(_pad_dispatcher, module='numpy')
def pad(array, pad_width, mode='constant', **kwargs):
    """
    Pad an array.

    Parameters
    ----------
    array : array_like of rank N
        The array to pad.
    pad_width : {sequence, array_like, int}
        Number of values padded to the edges of each axis.
        ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths
        for each axis.
        ``(before, after)`` or ``((before, after),)`` yields same before
        and after pad for each axis.
        ``(pad,)`` or ``int`` is a shortcut for before = after = pad width
        for all axes.
    mode : str or function, optional
        One of the following string values or a user supplied function.

        'constant' (default)
            Pads with a constant value.
        'edge'
            Pads with the edge values of array.
        'linear_ramp'
            Pads with the linear ramp between end_value and the
            array edge value.
        'maximum'
            Pads with the maximum value of all or part of the
            vector along each axis.
        'mean'
            Pads with the mean value of all or part of the
            vector along each axis.
        'median'
            Pads with the median value of all or part of the
            vector along each axis.
        'minimum'
            Pads with the minimum value of all or part of the
            vector along each axis.
        'reflect'
            Pads with the reflection of the vector mirrored on
            the first and last values of the vector along each
            axis.
        'symmetric'
            Pads with the reflection of the vector mirrored
            along the edge of the array.
        'wrap'
            Pads with the wrap of the vector along the axis.
            The first values are used to pad the end and the
            end values are used to pad the beginning.
        'empty'
            Pads with undefined values.

            .. versionadded:: 1.17

        <function>
            Padding function, see Notes.
    stat_length : sequence or int, optional
        Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
        values at edge of each axis used to calculate the statistic value.

        ``((before_1, after_1), ... (before_N, after_N))`` unique statistic
        lengths for each axis.

        ``(before, after)`` or ``((before, after),)`` yields same before
        and after statistic lengths for each axis.

        ``(stat_length,)`` or ``int`` is a shortcut for
        ``before = after = statistic`` length for all axes.

        Default is ``None``, to use the entire axis.
    constant_values : sequence or scalar, optional
        Used in 'constant'. The values to set the padded values for each
        axis.

        ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants
        for each axis.

        ``(before, after)`` or ``((before, after),)`` yields same before
        and after constants for each axis.

        ``(constant,)`` or ``constant`` is a shortcut for
        ``before = after = constant`` for all axes.

        Default is 0.
    end_values : sequence or scalar, optional
        Used in 'linear_ramp'. The values used for the ending value of the
        linear_ramp and that will form the edge of the padded array.

        ``((before_1, after_1), ... (before_N, after_N))`` unique end values
        for each axis.

        ``(before, after)`` or ``((before, after),)`` yields same before
        and after end values for each axis.

        ``(constant,)`` or ``constant`` is a shortcut for
        ``before = after = constant`` for all axes.

        Default is 0.
    reflect_type : {'even', 'odd'}, optional
        Used in 'reflect', and 'symmetric'. The 'even' style is the
        default with an unaltered reflection around the edge value. For
        the 'odd' style, the extended part of the array is created by
        subtracting the reflected values from two times the edge value.

    Returns
    -------
    pad : ndarray
        Padded array of rank equal to `array` with shape increased
        according to `pad_width`.

    Notes
    -----
    .. versionadded:: 1.7.0

    For an array with rank greater than 1, some of the padding of later
    axes is calculated from padding of previous axes. This is easiest to
    think about with a rank 2 array where the corners of the padded array
    are calculated by using padded values from the first axis.

    The padding function, if used, should modify a rank 1 array in-place. It
    has the following signature::

        padding_func(vector, iaxis_pad_width, iaxis, kwargs)

    where

    vector : ndarray
        A rank 1 array already padded with zeros. Padded values are
        vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:].
    iaxis_pad_width : tuple
        A 2-tuple of ints, iaxis_pad_width[0] represents the number of
        values padded at the beginning of vector where
        iaxis_pad_width[1] represents the number of values padded at
        the end of vector.
    iaxis : int
        The axis currently being calculated.
    kwargs : dict
        Any keyword arguments the function requires.

    Examples
    --------
    >>> a = [1, 2, 3, 4, 5]
    >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6))
    array([4, 4, 1, ..., 6, 6, 6])

    >>> np.pad(a, (2, 3), 'edge')
    array([1, 1, 1, ..., 5, 5, 5])

    >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
    array([ 5,  3,  1,  2,  3,  4,  5,  2, -1, -4])

    >>> np.pad(a, (2,), 'maximum')
    array([5, 5, 1, 2, 3, 4, 5, 5, 5])

    >>> np.pad(a, (2,), 'mean')
    array([3, 3, 1, 2, 3, 4, 5, 3, 3])

    >>> np.pad(a, (2,), 'median')
    array([3, 3, 1, 2, 3, 4, 5, 3, 3])

    >>> a = [[1, 2], [3, 4]]
    >>> np.pad(a, ((3, 2), (2, 3)), 'minimum')
    array([[1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1],
           [3, 3, 3, 4, 3, 3, 3],
           [1, 1, 1, 2, 1, 1, 1],
           [1, 1, 1, 2, 1, 1, 1]])

    >>> a = [1, 2, 3, 4, 5]
    >>> np.pad(a, (2, 3), 'reflect')
    array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])

    >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd')
    array([-1,  0,  1,  2,  3,  4,  5,  6,  7,  8])

    >>> np.pad(a, (2, 3), 'symmetric')
    array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])

    >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd')
    array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])

    >>> np.pad(a, (2, 3), 'wrap')
    array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])

    >>> def pad_with(vector, pad_width, iaxis, kwargs):
    ...     pad_value = kwargs.get('padder', 10)
    ...     vector[:pad_width[0]] = pad_value
    ...     vector[-pad_width[1]:] = pad_value
    >>> a = np.arange(6)
    >>> a = a.reshape((2, 3))
    >>> np.pad(a, 2, pad_with)
    array([[10, 10, 10, 10, 10, 10, 10],
           [10, 10, 10, 10, 10, 10, 10],
           [10, 10,  0,  1,  2, 10, 10],
           [10, 10,  3,  4,  5, 10, 10],
           [10, 10, 10, 10, 10, 10, 10],
           [10, 10, 10, 10, 10, 10, 10]])
    >>> np.pad(a, 2, pad_with, padder=100)
    array([[100, 100, 100, 100, 100, 100, 100],
           [100, 100, 100, 100, 100, 100, 100],
           [100, 100,   0,   1,   2, 100, 100],
           [100, 100,   3,   4,   5, 100, 100],
           [100, 100, 100, 100, 100, 100, 100],
           [100, 100, 100, 100, 100, 100, 100]])
    """
    array = np.asarray(array)
    pad_width = np.asarray(pad_width)

    if not pad_width.dtype.kind == 'i':
        raise TypeError('`pad_width` must be of integral type.')

    # Broadcast to shape (array.ndim, 2)
    pad_width = _as_pairs(pad_width, array.ndim, as_index=True)

    if callable(mode):
        # Old behavior: Use user-supplied function with np.apply_along_axis
        function = mode
        # Create a new zero padded array
        padded, _ = _pad_simple(array, pad_width, fill_value=0)
        # And apply along each axis

        for axis in range(padded.ndim):
            # Iterate using ndindex as in apply_along_axis, but assuming that
            # function operates inplace on the padded array.

            # view with the iteration axis at the end
            view = np.moveaxis(padded, axis, -1)

            # compute indices for the iteration axes, and append a trailing
            # ellipsis to prevent 0d arrays decaying to scalars (gh-8642)
            inds = ndindex(view.shape[:-1])
            inds = (ind + (Ellipsis,) for ind in inds)
            for ind in inds:
                function(view[ind], pad_width[axis], axis, kwargs)

        return padded

    # Make sure that no unsupported keywords were passed for the current mode
    allowed_kwargs = {
        'empty': [], 'edge': [], 'wrap': [],
        'constant': ['constant_values'],
        'linear_ramp': ['end_values'],
        'maximum': ['stat_length'],
        'mean': ['stat_length'],
        'median': ['stat_length'],
        'minimum': ['stat_length'],
        'reflect': ['reflect_type'],
        'symmetric': ['reflect_type'],
    }
    try:
        unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])
    except KeyError:
        raise ValueError("mode '{}' is not supported".format(mode)) from None
    if unsupported_kwargs:
        raise ValueError("unsupported keyword arguments for mode '{}': {}"
                         .format(mode, unsupported_kwargs))

    stat_functions = {"maximum": np.amax, "minimum": np.amin,
                      "mean": np.mean, "median": np.median}

    # Create array with final shape and original values
    # (padded area is undefined)
    padded, original_area_slice = _pad_simple(array, pad_width)
    # And prepare iteration over all dimensions
    # (zipping may be more readable than using enumerate)
    axes = range(padded.ndim)

    if mode == "constant":
        values = kwargs.get("constant_values", 0)
        values = _as_pairs(values, padded.ndim)
        for axis, width_pair, value_pair in zip(axes, pad_width, values):
            roi = _view_roi(padded, original_area_slice, axis)
            _set_pad_area(roi, axis, width_pair, value_pair)

    elif mode == "empty":
        pass  # Do nothing as _pad_simple already returned the correct result

    elif array.size == 0:
        # Only modes "constant" and "empty" can extend empty axes, all other
        # modes depend on `array` not being empty
        # -> ensure every empty axis is only "padded with 0"
        for axis, width_pair in zip(axes, pad_width):
            if array.shape[axis] == 0 and any(width_pair):
                raise ValueError(
                    "can't extend empty axis {} using modes other than "
                    "'constant' or 'empty'".format(axis)
                )
        # passed, don't need to do anything more as _pad_simple already
        # returned the correct result

    elif mode == "edge":
        for axis, width_pair in zip(axes, pad_width):
            roi = _view_roi(padded, original_area_slice, axis)
            edge_pair = _get_edges(roi, axis, width_pair)
            _set_pad_area(roi, axis, width_pair, edge_pair)

    elif mode == "linear_ramp":
        end_values = kwargs.get("end_values", 0)
        end_values = _as_pairs(end_values, padded.ndim)
        for axis, width_pair, value_pair in zip(axes, pad_width, end_values):
            roi = _view_roi(padded, original_area_slice, axis)
            ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair)
            _set_pad_area(roi, axis, width_pair, ramp_pair)

    elif mode in stat_functions:
        func = stat_functions[mode]
        length = kwargs.get("stat_length", None)
        length = _as_pairs(length, padded.ndim, as_index=True)
        for axis, width_pair, length_pair in zip(axes, pad_width, length):
            roi = _view_roi(padded, original_area_slice, axis)
            stat_pair = _get_stats(roi, axis, width_pair, length_pair, func)
            _set_pad_area(roi, axis, width_pair, stat_pair)

    elif mode in {"reflect", "symmetric"}:
        method = kwargs.get("reflect_type", "even")
        include_edge = True if mode == "symmetric" else False
        for axis, (left_index, right_index) in zip(axes, pad_width):
            if array.shape[axis] == 1 and (left_index > 0 or right_index > 0):
                # Extending singleton dimension for 'reflect' is legacy
                # behavior; it really should raise an error.
                edge_pair = _get_edges(padded, axis, (left_index, right_index))
                _set_pad_area(
                    padded, axis, (left_index, right_index), edge_pair)
                continue

            roi = _view_roi(padded, original_area_slice, axis)
            while left_index > 0 or right_index > 0:
                # Iteratively pad until dimension is filled with reflected
                # values. This is necessary if the pad area is larger than
                # the length of the original values in the current dimension.
                left_index, right_index = _set_reflect_both(
                    roi, axis, (left_index, right_index),
                    method, include_edge
                )

    elif mode == "wrap":
        for axis, (left_index, right_index) in zip(axes, pad_width):
            roi = _view_roi(padded, original_area_slice, axis)
            original_period = padded.shape[axis] - right_index - left_index
            while left_index > 0 or right_index > 0:
                # Iteratively pad until dimension is filled with wrapped
                # values. This is necessary if the pad area is larger than
                # the length of the original values in the current dimension.
                left_index, right_index = _set_wrap_both(
                    roi, axis, (left_index, right_index), original_period)

    return padded
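For orientation, a minimal sketch exercising a few of the modes implemented above; the expected outputs are taken directly from the docstring examples in this file:

    import numpy as np

    a = np.array([1, 2, 3, 4, 5])

    # 'constant' fills the pad area via _set_pad_area with fixed values.
    print(np.pad(a, (2, 3), 'constant', constant_values=(4, 6)))
    # [4 4 1 2 3 4 5 6 6 6]

    # 'reflect' iterates _set_reflect_both until the pad area is filled.
    print(np.pad(a, (2, 3), 'reflect'))
    # [3 2 1 2 3 4 5 4 3 2]

    # 'wrap' tiles the original period via _set_wrap_both.
    print(np.pad(a, (2, 3), 'wrap'))
    # [4 5 1 2 3 4 5 1 2 3]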
venv/lib/python3.10/site-packages/numpy/lib/arraysetops.py ADDED
@@ -0,0 +1,981 @@
1
+ """
2
+ Set operations for arrays based on sorting.
3
+
4
+ Notes
5
+ -----
6
+
7
+ For floating point arrays, inaccurate results may appear due to usual round-off
8
+ and floating point comparison issues.
9
+
10
+ Speed could be gained in some operations by an implementation of
11
+ `numpy.sort`, that can provide directly the permutation vectors, thus avoiding
12
+ calls to `numpy.argsort`.
13
+
14
+ Original author: Robert Cimrman
15
+
16
+ """
17
+ import functools
18
+
19
+ import numpy as np
20
+ from numpy.core import overrides
21
+
22
+
23
+ array_function_dispatch = functools.partial(
24
+ overrides.array_function_dispatch, module='numpy')
25
+
26
+
27
+ __all__ = [
28
+ 'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
29
+ 'in1d', 'isin'
30
+ ]
31
+
32
+
33
+ def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
34
+ return (ary, to_end, to_begin)
35
+
36
+
37
+ @array_function_dispatch(_ediff1d_dispatcher)
38
+ def ediff1d(ary, to_end=None, to_begin=None):
39
+ """
40
+ The differences between consecutive elements of an array.
41
+
42
+ Parameters
43
+ ----------
44
+ ary : array_like
45
+ If necessary, will be flattened before the differences are taken.
46
+ to_end : array_like, optional
47
+ Number(s) to append at the end of the returned differences.
48
+ to_begin : array_like, optional
49
+ Number(s) to prepend at the beginning of the returned differences.
50
+
51
+ Returns
52
+ -------
53
+ ediff1d : ndarray
54
+ The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
55
+
56
+ See Also
57
+ --------
58
+ diff, gradient
59
+
60
+ Notes
61
+ -----
62
+ When applied to masked arrays, this function drops the mask information
63
+ if the `to_begin` and/or `to_end` parameters are used.
64
+
65
+ Examples
66
+ --------
67
+ >>> x = np.array([1, 2, 4, 7, 0])
68
+ >>> np.ediff1d(x)
69
+ array([ 1, 2, 3, -7])
70
+
71
+ >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
72
+ array([-99, 1, 2, ..., -7, 88, 99])
73
+
74
+ The returned array is always 1D.
75
+
76
+ >>> y = [[1, 2, 4], [1, 6, 24]]
77
+ >>> np.ediff1d(y)
78
+ array([ 1, 2, -3, 5, 18])
79
+
80
+ """
81
+ # force a 1d array
82
+ ary = np.asanyarray(ary).ravel()
83
+
84
+ # enforce that the dtype of `ary` is used for the output
85
+ dtype_req = ary.dtype
86
+
87
+ # fast track default case
88
+ if to_begin is None and to_end is None:
89
+ return ary[1:] - ary[:-1]
90
+
91
+ if to_begin is None:
92
+ l_begin = 0
93
+ else:
94
+ to_begin = np.asanyarray(to_begin)
95
+ if not np.can_cast(to_begin, dtype_req, casting="same_kind"):
96
+ raise TypeError("dtype of `to_begin` must be compatible "
97
+ "with input `ary` under the `same_kind` rule.")
98
+
99
+ to_begin = to_begin.ravel()
100
+ l_begin = len(to_begin)
101
+
102
+ if to_end is None:
103
+ l_end = 0
104
+ else:
105
+ to_end = np.asanyarray(to_end)
106
+ if not np.can_cast(to_end, dtype_req, casting="same_kind"):
107
+ raise TypeError("dtype of `to_end` must be compatible "
108
+ "with input `ary` under the `same_kind` rule.")
109
+
110
+ to_end = to_end.ravel()
111
+ l_end = len(to_end)
112
+
113
+ # do the calculation in place and copy to_begin and to_end
114
+ l_diff = max(len(ary) - 1, 0)
115
+ result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype)
116
+ result = ary.__array_wrap__(result)
117
+ if l_begin > 0:
118
+ result[:l_begin] = to_begin
119
+ if l_end > 0:
120
+ result[l_begin + l_diff:] = to_end
121
+ np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff])
122
+ return result
123
+
124
+
125
+ def _unpack_tuple(x):
126
+ """ Unpacks one-element tuples for use as return values """
127
+ if len(x) == 1:
128
+ return x[0]
129
+ else:
130
+ return x
131
+
132
+
133
+ def _unique_dispatcher(ar, return_index=None, return_inverse=None,
134
+ return_counts=None, axis=None, *, equal_nan=None):
135
+ return (ar,)
136
+
137
+
138
+ @array_function_dispatch(_unique_dispatcher)
139
+ def unique(ar, return_index=False, return_inverse=False,
140
+ return_counts=False, axis=None, *, equal_nan=True):
141
+ """
142
+ Find the unique elements of an array.
143
+
144
+ Returns the sorted unique elements of an array. There are three optional
145
+ outputs in addition to the unique elements:
146
+
147
+ * the indices of the input array that give the unique values
148
+ * the indices of the unique array that reconstruct the input array
149
+ * the number of times each unique value comes up in the input array
150
+
151
+ Parameters
152
+ ----------
153
+ ar : array_like
154
+ Input array. Unless `axis` is specified, this will be flattened if it
155
+ is not already 1-D.
156
+ return_index : bool, optional
157
+ If True, also return the indices of `ar` (along the specified axis,
158
+ if provided, or in the flattened array) that result in the unique array.
159
+ return_inverse : bool, optional
160
+ If True, also return the indices of the unique array (for the specified
161
+ axis, if provided) that can be used to reconstruct `ar`.
162
+ return_counts : bool, optional
163
+ If True, also return the number of times each unique item appears
164
+ in `ar`.
165
+ axis : int or None, optional
166
+ The axis to operate on. If None, `ar` will be flattened. If an integer,
167
+ the subarrays indexed by the given axis will be flattened and treated
168
+ as the elements of a 1-D array with the dimension of the given axis,
169
+ see the notes for more details. Object arrays or structured arrays
170
+ that contain objects are not supported if the `axis` kwarg is used. The
171
+ default is None.
172
+
173
+ .. versionadded:: 1.13.0
174
+
175
+ equal_nan : bool, optional
176
+ If True, collapses multiple NaN values in the return array into one.
177
+
178
+ .. versionadded:: 1.24
179
+
180
+ Returns
181
+ -------
182
+ unique : ndarray
183
+ The sorted unique values.
184
+ unique_indices : ndarray, optional
185
+ The indices of the first occurrences of the unique values in the
186
+ original array. Only provided if `return_index` is True.
187
+ unique_inverse : ndarray, optional
188
+ The indices to reconstruct the original array from the
189
+ unique array. Only provided if `return_inverse` is True.
190
+ unique_counts : ndarray, optional
191
+ The number of times each of the unique values comes up in the
192
+ original array. Only provided if `return_counts` is True.
193
+
194
+ .. versionadded:: 1.9.0
195
+
196
+ See Also
197
+ --------
198
+ numpy.lib.arraysetops : Module with a number of other functions for
199
+ performing set operations on arrays.
200
+ repeat : Repeat elements of an array.
201
+
202
+ Notes
203
+ -----
204
+ When an axis is specified the subarrays indexed by the axis are sorted.
205
+ This is done by making the specified axis the first dimension of the array
206
+ (move the axis to the first dimension to keep the order of the other axes)
207
+ and then flattening the subarrays in C order. The flattened subarrays are
208
+ then viewed as a structured type with each element given a label, with the
209
+ effect that we end up with a 1-D array of structured types that can be
210
+ treated in the same way as any other 1-D array. The result is that the
211
+ flattened subarrays are sorted in lexicographic order starting with the
212
+ first element.
213
+
214
+ .. versionchanged: NumPy 1.21
215
+ If nan values are in the input array, a single nan is put
216
+ to the end of the sorted unique values.
217
+
218
+ Also for complex arrays all NaN values are considered equivalent
219
+ (no matter whether the NaN is in the real or imaginary part).
220
+ As the representant for the returned array the smallest one in the
221
+ lexicographical order is chosen - see np.sort for how the lexicographical
222
+ order is defined for complex arrays.
223
+
224
+ Examples
225
+ --------
226
+ >>> np.unique([1, 1, 2, 2, 3, 3])
227
+ array([1, 2, 3])
228
+ >>> a = np.array([[1, 1], [2, 3]])
229
+ >>> np.unique(a)
230
+ array([1, 2, 3])
231
+
232
+ Return the unique rows of a 2D array
233
+
234
+ >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
235
+ >>> np.unique(a, axis=0)
236
+ array([[1, 0, 0], [2, 3, 4]])
237
+
238
+ Return the indices of the original array that give the unique values:
239
+
240
+ >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
241
+ >>> u, indices = np.unique(a, return_index=True)
242
+ >>> u
243
+ array(['a', 'b', 'c'], dtype='<U1')
244
+ >>> indices
245
+ array([0, 1, 3])
246
+ >>> a[indices]
247
+ array(['a', 'b', 'c'], dtype='<U1')
248
+
249
+ Reconstruct the input array from the unique values and inverse:
250
+
251
+ >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
252
+ >>> u, indices = np.unique(a, return_inverse=True)
253
+ >>> u
254
+ array([1, 2, 3, 4, 6])
255
+ >>> indices
256
+ array([0, 1, 4, 3, 1, 2, 1])
257
+ >>> u[indices]
258
+ array([1, 2, 6, 4, 2, 3, 2])
259
+
260
+ Reconstruct the input values from the unique values and counts:
261
+
262
+ >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
263
+ >>> values, counts = np.unique(a, return_counts=True)
264
+ >>> values
265
+ array([1, 2, 3, 4, 6])
266
+ >>> counts
267
+ array([1, 3, 1, 1, 1])
268
+ >>> np.repeat(values, counts)
269
+ array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved
270
+
271
+ """
272
+ ar = np.asanyarray(ar)
273
+ if axis is None:
274
+ ret = _unique1d(ar, return_index, return_inverse, return_counts,
275
+ equal_nan=equal_nan)
276
+ return _unpack_tuple(ret)
277
+
278
+ # axis was specified and not None
279
+ try:
280
+ ar = np.moveaxis(ar, axis, 0)
281
+ except np.AxisError:
282
+ # this removes the "axis1" or "axis2" prefix from the error message
283
+ raise np.AxisError(axis, ar.ndim) from None
284
+
285
+ # Must reshape to a contiguous 2D array for this to work...
286
+ orig_shape, orig_dtype = ar.shape, ar.dtype
287
+ ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
288
+ ar = np.ascontiguousarray(ar)
289
+ dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]
290
+
291
+ # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
292
+ # data type with `m` fields where each field has the data type of `ar`.
293
+ # In the following, we create the array `consolidated`, which has
294
+ # shape `(n,)` with data type `dtype`.
295
+ try:
296
+ if ar.shape[1] > 0:
297
+ consolidated = ar.view(dtype)
298
+ else:
299
+ # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
300
+ # a data type with itemsize 0, and the call `ar.view(dtype)` will
301
+ # fail. Instead, we'll use `np.empty` to explicitly create the
302
+ # array with shape `(len(ar),)`. Because `dtype` in this case has
303
+ # itemsize 0, the total size of the result is still 0 bytes.
304
+ consolidated = np.empty(len(ar), dtype=dtype)
305
+ except TypeError as e:
306
+ # There's no good way to do this for object arrays, etc...
307
+ msg = 'The axis argument to unique is not supported for dtype {dt}'
308
+ raise TypeError(msg.format(dt=ar.dtype)) from e
309
+
310
+ def reshape_uniq(uniq):
311
+ n = len(uniq)
312
+ uniq = uniq.view(orig_dtype)
313
+ uniq = uniq.reshape(n, *orig_shape[1:])
314
+ uniq = np.moveaxis(uniq, 0, axis)
315
+ return uniq
316
+
317
+ output = _unique1d(consolidated, return_index,
318
+ return_inverse, return_counts, equal_nan=equal_nan)
319
+ output = (reshape_uniq(output[0]),) + output[1:]
320
+ return _unpack_tuple(output)
321
+
322
+
323
+ def _unique1d(ar, return_index=False, return_inverse=False,
324
+ return_counts=False, *, equal_nan=True):
325
+ """
326
+ Find the unique elements of an array, ignoring shape.
327
+ """
328
+ ar = np.asanyarray(ar).flatten()
329
+
330
+ optional_indices = return_index or return_inverse
331
+
332
+ if optional_indices:
333
+ perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
334
+ aux = ar[perm]
335
+ else:
336
+ ar.sort()
337
+ aux = ar
338
+ mask = np.empty(aux.shape, dtype=np.bool_)
339
+ mask[:1] = True
340
+ if (equal_nan and aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and
341
+ np.isnan(aux[-1])):
342
+ if aux.dtype.kind == "c": # for complex all NaNs are considered equivalent
343
+ aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left')
344
+ else:
345
+ aux_firstnan = np.searchsorted(aux, aux[-1], side='left')
346
+ if aux_firstnan > 0:
347
+ mask[1:aux_firstnan] = (
348
+ aux[1:aux_firstnan] != aux[:aux_firstnan - 1])
349
+ mask[aux_firstnan] = True
350
+ mask[aux_firstnan + 1:] = False
351
+ else:
352
+ mask[1:] = aux[1:] != aux[:-1]
353
+
354
+ ret = (aux[mask],)
355
+ if return_index:
356
+ ret += (perm[mask],)
357
+ if return_inverse:
358
+ imask = np.cumsum(mask) - 1
359
+ inv_idx = np.empty(mask.shape, dtype=np.intp)
360
+ inv_idx[perm] = imask
361
+ ret += (inv_idx,)
362
+ if return_counts:
363
+ idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
364
+ ret += (np.diff(idx),)
365
+ return ret
366
+
367
+
368
+ def _intersect1d_dispatcher(
369
+ ar1, ar2, assume_unique=None, return_indices=None):
370
+ return (ar1, ar2)
371
+
372
+
373
+ @array_function_dispatch(_intersect1d_dispatcher)
374
+ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
375
+ """
376
+ Find the intersection of two arrays.
377
+
378
+ Return the sorted, unique values that are in both of the input arrays.
379
+
380
+ Parameters
381
+ ----------
382
+ ar1, ar2 : array_like
383
+ Input arrays. Will be flattened if not already 1D.
384
+ assume_unique : bool
385
+ If True, the input arrays are both assumed to be unique, which
386
+ can speed up the calculation. If True but ``ar1`` or ``ar2`` are not
387
+ unique, incorrect results and out-of-bounds indices could result.
388
+ Default is False.
389
+ return_indices : bool
390
+ If True, the indices which correspond to the intersection of the two
391
+ arrays are returned. The first instance of a value is used if there are
392
+ multiple. Default is False.
393
+
394
+ .. versionadded:: 1.15.0
395
+
396
+ Returns
397
+ -------
398
+ intersect1d : ndarray
399
+ Sorted 1D array of common and unique elements.
400
+ comm1 : ndarray
401
+ The indices of the first occurrences of the common values in `ar1`.
402
+ Only provided if `return_indices` is True.
403
+ comm2 : ndarray
404
+ The indices of the first occurrences of the common values in `ar2`.
405
+ Only provided if `return_indices` is True.
406
+
407
+
408
+ See Also
409
+ --------
410
+ numpy.lib.arraysetops : Module with a number of other functions for
411
+ performing set operations on arrays.
412
+
413
+ Examples
414
+ --------
415
+ >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
416
+ array([1, 3])
417
+
418
+ To intersect more than two arrays, use functools.reduce:
419
+
420
+ >>> from functools import reduce
421
+ >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
422
+ array([3])
423
+
424
+ To return the indices of the values common to the input arrays
425
+ along with the intersected values:
426
+
427
+ >>> x = np.array([1, 1, 2, 3, 4])
428
+ >>> y = np.array([2, 1, 4, 6])
429
+ >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
430
+ >>> x_ind, y_ind
431
+ (array([0, 2, 4]), array([1, 0, 2]))
432
+ >>> xy, x[x_ind], y[y_ind]
433
+ (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
434
+
435
+ """
436
+ ar1 = np.asanyarray(ar1)
437
+ ar2 = np.asanyarray(ar2)
438
+
439
+ if not assume_unique:
440
+ if return_indices:
441
+ ar1, ind1 = unique(ar1, return_index=True)
442
+ ar2, ind2 = unique(ar2, return_index=True)
443
+ else:
444
+ ar1 = unique(ar1)
445
+ ar2 = unique(ar2)
446
+ else:
447
+ ar1 = ar1.ravel()
448
+ ar2 = ar2.ravel()
449
+
450
+ aux = np.concatenate((ar1, ar2))
451
+ if return_indices:
452
+ aux_sort_indices = np.argsort(aux, kind='mergesort')
453
+ aux = aux[aux_sort_indices]
454
+ else:
455
+ aux.sort()
456
+
457
+ mask = aux[1:] == aux[:-1]
458
+ int1d = aux[:-1][mask]
459
+
460
+ if return_indices:
461
+ ar1_indices = aux_sort_indices[:-1][mask]
462
+ ar2_indices = aux_sort_indices[1:][mask] - ar1.size
463
+ if not assume_unique:
464
+ ar1_indices = ind1[ar1_indices]
465
+ ar2_indices = ind2[ar2_indices]
466
+
467
+ return int1d, ar1_indices, ar2_indices
468
+ else:
469
+ return int1d
470
+
471
+
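+ # Sketch of the trick used above (illustrative values only): once both
+ # inputs are deduplicated, a common value appears exactly twice in the
+ # sorted concatenation, so equal neighbours mark the intersection. For
+ # ar1 = [1, 3] and ar2 = [3, 4], aux = [1, 3, 3, 4] and
+ # mask = (aux[1:] == aux[:-1]) = [False, True, False], so aux[:-1][mask]
+ # picks out [3]. With return_indices, the stable mergesort keeps the ar1
+ # copy of each duplicate pair ahead of the ar2 copy, which is why
+ # aux_sort_indices[:-1][mask] indexes into ar1 while the shifted
+ # aux_sort_indices[1:][mask] (minus ar1.size) indexes into ar2.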
472
+ def _setxor1d_dispatcher(ar1, ar2, assume_unique=None):
473
+ return (ar1, ar2)
474
+
475
+
476
+ @array_function_dispatch(_setxor1d_dispatcher)
477
+ def setxor1d(ar1, ar2, assume_unique=False):
478
+ """
479
+ Find the set exclusive-or of two arrays.
480
+
481
+ Return the sorted, unique values that are in only one (not both) of the
482
+ input arrays.
483
+
484
+ Parameters
485
+ ----------
486
+ ar1, ar2 : array_like
487
+ Input arrays.
488
+ assume_unique : bool
489
+ If True, the input arrays are both assumed to be unique, which
490
+ can speed up the calculation. Default is False.
491
+
492
+ Returns
493
+ -------
494
+ setxor1d : ndarray
495
+ Sorted 1D array of unique values that are in only one of the input
496
+ arrays.
497
+
498
+ Examples
499
+ --------
500
+ >>> a = np.array([1, 2, 3, 2, 4])
501
+ >>> b = np.array([2, 3, 5, 7, 5])
502
+ >>> np.setxor1d(a,b)
503
+ array([1, 4, 5, 7])
504
+
505
+ """
506
+ if not assume_unique:
507
+ ar1 = unique(ar1)
508
+ ar2 = unique(ar2)
509
+
510
+ aux = np.concatenate((ar1, ar2))
511
+ if aux.size == 0:
512
+ return aux
513
+
514
+ aux.sort()
515
+ flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
516
+ return aux[flag[1:] & flag[:-1]]
517
+
518
+
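+ # Sketch of the flag trick above (illustrative values only): flag is True
+ # at every boundary where the sorted value changes, plus both ends, so
+ # flag[1:] & flag[:-1] keeps exactly the values that differ from both
+ # neighbours, i.e. occur once in the concatenation. For a = [1, 2] and
+ # b = [2, 3], aux = [1, 2, 2, 3] and flag = [T, T, F, T, T], hence
+ # flag[1:] & flag[:-1] = [T, F, F, T] and the result is [1, 3].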
519
+ def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *,
520
+ kind=None):
521
+ return (ar1, ar2)
522
+
523
+
524
+ @array_function_dispatch(_in1d_dispatcher)
525
+ def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
526
+ """
527
+ Test whether each element of a 1-D array is also present in a second array.
528
+
529
+ Returns a boolean array the same length as `ar1` that is True
530
+ where an element of `ar1` is in `ar2` and False otherwise.
531
+
532
+ We recommend using :func:`isin` instead of `in1d` for new code.
533
+
534
+ Parameters
535
+ ----------
536
+ ar1 : (M,) array_like
537
+ Input array.
538
+ ar2 : array_like
539
+ The values against which to test each value of `ar1`.
540
+ assume_unique : bool, optional
541
+ If True, the input arrays are both assumed to be unique, which
542
+ can speed up the calculation. Default is False.
543
+ invert : bool, optional
544
+ If True, the values in the returned array are inverted (that is,
545
+ False where an element of `ar1` is in `ar2` and True otherwise).
546
+ Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
547
+ to (but is faster than) ``np.invert(in1d(a, b))``.
548
+ kind : {None, 'sort', 'table'}, optional
549
+ The algorithm to use. This will not affect the final result,
550
+ but will affect the speed and memory use. The default, None,
551
+ will select automatically based on memory considerations.
552
+
553
+ * If 'sort', will use a mergesort-based approach. This will have
554
+ a memory usage of roughly 6 times the sum of the sizes of
555
+ `ar1` and `ar2`, not accounting for size of dtypes.
556
+ * If 'table', will use a lookup table approach similar
557
+ to a counting sort. This is only available for boolean and
558
+ integer arrays. This will have a memory usage of the
559
+ size of `ar1` plus the max-min value of `ar2`. `assume_unique`
560
+ has no effect when the 'table' option is used.
561
+ * If None, will automatically choose 'table' if
562
+ the required memory allocation is less than or equal to
563
+ 6 times the sum of the sizes of `ar1` and `ar2`,
564
+ otherwise will use 'sort'. This is done to not use
565
+ a large amount of memory by default, even though
566
+ 'table' may be faster in most cases. If 'table' is chosen,
567
+ `assume_unique` will have no effect.
568
+
569
+ .. versionadded:: 1.8.0
570
+
571
+ Returns
572
+ -------
573
+ in1d : (M,) ndarray, bool
574
+ The values `ar1[in1d]` are in `ar2`.
575
+
576
+ See Also
577
+ --------
578
+ isin : Version of this function that preserves the
579
+ shape of ar1.
580
+ numpy.lib.arraysetops : Module with a number of other functions for
581
+ performing set operations on arrays.
582
+
583
+ Notes
584
+ -----
585
+ `in1d` can be considered as an element-wise function version of the
586
+ python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
587
+ equivalent to ``np.array([item in b for item in a])``.
588
+ However, this idea fails if `ar2` is a set, or similar (non-sequence)
589
+ container: As ``ar2`` is converted to an array, in those cases
590
+ ``asarray(ar2)`` is an object array rather than the expected array of
591
+ contained values.
592
+
593
+ Using ``kind='table'`` tends to be faster than ``kind='sort'`` if the
594
+ following relationship is true:
595
+ ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
596
+ but may use greater memory. The default value for `kind` will
597
+ be automatically selected based only on memory usage, so one may
598
+ manually set ``kind='table'`` if memory constraints can be relaxed.
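+ For example, with ``len(ar2) == 1000`` and
+ ``max(ar2) - min(ar2) == 10000``, ``log10(1000) == 3`` exceeds
+ ``(log10(10000) - 2.27) / 0.927`` (about 1.87), so ``kind='table'``
+ is likely to be the faster choice in that case.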
599
+
600
+ .. versionadded:: 1.4.0
601
+
602
+ Examples
603
+ --------
604
+ >>> test = np.array([0, 1, 2, 5, 0])
605
+ >>> states = [0, 2]
606
+ >>> mask = np.in1d(test, states)
607
+ >>> mask
608
+ array([ True, False, True, False, True])
609
+ >>> test[mask]
610
+ array([0, 2, 0])
611
+ >>> mask = np.in1d(test, states, invert=True)
612
+ >>> mask
613
+ array([False, True, False, True, False])
614
+ >>> test[mask]
615
+ array([1, 5])
616
+ """
617
+ # Ravel both arrays, behavior for the first array could be different
618
+ ar1 = np.asarray(ar1).ravel()
619
+ ar2 = np.asarray(ar2).ravel()
620
+
621
+ # Ensure that iteration through object arrays yields size-1 arrays
622
+ if ar2.dtype == object:
623
+ ar2 = ar2.reshape(-1, 1)
624
+
625
+ if kind not in {None, 'sort', 'table'}:
626
+ raise ValueError(
627
+ f"Invalid kind: '{kind}'. Please use None, 'sort' or 'table'.")
628
+
629
+ # Can use the table method if all arrays are integers or boolean:
630
+ is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2))
631
+ use_table_method = is_int_arrays and kind in {None, 'table'}
632
+
633
+ if use_table_method:
634
+ if ar2.size == 0:
635
+ if invert:
636
+ return np.ones_like(ar1, dtype=bool)
637
+ else:
638
+ return np.zeros_like(ar1, dtype=bool)
639
+
640
+ # Convert booleans to uint8 so we can use the fast integer algorithm
641
+ if ar1.dtype == bool:
642
+ ar1 = ar1.astype(np.uint8)
643
+ if ar2.dtype == bool:
644
+ ar2 = ar2.astype(np.uint8)
645
+
646
+ ar2_min = np.min(ar2)
647
+ ar2_max = np.max(ar2)
648
+
649
+ ar2_range = int(ar2_max) - int(ar2_min)
650
+
651
+ # Constraints on whether we can actually use the table method:
652
+ # 1. Assert memory usage is not too large
653
+ below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size)
654
+ # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype
655
+ range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max
656
+ # 3. Check overflows for (ar1 - ar2_min); dtype=ar1.dtype
657
+ if ar1.size > 0:
658
+ ar1_min = np.min(ar1)
659
+ ar1_max = np.max(ar1)
660
+
661
+ # After masking, the range of ar1 is guaranteed to be
662
+ # within the range of ar2:
663
+ ar1_upper = min(int(ar1_max), int(ar2_max))
664
+ ar1_lower = max(int(ar1_min), int(ar2_min))
665
+
666
+ range_safe_from_overflow &= all((
667
+ ar1_upper - int(ar2_min) <= np.iinfo(ar1.dtype).max,
668
+ ar1_lower - int(ar2_min) >= np.iinfo(ar1.dtype).min
669
+ ))
670
+
671
+ # Optimal performance is for approximately
672
+ # log10(size) > (log10(range) - 2.27) / 0.927.
673
+ # However, here we set the requirement that by default
674
+ # the intermediate array can only be 6x
675
+ # the combined memory allocation of the original
676
+ # arrays. See discussion on
677
+ # https://github.com/numpy/numpy/pull/12065.
678
+
679
+ if (
680
+ range_safe_from_overflow and
681
+ (below_memory_constraint or kind == 'table')
682
+ ):
683
+
684
+ if invert:
685
+ outgoing_array = np.ones_like(ar1, dtype=bool)
686
+ else:
687
+ outgoing_array = np.zeros_like(ar1, dtype=bool)
688
+
689
+ # Make elements 1 where the integer exists in ar2
690
+ if invert:
691
+ isin_helper_ar = np.ones(ar2_range + 1, dtype=bool)
692
+ isin_helper_ar[ar2 - ar2_min] = 0
693
+ else:
694
+ isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool)
695
+ isin_helper_ar[ar2 - ar2_min] = 1
696
+
697
+ # Mask out elements we know won't work
698
+ basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)
699
+ outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] -
700
+ ar2_min]
701
+
702
+ return outgoing_array
703
+ elif kind == 'table': # not range_safe_from_overflow
704
+ raise RuntimeError(
705
+ "You have specified kind='table', "
706
+ "but the range of values in `ar2` or `ar1` exceed the "
707
+ "maximum integer of the datatype. "
708
+ "Please set `kind` to None or 'sort'."
709
+ )
710
+ elif kind == 'table':
711
+ raise ValueError(
712
+ "The 'table' method is only "
713
+ "supported for boolean or integer arrays. "
714
+ "Please select 'sort' or None for kind."
715
+ )
716
+
718
+ # Check if one of the arrays may contain arbitrary objects
719
+ contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject
720
+
721
+ # This code is run when
722
+ # a) the first condition is true, making the code significantly faster
723
+ # b) the second condition is true (i.e. `ar1` or `ar2` may contain
724
+ # arbitrary objects), since then sorting is not guaranteed to work
725
+ if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:
726
+ if invert:
727
+ mask = np.ones(len(ar1), dtype=bool)
728
+ for a in ar2:
729
+ mask &= (ar1 != a)
730
+ else:
731
+ mask = np.zeros(len(ar1), dtype=bool)
732
+ for a in ar2:
733
+ mask |= (ar1 == a)
734
+ return mask
735
+
736
+ # Otherwise use sorting
737
+ if not assume_unique:
738
+ ar1, rev_idx = np.unique(ar1, return_inverse=True)
739
+ ar2 = np.unique(ar2)
740
+
741
+ ar = np.concatenate((ar1, ar2))
742
+ # We need this to be a stable sort, so always use 'mergesort'
743
+ # here. The values from the first array should always come before
744
+ # the values from the second array.
745
+ order = ar.argsort(kind='mergesort')
746
+ sar = ar[order]
747
+ if invert:
748
+ bool_ar = (sar[1:] != sar[:-1])
749
+ else:
750
+ bool_ar = (sar[1:] == sar[:-1])
751
+ flag = np.concatenate((bool_ar, [invert]))
752
+ ret = np.empty(ar.shape, dtype=bool)
753
+ ret[order] = flag
754
+
755
+ if assume_unique:
756
+ return ret[:len(ar1)]
757
+ else:
758
+ return ret[rev_idx]
759
+
760
+
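+ # Sketch of the 'table' branch above (illustrative values only):
+ # membership reduces to one boolean lookup table that spans the value
+ # range of ar2. For ar2 = [3, 5], ar2_min = 3 and ar2_range = 2, so
+ # isin_helper_ar = [True, False, True] maps offsets 0..2 to values 3..5,
+ # and a query value 4 reads isin_helper_ar[4 - 3] == False. Values of
+ # ar1 outside [ar2_min, ar2_max] are masked out first, which also keeps
+ # the shifted index ar1 - ar2_min within the table's bounds.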
761
+ def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None,
762
+ *, kind=None):
763
+ return (element, test_elements)
764
+
765
+
766
+ @array_function_dispatch(_isin_dispatcher)
767
+ def isin(element, test_elements, assume_unique=False, invert=False, *,
768
+ kind=None):
769
+ """
770
+ Calculates ``element in test_elements``, broadcasting over `element` only.
771
+ Returns a boolean array of the same shape as `element` that is True
772
+ where an element of `element` is in `test_elements` and False otherwise.
773
+
774
+ Parameters
775
+ ----------
776
+ element : array_like
777
+ Input array.
778
+ test_elements : array_like
779
+ The values against which to test each value of `element`.
780
+ This argument is flattened if it is an array or array_like.
781
+ See notes for behavior with non-array-like parameters.
782
+ assume_unique : bool, optional
783
+ If True, the input arrays are both assumed to be unique, which
784
+ can speed up the calculation. Default is False.
785
+ invert : bool, optional
786
+ If True, the values in the returned array are inverted, as if
787
+ calculating `element not in test_elements`. Default is False.
788
+ ``np.isin(a, b, invert=True)`` is equivalent to (but faster
789
+ than) ``np.invert(np.isin(a, b))``.
790
+ kind : {None, 'sort', 'table'}, optional
791
+ The algorithm to use. This will not affect the final result,
792
+ but will affect the speed and memory use. The default, None,
793
+ will select automatically based on memory considerations.
794
+
795
+ * If 'sort', will use a mergesort-based approach. This will have
796
+ a memory usage of roughly 6 times the sum of the sizes of
797
+ `element` and `test_elements`, not accounting for size of dtypes.
798
+ * If 'table', will use a lookup table approach similar
799
+ to a counting sort. This is only available for boolean and
800
+ integer arrays. This will have a memory usage of the
801
+ size of `element` plus the max-min value of `test_elements`. `assume_unique`
802
+ has no effect when the 'table' option is used.
803
+ * If None, will automatically choose 'table' if
804
+ the required memory allocation is less than or equal to
805
+ 6 times the sum of the sizes of `element` and `test_elements`,
806
+ otherwise will use 'sort'. This is done to not use
807
+ a large amount of memory by default, even though
808
+ 'table' may be faster in most cases. If 'table' is chosen,
809
+ `assume_unique` will have no effect.
810
+
811
+
812
+ Returns
813
+ -------
814
+ isin : ndarray, bool
815
+ Has the same shape as `element`. The values `element[isin]`
816
+ are in `test_elements`.
817
+
818
+ See Also
819
+ --------
820
+ in1d : Flattened version of this function.
821
+ numpy.lib.arraysetops : Module with a number of other functions for
822
+ performing set operations on arrays.
823
+
824
+ Notes
825
+ -----
826
+
827
+ `isin` is an element-wise function version of the python keyword `in`.
828
+ ``isin(a, b)`` is roughly equivalent to
829
+ ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.
830
+
831
+ `element` and `test_elements` are converted to arrays if they are not
832
+ already. If `test_elements` is a set (or other non-sequence collection)
833
+ it will be converted to an object array with one element, rather than an
834
+ array of the values contained in `test_elements`. This is a consequence
835
+ of the `array` constructor's way of handling non-sequence collections.
836
+ Converting the set to a list usually gives the desired behavior.
837
+
838
+ Using ``kind='table'`` tends to be faster than ``kind='sort'`` if the
839
+ following relationship is true:
840
+ ``log10(len(test_elements)) > (log10(max(test_elements)-min(test_elements)) - 2.27) / 0.927``,
841
+ but may use greater memory. The default value for `kind` will
842
+ be automatically selected based only on memory usage, so one may
843
+ manually set ``kind='table'`` if memory constraints can be relaxed.
844
+
845
+ .. versionadded:: 1.13.0
846
+
847
+ Examples
848
+ --------
849
+ >>> element = 2*np.arange(4).reshape((2, 2))
850
+ >>> element
851
+ array([[0, 2],
852
+ [4, 6]])
853
+ >>> test_elements = [1, 2, 4, 8]
854
+ >>> mask = np.isin(element, test_elements)
855
+ >>> mask
856
+ array([[False, True],
857
+ [ True, False]])
858
+ >>> element[mask]
859
+ array([2, 4])
860
+
861
+ The indices of the matched values can be obtained with `nonzero`:
862
+
863
+ >>> np.nonzero(mask)
864
+ (array([0, 1]), array([1, 0]))
865
+
866
+ The test can also be inverted:
867
+
868
+ >>> mask = np.isin(element, test_elements, invert=True)
869
+ >>> mask
870
+ array([[ True, False],
871
+ [False, True]])
872
+ >>> element[mask]
873
+ array([0, 6])
874
+
875
+ Because of how `array` handles sets, the following does not
876
+ work as expected:
877
+
878
+ >>> test_set = {1, 2, 4, 8}
879
+ >>> np.isin(element, test_set)
880
+ array([[False, False],
881
+ [False, False]])
882
+
883
+ Casting the set to a list gives the expected result:
884
+
885
+ >>> np.isin(element, list(test_set))
886
+ array([[False, True],
887
+ [ True, False]])
888
+ """
889
+ element = np.asarray(element)
890
+ return in1d(element, test_elements, assume_unique=assume_unique,
891
+ invert=invert, kind=kind).reshape(element.shape)
892
+
893
+
894
+ def _union1d_dispatcher(ar1, ar2):
895
+ return (ar1, ar2)
896
+
897
+
898
+ @array_function_dispatch(_union1d_dispatcher)
899
+ def union1d(ar1, ar2):
900
+ """
901
+ Find the union of two arrays.
902
+
903
+ Return the unique, sorted array of values that are in either of the two
904
+ input arrays.
905
+
906
+ Parameters
907
+ ----------
908
+ ar1, ar2 : array_like
909
+ Input arrays. They are flattened if they are not already 1D.
910
+
911
+ Returns
912
+ -------
913
+ union1d : ndarray
914
+ Unique, sorted union of the input arrays.
915
+
916
+ See Also
917
+ --------
918
+ numpy.lib.arraysetops : Module with a number of other functions for
919
+ performing set operations on arrays.
920
+
921
+ Examples
922
+ --------
923
+ >>> np.union1d([-1, 0, 1], [-2, 0, 2])
924
+ array([-2, -1, 0, 1, 2])
925
+
926
+ To find the union of more than two arrays, use functools.reduce:
927
+
928
+ >>> from functools import reduce
929
+ >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
930
+ array([1, 2, 3, 4, 6])
931
+ """
932
+ return unique(np.concatenate((ar1, ar2), axis=None))
933
+
934
+
935
+ def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None):
936
+ return (ar1, ar2)
937
+
938
+
939
+ @array_function_dispatch(_setdiff1d_dispatcher)
940
+ def setdiff1d(ar1, ar2, assume_unique=False):
941
+ """
942
+ Find the set difference of two arrays.
943
+
944
+ Return the unique values in `ar1` that are not in `ar2`.
945
+
946
+ Parameters
947
+ ----------
948
+ ar1 : array_like
949
+ Input array.
950
+ ar2 : array_like
951
+ Input comparison array.
952
+ assume_unique : bool
953
+ If True, the input arrays are both assumed to be unique, which
954
+ can speed up the calculation. Default is False.
955
+
956
+ Returns
957
+ -------
958
+ setdiff1d : ndarray
959
+ 1D array of values in `ar1` that are not in `ar2`. The result
960
+ is sorted when `assume_unique=False`, but otherwise only sorted
961
+ if the input is sorted.
962
+
963
+ See Also
964
+ --------
965
+ numpy.lib.arraysetops : Module with a number of other functions for
966
+ performing set operations on arrays.
967
+
968
+ Examples
969
+ --------
970
+ >>> a = np.array([1, 2, 3, 2, 4, 1])
971
+ >>> b = np.array([3, 4, 5, 6])
972
+ >>> np.setdiff1d(a, b)
973
+ array([1, 2])
974
+
975
+ """
976
+ if assume_unique:
977
+ ar1 = np.asarray(ar1).ravel()
978
+ else:
979
+ ar1 = unique(ar1)
980
+ ar2 = unique(ar2)
981
+ return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
venv/lib/python3.10/site-packages/numpy/lib/arrayterator.pyi ADDED
@@ -0,0 +1,49 @@
1
+ from collections.abc import Generator
2
+ from typing import (
3
+ Any,
4
+ TypeVar,
5
+ Union,
6
+ overload,
7
+ )
8
+
9
+ from numpy import ndarray, dtype, generic
10
+ from numpy._typing import DTypeLike
11
+
12
+ # TODO: Set a shape bound once we've got proper shape support
13
+ _Shape = TypeVar("_Shape", bound=Any)
14
+ _DType = TypeVar("_DType", bound=dtype[Any])
15
+ _ScalarType = TypeVar("_ScalarType", bound=generic)
16
+
17
+ _Index = Union[
18
+ Union[ellipsis, int, slice],
19
+ tuple[Union[ellipsis, int, slice], ...],
20
+ ]
21
+
22
+ __all__: list[str]
23
+
24
+ # NOTE: In reality `Arrayterator` does not actually inherit from `ndarray`,
25
+ # but its ``__getattr__` method does wrap around the former and thus has
26
+ # access to all its methods
27
+
28
+ class Arrayterator(ndarray[_Shape, _DType]):
29
+ var: ndarray[_Shape, _DType] # type: ignore[assignment]
30
+ buf_size: None | int
31
+ start: list[int]
32
+ stop: list[int]
33
+ step: list[int]
34
+
35
+ @property # type: ignore[misc]
36
+ def shape(self) -> tuple[int, ...]: ...
37
+ @property
38
+ def flat( # type: ignore[override]
39
+ self: ndarray[Any, dtype[_ScalarType]]
40
+ ) -> Generator[_ScalarType, None, None]: ...
41
+ def __init__(
42
+ self, var: ndarray[_Shape, _DType], buf_size: None | int = ...
43
+ ) -> None: ...
44
+ @overload
45
+ def __array__(self, dtype: None = ...) -> ndarray[Any, _DType]: ...
46
+ @overload
47
+ def __array__(self, dtype: DTypeLike) -> ndarray[Any, dtype[Any]]: ...
48
+ def __getitem__(self, index: _Index) -> Arrayterator[Any, _DType]: ...
49
+ def __iter__(self) -> Generator[ndarray[Any, _DType], None, None]: ...
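+ # Illustrative usage of the annotated class (not part of the stub itself):
+ #
+ # from numpy.lib import Arrayterator
+ # a = np.arange(12).reshape(3, 4)
+ # for chunk in Arrayterator(a, buf_size=4):
+ # ... # each chunk is an ndarray holding at most 4 elements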
venv/lib/python3.10/site-packages/numpy/lib/format.pyi ADDED
@@ -0,0 +1,22 @@
1
+ from typing import Any, Literal, Final
2
+
3
+ __all__: list[str]
4
+
5
+ EXPECTED_KEYS: Final[set[str]]
6
+ MAGIC_PREFIX: Final[bytes]
7
+ MAGIC_LEN: Literal[8]
8
+ ARRAY_ALIGN: Literal[64]
9
+ BUFFER_SIZE: Literal[262144] # 2**18
10
+
11
+ def magic(major, minor): ...
12
+ def read_magic(fp): ...
13
+ def dtype_to_descr(dtype): ...
14
+ def descr_to_dtype(descr): ...
15
+ def header_data_from_array_1_0(array): ...
16
+ def write_array_header_1_0(fp, d): ...
17
+ def write_array_header_2_0(fp, d): ...
18
+ def read_array_header_1_0(fp): ...
19
+ def read_array_header_2_0(fp): ...
20
+ def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ...
21
+ def read_array(fp, allow_pickle=..., pickle_kwargs=...): ...
22
+ def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ...
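+ # Illustrative note on the constants above (added exposition): the .npy
+ # magic string is the 6-byte MAGIC_PREFIX b'\x93NUMPY' followed by one
+ # major and one minor version byte, hence MAGIC_LEN == 8, e.g.
+ # magic(1, 0) == b'\x93NUMPY\x01\x00'. ARRAY_ALIGN == 64 pads the header
+ # so the array data starts on a 64-byte boundary, which helps memory
+ # mapping via open_memmap.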
venv/lib/python3.10/site-packages/numpy/lib/function_base.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/numpy/lib/histograms.py ADDED
@@ -0,0 +1,1072 @@
1
+ """
2
+ Histogram-related functions
3
+ """
4
+ import contextlib
5
+ import functools
6
+ import operator
7
+ import warnings
8
+
9
+ import numpy as np
10
+ from numpy.core import overrides
11
+
12
+ __all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
13
+
14
+ array_function_dispatch = functools.partial(
15
+ overrides.array_function_dispatch, module='numpy')
16
+
17
+ # range is a keyword argument to many functions, so save the builtin so they can
18
+ # use it.
19
+ _range = range
20
+
21
+
22
+ def _ptp(x):
23
+ """Peak-to-peak value of x.
24
+
25
+ This implementation avoids the problem of signed integer arrays having a
26
+ peak-to-peak value that cannot be represented with the array's data type.
27
+ This function returns an unsigned value for signed integer arrays.
28
+ """
29
+ return _unsigned_subtract(x.max(), x.min())
30
+
31
+
32
+ def _hist_bin_sqrt(x, range):
33
+ """
34
+ Square root histogram bin estimator.
35
+
36
+ Bin width is inversely proportional to the data size. Used by many
37
+ programs for its simplicity.
38
+
39
+ Parameters
40
+ ----------
41
+ x : array_like
42
+ Input data that is to be histogrammed, trimmed to range. May not
43
+ be empty.
44
+
45
+ Returns
46
+ -------
47
+ h : An estimate of the optimal bin width for the given data.
48
+ """
49
+ del range # unused
50
+ return _ptp(x) / np.sqrt(x.size)
51
+
52
+
53
+ def _hist_bin_sturges(x, range):
54
+ """
55
+ Sturges histogram bin estimator.
56
+
57
+ A very simplistic estimator based on the assumption of normality of
58
+ the data. This estimator has poor performance for non-normal data,
59
+ which becomes especially obvious for large data sets. The estimate
60
+ depends only on size of the data.
61
+
62
+ Parameters
63
+ ----------
64
+ x : array_like
65
+ Input data that is to be histogrammed, trimmed to range. May not
66
+ be empty.
67
+
68
+ Returns
69
+ -------
70
+ h : An estimate of the optimal bin width for the given data.
71
+ """
72
+ del range # unused
73
+ return _ptp(x) / (np.log2(x.size) + 1.0)
74
+
75
+
76
+ def _hist_bin_rice(x, range):
77
+ """
78
+ Rice histogram bin estimator.
79
+
80
+ Another simple estimator with no normality assumption. It has better
81
+ performance for large data than Sturges, but tends to overestimate
82
+ the number of bins. The number of bins is proportional to the cube
83
+ root of data size (asymptotically optimal). The estimate depends
84
+ only on size of the data.
85
+
86
+ Parameters
87
+ ----------
88
+ x : array_like
89
+ Input data that is to be histogrammed, trimmed to range. May not
90
+ be empty.
91
+
92
+ Returns
93
+ -------
94
+ h : An estimate of the optimal bin width for the given data.
95
+ """
96
+ del range # unused
97
+ return _ptp(x) / (2.0 * x.size ** (1.0 / 3))
98
+
99
+
100
+ def _hist_bin_scott(x, range):
101
+ """
102
+ Scott histogram bin estimator.
103
+
104
+ The binwidth is proportional to the standard deviation of the data
105
+ and inversely proportional to the cube root of data size
106
+ (asymptotically optimal).
107
+
108
+ Parameters
109
+ ----------
110
+ x : array_like
111
+ Input data that is to be histogrammed, trimmed to range. May not
112
+ be empty.
113
+
114
+ Returns
115
+ -------
116
+ h : An estimate of the optimal bin width for the given data.
117
+ """
118
+ del range # unused
119
+ return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
120
+
121
+
122
+ def _hist_bin_stone(x, range):
123
+ """
124
+ Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
125
+
126
+ The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
127
+ The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
128
+ https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
129
+
130
+ This paper by Stone appears to be the origination of this rule.
131
+ http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
132
+
133
+ Parameters
134
+ ----------
135
+ x : array_like
136
+ Input data that is to be histogrammed, trimmed to range. May not
137
+ be empty.
138
+ range : (float, float)
139
+ The lower and upper range of the bins.
140
+
141
+ Returns
142
+ -------
143
+ h : An estimate of the optimal bin width for the given data.
144
+ """
145
+
146
+ n = x.size
147
+ ptp_x = _ptp(x)
148
+ if n <= 1 or ptp_x == 0:
149
+ return 0
150
+
151
+ def jhat(nbins):
152
+ hh = ptp_x / nbins
153
+ p_k = np.histogram(x, bins=nbins, range=range)[0] / n
154
+ return (2 - (n + 1) * p_k.dot(p_k)) / hh
155
+
156
+ nbins_upper_bound = max(100, int(np.sqrt(n)))
157
+ nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
158
+ if nbins == nbins_upper_bound:
159
+ warnings.warn("The number of bins estimated may be suboptimal.",
160
+ RuntimeWarning, stacklevel=3)
161
+ return ptp_x / nbins
162
+
163
+
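+ # For reference (added exposition): up to the positive constant 1/(n - 1),
+ # jhat above is the usual leave-one-out cross-validation estimate of the
+ # integrated squared error for bin width h:
+ #
+ # Jhat(h) = 2 / ((n - 1) * h) - (n + 1) / ((n - 1) * h) * sum_k p_k ** 2
+ #
+ # Dropping the shared 1/(n - 1) factor leaves the minimizing bin count,
+ # which is all this estimator uses, unchanged.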
164
+ def _hist_bin_doane(x, range):
165
+ """
166
+ Doane's histogram bin estimator.
167
+
168
+ Improved version of Sturges' formula which works better for
169
+ non-normal data. See
170
+ stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
171
+
172
+ Parameters
173
+ ----------
174
+ x : array_like
175
+ Input data that is to be histogrammed, trimmed to range. May not
176
+ be empty.
177
+
178
+ Returns
179
+ -------
180
+ h : An estimate of the optimal bin width for the given data.
181
+ """
182
+ del range # unused
183
+ if x.size > 2:
184
+ sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
185
+ sigma = np.std(x)
186
+ if sigma > 0.0:
187
+ # These three operations add up to
188
+ # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
189
+ # but use only one temp array instead of three
190
+ temp = x - np.mean(x)
191
+ np.true_divide(temp, sigma, temp)
192
+ np.power(temp, 3, temp)
193
+ g1 = np.mean(temp)
194
+ return _ptp(x) / (1.0 + np.log2(x.size) +
195
+ np.log2(1.0 + np.absolute(g1) / sg1))
196
+ return 0.0
197
+
198
+
199
+ def _hist_bin_fd(x, range):
200
+ """
201
+ The Freedman-Diaconis histogram bin estimator.
202
+
203
+ The Freedman-Diaconis rule uses interquartile range (IQR) to
204
+ estimate binwidth. It is considered a variation of the Scott rule
205
+ with more robustness as the IQR is less affected by outliers than
206
+ the standard deviation. However, the IQR depends on fewer points
207
+ than the standard deviation, so it is less accurate, especially for
208
+ long tailed distributions.
209
+
210
+ If the IQR is 0, this function returns 0 for the bin width.
211
+ Binwidth is inversely proportional to the cube root of data size
212
+ (asymptotically optimal).
213
+
214
+ Parameters
215
+ ----------
216
+ x : array_like
217
+ Input data that is to be histogrammed, trimmed to range. May not
218
+ be empty.
219
+
220
+ Returns
221
+ -------
222
+ h : An estimate of the optimal bin width for the given data.
223
+ """
224
+ del range # unused
225
+ iqr = np.subtract(*np.percentile(x, [75, 25]))
226
+ return 2.0 * iqr * x.size ** (-1.0 / 3.0)
227
+
228
+
229
+ def _hist_bin_auto(x, range):
230
+ """
231
+ Histogram bin estimator that uses the minimum width of the
232
+ Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero.
233
+ If the bin width from the FD estimator is 0, the Sturges estimator is used.
234
+
235
+ The FD estimator is usually the most robust method, but its width
236
+ estimate tends to be too large for small `x` and bad for data with limited
237
+ variance. The Sturges estimator is quite good for small (<1000) datasets
238
+ and is the default in the R language. This method gives good off-the-shelf
239
+ behaviour.
240
+
241
+ .. versionchanged:: 1.15.0
242
+ If there is limited variance the IQR can be 0, which results in the
243
+ FD bin width being 0 too. This is not a valid bin width, so
244
+ ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
245
+ If the IQR is 0, it's unlikely any variance-based estimators will be of
246
+ use, so we revert to the Sturges estimator, which only uses the size of the
247
+ dataset in its calculation.
248
+
249
+ Parameters
250
+ ----------
251
+ x : array_like
252
+ Input data that is to be histogrammed, trimmed to range. May not
253
+ be empty.
254
+
255
+ Returns
256
+ -------
257
+ h : An estimate of the optimal bin width for the given data.
258
+
259
+ See Also
260
+ --------
261
+ _hist_bin_fd, _hist_bin_sturges
262
+ """
263
+ fd_bw = _hist_bin_fd(x, range)
264
+ sturges_bw = _hist_bin_sturges(x, range)
265
+ del range # unused
266
+ if fd_bw:
267
+ return min(fd_bw, sturges_bw)
268
+ else:
269
+ # limited variance, so we return a len dependent bw estimator
270
+ return sturges_bw
271
+
272
+ # Private dict initialized at module load time
273
+ _hist_bin_selectors = {'stone': _hist_bin_stone,
274
+ 'auto': _hist_bin_auto,
275
+ 'doane': _hist_bin_doane,
276
+ 'fd': _hist_bin_fd,
277
+ 'rice': _hist_bin_rice,
278
+ 'scott': _hist_bin_scott,
279
+ 'sqrt': _hist_bin_sqrt,
280
+ 'sturges': _hist_bin_sturges}
281
+
282
+
283
+ def _ravel_and_check_weights(a, weights):
284
+ """ Check a and weights have matching shapes, and ravel both """
285
+ a = np.asarray(a)
286
+
287
+ # Ensure that the array is a "subtractable" dtype
288
+ if a.dtype == np.bool_:
289
+ warnings.warn("Converting input from {} to {} for compatibility."
290
+ .format(a.dtype, np.uint8),
291
+ RuntimeWarning, stacklevel=3)
292
+ a = a.astype(np.uint8)
293
+
294
+ if weights is not None:
295
+ weights = np.asarray(weights)
296
+ if weights.shape != a.shape:
297
+ raise ValueError(
298
+ 'weights should have the same shape as a.')
299
+ weights = weights.ravel()
300
+ a = a.ravel()
301
+ return a, weights
302
+
303
+
304
+ def _get_outer_edges(a, range):
305
+ """
306
+ Determine the outer bin edges to use, from either the data or the range
307
+ argument
308
+ """
309
+ if range is not None:
310
+ first_edge, last_edge = range
311
+ if first_edge > last_edge:
312
+ raise ValueError(
313
+ 'max must be larger than min in range parameter.')
314
+ if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
315
+ raise ValueError(
316
+ "supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
317
+ elif a.size == 0:
318
+ # handle empty arrays. Can't determine range, so use 0-1.
319
+ first_edge, last_edge = 0, 1
320
+ else:
321
+ first_edge, last_edge = a.min(), a.max()
322
+ if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
323
+ raise ValueError(
324
+ "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
325
+
326
+ # expand empty range to avoid divide by zero
327
+ if first_edge == last_edge:
328
+ first_edge = first_edge - 0.5
329
+ last_edge = last_edge + 0.5
330
+
331
+ return first_edge, last_edge
332
+
333
+
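+ # Worked example of the expansion above: constant data such as
+ # a = [3, 3, 3] gives first_edge == last_edge == 3, so the edges are
+ # widened to (2.5, 3.5) and the histogram gets a single bin of width 1
+ # instead of a zero-width (divide-by-zero) range.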
334
+ def _unsigned_subtract(a, b):
335
+ """
336
+ Subtract two values where a >= b, and produce an unsigned result
337
+
338
+ This is needed when finding the difference between the upper and lower
339
+ bound of an int16 histogram
340
+ """
341
+ # coerce to a single type
342
+ signed_to_unsigned = {
343
+ np.byte: np.ubyte,
344
+ np.short: np.ushort,
345
+ np.intc: np.uintc,
346
+ np.int_: np.uint,
347
+ np.longlong: np.ulonglong
348
+ }
349
+ dt = np.result_type(a, b)
350
+ try:
351
+ dt = signed_to_unsigned[dt.type]
352
+ except KeyError:
353
+ return np.subtract(a, b, dtype=dt)
354
+ else:
355
+ # we know the inputs are integers, and we are deliberately casting
356
+ # signed to unsigned
357
+ return np.subtract(a, b, casting='unsafe', dtype=dt)
358
+
359
+
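+ # A small sketch of why this matters (illustrative values only): for
+ # x = np.array([-128, 127], dtype=np.int8), the naive x.max() - x.min()
+ # wraps around to int8(-1), while _unsigned_subtract(x.max(), x.min())
+ # returns uint8(255), the true peak-to-peak value that the bin-width
+ # estimators above divide by.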
360
+ def _get_bin_edges(a, bins, range, weights):
361
+ """
362
+ Computes the bins used internally by `histogram`.
363
+
364
+ Parameters
365
+ ==========
366
+ a : ndarray
367
+ Ravelled data array
368
+ bins, range
369
+ Forwarded arguments from `histogram`.
370
+ weights : ndarray, optional
371
+ Ravelled weights array, or None
372
+
373
+ Returns
374
+ =======
375
+ bin_edges : ndarray
376
+ Array of bin edges
377
+ uniform_bins : (Number, Number, int)
378
+ The lower bound, upper bound, and number of bins, used in the optimized
379
+ implementation of `histogram` that works on uniform bins.
380
+ """
381
+ # parse the overloaded bins argument
382
+ n_equal_bins = None
383
+ bin_edges = None
384
+
385
+ if isinstance(bins, str):
386
+ bin_name = bins
387
+ # if `bins` is a string for an automatic method,
388
+ # this will replace it with the number of bins calculated
389
+ if bin_name not in _hist_bin_selectors:
390
+ raise ValueError(
391
+ "{!r} is not a valid estimator for `bins`".format(bin_name))
392
+ if weights is not None:
393
+ raise TypeError("Automated estimation of the number of "
394
+ "bins is not supported for weighted data")
395
+
396
+ first_edge, last_edge = _get_outer_edges(a, range)
397
+
398
+ # truncate the range if needed
399
+ if range is not None:
400
+ keep = (a >= first_edge)
401
+ keep &= (a <= last_edge)
402
+ if not np.logical_and.reduce(keep):
403
+ a = a[keep]
404
+
405
+ if a.size == 0:
406
+ n_equal_bins = 1
407
+ else:
408
+ # Do not call selectors on empty arrays
409
+ width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
410
+ if width:
411
+ n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
412
+ else:
413
+ # Width can be zero for some estimators, e.g. FD when
414
+ # the IQR of the data is zero.
415
+ n_equal_bins = 1
416
+
417
+ elif np.ndim(bins) == 0:
418
+ try:
419
+ n_equal_bins = operator.index(bins)
420
+ except TypeError as e:
421
+ raise TypeError(
422
+ '`bins` must be an integer, a string, or an array') from e
423
+ if n_equal_bins < 1:
424
+ raise ValueError('`bins` must be positive, when an integer')
425
+
426
+ first_edge, last_edge = _get_outer_edges(a, range)
427
+
428
+ elif np.ndim(bins) == 1:
429
+ bin_edges = np.asarray(bins)
430
+ if np.any(bin_edges[:-1] > bin_edges[1:]):
431
+ raise ValueError(
432
+ '`bins` must increase monotonically, when an array')
433
+
434
+ else:
435
+ raise ValueError('`bins` must be 1d, when an array')
436
+
437
+ if n_equal_bins is not None:
438
+ # gh-10322 means that type resolution rules are dependent on array
439
+ # shapes. To avoid this causing problems, we pick a type now and stick
440
+ # with it throughout.
441
+ bin_type = np.result_type(first_edge, last_edge, a)
442
+ if np.issubdtype(bin_type, np.integer):
443
+ bin_type = np.result_type(bin_type, float)
444
+
445
+ # bin edges must be computed
446
+ bin_edges = np.linspace(
447
+ first_edge, last_edge, n_equal_bins + 1,
448
+ endpoint=True, dtype=bin_type)
449
+ return bin_edges, (first_edge, last_edge, n_equal_bins)
450
+ else:
451
+ return bin_edges, None
452
+
453
+
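+ # Worked example of the string-selector path above (illustrative values
+ # only): for a = np.array([0, 1, 2, 3]) and bins='sqrt', the estimated
+ # width is _ptp(a) / sqrt(4) == 1.5, so n_equal_bins = ceil(3 / 1.5) == 2
+ # and the returned edges are linspace(0, 3, 3) == [0., 1.5, 3.].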
454
+ def _search_sorted_inclusive(a, v):
455
+ """
456
+ Like `searchsorted`, but where the last item in `v` is placed on the right.
457
+
458
+ In the context of a histogram, this makes the last bin edge inclusive
459
+ """
460
+ return np.concatenate((
461
+ a.searchsorted(v[:-1], 'left'),
462
+ a.searchsorted(v[-1:], 'right')
463
+ ))
464
+
465
+
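+ # Worked example (illustrative values only): for sorted data
+ # a = [1, 2, 2, 3] and edges v = [1, 2, 3],
+ # a.searchsorted([1, 2], 'left') == [0, 1] and
+ # a.searchsorted([3], 'right') == [4], giving [0, 1, 4]; np.diff then
+ # yields the per-bin counts [1, 3], with the last bin [2, 3] closed on
+ # the right as required.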
466
+ def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
467
+ return (a, bins, weights)
468
+
469
+
470
+ @array_function_dispatch(_histogram_bin_edges_dispatcher)
471
+ def histogram_bin_edges(a, bins=10, range=None, weights=None):
472
+ r"""
473
+ Function to calculate only the edges of the bins used by the `histogram`
474
+ function.
475
+
476
+ Parameters
477
+ ----------
478
+ a : array_like
479
+ Input data. The histogram is computed over the flattened array.
480
+ bins : int or sequence of scalars or str, optional
481
+ If `bins` is an int, it defines the number of equal-width
482
+ bins in the given range (10, by default). If `bins` is a
483
+ sequence, it defines the bin edges, including the rightmost
484
+ edge, allowing for non-uniform bin widths.
485
+
486
+ If `bins` is a string from the list below, `histogram_bin_edges` will use
487
+ the method chosen to calculate the optimal bin width and
488
+ consequently the number of bins (see `Notes` for more detail on
489
+ the estimators) from the data that falls within the requested
490
+ range. While the bin width will be optimal for the actual data
491
+ in the range, the number of bins will be computed to fill the
492
+ entire range, including the empty portions. For visualisation,
493
+ using the 'auto' option is suggested. Weighted data is not
494
+ supported for automated bin size selection.
495
+
496
+ 'auto'
497
+ Maximum of the 'sturges' and 'fd' estimators. Provides good
498
+ all around performance.
499
+
500
+ 'fd' (Freedman Diaconis Estimator)
501
+ Robust (resilient to outliers) estimator that takes into
502
+ account data variability and data size.
503
+
504
+ 'doane'
505
+ An improved version of Sturges' estimator that works better
506
+ with non-normal datasets.
507
+
508
+ 'scott'
509
+ Less robust estimator that takes into account data variability
510
+ and data size.
511
+
512
+ 'stone'
513
+ Estimator based on leave-one-out cross-validation estimate of
514
+ the integrated squared error. Can be regarded as a generalization
515
+ of Scott's rule.
516
+
517
+ 'rice'
518
+ Estimator does not take variability into account, only data
519
+ size. Commonly overestimates number of bins required.
520
+
521
+ 'sturges'
522
+ R's default method, only accounts for data size. Only
523
+ optimal for gaussian data and underestimates number of bins
524
+ for large non-gaussian datasets.
525
+
526
+ 'sqrt'
527
+ Square root (of data size) estimator, used by Excel and
528
+ other programs for its speed and simplicity.
529
+
530
+ range : (float, float), optional
531
+ The lower and upper range of the bins. If not provided, range
532
+ is simply ``(a.min(), a.max())``. Values outside the range are
533
+ ignored. The first element of the range must be less than or
534
+ equal to the second. `range` affects the automatic bin
535
+ computation as well. While bin width is computed to be optimal
536
+ based on the actual data within `range`, the bin count will fill
537
+ the entire range including portions containing no data.
538
+
539
+ weights : array_like, optional
540
+ An array of weights, of the same shape as `a`. Each value in
541
+ `a` only contributes its associated weight towards the bin count
542
+ (instead of 1). This is currently not used by any of the bin estimators,
543
+ but may be in the future.
544
+
545
+ Returns
546
+ -------
547
+ bin_edges : array of dtype float
548
+ The edges to pass into `histogram`
549
+
550
+ See Also
551
+ --------
552
+ histogram
553
+
554
+ Notes
555
+ -----
556
+ The methods to estimate the optimal number of bins are well founded
557
+ in literature, and are inspired by the choices R provides for
558
+ histogram visualisation. Note that having the number of bins
559
+ proportional to :math:`n^{1/3}` is asymptotically optimal, which is
560
+ why it appears in most estimators. These are simply plug-in methods
561
+ that give good starting points for number of bins. In the equations
562
+ below, :math:`h` is the binwidth and :math:`n_h` is the number of
563
+ bins. All estimators that compute bin counts are recast to bin width
564
+ using the `ptp` of the data. The final bin count is obtained from
565
+ ``np.round(np.ceil(range / h))``. The final bin width is often less
566
+ than what is returned by the estimators below.
567
+
568
+ 'auto' (maximum of the 'sturges' and 'fd' estimators)
569
+ A compromise to get a good value. For small datasets the Sturges
570
+ value will usually be chosen, while larger datasets will usually
571
+ default to FD. Avoids the overly conservative behaviour of FD
572
+ and Sturges for small and large datasets respectively.
573
+ Switchover point is usually :math:`a.size \approx 1000`.
574
+
575
+ 'fd' (Freedman Diaconis Estimator)
576
+ .. math:: h = 2 \frac{IQR}{n^{1/3}}
577
+
578
+ The binwidth is proportional to the interquartile range (IQR)
579
+ and inversely proportional to cube root of a.size. Can be too
580
+ conservative for small datasets, but is quite good for large
581
+ datasets. The IQR is very robust to outliers.
582
+
583
+ 'scott'
584
+ .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}}
585
+
586
+ The binwidth is proportional to the standard deviation of the
587
+ data and inversely proportional to cube root of ``x.size``. Can
588
+ be too conservative for small datasets, but is quite good for
589
+ large datasets. The standard deviation is not very robust to
590
+ outliers. Values are very similar to the Freedman-Diaconis
591
+ estimator in the absence of outliers.
592
+
593
+ 'rice'
594
+ .. math:: n_h = 2n^{1/3}
595
+
596
+ The number of bins is only proportional to cube root of
597
+ ``a.size``. It tends to overestimate the number of bins and it
598
+ does not take into account data variability.
599
+
600
+ 'sturges'
601
+ .. math:: n_h = \log _{2}(n) + 1
602
+
603
+ The number of bins is the base 2 log of ``a.size``. This
604
+ estimator assumes normality of data and is too conservative for
605
+ larger, non-normal datasets. This is the default method in R's
606
+ ``hist`` method.
607
+
608
+ 'doane'
609
+ .. math:: n_h = 1 + \log_{2}(n) +
610
+ \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right)
611
+
612
+ g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right]
613
+
614
+ \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
615
+
616
+ An improved version of Sturges' formula that produces better
617
+ estimates for non-normal datasets. This estimator attempts to
618
+ account for the skew of the data.
619
+
620
+ 'sqrt'
621
+ .. math:: n_h = \sqrt n
622
+
623
+ The simplest and fastest estimator. Only takes into account the
624
+ data size.
625
+
626
+ Examples
627
+ --------
628
+ >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
629
+ >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
630
+ array([0. , 0.25, 0.5 , 0.75, 1. ])
631
+ >>> np.histogram_bin_edges(arr, bins=2)
632
+ array([0. , 2.5, 5. ])
633
+
634
+ For consistency with histogram, an array of pre-computed bins is
635
+ passed through unmodified:
636
+
637
+ >>> np.histogram_bin_edges(arr, [1, 2])
638
+ array([1, 2])
639
+
640
+ This function allows one set of bins to be computed, and reused across
641
+ multiple histograms:
642
+
643
+ >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
644
+ >>> shared_bins
645
+ array([0., 1., 2., 3., 4., 5.])
646
+
647
+ >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
648
+ >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
649
+ >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
650
+
651
+ >>> hist_0; hist_1
652
+ array([1, 1, 0, 1, 0])
653
+ array([2, 0, 1, 1, 2])
654
+
655
+ Which gives more easily comparable results than using separate bins for
656
+ each histogram:
657
+
658
+ >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
659
+ >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
660
+ >>> hist_0; hist_1
661
+ array([1, 1, 1])
662
+ array([2, 1, 1, 2])
663
+ >>> bins_0; bins_1
664
+ array([0., 1., 2., 3.])
665
+ array([0. , 1.25, 2.5 , 3.75, 5. ])
666
+
667
+ """
668
+ a, weights = _ravel_and_check_weights(a, weights)
669
+ bin_edges, _ = _get_bin_edges(a, bins, range, weights)
670
+ return bin_edges
671
+
672
+
673
+ def _histogram_dispatcher(
674
+ a, bins=None, range=None, density=None, weights=None):
675
+ return (a, bins, weights)
676
+
677
+
678
+ @array_function_dispatch(_histogram_dispatcher)
679
+ def histogram(a, bins=10, range=None, density=None, weights=None):
680
+ r"""
681
+ Compute the histogram of a dataset.
682
+
683
+ Parameters
684
+ ----------
685
+ a : array_like
686
+ Input data. The histogram is computed over the flattened array.
687
+ bins : int or sequence of scalars or str, optional
688
+ If `bins` is an int, it defines the number of equal-width
689
+ bins in the given range (10, by default). If `bins` is a
690
+ sequence, it defines a monotonically increasing array of bin edges,
691
+ including the rightmost edge, allowing for non-uniform bin widths.
692
+
693
+ .. versionadded:: 1.11.0
694
+
695
+ If `bins` is a string, it defines the method used to calculate the
696
+ optimal bin width, as defined by `histogram_bin_edges`.
697
+
698
+ range : (float, float), optional
699
+ The lower and upper range of the bins. If not provided, range
700
+ is simply ``(a.min(), a.max())``. Values outside the range are
701
+ ignored. The first element of the range must be less than or
702
+ equal to the second. `range` affects the automatic bin
703
+ computation as well. While bin width is computed to be optimal
704
+ based on the actual data within `range`, the bin count will fill
705
+ the entire range including portions containing no data.
706
+ weights : array_like, optional
707
+ An array of weights, of the same shape as `a`. Each value in
708
+ `a` only contributes its associated weight towards the bin count
709
+ (instead of 1). If `density` is True, the weights are
710
+ normalized, so that the integral of the density over the range
711
+ remains 1.
712
+ density : bool, optional
713
+ If ``False``, the result will contain the number of samples in
714
+ each bin. If ``True``, the result is the value of the
715
+ probability *density* function at the bin, normalized such that
716
+ the *integral* over the range is 1. Note that the sum of the
717
+ histogram values will not be equal to 1 unless bins of unity
718
+ width are chosen; it is not a probability *mass* function.
719
+
720
+ Returns
721
+ -------
722
+ hist : array
723
+ The values of the histogram. See `density` and `weights` for a
724
+ description of the possible semantics.
725
+ bin_edges : array of dtype float
726
+ Return the bin edges ``(length(hist)+1)``.
727
+
728
+
729
+ See Also
730
+ --------
731
+ histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
732
+
733
+ Notes
734
+ -----
735
+ All but the last (righthand-most) bin is half-open. In other words,
736
+ if `bins` is::
737
+
738
+ [1, 2, 3, 4]
739
+
740
+ then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
741
+ the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
742
+ *includes* 4.
743
+
744
+
745
+ Examples
746
+ --------
747
+ >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
748
+ (array([0, 2, 1]), array([0, 1, 2, 3]))
749
+ >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
750
+ (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
751
+ >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
752
+ (array([1, 4, 1]), array([0, 1, 2, 3]))
753
+
754
+ >>> a = np.arange(5)
755
+ >>> hist, bin_edges = np.histogram(a, density=True)
756
+ >>> hist
757
+ array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
758
+ >>> hist.sum()
759
+ 2.4999999999999996
760
+ >>> np.sum(hist * np.diff(bin_edges))
761
+ 1.0
762
+
763
+ .. versionadded:: 1.11.0
764
+
765
+ Automated Bin Selection Methods example, using 2 peak random data
766
+ with 2000 points:
767
+
768
+ >>> import matplotlib.pyplot as plt
769
+ >>> rng = np.random.RandomState(10) # deterministic random data
770
+ >>> a = np.hstack((rng.normal(size=1000),
771
+ ... rng.normal(loc=5, scale=2, size=1000)))
772
+ >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram
773
+ >>> plt.title("Histogram with 'auto' bins")
774
+ Text(0.5, 1.0, "Histogram with 'auto' bins")
775
+ >>> plt.show()
776
+
777
+ """
778
+ a, weights = _ravel_and_check_weights(a, weights)
779
+
780
+ bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
781
+
782
+ # Histogram is an integer or a float array depending on the weights.
783
+ if weights is None:
784
+ ntype = np.dtype(np.intp)
785
+ else:
786
+ ntype = weights.dtype
787
+
788
+ # We set a block size, as this allows us to iterate over chunks when
789
+ # computing histograms, to minimize memory usage.
790
+ BLOCK = 65536
791
+
792
+ # The fast path uses bincount, but that only works for certain types
793
+ # of weight
794
+ simple_weights = (
795
+ weights is None or
796
+ np.can_cast(weights.dtype, np.double) or
797
+ np.can_cast(weights.dtype, complex)
798
+ )
799
+
800
+ if uniform_bins is not None and simple_weights:
801
+ # Fast algorithm for equal bins
802
+ # We now convert values of a to bin indices, under the assumption of
803
+ # equal bin widths (which is valid here).
804
+ first_edge, last_edge, n_equal_bins = uniform_bins
805
+
806
+ # Initialize empty histogram
807
+ n = np.zeros(n_equal_bins, ntype)
808
+
809
+ # Pre-compute histogram scaling factor
810
+ norm_numerator = n_equal_bins
811
+ norm_denom = _unsigned_subtract(last_edge, first_edge)
812
+
813
+ # We iterate over blocks here for two reasons: the first is that for
814
+ # large arrays, it is actually faster (for example for a 10^8 array it
815
+ # is 2x as fast) and it results in a memory footprint 3x lower in the
816
+ # limit of large arrays.
817
+ for i in _range(0, len(a), BLOCK):
818
+ tmp_a = a[i:i+BLOCK]
819
+ if weights is None:
820
+ tmp_w = None
821
+ else:
822
+ tmp_w = weights[i:i + BLOCK]
823
+
824
+ # Only include values in the right range
825
+ keep = (tmp_a >= first_edge)
826
+ keep &= (tmp_a <= last_edge)
827
+ if not np.logical_and.reduce(keep):
828
+ tmp_a = tmp_a[keep]
829
+ if tmp_w is not None:
830
+ tmp_w = tmp_w[keep]
831
+
832
+ # This cast ensures no type promotions occur below, which gh-10322
833
+ # make unpredictable. Getting it wrong leads to precision errors
834
+ # like gh-8123.
835
+ tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
836
+
837
+ # Compute the bin indices, and for values that lie exactly on
838
+ # last_edge we need to subtract one
839
+ f_indices = ((_unsigned_subtract(tmp_a, first_edge) / norm_denom)
840
+ * norm_numerator)
841
+ indices = f_indices.astype(np.intp)
842
+ indices[indices == n_equal_bins] -= 1
843
+
844
+ # The index computation is not guaranteed to give exactly
845
+ # consistent results within ~1 ULP of the bin edges.
846
+ decrement = tmp_a < bin_edges[indices]
847
+ indices[decrement] -= 1
848
+ # The last bin includes the right edge. The other bins do not.
849
+ increment = ((tmp_a >= bin_edges[indices + 1])
850
+ & (indices != n_equal_bins - 1))
851
+ indices[increment] += 1
852
+
853
+ # We now compute the histogram using bincount
854
+ if ntype.kind == 'c':
855
+ n.real += np.bincount(indices, weights=tmp_w.real,
856
+ minlength=n_equal_bins)
857
+ n.imag += np.bincount(indices, weights=tmp_w.imag,
858
+ minlength=n_equal_bins)
859
+ else:
860
+ n += np.bincount(indices, weights=tmp_w,
861
+ minlength=n_equal_bins).astype(ntype)
862
+ else:
863
+ # Compute via cumulative histogram
864
+ cum_n = np.zeros(bin_edges.shape, ntype)
865
+ if weights is None:
866
+ for i in _range(0, len(a), BLOCK):
867
+ sa = np.sort(a[i:i+BLOCK])
868
+ cum_n += _search_sorted_inclusive(sa, bin_edges)
869
+ else:
870
+ zero = np.zeros(1, dtype=ntype)
871
+ for i in _range(0, len(a), BLOCK):
872
+ tmp_a = a[i:i+BLOCK]
873
+ tmp_w = weights[i:i+BLOCK]
874
+ sorting_index = np.argsort(tmp_a)
875
+ sa = tmp_a[sorting_index]
876
+ sw = tmp_w[sorting_index]
877
+ cw = np.concatenate((zero, sw.cumsum()))
878
+ bin_index = _search_sorted_inclusive(sa, bin_edges)
879
+ cum_n += cw[bin_index]
880
+
881
+ n = np.diff(cum_n)
882
+
883
+ if density:
884
+ db = np.array(np.diff(bin_edges), float)
885
+ return n/db/n.sum(), bin_edges
886
+
887
+ return n, bin_edges
888
+
889
+
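+ # Sketch of the fast path above (illustrative values only): with uniform
+ # bins no edge search is needed, since
+ # index = floor((x - first_edge) / (last_edge - first_edge) * n_bins).
+ # For edges spanning [0, 10] with 5 bins, x = 9.9 maps to index 4, while
+ # x = 10.0 first maps to 5 and is then clamped into the closed last bin 4.
+ # The decrement/increment fixups re-check the computed index against the
+ # actual edges because the division is only accurate to about 1 ULP.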
890
+ def _histogramdd_dispatcher(sample, bins=None, range=None, density=None,
891
+ weights=None):
892
+ if hasattr(sample, 'shape'): # same condition as used in histogramdd
893
+ yield sample
894
+ else:
895
+ yield from sample
896
+ with contextlib.suppress(TypeError):
897
+ yield from bins
898
+ yield weights
899
+
900
+
901
+ @array_function_dispatch(_histogramdd_dispatcher)
902
+ def histogramdd(sample, bins=10, range=None, density=None, weights=None):
903
+ """
904
+ Compute the multidimensional histogram of some data.
905
+
906
+ Parameters
907
+ ----------
908
+ sample : (N, D) array, or (N, D) array_like
909
+ The data to be histogrammed.
910
+
911
+ Note the unusual interpretation of sample when an array_like:
912
+
913
+ * When an array, each row is a coordinate in a D-dimensional space -
914
+ such as ``histogramdd(np.array([p1, p2, p3]))``.
915
+ * When an array_like, each element is the list of values for single
916
+ coordinate - such as ``histogramdd((X, Y, Z))``.
917
+
918
+ The first form should be preferred.
919
+
920
+ bins : sequence or int, optional
921
+ The bin specification:
922
+
923
+ * A sequence of arrays describing the monotonically increasing bin
924
+ edges along each dimension.
925
+ * The number of bins for each dimension (nx, ny, ... =bins)
926
+ * The number of bins for all dimensions (nx=ny=...=bins).
927
+
928
+ range : sequence, optional
929
+ A sequence of length D, each an optional (lower, upper) tuple giving
930
+ the outer bin edges to be used if the edges are not given explicitly in
931
+ `bins`.
932
+ An entry of None in the sequence results in the minimum and maximum
933
+ values being used for the corresponding dimension.
934
+ The default, None, is equivalent to passing a tuple of D None values.
935
+ density : bool, optional
936
+ If False, the default, returns the number of samples in each bin.
937
+ If True, returns the probability *density* function at the bin,
938
+ ``bin_count / sample_count / bin_volume``.
939
+ weights : (N,) array_like, optional
940
+ An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
941
+ Weights are normalized to 1 if density is True. If density is False,
942
+ the values of the returned histogram are equal to the sum of the
943
+ weights belonging to the samples falling into each bin.
944
+
945
+ Returns
946
+ -------
947
+ H : ndarray
948
+ The multidimensional histogram of sample x. See density and weights
949
+ for the different possible semantics.
950
+ edges : list
951
+ A list of D arrays describing the bin edges for each dimension.
952
+
953
+ See Also
954
+ --------
955
+ histogram: 1-D histogram
956
+ histogram2d: 2-D histogram
957
+
958
+ Examples
959
+ --------
960
+ >>> r = np.random.randn(100,3)
961
+ >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
962
+ >>> H.shape, edges[0].size, edges[1].size, edges[2].size
963
+ ((5, 8, 4), 6, 9, 5)
964
+
965
+ """
966
+
967
+ try:
968
+ # Sample is an ND-array.
969
+ N, D = sample.shape
970
+ except (AttributeError, ValueError):
971
+ # Sample is a sequence of 1D arrays.
972
+ sample = np.atleast_2d(sample).T
973
+ N, D = sample.shape
974
+
975
+ nbin = np.empty(D, np.intp)
976
+ edges = D*[None]
977
+ dedges = D*[None]
978
+ if weights is not None:
979
+ weights = np.asarray(weights)
980
+
981
+ try:
982
+ M = len(bins)
983
+ if M != D:
984
+ raise ValueError(
985
+ 'The dimension of bins must be equal to the dimension of the '
986
+ 'sample x.')
987
+ except TypeError:
988
+ # bins is an integer
989
+ bins = D*[bins]
990
+
991
+ # normalize the range argument
992
+ if range is None:
993
+ range = (None,) * D
994
+ elif len(range) != D:
995
+ raise ValueError('range argument must have one entry per dimension')
996
+
997
+ # Create edge arrays
998
+ for i in _range(D):
999
+ if np.ndim(bins[i]) == 0:
1000
+ if bins[i] < 1:
1001
+ raise ValueError(
1002
+ '`bins[{}]` must be positive, when an integer'.format(i))
1003
+ smin, smax = _get_outer_edges(sample[:,i], range[i])
1004
+ try:
1005
+ n = operator.index(bins[i])
1006
+
1007
+ except TypeError as e:
1008
+ raise TypeError(
1009
+ "`bins[{}]` must be an integer, when a scalar".format(i)
1010
+ ) from e
1011
+
1012
+ edges[i] = np.linspace(smin, smax, n + 1)
1013
+ elif np.ndim(bins[i]) == 1:
1014
+ edges[i] = np.asarray(bins[i])
1015
+ if np.any(edges[i][:-1] > edges[i][1:]):
1016
+ raise ValueError(
1017
+ '`bins[{}]` must be monotonically increasing, when an array'
1018
+ .format(i))
1019
+ else:
1020
+ raise ValueError(
1021
+ '`bins[{}]` must be a scalar or 1d array'.format(i))
1022
+
1023
+ nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
1024
+ dedges[i] = np.diff(edges[i])
1025
+
1026
+ # Compute the bin number each sample falls into.
1027
+ Ncount = tuple(
1028
+ # avoid np.digitize to work around gh-11022
1029
+ np.searchsorted(edges[i], sample[:, i], side='right')
1030
+ for i in _range(D)
1031
+ )
1032
+
1033
+ # Using digitize, values that fall on an edge are put in the right bin.
1034
+ # For the rightmost bin, we want values equal to the right edge to be
1035
+ # counted in the last bin, and not as an outlier.
1036
+ for i in _range(D):
1037
+ # Find which points are on the rightmost edge.
1038
+ on_edge = (sample[:, i] == edges[i][-1])
1039
+ # Shift these points one bin to the left.
1040
+ Ncount[i][on_edge] -= 1
1041
+
1042
+ # Compute the sample indices in the flattened histogram matrix.
1043
+ # This raises an error if the array is too large.
1044
+ xy = np.ravel_multi_index(Ncount, nbin)
1045
+
1046
+ # Compute the number of repetitions in xy and assign it to the
1047
+ # flattened histmat.
1048
+ hist = np.bincount(xy, weights, minlength=nbin.prod())
1049
+
1050
+ # Shape into a proper matrix
1051
+ hist = hist.reshape(nbin)
1052
+
1053
+ # This preserves the (bad) behavior observed in gh-7845, for now.
1054
+ hist = hist.astype(float, casting='safe')
1055
+
1056
+ # Remove outliers (indices 0 and -1 for each dimension).
1057
+ core = D*(slice(1, -1),)
1058
+ hist = hist[core]
1059
+
1060
+ if density:
1061
+ # calculate the probability density function
1062
+ s = hist.sum()
1063
+ for i in _range(D):
1064
+ shape = np.ones(D, int)
1065
+ shape[i] = nbin[i] - 2
1066
+ hist = hist / dedges[i].reshape(shape)
1067
+ hist /= s
1068
+
1069
+ if (hist.shape != nbin - 2).any():
1070
+ raise RuntimeError(
1071
+ "Internal Shape Error")
1072
+ return hist, edges
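
The `on_edge` shift above is what makes a sample lying exactly on the outermost bin edge land in the final bin instead of being discarded as an outlier. A minimal sketch of that behavior (illustrative usage, not part of the committed file):

    import numpy as np

    sample = np.array([[0.25, 0.25],
                       [1.00, 1.00]])  # second point sits on the right edges
    H, edges = np.histogramdd(sample, bins=(2, 2), range=[(0, 1), (0, 1)])
    assert H[0, 0] == 1 and H[1, 1] == 1  # both points were binned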
venv/lib/python3.10/site-packages/numpy/lib/index_tricks.pyi ADDED
@@ -0,0 +1,162 @@
1
+ from collections.abc import Sequence
2
+ from typing import (
3
+ Any,
4
+ TypeVar,
5
+ Generic,
6
+ overload,
7
+ Literal,
8
+ SupportsIndex,
9
+ )
10
+
11
+ from numpy import (
12
+ # Circumvent a naming conflict with `AxisConcatenator.matrix`
13
+ matrix as _Matrix,
14
+ ndenumerate as ndenumerate,
15
+ ndindex as ndindex,
16
+ ndarray,
17
+ dtype,
18
+ integer,
19
+ str_,
20
+ bytes_,
21
+ bool_,
22
+ int_,
23
+ float_,
24
+ complex_,
25
+ intp,
26
+ _OrderCF,
27
+ _ModeKind,
28
+ )
29
+ from numpy._typing import (
30
+ # Arrays
31
+ ArrayLike,
32
+ _NestedSequence,
33
+ _FiniteNestedSequence,
34
+ NDArray,
35
+ _ArrayLikeInt,
36
+
37
+ # DTypes
38
+ DTypeLike,
39
+ _SupportsDType,
40
+
41
+ # Shapes
42
+ _ShapeLike,
43
+ )
44
+
45
+ from numpy.core.multiarray import (
46
+ unravel_index as unravel_index,
47
+ ravel_multi_index as ravel_multi_index,
48
+ )
49
+
50
+ _T = TypeVar("_T")
51
+ _DType = TypeVar("_DType", bound=dtype[Any])
52
+ _BoolType = TypeVar("_BoolType", Literal[True], Literal[False])
53
+ _TupType = TypeVar("_TupType", bound=tuple[Any, ...])
54
+ _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
55
+
56
+ __all__: list[str]
57
+
58
+ @overload
59
+ def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[Any, _DType], ...]: ...
60
+ @overload
61
+ def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ...
62
+ @overload
63
+ def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]: ...
64
+ @overload
65
+ def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[bool_], ...]: ...
66
+ @overload
67
+ def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]: ...
68
+ @overload
69
+ def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float_], ...]: ...
70
+ @overload
71
+ def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex_], ...]: ...
72
+
73
+ class nd_grid(Generic[_BoolType]):
74
+ sparse: _BoolType
75
+ def __init__(self, sparse: _BoolType = ...) -> None: ...
76
+ @overload
77
+ def __getitem__(
78
+ self: nd_grid[Literal[False]],
79
+ key: slice | Sequence[slice],
80
+ ) -> NDArray[Any]: ...
81
+ @overload
82
+ def __getitem__(
83
+ self: nd_grid[Literal[True]],
84
+ key: slice | Sequence[slice],
85
+ ) -> list[NDArray[Any]]: ...
86
+
87
+ class MGridClass(nd_grid[Literal[False]]):
88
+ def __init__(self) -> None: ...
89
+
90
+ mgrid: MGridClass
91
+
92
+ class OGridClass(nd_grid[Literal[True]]):
93
+ def __init__(self) -> None: ...
94
+
95
+ ogrid: OGridClass
96
+
97
+ class AxisConcatenator:
98
+ axis: int
99
+ matrix: bool
100
+ ndmin: int
101
+ trans1d: int
102
+ def __init__(
103
+ self,
104
+ axis: int = ...,
105
+ matrix: bool = ...,
106
+ ndmin: int = ...,
107
+ trans1d: int = ...,
108
+ ) -> None: ...
109
+ @staticmethod
110
+ @overload
111
+ def concatenate( # type: ignore[misc]
112
+ *a: ArrayLike, axis: SupportsIndex = ..., out: None = ...
113
+ ) -> NDArray[Any]: ...
114
+ @staticmethod
115
+ @overload
116
+ def concatenate(
117
+ *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ...
118
+ ) -> _ArrayType: ...
119
+ @staticmethod
120
+ def makemat(
121
+ data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ...
122
+ ) -> _Matrix[Any, Any]: ...
123
+
124
+ # TODO: Sort out this `__getitem__` method
125
+ def __getitem__(self, key: Any) -> Any: ...
126
+
127
+ class RClass(AxisConcatenator):
128
+ axis: Literal[0]
129
+ matrix: Literal[False]
130
+ ndmin: Literal[1]
131
+ trans1d: Literal[-1]
132
+ def __init__(self) -> None: ...
133
+
134
+ r_: RClass
135
+
136
+ class CClass(AxisConcatenator):
137
+ axis: Literal[-1]
138
+ matrix: Literal[False]
139
+ ndmin: Literal[2]
140
+ trans1d: Literal[0]
141
+ def __init__(self) -> None: ...
142
+
143
+ c_: CClass
144
+
145
+ class IndexExpression(Generic[_BoolType]):
146
+ maketuple: _BoolType
147
+ def __init__(self, maketuple: _BoolType) -> None: ...
148
+ @overload
149
+ def __getitem__(self, item: _TupType) -> _TupType: ... # type: ignore[misc]
150
+ @overload
151
+ def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]: ...
152
+ @overload
153
+ def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ...
154
+
155
+ index_exp: IndexExpression[Literal[True]]
156
+ s_: IndexExpression[Literal[False]]
157
+
158
+ def fill_diagonal(a: ndarray[Any, Any], val: Any, wrap: bool = ...) -> None: ...
159
+ def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]: ...
160
+ def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]: ...
161
+
162
+ # NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex`
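
The stub above only declares types; at runtime, the objects it describes behave as in this short sketch (illustrative usage of the public API):

    import numpy as np

    a = np.arange(12).reshape(3, 4)
    rows, cols = np.ix_([0, 2], [1, 3])   # open-mesh index arrays (see the ix_ overloads)
    assert a[rows, cols].shape == (2, 2)

    assert np.r_[a, a].shape == (6, 4)    # RClass concatenates along axis 0
    assert np.c_[np.array([1, 2]), np.array([3, 4])].shape == (2, 2)  # CClass: columns

    assert np.s_[1:3] == slice(1, 3)            # IndexExpression, maketuple=False
    assert np.index_exp[1:3] == (slice(1, 3),)  # maketuple=True wraps in a tuple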
venv/lib/python3.10/site-packages/numpy/lib/mixins.py ADDED
@@ -0,0 +1,177 @@
1
+ """Mixin classes for custom array types that don't inherit from ndarray."""
2
+ from numpy.core import umath as um
3
+
4
+
5
+ __all__ = ['NDArrayOperatorsMixin']
6
+
7
+
8
+ def _disables_array_ufunc(obj):
9
+ """True when __array_ufunc__ is set to None."""
10
+ try:
11
+ return obj.__array_ufunc__ is None
12
+ except AttributeError:
13
+ return False
14
+
15
+
16
+ def _binary_method(ufunc, name):
17
+ """Implement a forward binary method with a ufunc, e.g., __add__."""
18
+ def func(self, other):
19
+ if _disables_array_ufunc(other):
20
+ return NotImplemented
21
+ return ufunc(self, other)
22
+ func.__name__ = '__{}__'.format(name)
23
+ return func
24
+
25
+
26
+ def _reflected_binary_method(ufunc, name):
27
+ """Implement a reflected binary method with a ufunc, e.g., __radd__."""
28
+ def func(self, other):
29
+ if _disables_array_ufunc(other):
30
+ return NotImplemented
31
+ return ufunc(other, self)
32
+ func.__name__ = '__r{}__'.format(name)
33
+ return func
34
+
35
+
36
+ def _inplace_binary_method(ufunc, name):
37
+ """Implement an in-place binary method with a ufunc, e.g., __iadd__."""
38
+ def func(self, other):
39
+ return ufunc(self, other, out=(self,))
40
+ func.__name__ = '__i{}__'.format(name)
41
+ return func
42
+
43
+
44
+ def _numeric_methods(ufunc, name):
45
+ """Implement forward, reflected and inplace binary methods with a ufunc."""
46
+ return (_binary_method(ufunc, name),
47
+ _reflected_binary_method(ufunc, name),
48
+ _inplace_binary_method(ufunc, name))
49
+
50
+
51
+ def _unary_method(ufunc, name):
52
+ """Implement a unary special method with a ufunc."""
53
+ def func(self):
54
+ return ufunc(self)
55
+ func.__name__ = '__{}__'.format(name)
56
+ return func
57
+
58
+
59
+ class NDArrayOperatorsMixin:
60
+ """Mixin defining all operator special methods using __array_ufunc__.
61
+
62
+ This class implements the special methods for almost all of Python's
63
+ builtin operators defined in the `operator` module, including comparisons
64
+ (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by
65
+ deferring to the ``__array_ufunc__`` method, which subclasses must
66
+ implement.
67
+
68
+ It is useful for writing classes that do not inherit from `numpy.ndarray`,
69
+ but that should support arithmetic and numpy universal functions like
70
+ arrays as described in `A Mechanism for Overriding Ufuncs
71
+ <https://numpy.org/neps/nep-0013-ufunc-overrides.html>`_.
72
+
73
+ As a trivial example, consider this implementation of an ``ArrayLike``
74
+ class that simply wraps a NumPy array and ensures that the result of any
75
+ arithmetic operation is also an ``ArrayLike`` object::
76
+
77
+ class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
78
+ def __init__(self, value):
79
+ self.value = np.asarray(value)
80
+
81
+ # One might also consider adding the built-in list type to this
82
+ # list, to support operations like np.add(array_like, list)
83
+ _HANDLED_TYPES = (np.ndarray, numbers.Number)
84
+
85
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
86
+ out = kwargs.get('out', ())
87
+ for x in inputs + out:
88
+ # Only support operations with instances of _HANDLED_TYPES.
89
+ # Use ArrayLike instead of type(self) for isinstance to
90
+ # allow subclasses that don't override __array_ufunc__ to
91
+ # handle ArrayLike objects.
92
+ if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
93
+ return NotImplemented
94
+
95
+ # Defer to the implementation of the ufunc on unwrapped values.
96
+ inputs = tuple(x.value if isinstance(x, ArrayLike) else x
97
+ for x in inputs)
98
+ if out:
99
+ kwargs['out'] = tuple(
100
+ x.value if isinstance(x, ArrayLike) else x
101
+ for x in out)
102
+ result = getattr(ufunc, method)(*inputs, **kwargs)
103
+
104
+ if type(result) is tuple:
105
+ # multiple return values
106
+ return tuple(type(self)(x) for x in result)
107
+ elif method == 'at':
108
+ # no return value
109
+ return None
110
+ else:
111
+ # one return value
112
+ return type(self)(result)
113
+
114
+ def __repr__(self):
115
+ return '%s(%r)' % (type(self).__name__, self.value)
116
+
117
+ In interactions between ``ArrayLike`` objects and numbers or numpy arrays,
118
+ the result is always another ``ArrayLike``:
119
+
120
+ >>> x = ArrayLike([1, 2, 3])
121
+ >>> x - 1
122
+ ArrayLike(array([0, 1, 2]))
123
+ >>> 1 - x
124
+ ArrayLike(array([ 0, -1, -2]))
125
+ >>> np.arange(3) - x
126
+ ArrayLike(array([-1, -1, -1]))
127
+ >>> x - np.arange(3)
128
+ ArrayLike(array([1, 1, 1]))
129
+
130
+ Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations
131
+ with arbitrary, unrecognized types. This ensures that interactions with
132
+ ArrayLike preserve a well-defined casting hierarchy.
133
+
134
+ .. versionadded:: 1.13
135
+ """
136
+ __slots__ = ()
137
+ # Like np.ndarray, this mixin class implements "Option 1" from the ufunc
138
+ # overrides NEP.
139
+
140
+ # comparisons don't have reflected and in-place versions
141
+ __lt__ = _binary_method(um.less, 'lt')
142
+ __le__ = _binary_method(um.less_equal, 'le')
143
+ __eq__ = _binary_method(um.equal, 'eq')
144
+ __ne__ = _binary_method(um.not_equal, 'ne')
145
+ __gt__ = _binary_method(um.greater, 'gt')
146
+ __ge__ = _binary_method(um.greater_equal, 'ge')
147
+
148
+ # numeric methods
149
+ __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')
150
+ __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')
151
+ __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
152
+ __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
153
+ um.matmul, 'matmul')
154
+ # Python 3 does not use __div__, __rdiv__, or __idiv__
155
+ __truediv__, __rtruediv__, __itruediv__ = _numeric_methods(
156
+ um.true_divide, 'truediv')
157
+ __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(
158
+ um.floor_divide, 'floordiv')
159
+ __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod')
160
+ __divmod__ = _binary_method(um.divmod, 'divmod')
161
+ __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod')
162
+ # __idivmod__ does not exist
163
+ # TODO: handle the optional third argument for __pow__?
164
+ __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow')
165
+ __lshift__, __rlshift__, __ilshift__ = _numeric_methods(
166
+ um.left_shift, 'lshift')
167
+ __rshift__, __rrshift__, __irshift__ = _numeric_methods(
168
+ um.right_shift, 'rshift')
169
+ __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and')
170
+ __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor')
171
+ __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or')
172
+
173
+ # unary methods
174
+ __neg__ = _unary_method(um.negative, 'neg')
175
+ __pos__ = _unary_method(um.positive, 'pos')
176
+ __abs__ = _unary_method(um.absolute, 'abs')
177
+ __invert__ = _unary_method(um.invert, 'invert')
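
A compressed variant of the docstring's ``ArrayLike`` recipe: once ``__array_ufunc__`` is defined, every operator assigned above comes for free from the mixin (a minimal sketch, not part of the committed file):

    import numpy as np

    class Wrapped(np.lib.mixins.NDArrayOperatorsMixin):
        def __init__(self, value):
            self.value = np.asarray(value)

        def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
            # unwrap, apply the ufunc, re-wrap the result
            inputs = tuple(x.value if isinstance(x, Wrapped) else x
                           for x in inputs)
            return Wrapped(getattr(ufunc, method)(*inputs, **kwargs))

    w = Wrapped([1, 2, 3])
    print((w + 1).value)   # [2 3 4]    -- __add__ supplied by the mixin
    print((-w).value)      # [-1 -2 -3] -- __neg__ too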
venv/lib/python3.10/site-packages/numpy/lib/nanfunctions.pyi ADDED
@@ -0,0 +1,38 @@
1
+ from numpy.core.fromnumeric import (
2
+ amin,
3
+ amax,
4
+ argmin,
5
+ argmax,
6
+ sum,
7
+ prod,
8
+ cumsum,
9
+ cumprod,
10
+ mean,
11
+ var,
12
+ std
13
+ )
14
+
15
+ from numpy.lib.function_base import (
16
+ median,
17
+ percentile,
18
+ quantile,
19
+ )
20
+
21
+ __all__: list[str]
22
+
23
+ # NOTE: In reality these functions are not aliases but distinct functions
24
+ # with identical signatures.
25
+ nanmin = amin
26
+ nanmax = amax
27
+ nanargmin = argmin
28
+ nanargmax = argmax
29
+ nansum = sum
30
+ nanprod = prod
31
+ nancumsum = cumsum
32
+ nancumprod = cumprod
33
+ nanmean = mean
34
+ nanvar = var
35
+ nanstd = std
36
+ nanmedian = median
37
+ nanpercentile = percentile
38
+ nanquantile = quantile
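
At runtime these functions differ from the plain reductions only in how they treat NaNs, which the aliasing above deliberately glosses over:

    import numpy as np

    x = np.array([1.0, np.nan, 3.0])
    print(np.mean(x))     # nan -- the plain reduction propagates NaN
    print(np.nanmean(x))  # 2.0 -- the nan-variant ignores it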
venv/lib/python3.10/site-packages/numpy/lib/recfunctions.py ADDED
@@ -0,0 +1,1673 @@
1
+ """
2
+ Collection of utilities to manipulate structured arrays.
3
+
4
+ Most of these functions were initially implemented by John Hunter for
5
+ matplotlib. They have been rewritten and extended for convenience.
6
+
7
+ """
8
+ import itertools
9
+ import numpy as np
10
+ import numpy.ma as ma
11
+ from numpy import ndarray, recarray
12
+ from numpy.ma import MaskedArray
13
+ from numpy.ma.mrecords import MaskedRecords
14
+ from numpy.core.overrides import array_function_dispatch
15
+ from numpy.lib._iotools import _is_string_like
16
+
17
+ _check_fill_value = np.ma.core._check_fill_value
18
+
19
+
20
+ __all__ = [
21
+ 'append_fields', 'apply_along_fields', 'assign_fields_by_name',
22
+ 'drop_fields', 'find_duplicates', 'flatten_descr',
23
+ 'get_fieldstructure', 'get_names', 'get_names_flat',
24
+ 'join_by', 'merge_arrays', 'rec_append_fields',
25
+ 'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
26
+ 'rename_fields', 'repack_fields', 'require_fields',
27
+ 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
28
+ ]
29
+
30
+
31
+ def _recursive_fill_fields_dispatcher(input, output):
32
+ return (input, output)
33
+
34
+
35
+ @array_function_dispatch(_recursive_fill_fields_dispatcher)
36
+ def recursive_fill_fields(input, output):
37
+ """
38
+ Fills fields from output with fields from input,
39
+ with support for nested structures.
40
+
41
+ Parameters
42
+ ----------
43
+ input : ndarray
44
+ Input array.
45
+ output : ndarray
46
+ Output array.
47
+
48
+ Notes
49
+ -----
50
+ * `output` should be at least the same size as `input`
51
+
52
+ Examples
53
+ --------
54
+ >>> from numpy.lib import recfunctions as rfn
55
+ >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
56
+ >>> b = np.zeros((3,), dtype=a.dtype)
57
+ >>> rfn.recursive_fill_fields(a, b)
58
+ array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
59
+
60
+ """
61
+ newdtype = output.dtype
62
+ for field in newdtype.names:
63
+ try:
64
+ current = input[field]
65
+ except ValueError:
66
+ continue
67
+ if current.dtype.names is not None:
68
+ recursive_fill_fields(current, output[field])
69
+ else:
70
+ output[field][:len(current)] = current
71
+ return output
72
+
73
+
74
+ def _get_fieldspec(dtype):
75
+ """
76
+ Produce a list of name/dtype pairs corresponding to the dtype fields
77
+
78
+ Similar to dtype.descr, but the second item of each tuple is a dtype, not a
79
+ string. As a result, this handles subarray dtypes
80
+
81
+ Can be passed to the dtype constructor to reconstruct the dtype, noting that
82
+ this (deliberately) discards field offsets.
83
+
84
+ Examples
85
+ --------
86
+ >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
87
+ >>> dt.descr
88
+ [(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
89
+ >>> _get_fieldspec(dt)
90
+ [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
91
+
92
+ """
93
+ if dtype.names is None:
94
+ # .descr returns a nameless field, so we should too
95
+ return [('', dtype)]
96
+ else:
97
+ fields = ((name, dtype.fields[name]) for name in dtype.names)
98
+ # keep any titles, if present
99
+ return [
100
+ (name if len(f) == 2 else (f[2], name), f[0])
101
+ for name, f in fields
102
+ ]
103
+
104
+
105
+ def get_names(adtype):
106
+ """
107
+ Returns the field names of the input datatype as a tuple. Input datatype
108
+ must have fields, otherwise an error is raised.
109
+
110
+ Parameters
111
+ ----------
112
+ adtype : dtype
113
+ Input datatype
114
+
115
+ Examples
116
+ --------
117
+ >>> from numpy.lib import recfunctions as rfn
118
+ >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)
119
+ ('A',)
120
+ >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype)
121
+ ('A', 'B')
122
+ >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
123
+ >>> rfn.get_names(adtype)
124
+ ('a', ('b', ('ba', 'bb')))
125
+ """
126
+ listnames = []
127
+ names = adtype.names
128
+ for name in names:
129
+ current = adtype[name]
130
+ if current.names is not None:
131
+ listnames.append((name, tuple(get_names(current))))
132
+ else:
133
+ listnames.append(name)
134
+ return tuple(listnames)
135
+
136
+
137
+ def get_names_flat(adtype):
138
+ """
139
+ Returns the field names of the input datatype as a tuple. Input datatype
140
+ must have fields, otherwise an error is raised.
141
+ Nested structures are flattened beforehand.
142
+
143
+ Parameters
144
+ ----------
145
+ adtype : dtype
146
+ Input datatype
147
+
148
+ Examples
149
+ --------
150
+ >>> from numpy.lib import recfunctions as rfn
151
+ >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
152
+ False
153
+ >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)
154
+ ('A', 'B')
155
+ >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
156
+ >>> rfn.get_names_flat(adtype)
157
+ ('a', 'b', 'ba', 'bb')
158
+ """
159
+ listnames = []
160
+ names = adtype.names
161
+ for name in names:
162
+ listnames.append(name)
163
+ current = adtype[name]
164
+ if current.names is not None:
165
+ listnames.extend(get_names_flat(current))
166
+ return tuple(listnames)
167
+
168
+
169
+ def flatten_descr(ndtype):
170
+ """
171
+ Flatten a structured data-type description.
172
+
173
+ Examples
174
+ --------
175
+ >>> from numpy.lib import recfunctions as rfn
176
+ >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
177
+ >>> rfn.flatten_descr(ndtype)
178
+ (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
179
+
180
+ """
181
+ names = ndtype.names
182
+ if names is None:
183
+ return (('', ndtype),)
184
+ else:
185
+ descr = []
186
+ for field in names:
187
+ (typ, _) = ndtype.fields[field]
188
+ if typ.names is not None:
189
+ descr.extend(flatten_descr(typ))
190
+ else:
191
+ descr.append((field, typ))
192
+ return tuple(descr)
193
+
194
+
195
+ def _zip_dtype(seqarrays, flatten=False):
196
+ newdtype = []
197
+ if flatten:
198
+ for a in seqarrays:
199
+ newdtype.extend(flatten_descr(a.dtype))
200
+ else:
201
+ for a in seqarrays:
202
+ current = a.dtype
203
+ if current.names is not None and len(current.names) == 1:
204
+ # special case - dtypes of 1 field are flattened
205
+ newdtype.extend(_get_fieldspec(current))
206
+ else:
207
+ newdtype.append(('', current))
208
+ return np.dtype(newdtype)
209
+
210
+
211
+ def _zip_descr(seqarrays, flatten=False):
212
+ """
213
+ Combine the dtype description of a series of arrays.
214
+
215
+ Parameters
216
+ ----------
217
+ seqarrays : sequence of arrays
218
+ Sequence of arrays
219
+ flatten : {boolean}, optional
220
+ Whether to collapse nested descriptions.
221
+ """
222
+ return _zip_dtype(seqarrays, flatten=flatten).descr
223
+
224
+
225
+ def get_fieldstructure(adtype, lastname=None, parents=None,):
226
+ """
227
+ Returns a dictionary with fields indexing lists of their parent fields.
228
+
229
+ This function is used to simplify access to fields nested in other fields.
230
+
231
+ Parameters
232
+ ----------
233
+ adtype : np.dtype
234
+ Input datatype
235
+ lastname : optional
236
+ Last processed field name (used internally during recursion).
237
+ parents : dictionary
238
+ Dictionary of parent fields (used internally during recursion).
239
+
240
+ Examples
241
+ --------
242
+ >>> from numpy.lib import recfunctions as rfn
243
+ >>> ndtype = np.dtype([('A', int),
244
+ ... ('B', [('BA', int),
245
+ ... ('BB', [('BBA', int), ('BBB', int)])])])
246
+ >>> rfn.get_fieldstructure(ndtype)
247
+ ... # XXX: possible regression, order of BBA and BBB is swapped
248
+ {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
249
+
250
+ """
251
+ if parents is None:
252
+ parents = {}
253
+ names = adtype.names
254
+ for name in names:
255
+ current = adtype[name]
256
+ if current.names is not None:
257
+ if lastname:
258
+ parents[name] = [lastname, ]
259
+ else:
260
+ parents[name] = []
261
+ parents.update(get_fieldstructure(current, name, parents))
262
+ else:
263
+ lastparent = [_ for _ in (parents.get(lastname, []) or [])]
264
+ if lastparent:
265
+ lastparent.append(lastname)
266
+ elif lastname:
267
+ lastparent = [lastname, ]
268
+ parents[name] = lastparent or []
269
+ return parents
270
+
271
+
272
+ def _izip_fields_flat(iterable):
273
+ """
274
+ Returns an iterator of concatenated fields from a sequence of arrays,
275
+ collapsing any nested structure.
276
+
277
+ """
278
+ for element in iterable:
279
+ if isinstance(element, np.void):
280
+ yield from _izip_fields_flat(tuple(element))
281
+ else:
282
+ yield element
283
+
284
+
285
+ def _izip_fields(iterable):
286
+ """
287
+ Returns an iterator of concatenated fields from a sequence of arrays.
288
+
289
+ """
290
+ for element in iterable:
291
+ if (hasattr(element, '__iter__') and
292
+ not isinstance(element, str)):
293
+ yield from _izip_fields(element)
294
+ elif isinstance(element, np.void) and len(tuple(element)) == 1:
295
+ # this statement is the same as the previous expression
296
+ yield from _izip_fields(element)
297
+ else:
298
+ yield element
299
+
300
+
301
+ def _izip_records(seqarrays, fill_value=None, flatten=True):
302
+ """
303
+ Returns an iterator of concatenated items from a sequence of arrays.
304
+
305
+ Parameters
306
+ ----------
307
+ seqarrays : sequence of arrays
308
+ Sequence of arrays.
309
+ fill_value : {None, integer}
310
+ Value used to pad shorter iterables.
311
+ flatten : {True, False}, optional
312
+ Whether to collapse nested fields.
313
+ """
314
+
315
+ # Should we flatten the items, or just use a nested approach
316
+ if flatten:
317
+ zipfunc = _izip_fields_flat
318
+ else:
319
+ zipfunc = _izip_fields
320
+
321
+ for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):
322
+ yield tuple(zipfunc(tup))
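
To make the generator concrete, here is what it yields for two structured arrays with different fields (an illustrative call to this private helper):

    import numpy as np
    from numpy.lib.recfunctions import _izip_records

    a = np.array([(1, 2.0)], dtype=[('x', 'i8'), ('y', 'f8')])
    b = np.array([(3,)], dtype=[('z', 'i8')])
    print(list(_izip_records([a, b])))   # [(1, 2.0, 3)] -- records flattened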
323
+
324
+
325
+ def _fix_output(output, usemask=True, asrecarray=False):
326
+ """
327
+ Private function: return a recarray, a ndarray, a MaskedArray
328
+ or a MaskedRecords depending on the input parameters
329
+ """
330
+ if not isinstance(output, MaskedArray):
331
+ usemask = False
332
+ if usemask:
333
+ if asrecarray:
334
+ output = output.view(MaskedRecords)
335
+ else:
336
+ output = ma.filled(output)
337
+ if asrecarray:
338
+ output = output.view(recarray)
339
+ return output
340
+
341
+
342
+ def _fix_defaults(output, defaults=None):
343
+ """
344
+ Update the fill_value and masked data of `output`
345
+ from the default given in a dictionary defaults.
346
+ """
347
+ names = output.dtype.names
348
+ (data, mask, fill_value) = (output.data, output.mask, output.fill_value)
349
+ for (k, v) in (defaults or {}).items():
350
+ if k in names:
351
+ fill_value[k] = v
352
+ data[k][mask[k]] = v
353
+ return output
354
+
355
+
356
+ def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
357
+ usemask=None, asrecarray=None):
358
+ return seqarrays
359
+
360
+
361
+ @array_function_dispatch(_merge_arrays_dispatcher)
362
+ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
363
+ usemask=False, asrecarray=False):
364
+ """
365
+ Merge arrays field by field.
366
+
367
+ Parameters
368
+ ----------
369
+ seqarrays : sequence of ndarrays
370
+ Sequence of arrays
371
+ fill_value : {float}, optional
372
+ Filling value used to pad missing data on the shorter arrays.
373
+ flatten : {False, True}, optional
374
+ Whether to collapse nested fields.
375
+ usemask : {False, True}, optional
376
+ Whether to return a masked array or not.
377
+ asrecarray : {False, True}, optional
378
+ Whether to return a recarray (MaskedRecords) or not.
379
+
380
+ Examples
381
+ --------
382
+ >>> from numpy.lib import recfunctions as rfn
383
+ >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
384
+ array([( 1, 10.), ( 2, 20.), (-1, 30.)],
385
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
386
+
387
+ >>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),
388
+ ... np.array([10., 20., 30.])), usemask=False)
389
+ array([(1, 10.0), (2, 20.0), (-1, 30.0)],
390
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
391
+ >>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),
392
+ ... np.array([10., 20., 30.])),
393
+ ... usemask=False, asrecarray=True)
394
+ rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],
395
+ dtype=[('a', '<i8'), ('f1', '<f8')])
396
+
397
+ Notes
398
+ -----
399
+ * Without a mask, the missing value will be filled with something,
400
+ depending on its corresponding type:
401
+
402
+ * ``-1`` for integers
403
+ * ``-1.0`` for floating point numbers
404
+ * ``'-'`` for characters
405
+ * ``'-1'`` for strings
406
+ * ``True`` for boolean values
407
+ * XXX: I just obtained these values empirically
408
+ """
409
+ # Only one item in the input sequence ?
410
+ if (len(seqarrays) == 1):
411
+ seqarrays = np.asanyarray(seqarrays[0])
412
+ # Do we have a single ndarray as input ?
413
+ if isinstance(seqarrays, (ndarray, np.void)):
414
+ seqdtype = seqarrays.dtype
415
+ # Make sure we have named fields
416
+ if seqdtype.names is None:
417
+ seqdtype = np.dtype([('', seqdtype)])
418
+ if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
419
+ # Minimal processing needed: just make sure everything's a-ok
420
+ seqarrays = seqarrays.ravel()
421
+ # Find what type of array we must return
422
+ if usemask:
423
+ if asrecarray:
424
+ seqtype = MaskedRecords
425
+ else:
426
+ seqtype = MaskedArray
427
+ elif asrecarray:
428
+ seqtype = recarray
429
+ else:
430
+ seqtype = ndarray
431
+ return seqarrays.view(dtype=seqdtype, type=seqtype)
432
+ else:
433
+ seqarrays = (seqarrays,)
434
+ else:
435
+ # Make sure we have arrays in the input sequence
436
+ seqarrays = [np.asanyarray(_m) for _m in seqarrays]
437
+ # Find the sizes of the inputs and their maximum
438
+ sizes = tuple(a.size for a in seqarrays)
439
+ maxlength = max(sizes)
440
+ # Get the dtype of the output (flattening if needed)
441
+ newdtype = _zip_dtype(seqarrays, flatten=flatten)
442
+ # Initialize the sequences for data and mask
443
+ seqdata = []
444
+ seqmask = []
445
+ # If we expect some kind of MaskedArray, make a special loop.
446
+ if usemask:
447
+ for (a, n) in zip(seqarrays, sizes):
448
+ nbmissing = (maxlength - n)
449
+ # Get the data and mask
450
+ data = a.ravel().__array__()
451
+ mask = ma.getmaskarray(a).ravel()
452
+ # Get the filling value (if needed)
453
+ if nbmissing:
454
+ fval = _check_fill_value(fill_value, a.dtype)
455
+ if isinstance(fval, (ndarray, np.void)):
456
+ if len(fval.dtype) == 1:
457
+ fval = fval.item()[0]
458
+ fmsk = True
459
+ else:
460
+ fval = np.array(fval, dtype=a.dtype, ndmin=1)
461
+ fmsk = np.ones((1,), dtype=mask.dtype)
462
+ else:
463
+ fval = None
464
+ fmsk = True
465
+ # Store an iterator padding the input to the expected length
466
+ seqdata.append(itertools.chain(data, [fval] * nbmissing))
467
+ seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
468
+ # Create an iterator for the data
469
+ data = tuple(_izip_records(seqdata, flatten=flatten))
470
+ output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
471
+ mask=list(_izip_records(seqmask, flatten=flatten)))
472
+ if asrecarray:
473
+ output = output.view(MaskedRecords)
474
+ else:
475
+ # Same as before, without the mask we don't need...
476
+ for (a, n) in zip(seqarrays, sizes):
477
+ nbmissing = (maxlength - n)
478
+ data = a.ravel().__array__()
479
+ if nbmissing:
480
+ fval = _check_fill_value(fill_value, a.dtype)
481
+ if isinstance(fval, (ndarray, np.void)):
482
+ if len(fval.dtype) == 1:
483
+ fval = fval.item()[0]
484
+ else:
485
+ fval = np.array(fval, dtype=a.dtype, ndmin=1)
486
+ else:
487
+ fval = None
488
+ seqdata.append(itertools.chain(data, [fval] * nbmissing))
489
+ output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)),
490
+ dtype=newdtype, count=maxlength)
491
+ if asrecarray:
492
+ output = output.view(recarray)
493
+ # And we're done...
494
+ return output
495
+
496
+
497
+ def _drop_fields_dispatcher(base, drop_names, usemask=None, asrecarray=None):
498
+ return (base,)
499
+
500
+
501
+ @array_function_dispatch(_drop_fields_dispatcher)
502
+ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
503
+ """
504
+ Return a new array with fields in `drop_names` dropped.
505
+
506
+ Nested fields are supported.
507
+
508
+ .. versionchanged:: 1.18.0
509
+ `drop_fields` returns an array with 0 fields if all fields are dropped,
510
+ rather than returning ``None`` as it did previously.
511
+
512
+ Parameters
513
+ ----------
514
+ base : array
515
+ Input array
516
+ drop_names : string or sequence
517
+ String or sequence of strings corresponding to the names of the
518
+ fields to drop.
519
+ usemask : {False, True}, optional
520
+ Whether to return a masked array or not.
521
+ asrecarray : string or sequence, optional
522
+ Whether to return a recarray or a mrecarray (`asrecarray=True`) or
523
+ a plain ndarray or masked array with flexible dtype. The default
524
+ is False.
525
+
526
+ Examples
527
+ --------
528
+ >>> from numpy.lib import recfunctions as rfn
529
+ >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
530
+ ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])])
531
+ >>> rfn.drop_fields(a, 'a')
532
+ array([((2., 3),), ((5., 6),)],
533
+ dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])])
534
+ >>> rfn.drop_fields(a, 'ba')
535
+ array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])])
536
+ >>> rfn.drop_fields(a, ['ba', 'bb'])
537
+ array([(1,), (4,)], dtype=[('a', '<i8')])
538
+ """
539
+ if _is_string_like(drop_names):
540
+ drop_names = [drop_names]
541
+ else:
542
+ drop_names = set(drop_names)
543
+
544
+ def _drop_descr(ndtype, drop_names):
545
+ names = ndtype.names
546
+ newdtype = []
547
+ for name in names:
548
+ current = ndtype[name]
549
+ if name in drop_names:
550
+ continue
551
+ if current.names is not None:
552
+ descr = _drop_descr(current, drop_names)
553
+ if descr:
554
+ newdtype.append((name, descr))
555
+ else:
556
+ newdtype.append((name, current))
557
+ return newdtype
558
+
559
+ newdtype = _drop_descr(base.dtype, drop_names)
560
+
561
+ output = np.empty(base.shape, dtype=newdtype)
562
+ output = recursive_fill_fields(base, output)
563
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
564
+
565
+
566
+ def _keep_fields(base, keep_names, usemask=True, asrecarray=False):
567
+ """
568
+ Return a new array keeping only the fields in `keep_names`,
569
+ and preserving the order of those fields.
570
+
571
+ Parameters
572
+ ----------
573
+ base : array
574
+ Input array
575
+ keep_names : string or sequence
576
+ String or sequence of strings corresponding to the names of the
577
+ fields to keep. Order of the names will be preserved.
578
+ usemask : {False, True}, optional
579
+ Whether to return a masked array or not.
580
+ asrecarray : string or sequence, optional
581
+ Whether to return a recarray or a mrecarray (`asrecarray=True`) or
582
+ a plain ndarray or masked array with flexible dtype. The default
583
+ is False.
584
+ """
585
+ newdtype = [(n, base.dtype[n]) for n in keep_names]
586
+ output = np.empty(base.shape, dtype=newdtype)
587
+ output = recursive_fill_fields(base, output)
588
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
589
+
590
+
591
+ def _rec_drop_fields_dispatcher(base, drop_names):
592
+ return (base,)
593
+
594
+
595
+ @array_function_dispatch(_rec_drop_fields_dispatcher)
596
+ def rec_drop_fields(base, drop_names):
597
+ """
598
+ Returns a new numpy.recarray with fields in `drop_names` dropped.
599
+ """
600
+ return drop_fields(base, drop_names, usemask=False, asrecarray=True)
601
+
602
+
603
+ def _rename_fields_dispatcher(base, namemapper):
604
+ return (base,)
605
+
606
+
607
+ @array_function_dispatch(_rename_fields_dispatcher)
608
+ def rename_fields(base, namemapper):
609
+ """
610
+ Rename the fields from a flexible-datatype ndarray or recarray.
611
+
612
+ Nested fields are supported.
613
+
614
+ Parameters
615
+ ----------
616
+ base : ndarray
617
+ Input array whose fields must be modified.
618
+ namemapper : dictionary
619
+ Dictionary mapping old field names to their new version.
620
+
621
+ Examples
622
+ --------
623
+ >>> from numpy.lib import recfunctions as rfn
624
+ >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
625
+ ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
626
+ >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
627
+ array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
628
+ dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])
629
+
630
+ """
631
+ def _recursive_rename_fields(ndtype, namemapper):
632
+ newdtype = []
633
+ for name in ndtype.names:
634
+ newname = namemapper.get(name, name)
635
+ current = ndtype[name]
636
+ if current.names is not None:
637
+ newdtype.append(
638
+ (newname, _recursive_rename_fields(current, namemapper))
639
+ )
640
+ else:
641
+ newdtype.append((newname, current))
642
+ return newdtype
643
+ newdtype = _recursive_rename_fields(base.dtype, namemapper)
644
+ return base.view(newdtype)
645
+
646
+
647
+ def _append_fields_dispatcher(base, names, data, dtypes=None,
648
+ fill_value=None, usemask=None, asrecarray=None):
649
+ yield base
650
+ yield from data
651
+
652
+
653
+ @array_function_dispatch(_append_fields_dispatcher)
654
+ def append_fields(base, names, data, dtypes=None,
655
+ fill_value=-1, usemask=True, asrecarray=False):
656
+ """
657
+ Add new fields to an existing array.
658
+
659
+ The names of the fields are given with the `names` arguments,
660
+ the corresponding values with the `data` arguments.
661
+ If a single field is appended, `names`, `data` and `dtypes` do not have
662
+ to be lists but just values.
663
+
664
+ Parameters
665
+ ----------
666
+ base : array
667
+ Input array to extend.
668
+ names : string, sequence
669
+ String or sequence of strings corresponding to the names
670
+ of the new fields.
671
+ data : array or sequence of arrays
672
+ Array or sequence of arrays storing the fields to add to the base.
673
+ dtypes : sequence of datatypes, optional
674
+ Datatype or sequence of datatypes.
675
+ If None, the datatypes are estimated from the `data`.
676
+ fill_value : {float}, optional
677
+ Filling value used to pad missing data on the shorter arrays.
678
+ usemask : {False, True}, optional
679
+ Whether to return a masked array or not.
680
+ asrecarray : {False, True}, optional
681
+ Whether to return a recarray (MaskedRecords) or not.
682
+
683
+ """
684
+ # Check the names
685
+ if isinstance(names, (tuple, list)):
686
+ if len(names) != len(data):
687
+ msg = "The number of arrays does not match the number of names"
688
+ raise ValueError(msg)
689
+ elif isinstance(names, str):
690
+ names = [names, ]
691
+ data = [data, ]
692
+ #
693
+ if dtypes is None:
694
+ data = [np.array(a, copy=False, subok=True) for a in data]
695
+ data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
696
+ else:
697
+ if not isinstance(dtypes, (tuple, list)):
698
+ dtypes = [dtypes, ]
699
+ if len(data) != len(dtypes):
700
+ if len(dtypes) == 1:
701
+ dtypes = dtypes * len(data)
702
+ else:
703
+ msg = "The dtypes argument must be None, a dtype, or a list."
704
+ raise ValueError(msg)
705
+ data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
706
+ for (a, n, d) in zip(data, names, dtypes)]
707
+ #
708
+ base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
709
+ if len(data) > 1:
710
+ data = merge_arrays(data, flatten=True, usemask=usemask,
711
+ fill_value=fill_value)
712
+ else:
713
+ data = data.pop()
714
+ #
715
+ output = ma.masked_all(
716
+ max(len(base), len(data)),
717
+ dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))
718
+ output = recursive_fill_fields(base, output)
719
+ output = recursive_fill_fields(data, output)
720
+ #
721
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
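
The docstring above carries no Examples section; a minimal usage sketch (illustrative, not part of the committed file):

    import numpy as np
    from numpy.lib import recfunctions as rfn

    a = np.array([(1,), (2,)], dtype=[('x', 'i8')])
    b = rfn.append_fields(a, 'y', np.array([10.0, 20.0]), usemask=False)
    print(b.dtype.names)   # ('x', 'y')
    print(b['y'])          # [10. 20.]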
722
+
723
+
724
+ def _rec_append_fields_dispatcher(base, names, data, dtypes=None):
725
+ yield base
726
+ yield from data
727
+
728
+
729
+ @array_function_dispatch(_rec_append_fields_dispatcher)
730
+ def rec_append_fields(base, names, data, dtypes=None):
731
+ """
732
+ Add new fields to an existing array.
733
+
734
+ The names of the fields are given with the `names` arguments,
735
+ the corresponding values with the `data` arguments.
736
+ If a single field is appended, `names`, `data` and `dtypes` do not have
737
+ to be lists but just values.
738
+
739
+ Parameters
740
+ ----------
741
+ base : array
742
+ Input array to extend.
743
+ names : string, sequence
744
+ String or sequence of strings corresponding to the names
745
+ of the new fields.
746
+ data : array or sequence of arrays
747
+ Array or sequence of arrays storing the fields to add to the base.
748
+ dtypes : sequence of datatypes, optional
749
+ Datatype or sequence of datatypes.
750
+ If None, the datatypes are estimated from the `data`.
751
+
752
+ See Also
753
+ --------
754
+ append_fields
755
+
756
+ Returns
757
+ -------
758
+ appended_array : np.recarray
759
+ """
760
+ return append_fields(base, names, data=data, dtypes=dtypes,
761
+ asrecarray=True, usemask=False)
762
+
763
+
764
+ def _repack_fields_dispatcher(a, align=None, recurse=None):
765
+ return (a,)
766
+
767
+
768
+ @array_function_dispatch(_repack_fields_dispatcher)
769
+ def repack_fields(a, align=False, recurse=False):
770
+ """
771
+ Re-pack the fields of a structured array or dtype in memory.
772
+
773
+ The memory layout of structured datatypes allows fields at arbitrary
774
+ byte offsets. This means the fields can be separated by padding bytes,
775
+ their offsets can be non-monotonically increasing, and they can overlap.
776
+
777
+ This method removes any overlaps and reorders the fields in memory so they
778
+ have increasing byte offsets, and adds or removes padding bytes depending
779
+ on the `align` option, which behaves like the `align` option to
780
+ `numpy.dtype`.
781
+
782
+ If `align=False`, this method produces a "packed" memory layout in which
783
+ each field starts at the byte the previous field ended, and any padding
784
+ bytes are removed.
785
+
786
+ If `align=True`, this method produces an "aligned" memory layout in which
787
+ each field's offset is a multiple of its alignment, and the total itemsize
788
+ is a multiple of the largest alignment, by adding padding bytes as needed.
789
+
790
+ Parameters
791
+ ----------
792
+ a : ndarray or dtype
793
+ array or dtype for which to repack the fields.
794
+ align : boolean
795
+ If true, use an "aligned" memory layout, otherwise use a "packed" layout.
796
+ recurse : boolean
797
+ If True, also repack nested structures.
798
+
799
+ Returns
800
+ -------
801
+ repacked : ndarray or dtype
802
+ Copy of `a` with fields repacked, or `a` itself if no repacking was
803
+ needed.
804
+
805
+ Examples
806
+ --------
807
+
808
+ >>> from numpy.lib import recfunctions as rfn
809
+ >>> def print_offsets(d):
810
+ ... print("offsets:", [d.fields[name][1] for name in d.names])
811
+ ... print("itemsize:", d.itemsize)
812
+ ...
813
+ >>> dt = np.dtype('u1, <i8, <f8', align=True)
814
+ >>> dt
815
+ dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '<i8', '<f8'], \
816
+ 'offsets': [0, 8, 16], 'itemsize': 24}, align=True)
817
+ >>> print_offsets(dt)
818
+ offsets: [0, 8, 16]
819
+ itemsize: 24
820
+ >>> packed_dt = rfn.repack_fields(dt)
821
+ >>> packed_dt
822
+ dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
823
+ >>> print_offsets(packed_dt)
824
+ offsets: [0, 1, 9]
825
+ itemsize: 17
826
+
827
+ """
828
+ if not isinstance(a, np.dtype):
829
+ dt = repack_fields(a.dtype, align=align, recurse=recurse)
830
+ return a.astype(dt, copy=False)
831
+
832
+ if a.names is None:
833
+ return a
834
+
835
+ fieldinfo = []
836
+ for name in a.names:
837
+ tup = a.fields[name]
838
+ if recurse:
839
+ fmt = repack_fields(tup[0], align=align, recurse=True)
840
+ else:
841
+ fmt = tup[0]
842
+
843
+ if len(tup) == 3:
844
+ name = (tup[2], name)
845
+
846
+ fieldinfo.append((name, fmt))
847
+
848
+ dt = np.dtype(fieldinfo, align=align)
849
+ return np.dtype((a.type, dt))
850
+
851
+ def _get_fields_and_offsets(dt, offset=0):
852
+ """
853
+ Returns a flat list of (dtype, count, offset) tuples of all the
854
+ scalar fields in the dtype "dt", including nested fields, in left
855
+ to right order.
856
+ """
857
+
858
+ # counts up elements in subarrays, including nested subarrays, and returns
859
+ # base dtype and count
860
+ def count_elem(dt):
861
+ count = 1
862
+ while dt.shape != ():
863
+ for size in dt.shape:
864
+ count *= size
865
+ dt = dt.base
866
+ return dt, count
867
+
868
+ fields = []
869
+ for name in dt.names:
870
+ field = dt.fields[name]
871
+ f_dt, f_offset = field[0], field[1]
872
+ f_dt, n = count_elem(f_dt)
873
+
874
+ if f_dt.names is None:
875
+ fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
876
+ else:
877
+ subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
878
+ size = f_dt.itemsize
879
+
880
+ for i in range(n):
881
+ if i == 0:
882
+ # optimization: avoid list comprehension if no subarray
883
+ fields.extend(subfields)
884
+ else:
885
+ fields.extend([(d, c, o + i*size) for d, c, o in subfields])
886
+ return fields
887
+
888
+ def _common_stride(offsets, counts, itemsize):
889
+ """
890
+ Returns the stride between the fields, or None if the stride is not
891
+ constant. The values in "counts" designate the lengths of
892
+ subarrays. Subarrays are treated as many contiguous fields, with
893
+ always positive stride.
894
+ """
895
+ if len(offsets) <= 1:
896
+ return itemsize
897
+
898
+ negative = offsets[1] < offsets[0] # negative stride
899
+ if negative:
900
+ # reverse, so offsets will be ascending
901
+ it = zip(reversed(offsets), reversed(counts))
902
+ else:
903
+ it = zip(offsets, counts)
904
+
905
+ prev_offset = None
906
+ stride = None
907
+ for offset, count in it:
908
+ if count != 1: # subarray: always c-contiguous
909
+ if negative:
910
+ return None # subarrays can never have a negative stride
911
+ if stride is None:
912
+ stride = itemsize
913
+ if stride != itemsize:
914
+ return None
915
+ end_offset = offset + (count - 1) * itemsize
916
+ else:
917
+ end_offset = offset
918
+
919
+ if prev_offset is not None:
920
+ new_stride = offset - prev_offset
921
+ if stride is None:
922
+ stride = new_stride
923
+ if stride != new_stride:
924
+ return None
925
+
926
+ prev_offset = end_offset
927
+
928
+ if negative:
929
+ return -stride
930
+ return stride
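
A few concrete calls pin down the contract of this private helper (illustrative, assuming a numpy build that ships this function):

    from numpy.lib.recfunctions import _common_stride

    # scalar f8 fields packed 8 bytes apart -> constant stride of 8
    assert _common_stride([0, 8, 16], [1, 1, 1], 8) == 8
    # fields padded 12 bytes apart -> stride 12
    assert _common_stride([0, 12], [1, 1], 8) == 12
    # irregular gaps -> no constant stride, so None
    assert _common_stride([0, 8, 20], [1, 1, 1], 8) is None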
931
+
932
+
933
+ def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
934
+ casting=None):
935
+ return (arr,)
936
+
937
+ @array_function_dispatch(_structured_to_unstructured_dispatcher)
938
+ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
939
+ """
940
+ Converts an n-D structured array into an (n+1)-D unstructured array.
941
+
942
+ The new array will have a new last dimension equal in size to the
943
+ number of field-elements of the input array. If not supplied, the output
944
+ datatype is determined from the numpy type promotion rules applied to all
945
+ the field datatypes.
946
+
947
+ Nested fields, as well as each element of any subarray fields, all count
948
+ as a single field-element.
949
+
950
+ Parameters
951
+ ----------
952
+ arr : ndarray
953
+ Structured array or dtype to convert. Cannot contain object datatype.
954
+ dtype : dtype, optional
955
+ The dtype of the output unstructured array.
956
+ copy : bool, optional
957
+ If true, always return a copy. If false, a view is returned if
958
+ possible, such as when the `dtype` and strides of the fields are
959
+ suitable and the array subtype is one of `np.ndarray`, `np.recarray`
960
+ or `np.memmap`.
961
+
962
+ .. versionchanged:: 1.25.0
963
+ A view can now be returned if the fields are separated by a
964
+ uniform stride.
965
+
966
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
967
+ See casting argument of `numpy.ndarray.astype`. Controls what kind of
968
+ data casting may occur.
969
+
970
+ Returns
971
+ -------
972
+ unstructured : ndarray
973
+ Unstructured array with one more dimension.
974
+
975
+ Examples
976
+ --------
977
+
978
+ >>> from numpy.lib import recfunctions as rfn
979
+     >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+     >>> a
+     array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
+            (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
+           dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
+     >>> rfn.structured_to_unstructured(a)
+     array([[0., 0., 0., 0., 0.],
+            [0., 0., 0., 0., 0.],
+            [0., 0., 0., 0., 0.],
+            [0., 0., 0., 0., 0.]])
+
+     >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
+     ...              dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+     >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
+     array([ 3. ,  5.5,  9. , 11. ])
+
+     """
+     if arr.dtype.names is None:
+         raise ValueError('arr must be a structured array')
+
+     fields = _get_fields_and_offsets(arr.dtype)
+     n_fields = len(fields)
+     if n_fields == 0 and dtype is None:
+         raise ValueError("arr has no fields. Unable to guess dtype")
+     elif n_fields == 0:
+         # too many bugs elsewhere for this to work now
+         raise NotImplementedError("arr with no fields is not supported")
+
+     dts, counts, offsets = zip(*fields)
+     names = ['f{}'.format(n) for n in range(n_fields)]
+
+     if dtype is None:
+         out_dtype = np.result_type(*[dt.base for dt in dts])
+     else:
+         out_dtype = np.dtype(dtype)
+
+     # Use a series of views and casts to convert to an unstructured array:
+
+     # first view using flattened fields (doesn't work for object arrays)
+     # Note: dts may include a shape for subarrays
+     flattened_fields = np.dtype({'names': names,
+                                  'formats': dts,
+                                  'offsets': offsets,
+                                  'itemsize': arr.dtype.itemsize})
+     arr = arr.view(flattened_fields)
+
+     # we only allow a few types to be unstructured by manipulating the
+     # strides, because we know it won't work with, for example, np.matrix nor
+     # np.ma.MaskedArray.
+     can_view = type(arr) in (np.ndarray, np.recarray, np.memmap)
+     if (not copy) and can_view and all(dt.base == out_dtype for dt in dts):
+         # all elements have the right dtype already; if they have a common
+         # stride, we can just return a view
+         common_stride = _common_stride(offsets, counts, out_dtype.itemsize)
+         if common_stride is not None:
+             wrap = arr.__array_wrap__
+
+             new_shape = arr.shape + (sum(counts), out_dtype.itemsize)
+             new_strides = arr.strides + (abs(common_stride), 1)
+
+             arr = arr[..., np.newaxis].view(np.uint8)  # view as bytes
+             arr = arr[..., min(offsets):]  # remove the leading unused data
+             arr = np.lib.stride_tricks.as_strided(arr,
+                                                   new_shape,
+                                                   new_strides,
+                                                   subok=True)
+
+             # cast and drop the last dimension again
+             arr = arr.view(out_dtype)[..., 0]
+
+             if common_stride < 0:
+                 arr = arr[..., ::-1]  # reverse, if the stride was negative
+             if type(arr) is not type(wrap.__self__):
+                 # Some types (e.g. recarray) turn into an ndarray along the
+                 # way, so we have to wrap it again in order to match the
+                 # behavior with copy=True.
+                 arr = wrap(arr)
+             return arr
+
+     # next cast to a packed format with all fields converted to new dtype
+     packed_fields = np.dtype({'names': names,
+                               'formats': [(out_dtype, dt.shape) for dt in dts]})
+     arr = arr.astype(packed_fields, copy=copy, casting=casting)
+
+     # finally it is safe to view the packed fields as the unstructured type
+     return arr.view((out_dtype, (sum(counts),)))
+
+
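+ # Illustrative sketch (not part of the library source): when every field
+ # already has the output dtype and a common stride, the ``copy=False`` fast
+ # path above can return a view into the original buffer, so writes propagate:
+ #
+ #     >>> a = np.zeros(3, dtype=[('x', 'f4'), ('y', 'f4')])
+ #     >>> u = structured_to_unstructured(a, copy=False)
+ #     >>> u[0, 0] = 7.0
+ #     >>> a['x'][0]  # the assignment is visible through the view
+ #     7.0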
+ def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
+                                            align=None, copy=None, casting=None):
+     return (arr,)
+
+
+ @array_function_dispatch(_unstructured_to_structured_dispatcher)
+ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
+                                copy=False, casting='unsafe'):
+     """
+     Converts an n-D unstructured array into an (n-1)-D structured array.
+
+     The last dimension of the input array is converted into a structure, with
+     number of field-elements equal to the size of the last dimension of the
+     input array. By default all output fields have the input array's dtype,
+     but an output structured dtype with an equal number of field-elements
+     can be supplied instead.
+
+     Nested fields, as well as each element of any subarray fields, all count
+     towards the number of field-elements.
+
+     Parameters
+     ----------
+     arr : ndarray
+         Unstructured array or dtype to convert.
+     dtype : dtype, optional
+         The structured dtype of the output array.
+     names : list of strings, optional
+         If dtype is not supplied, this specifies the field names for the
+         output dtype, in order. The field dtypes will be the same as the
+         input array.
+     align : boolean, optional
+         Whether to create an aligned memory layout.
+     copy : bool, optional
+         See copy argument to `numpy.ndarray.astype`. If true, always return
+         a copy. If false, and `dtype` requirements are satisfied, a view is
+         returned.
+     casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+         See casting argument of `numpy.ndarray.astype`. Controls what kind
+         of data casting may occur.
+
+     Returns
+     -------
+     structured : ndarray
+         Structured array with fewer dimensions.
+
+     Examples
+     --------
+
+     >>> from numpy.lib import recfunctions as rfn
+     >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+     >>> a = np.arange(20).reshape((4,5))
+     >>> a
+     array([[ 0,  1,  2,  3,  4],
+            [ 5,  6,  7,  8,  9],
+            [10, 11, 12, 13, 14],
+            [15, 16, 17, 18, 19]])
+     >>> rfn.unstructured_to_structured(a, dt)
+     array([( 0, ( 1.,  2), [ 3.,  4.]), ( 5, ( 6.,  7), [ 8.,  9.]),
+            (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
+           dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
+
+     """
+     if arr.shape == ():
+         raise ValueError('arr must have at least one dimension')
+     n_elem = arr.shape[-1]
+     if n_elem == 0:
+         # too many bugs elsewhere for this to work now
+         raise NotImplementedError("last axis with size 0 is not supported")
+
+     if dtype is None:
+         if names is None:
+             names = ['f{}'.format(n) for n in range(n_elem)]
+         out_dtype = np.dtype([(n, arr.dtype) for n in names], align=align)
+         fields = _get_fields_and_offsets(out_dtype)
+         dts, counts, offsets = zip(*fields)
+     else:
+         if names is not None:
+             raise ValueError("don't supply both dtype and names")
+         # if dtype is the args of np.dtype, construct it
+         dtype = np.dtype(dtype)
+         # sanity check of the input dtype
+         fields = _get_fields_and_offsets(dtype)
+         if len(fields) == 0:
+             dts, counts, offsets = [], [], []
+         else:
+             dts, counts, offsets = zip(*fields)
+
+         if n_elem != sum(counts):
+             raise ValueError('The length of the last dimension of arr must '
+                              'be equal to the number of fields in dtype')
+         out_dtype = dtype
+         if align and not out_dtype.isalignedstruct:
+             raise ValueError("align was True but dtype is not aligned")
+
+     names = ['f{}'.format(n) for n in range(len(fields))]
+
+     # Use a series of views and casts to convert to a structured array:
+
+     # first view as a packed structured array of one dtype
+     packed_fields = np.dtype({'names': names,
+                               'formats': [(arr.dtype, dt.shape) for dt in dts]})
+     arr = np.ascontiguousarray(arr).view(packed_fields)
+
+     # next cast to an unpacked but flattened format with varied dtypes
+     flattened_fields = np.dtype({'names': names,
+                                  'formats': dts,
+                                  'offsets': offsets,
+                                  'itemsize': out_dtype.itemsize})
+     arr = arr.astype(flattened_fields, copy=copy, casting=casting)
+
+     # finally view as the final nested dtype and remove the last axis
+     return arr.view(out_dtype)[..., 0]
+
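+ # Illustrative sketch of the ``names`` shortcut (not part of the library
+ # source): with no ``dtype``, each column keeps the input dtype and the
+ # given names label the fields in order.
+ #
+ #     >>> a = np.arange(6.).reshape(3, 2)
+ #     >>> unstructured_to_structured(a, names=['x', 'y'])['x']
+ #     array([0., 2., 4.])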
+ def _apply_along_fields_dispatcher(func, arr):
+     return (arr,)
+
+
+ @array_function_dispatch(_apply_along_fields_dispatcher)
+ def apply_along_fields(func, arr):
+     """
+     Apply function 'func' as a reduction across fields of a structured array.
+
+     This is similar to `apply_along_axis`, but treats the fields of a
+     structured array as an extra axis. The fields are all first cast to a
+     common type following the type-promotion rules from `numpy.result_type`
+     applied to the field's dtypes.
+
+     Parameters
+     ----------
+     func : function
+         Function to apply on the "field" dimension. This function must
+         support an `axis` argument, like np.mean, np.sum, etc.
+     arr : ndarray
+         Structured array for which to apply func.
+
+     Returns
+     -------
+     out : ndarray
+         Result of the reduction operation
+
+     Examples
+     --------
+
+     >>> from numpy.lib import recfunctions as rfn
+     >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8, 11), (10, 11, 12)],
+     ...              dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+     >>> rfn.apply_along_fields(np.mean, b)
+     array([ 2.66666667,  5.33333333,  8.66666667, 11.        ])
+     >>> rfn.apply_along_fields(np.mean, b[['x', 'z']])
+     array([ 3. ,  5.5,  9. , 11. ])
+
+     """
+     if arr.dtype.names is None:
+         raise ValueError('arr must be a structured array')
+
+     uarr = structured_to_unstructured(arr)
+     return func(uarr, axis=-1)
+     # works and avoids axis requirement, but very, very slow:
+     # return np.apply_along_axis(func, -1, uarr)
+
+
+ def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None):
+     return dst, src
+
+
+ @array_function_dispatch(_assign_fields_by_name_dispatcher)
+ def assign_fields_by_name(dst, src, zero_unassigned=True):
+     """
+     Assigns values from one structured array to another by field name.
+
+     Normally in numpy >= 1.14, assignment of one structured array to another
+     copies fields "by position", meaning that the first field from the src is
+     copied to the first field of the dst, and so on, regardless of field name.
+
+     This function instead copies "by field name", such that fields in the dst
+     are assigned from the identically named field in the src. This applies
+     recursively for nested structures. This is how structure assignment worked
+     in numpy >= 1.6 to <= 1.13.
+
+     Parameters
+     ----------
+     dst : ndarray
+     src : ndarray
+         The source and destination arrays during assignment.
+     zero_unassigned : bool, optional
+         If True, fields in the dst for which there was no matching
+         field in the src are filled with the value 0 (zero). This
+         was the behavior of numpy <= 1.13. If False, those fields
+         are not modified.
+     """
+
+     if dst.dtype.names is None:
+         dst[...] = src
+         return
+
+     for name in dst.dtype.names:
+         if name not in src.dtype.names:
+             if zero_unassigned:
+                 dst[name] = 0
+         else:
+             assign_fields_by_name(dst[name], src[name],
+                                   zero_unassigned)
+
+
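+ # Illustrative sketch (not part of the library source): matching names are
+ # copied, unmatched destination fields are zeroed by default.
+ #
+ #     >>> dst = np.zeros(2, dtype=[('a', 'i4'), ('b', 'f4')])
+ #     >>> src = np.ones(2, dtype=[('b', 'f4'), ('c', 'u1')])
+ #     >>> assign_fields_by_name(dst, src)
+ #     >>> dst
+ #     array([(0, 1.), (0, 1.)], dtype=[('a', '<i4'), ('b', '<f4')])
+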
+
1265
+ def _require_fields_dispatcher(array, required_dtype):
1266
+ return (array,)
1267
+
1268
+ @array_function_dispatch(_require_fields_dispatcher)
1269
+ def require_fields(array, required_dtype):
1270
+ """
1271
+ Casts a structured array to a new dtype using assignment by field-name.
1272
+
1273
+ This function assigns from the old to the new array by name, so the
1274
+ value of a field in the output array is the value of the field with the
1275
+ same name in the source array. This has the effect of creating a new
1276
+ ndarray containing only the fields "required" by the required_dtype.
1277
+
1278
+ If a field name in the required_dtype does not exist in the
1279
+ input array, that field is created and set to 0 in the output array.
1280
+
1281
+ Parameters
1282
+ ----------
1283
+ a : ndarray
1284
+ array to cast
1285
+ required_dtype : dtype
1286
+ datatype for output array
1287
+
1288
+ Returns
1289
+ -------
1290
+ out : ndarray
1291
+ array with the new dtype, with field values copied from the fields in
1292
+ the input array with the same name
1293
+
1294
+ Examples
1295
+ --------
1296
+
1297
+ >>> from numpy.lib import recfunctions as rfn
1298
+ >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
1299
+ >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
1300
+ array([(1., 1), (1., 1), (1., 1), (1., 1)],
1301
+ dtype=[('b', '<f4'), ('c', 'u1')])
1302
+ >>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
1303
+ array([(1., 0), (1., 0), (1., 0), (1., 0)],
1304
+ dtype=[('b', '<f4'), ('newf', 'u1')])
1305
+
1306
+ """
1307
+ out = np.empty(array.shape, dtype=required_dtype)
1308
+ assign_fields_by_name(out, array)
1309
+ return out
1310
+
1311
+
+ def _stack_arrays_dispatcher(arrays, defaults=None, usemask=None,
+                              asrecarray=None, autoconvert=None):
+     return arrays
+
+
+ @array_function_dispatch(_stack_arrays_dispatcher)
+ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
+                  autoconvert=False):
+     """
+     Superposes arrays field by field
+
+     Parameters
+     ----------
+     arrays : array or sequence
+         Sequence of input arrays.
+     defaults : dictionary, optional
+         Dictionary mapping field names to the corresponding default values.
+     usemask : {True, False}, optional
+         Whether to return a MaskedArray (or MaskedRecords if
+         `asrecarray==True`) or a ndarray.
+     asrecarray : {False, True}, optional
+         Whether to return a recarray (or MaskedRecords if `usemask==True`)
+         or just a flexible-type ndarray.
+     autoconvert : {False, True}, optional
+         Whether to automatically cast the type of the field to the maximum.
+
+     Examples
+     --------
+     >>> from numpy.lib import recfunctions as rfn
+     >>> x = np.array([1, 2,])
+     >>> rfn.stack_arrays(x) is x
+     True
+     >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
+     >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
+     ...               dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
+     >>> test = rfn.stack_arrays((z,zz))
+     >>> test
+     masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
+                        (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
+                  mask=[(False, False,  True), (False, False,  True),
+                        (False, False, False), (False, False, False),
+                        (False, False, False)],
+            fill_value=(b'N/A', 1.e+20, 1.e+20),
+                 dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')])
+
+     """
+     if isinstance(arrays, ndarray):
+         return arrays
+     elif len(arrays) == 1:
+         return arrays[0]
+     seqarrays = [np.asanyarray(a).ravel() for a in arrays]
+     nrecords = [len(a) for a in seqarrays]
+     ndtype = [a.dtype for a in seqarrays]
+     fldnames = [d.names for d in ndtype]
+     #
+     dtype_l = ndtype[0]
+     newdescr = _get_fieldspec(dtype_l)
+     names = [n for n, d in newdescr]
+     for dtype_n in ndtype[1:]:
+         for fname, fdtype in _get_fieldspec(dtype_n):
+             if fname not in names:
+                 newdescr.append((fname, fdtype))
+                 names.append(fname)
+             else:
+                 nameidx = names.index(fname)
+                 _, cdtype = newdescr[nameidx]
+                 if autoconvert:
+                     newdescr[nameidx] = (fname, max(fdtype, cdtype))
+                 elif fdtype != cdtype:
+                     raise TypeError("Incompatible type '%s' <> '%s'" %
+                                     (cdtype, fdtype))
+     # Only one field: use concatenate
+     if len(newdescr) == 1:
+         output = ma.concatenate(seqarrays)
+     else:
+         #
+         output = ma.masked_all((np.sum(nrecords),), newdescr)
+         offset = np.cumsum(np.r_[0, nrecords])
+         seen = []
+         for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
+             names = a.dtype.names
+             if names is None:
+                 output['f%i' % len(seen)][i:j] = a
+             else:
+                 for name in n:
+                     output[name][i:j] = a[name]
+                     if name not in seen:
+                         seen.append(name)
+     #
+     return _fix_output(_fix_defaults(output, defaults),
+                        usemask=usemask, asrecarray=asrecarray)
+
+
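+ # Illustrative sketch of ``autoconvert`` (not part of the library source):
+ # mismatched field dtypes are promoted to the larger one instead of raising.
+ #
+ #     >>> a = np.array([(1, 10)], dtype=[('A', 'i4'), ('B', 'i4')])
+ #     >>> b = np.array([(2.5, 20)], dtype=[('A', 'f8'), ('B', 'i4')])
+ #     >>> stack_arrays((a, b), autoconvert=True, usemask=False)
+ #     array([(1. , 10), (2.5, 20)], dtype=[('A', '<f8'), ('B', '<i4')])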
+ def _find_duplicates_dispatcher(
+         a, key=None, ignoremask=None, return_index=None):
+     return (a,)
+
+
+ @array_function_dispatch(_find_duplicates_dispatcher)
+ def find_duplicates(a, key=None, ignoremask=True, return_index=False):
+     """
+     Find the duplicates in a structured array along a given key
+
+     Parameters
+     ----------
+     a : array-like
+         Input array
+     key : {string, None}, optional
+         Name of the fields along which to check the duplicates.
+         If None, the search is performed by records
+     ignoremask : {True, False}, optional
+         Whether masked data should be discarded or considered as duplicates.
+     return_index : {False, True}, optional
+         Whether to return the indices of the duplicated values.
+
+     Examples
+     --------
+     >>> from numpy.lib import recfunctions as rfn
+     >>> ndtype = [('a', int)]
+     >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
+     ...                 mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
+     >>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
+     (masked_array(data=[(1,), (1,), (2,), (2,)],
+                   mask=[(False,), (False,), (False,), (False,)],
+             fill_value=(999999,),
+                  dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
+     """
+     a = np.asanyarray(a).ravel()
+     # Get a dictionary of fields
+     fields = get_fieldstructure(a.dtype)
+     # Get the sorting data (by selecting the corresponding field)
+     base = a
+     if key:
+         for f in fields[key]:
+             base = base[f]
+         base = base[key]
+     # Get the sorting indices and the sorted data
+     sortidx = base.argsort()
+     sortedbase = base[sortidx]
+     sorteddata = sortedbase.filled()
+     # Compare the sorting data
+     flag = (sorteddata[:-1] == sorteddata[1:])
+     # If masked data must be ignored, set the flag to false where needed
+     if ignoremask:
+         sortedmask = sortedbase.recordmask
+         flag[sortedmask[1:]] = False
+     flag = np.concatenate(([False], flag))
+     # We need to take the point on the left as well (else we're missing it)
+     flag[:-1] = flag[:-1] + flag[1:]
+     duplicates = a[sortidx][flag]
+     if return_index:
+         return (duplicates, sortidx[flag])
+     else:
+         return duplicates
+
+
+ def _join_by_dispatcher(
+         key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+         defaults=None, usemask=None, asrecarray=None):
+     return (r1, r2)
+
+
+ @array_function_dispatch(_join_by_dispatcher)
+ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
+             defaults=None, usemask=True, asrecarray=False):
+     """
+     Join arrays `r1` and `r2` on key `key`.
+
+     The key should be either a string or a sequence of strings corresponding
+     to the fields used to join the array. An exception is raised if the
+     `key` field cannot be found in the two input arrays. Neither `r1` nor
+     `r2` should have any duplicates along `key`: the presence of duplicates
+     will make the output quite unreliable. Note that duplicates are not
+     looked for by the algorithm.
+
+     Parameters
+     ----------
+     key : {string, sequence}
+         A string or a sequence of strings corresponding to the fields used
+         for comparison.
+     r1, r2 : arrays
+         Structured arrays.
+     jointype : {'inner', 'outer', 'leftouter'}, optional
+         If 'inner', returns the elements common to both r1 and r2.
+         If 'outer', returns the common elements as well as the elements of
+         r1 not in r2 and the elements of r2 not in r1.
+         If 'leftouter', returns the common elements and the elements of r1
+         not in r2.
+     r1postfix : string, optional
+         String appended to the names of the fields of r1 that are present
+         in r2 but absent from the key.
+     r2postfix : string, optional
+         String appended to the names of the fields of r2 that are present
+         in r1 but absent from the key.
+     defaults : {dictionary}, optional
+         Dictionary mapping field names to the corresponding default values.
+     usemask : {True, False}, optional
+         Whether to return a MaskedArray (or MaskedRecords if
+         `asrecarray==True`) or a ndarray.
+     asrecarray : {False, True}, optional
+         Whether to return a recarray (or MaskedRecords if `usemask==True`)
+         or just a flexible-type ndarray.
+
+     Notes
+     -----
+     * The output is sorted along the key.
+     * A temporary array is formed by dropping the fields not in the key for
+       the two arrays and concatenating the result. This array is then
+       sorted, and the common entries selected. The output is constructed by
+       filling the fields with the selected entries. Matching is not
+       preserved if there are some duplicates...
+
+     """
+     # Check jointype
+     if jointype not in ('inner', 'outer', 'leftouter'):
+         raise ValueError(
+             "The 'jointype' argument should be in 'inner', "
+             "'outer' or 'leftouter' (got '%s' instead)" % jointype
+             )
+     # If we have a single key, put it in a tuple
+     if isinstance(key, str):
+         key = (key,)
+
+     # Check the keys
+     if len(set(key)) != len(key):
+         dup = next(x for n, x in enumerate(key) if x in key[n + 1:])
+         raise ValueError("duplicate join key %r" % dup)
+     for name in key:
+         if name not in r1.dtype.names:
+             raise ValueError('r1 does not have key field %r' % name)
+         if name not in r2.dtype.names:
+             raise ValueError('r2 does not have key field %r' % name)
+
+     # Make sure we work with ravelled arrays
+     r1 = r1.ravel()
+     r2 = r2.ravel()
+     # Fixme: nb2 below is never used. Commenting out for pyflakes.
+     # (nb1, nb2) = (len(r1), len(r2))
+     nb1 = len(r1)
+     (r1names, r2names) = (r1.dtype.names, r2.dtype.names)
+
+     # Check the names for collision
+     collisions = (set(r1names) & set(r2names)) - set(key)
+     if collisions and not (r1postfix or r2postfix):
+         msg = "r1 and r2 contain common names, r1postfix and r2postfix "
+         msg += "can't both be empty"
+         raise ValueError(msg)
+
+     # Make temporary arrays of just the keys
+     #  (use order of keys in `r1` for back-compatibility)
+     key1 = [n for n in r1names if n in key]
+     r1k = _keep_fields(r1, key1)
+     r2k = _keep_fields(r2, key1)
+
+     # Concatenate the two arrays for comparison
+     aux = ma.concatenate((r1k, r2k))
+     idx_sort = aux.argsort(order=key)
+     aux = aux[idx_sort]
+     #
+     # Get the common keys
+     flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
+     flag_in[:-1] = flag_in[1:] + flag_in[:-1]
+     idx_in = idx_sort[flag_in]
+     idx_1 = idx_in[(idx_in < nb1)]
+     idx_2 = idx_in[(idx_in >= nb1)] - nb1
+     (r1cmn, r2cmn) = (len(idx_1), len(idx_2))
+     if jointype == 'inner':
+         (r1spc, r2spc) = (0, 0)
+     elif jointype == 'outer':
+         idx_out = idx_sort[~flag_in]
+         idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+         idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
+         (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
+     elif jointype == 'leftouter':
+         idx_out = idx_sort[~flag_in]
+         idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
+         (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
+     # Select the entries from each input
+     (s1, s2) = (r1[idx_1], r2[idx_2])
+     #
+     # Build the new description of the output array .......
+     # Start with the key fields
+     ndtype = _get_fieldspec(r1k.dtype)
+
+     # Add the fields from r1
+     for fname, fdtype in _get_fieldspec(r1.dtype):
+         if fname not in key:
+             ndtype.append((fname, fdtype))
+
+     # Add the fields from r2
+     for fname, fdtype in _get_fieldspec(r2.dtype):
+         # Have we seen the current name already ?
+         # we need to rebuild this list every time
+         names = list(name for name, dtype in ndtype)
+         try:
+             nameidx = names.index(fname)
+         except ValueError:
+             # ... we haven't: just add the description to the current list
+             ndtype.append((fname, fdtype))
+         else:
+             # collision
+             _, cdtype = ndtype[nameidx]
+             if fname in key:
+                 # The current field is part of the key: take the largest dtype
+                 ndtype[nameidx] = (fname, max(fdtype, cdtype))
+             else:
+                 # The current field is not part of the key: add the suffixes,
+                 # and place the new field adjacent to the old one
+                 ndtype[nameidx:nameidx + 1] = [
+                     (fname + r1postfix, cdtype),
+                     (fname + r2postfix, fdtype)
+                 ]
+     # Rebuild a dtype from the new fields
+     ndtype = np.dtype(ndtype)
+     # Find the largest nb of common fields :
+     # r1cmn and r2cmn should be equal, but...
+     cmn = max(r1cmn, r2cmn)
+     # Construct an empty array
+     output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
+     names = output.dtype.names
+     for f in r1names:
+         selected = s1[f]
+         if f not in names or (f in r2names and not r2postfix and f not in key):
+             f += r1postfix
+         current = output[f]
+         current[:r1cmn] = selected[:r1cmn]
+         if jointype in ('outer', 'leftouter'):
+             current[cmn:cmn + r1spc] = selected[r1cmn:]
+     for f in r2names:
+         selected = s2[f]
+         if f not in names or (f in r1names and not r1postfix and f not in key):
+             f += r2postfix
+         current = output[f]
+         current[:r2cmn] = selected[:r2cmn]
+         if (jointype == 'outer') and r2spc:
+             current[-r2spc:] = selected[r2cmn:]
+     # Sort and finalize the output
+     output.sort(order=key)
+     kwargs = dict(usemask=usemask, asrecarray=asrecarray)
+     return _fix_output(_fix_defaults(output, defaults), **kwargs)
+
+
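+ # Hedged usage sketch for join_by (illustrative, not part of the file):
+ #
+ #     >>> r1 = np.array([(1, 10.), (2, 20.)], dtype=[('key', 'i4'), ('a', 'f8')])
+ #     >>> r2 = np.array([(2, 200.), (3, 300.)], dtype=[('key', 'i4'), ('b', 'f8')])
+ #     >>> join_by('key', r1, r2, jointype='inner', usemask=False)
+ #     array([(2, 20., 200.)],
+ #           dtype=[('key', '<i4'), ('a', '<f8'), ('b', '<f8')])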
+ def _rec_join_dispatcher(
+         key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+         defaults=None):
+     return (r1, r2)
+
+
+ @array_function_dispatch(_rec_join_dispatcher)
+ def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
+              defaults=None):
+     """
+     Join arrays `r1` and `r2` on keys.
+     Alternative to join_by, that always returns a np.recarray.
+
+     See Also
+     --------
+     join_by : equivalent function
+     """
+     kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
+                   defaults=defaults, usemask=False, asrecarray=True)
+     return join_by(key, r1, r2, **kwargs)
venv/lib/python3.10/site-packages/numpy/lib/shape_base.py ADDED
@@ -0,0 +1,1274 @@
+ import functools
+
+ import numpy.core.numeric as _nx
+ from numpy.core.numeric import asarray, zeros, array, asanyarray
+ from numpy.core.fromnumeric import reshape, transpose
+ from numpy.core.multiarray import normalize_axis_index
+ from numpy.core import overrides
+ from numpy.core import vstack, atleast_3d
+ from numpy.core.numeric import normalize_axis_tuple
+ from numpy.core.shape_base import _arrays_for_stack_dispatcher
+ from numpy.lib.index_tricks import ndindex
+ from numpy.matrixlib.defmatrix import matrix  # this raises all the right alarm bells
+
+
+ __all__ = [
+     'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
+     'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
+     'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis',
+     'put_along_axis'
+     ]
+
+
+ array_function_dispatch = functools.partial(
+     overrides.array_function_dispatch, module='numpy')
+
+
+ def _make_along_axis_idx(arr_shape, indices, axis):
+     # compute dimensions to iterate over
+     if not _nx.issubdtype(indices.dtype, _nx.integer):
+         raise IndexError('`indices` must be an integer array')
+     if len(arr_shape) != indices.ndim:
+         raise ValueError(
+             "`indices` and `arr` must have the same number of dimensions")
+     shape_ones = (1,) * indices.ndim
+     dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))
+
+     # build a fancy index, consisting of orthogonal aranges, with the
+     # requested index inserted at the right location
+     fancy_index = []
+     for dim, n in zip(dest_dims, arr_shape):
+         if dim is None:
+             fancy_index.append(indices)
+         else:
+             ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
+             fancy_index.append(_nx.arange(n).reshape(ind_shape))
+
+     return tuple(fancy_index)
+
+
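+ # Illustrative sketch (not part of the library source): for a (2, 3) array
+ # and axis=1, the helper pairs a column of row numbers with ``indices``, so
+ # ``arr[idx]`` reads ``arr[i, indices[i, j]]`` at every position.
+ #
+ #     >>> idx = _make_along_axis_idx((2, 3), np.zeros((2, 1), dtype=int), 1)
+ #     >>> (idx[0].shape, idx[1].shape)
+ #     ((2, 1), (2, 1))
+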
+ def _take_along_axis_dispatcher(arr, indices, axis):
+     return (arr, indices)
+
+
+ @array_function_dispatch(_take_along_axis_dispatcher)
+ def take_along_axis(arr, indices, axis):
+     """
+     Take values from the input array by matching 1d index and data slices.
+
+     This iterates over matching 1d slices oriented along the specified axis in
+     the index and data arrays, and uses the former to look up values in the
+     latter. These slices can be different lengths.
+
+     Functions returning an index along an axis, like `argsort` and
+     `argpartition`, produce suitable indices for this function.
+
+     .. versionadded:: 1.15.0
+
+     Parameters
+     ----------
+     arr : ndarray (Ni..., M, Nk...)
+         Source array
+     indices : ndarray (Ni..., J, Nk...)
+         Indices to take along each 1d slice of `arr`. This must match the
+         dimension of arr, but dimensions Ni and Nk only need to broadcast
+         against `arr`.
+     axis : int
+         The axis to take 1d slices along. If axis is None, the input array is
+         treated as if it had first been flattened to 1d, for consistency with
+         `sort` and `argsort`.
+
+     Returns
+     -------
+     out: ndarray (Ni..., J, Nk...)
+         The indexed result.
+
+     Notes
+     -----
+     This is equivalent to (but faster than) the following use of `ndindex` and
+     `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+         Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+         J = indices.shape[axis]  # Need not equal M
+         out = np.empty(Ni + (J,) + Nk)
+
+         for ii in ndindex(Ni):
+             for kk in ndindex(Nk):
+                 a_1d       = a      [ii + s_[:,] + kk]
+                 indices_1d = indices[ii + s_[:,] + kk]
+                 out_1d     = out    [ii + s_[:,] + kk]
+                 for j in range(J):
+                     out_1d[j] = a_1d[indices_1d[j]]
+
+     Equivalently, eliminating the inner loop, the last two lines would be::
+
+         out_1d[:] = a_1d[indices_1d]
+
+     See Also
+     --------
+     take : Take along an axis, using the same indices for every 1d slice
+     put_along_axis :
+         Put values into the destination array by matching 1d index and data slices
+
+     Examples
+     --------
+
+     For this sample array
+
+     >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+     We can sort either by using sort directly, or argsort and this function
+
+     >>> np.sort(a, axis=1)
+     array([[10, 20, 30],
+            [40, 50, 60]])
+     >>> ai = np.argsort(a, axis=1)
+     >>> ai
+     array([[0, 2, 1],
+            [1, 2, 0]])
+     >>> np.take_along_axis(a, ai, axis=1)
+     array([[10, 20, 30],
+            [40, 50, 60]])
+
+     The same works for max and min, if you maintain the trivial dimension
+     with ``keepdims``:
+
+     >>> np.max(a, axis=1, keepdims=True)
+     array([[30],
+            [60]])
+     >>> ai = np.argmax(a, axis=1, keepdims=True)
+     >>> ai
+     array([[1],
+            [0]])
+     >>> np.take_along_axis(a, ai, axis=1)
+     array([[30],
+            [60]])
+
+     If we want to get the max and min at the same time, we can stack the
+     indices first
+
+     >>> ai_min = np.argmin(a, axis=1, keepdims=True)
+     >>> ai_max = np.argmax(a, axis=1, keepdims=True)
+     >>> ai = np.concatenate([ai_min, ai_max], axis=1)
+     >>> ai
+     array([[0, 1],
+            [1, 0]])
+     >>> np.take_along_axis(a, ai, axis=1)
+     array([[10, 30],
+            [40, 60]])
+     """
+     # normalize inputs
+     if axis is None:
+         arr = arr.flat
+         arr_shape = (len(arr),)  # flatiter has no .shape
+         axis = 0
+     else:
+         axis = normalize_axis_index(axis, arr.ndim)
+         arr_shape = arr.shape
+
+     # use the fancy index
+     return arr[_make_along_axis_idx(arr_shape, indices, axis)]
+
+
+ def _put_along_axis_dispatcher(arr, indices, values, axis):
+     return (arr, indices, values)
+
+
+ @array_function_dispatch(_put_along_axis_dispatcher)
+ def put_along_axis(arr, indices, values, axis):
+     """
+     Put values into the destination array by matching 1d index and data slices.
+
+     This iterates over matching 1d slices oriented along the specified axis in
+     the index and data arrays, and uses the former to place values into the
+     latter. These slices can be different lengths.
+
+     Functions returning an index along an axis, like `argsort` and
+     `argpartition`, produce suitable indices for this function.
+
+     .. versionadded:: 1.15.0
+
+     Parameters
+     ----------
+     arr : ndarray (Ni..., M, Nk...)
+         Destination array.
+     indices : ndarray (Ni..., J, Nk...)
+         Indices to change along each 1d slice of `arr`. This must match the
+         dimension of arr, but dimensions in Ni and Nk may be 1 to broadcast
+         against `arr`.
+     values : array_like (Ni..., J, Nk...)
+         values to insert at those indices. Its shape and dimension are
+         broadcast to match that of `indices`.
+     axis : int
+         The axis to take 1d slices along. If axis is None, the destination
+         array is treated as if it had first been flattened to a 1d view.
+
+     Notes
+     -----
+     This is equivalent to (but faster than) the following use of `ndindex` and
+     `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
+
+         Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
+         J = indices.shape[axis]  # Need not equal M
+
+         for ii in ndindex(Ni):
+             for kk in ndindex(Nk):
+                 a_1d       = a      [ii + s_[:,] + kk]
+                 indices_1d = indices[ii + s_[:,] + kk]
+                 values_1d  = values [ii + s_[:,] + kk]
+                 for j in range(J):
+                     a_1d[indices_1d[j]] = values_1d[j]
+
+     Equivalently, eliminating the inner loop, the last two lines would be::
+
+         a_1d[indices_1d] = values_1d
+
+     See Also
+     --------
+     take_along_axis :
+         Take values from the input array by matching 1d index and data slices
+
+     Examples
+     --------
+
+     For this sample array
+
+     >>> a = np.array([[10, 30, 20], [60, 40, 50]])
+
+     We can replace the maximum values with:
+
+     >>> ai = np.argmax(a, axis=1, keepdims=True)
+     >>> ai
+     array([[1],
+            [0]])
+     >>> np.put_along_axis(a, ai, 99, axis=1)
+     >>> a
+     array([[10, 99, 20],
+            [99, 40, 50]])
+
+     """
+     # normalize inputs
+     if axis is None:
+         arr = arr.flat
+         axis = 0
+         arr_shape = (len(arr),)  # flatiter has no .shape
+     else:
+         axis = normalize_axis_index(axis, arr.ndim)
+         arr_shape = arr.shape
+
+     # use the fancy index
+     arr[_make_along_axis_idx(arr_shape, indices, axis)] = values
+
+
+ def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs):
+     return (arr,)
+
+
+ @array_function_dispatch(_apply_along_axis_dispatcher)
+ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
+     """
+     Apply a function to 1-D slices along the given axis.
+
+     Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays
+     and `a` is a 1-D slice of `arr` along `axis`.
+
+     This is equivalent to (but faster than) the following use of `ndindex` and
+     `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::
+
+         Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+         for ii in ndindex(Ni):
+             for kk in ndindex(Nk):
+                 f = func1d(arr[ii + s_[:,] + kk])
+                 Nj = f.shape
+                 for jj in ndindex(Nj):
+                     out[ii + jj + kk] = f[jj]
+
+     Equivalently, eliminating the inner loop, this can be expressed as::
+
+         Ni, Nk = a.shape[:axis], a.shape[axis+1:]
+         for ii in ndindex(Ni):
+             for kk in ndindex(Nk):
+                 out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk])
+
+     Parameters
+     ----------
+     func1d : function (M,) -> (Nj...)
+         This function should accept 1-D arrays. It is applied to 1-D
+         slices of `arr` along the specified axis.
+     axis : integer
+         Axis along which `arr` is sliced.
+     arr : ndarray (Ni..., M, Nk...)
+         Input array.
+     args : any
+         Additional arguments to `func1d`.
+     kwargs : any
+         Additional named arguments to `func1d`.
+
+         .. versionadded:: 1.9.0
+
+     Returns
+     -------
+     out : ndarray (Ni..., Nj..., Nk...)
+         The output array. The shape of `out` is identical to the shape of
+         `arr`, except along the `axis` dimension. This axis is removed, and
+         replaced with new dimensions equal to the shape of the return value
+         of `func1d`. So if `func1d` returns a scalar `out` will have one
+         fewer dimensions than `arr`.
+
+     See Also
+     --------
+     apply_over_axes : Apply a function repeatedly over multiple axes.
+
+     Examples
+     --------
+     >>> def my_func(a):
+     ...     \"\"\"Average first and last element of a 1-D array\"\"\"
+     ...     return (a[0] + a[-1]) * 0.5
+     >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
+     >>> np.apply_along_axis(my_func, 0, b)
+     array([4., 5., 6.])
+     >>> np.apply_along_axis(my_func, 1, b)
+     array([2., 5., 8.])
+
+     For a function that returns a 1D array, the number of dimensions in
+     `outarr` is the same as `arr`.
+
+     >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
+     >>> np.apply_along_axis(sorted, 1, b)
+     array([[1, 7, 8],
+            [3, 4, 9],
+            [2, 5, 6]])
+
+     For a function that returns a higher dimensional array, those dimensions
+     are inserted in place of the `axis` dimension.
+
+     >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
+     >>> np.apply_along_axis(np.diag, -1, b)
+     array([[[1, 0, 0],
+             [0, 2, 0],
+             [0, 0, 3]],
+            [[4, 0, 0],
+             [0, 5, 0],
+             [0, 0, 6]],
+            [[7, 0, 0],
+             [0, 8, 0],
+             [0, 0, 9]]])
+     """
+     # handle negative axes
+     arr = asanyarray(arr)
+     nd = arr.ndim
+     axis = normalize_axis_index(axis, nd)
+
+     # arr, with the iteration axis at the end
+     in_dims = list(range(nd))
+     inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis])
+
+     # compute indices for the iteration axes, and append a trailing ellipsis to
+     # prevent 0d arrays decaying to scalars, which fixes gh-8642
+     inds = ndindex(inarr_view.shape[:-1])
+     inds = (ind + (Ellipsis,) for ind in inds)
+
+     # invoke the function on the first item
+     try:
+         ind0 = next(inds)
+     except StopIteration:
+         raise ValueError(
+             'Cannot apply_along_axis when any iteration dimensions are 0'
+         ) from None
+     res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))
+
+     # build a buffer for storing evaluations of func1d.
+     # remove the requested axis, and add the new ones on the end.
+     # laid out so that each write is contiguous.
+     # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
+     buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)
+
+     # permutation of axes such that out = buff.transpose(buff_permute)
+     buff_dims = list(range(buff.ndim))
+     buff_permute = (
+         buff_dims[0 : axis] +
+         buff_dims[buff.ndim-res.ndim : buff.ndim] +
+         buff_dims[axis : buff.ndim-res.ndim]
+     )
+
+     # matrices have a nasty __array_prepare__ and __array_wrap__
+     if not isinstance(res, matrix):
+         buff = res.__array_prepare__(buff)
+
+     # save the first result, then compute and save all remaining results
+     buff[ind0] = res
+     for ind in inds:
+         buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))
+
+     if not isinstance(res, matrix):
+         # wrap the array, to preserve subclasses
+         buff = res.__array_wrap__(buff)
+
+         # finally, rotate the inserted axes back to where they belong
+         return transpose(buff, buff_permute)
+
+     else:
+         # matrices have to be transposed first, because they collapse dimensions!
+         out_arr = transpose(buff, buff_permute)
+         return res.__array_wrap__(out_arr)
+
+
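+ # Worked sketch of ``buff_permute`` above (not part of the library source):
+ # with buff.ndim == 3, res.ndim == 1 and axis == 1, the trailing result axis
+ # is rotated back to position 1.
+ #
+ #     >>> buff_dims = [0, 1, 2]
+ #     >>> axis = 1
+ #     >>> buff_dims[0:axis] + buff_dims[3 - 1:3] + buff_dims[axis:3 - 1]
+ #     [0, 2, 1]
+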
+ def _apply_over_axes_dispatcher(func, a, axes):
+     return (a,)
+
+
+ @array_function_dispatch(_apply_over_axes_dispatcher)
+ def apply_over_axes(func, a, axes):
+     """
+     Apply a function repeatedly over multiple axes.
+
+     `func` is called as `res = func(a, axis)`, where `axis` is the first
+     element of `axes`. The result `res` of the function call must have
+     either the same dimensions as `a` or one less dimension. If `res`
+     has one less dimension than `a`, a dimension is inserted before
+     `axis`. The call to `func` is then repeated for each axis in `axes`,
+     with `res` as the first argument.
+
+     Parameters
+     ----------
+     func : function
+         This function must take two arguments, `func(a, axis)`.
+     a : array_like
+         Input array.
+     axes : array_like
+         Axes over which `func` is applied; the elements must be integers.
+
+     Returns
+     -------
+     apply_over_axis : ndarray
+         The output array. The number of dimensions is the same as `a`,
+         but the shape can be different. This depends on whether `func`
+         changes the shape of its output with respect to its input.
+
+     See Also
+     --------
+     apply_along_axis :
+         Apply a function to 1-D slices of an array along the given axis.
+
+     Notes
+     -----
+     This function is equivalent to tuple axis arguments to reorderable ufuncs
+     with keepdims=True. Tuple axis arguments to ufuncs have been available since
+     version 1.7.0.
+
+     Examples
+     --------
+     >>> a = np.arange(24).reshape(2,3,4)
+     >>> a
+     array([[[ 0,  1,  2,  3],
+             [ 4,  5,  6,  7],
+             [ 8,  9, 10, 11]],
+            [[12, 13, 14, 15],
+             [16, 17, 18, 19],
+             [20, 21, 22, 23]]])
+
+     Sum over axes 0 and 2. The result has same number of dimensions
+     as the original array:
+
+     >>> np.apply_over_axes(np.sum, a, [0,2])
+     array([[[ 60],
+             [ 92],
+             [124]]])
+
+     Tuple axis arguments to ufuncs are equivalent:
+
+     >>> np.sum(a, axis=(0,2), keepdims=True)
+     array([[[ 60],
+             [ 92],
+             [124]]])
+
+     """
+     val = asarray(a)
+     N = a.ndim
+     if array(axes).ndim == 0:
+         axes = (axes,)
+     for axis in axes:
+         if axis < 0:
+             axis = N + axis
+         args = (val, axis)
+         res = func(*args)
+         if res.ndim == val.ndim:
+             val = res
+         else:
+             res = expand_dims(res, axis)
+             if res.ndim == val.ndim:
+                 val = res
+             else:
+                 raise ValueError("function is not returning "
+                                  "an array of the correct shape")
+     return val
+
+
+ def _expand_dims_dispatcher(a, axis):
+     return (a,)
+
+
+ @array_function_dispatch(_expand_dims_dispatcher)
+ def expand_dims(a, axis):
+     """
+     Expand the shape of an array.
+
+     Insert a new axis that will appear at the `axis` position in the expanded
+     array shape.
+
+     Parameters
+     ----------
+     a : array_like
+         Input array.
+     axis : int or tuple of ints
+         Position in the expanded axes where the new axis (or axes) is placed.
+
+         .. deprecated:: 1.13.0
+             Passing an axis where ``axis > a.ndim`` will be treated as
+             ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will
+             be treated as ``axis == 0``. This behavior is deprecated.
+
+         .. versionchanged:: 1.18.0
+             A tuple of axes is now supported. Out of range axes as
+             described above are now forbidden and raise an `AxisError`.
+
+     Returns
+     -------
+     result : ndarray
+         View of `a` with the number of dimensions increased.
+
+     See Also
+     --------
+     squeeze : The inverse operation, removing singleton dimensions
+     reshape : Insert, remove, and combine dimensions, and resize existing ones
+     doc.indexing, atleast_1d, atleast_2d, atleast_3d
+
+     Examples
+     --------
+     >>> x = np.array([1, 2])
+     >>> x.shape
+     (2,)
+
+     The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``:
+
+     >>> y = np.expand_dims(x, axis=0)
+     >>> y
+     array([[1, 2]])
+     >>> y.shape
+     (1, 2)
+
+     The following is equivalent to ``x[:, np.newaxis]``:
+
+     >>> y = np.expand_dims(x, axis=1)
+     >>> y
+     array([[1],
+            [2]])
+     >>> y.shape
+     (2, 1)
+
+     ``axis`` may also be a tuple:
+
+     >>> y = np.expand_dims(x, axis=(0, 1))
+     >>> y
+     array([[[1, 2]]])
+
+     >>> y = np.expand_dims(x, axis=(2, 0))
+     >>> y
+     array([[[1],
+             [2]]])
+
+     Note that some examples may use ``None`` instead of ``np.newaxis``. These
+     are the same objects:
+
+     >>> np.newaxis is None
+     True
+
+     """
+     if isinstance(a, matrix):
+         a = asarray(a)
+     else:
+         a = asanyarray(a)
+
+     if type(axis) not in (tuple, list):
+         axis = (axis,)
+
+     out_ndim = len(axis) + a.ndim
+     axis = normalize_axis_tuple(axis, out_ndim)
+
+     shape_it = iter(a.shape)
+     shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
+
+     return a.reshape(shape)
+
+
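+ # Worked sketch of the shape construction above (not part of the library
+ # source): positions named in ``axis`` get a 1, the remaining positions
+ # consume the original shape in order.
+ #
+ #     >>> shape_it = iter((2, 3))
+ #     >>> [1 if ax in (0, 2) else next(shape_it) for ax in range(4)]
+ #     [1, 2, 1, 3]
+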
+ row_stack = vstack
+
+
+ def _column_stack_dispatcher(tup):
+     return _arrays_for_stack_dispatcher(tup)
+
+
+ @array_function_dispatch(_column_stack_dispatcher)
+ def column_stack(tup):
+     """
+     Stack 1-D arrays as columns into a 2-D array.
+
+     Take a sequence of 1-D arrays and stack them as columns
+     to make a single 2-D array. 2-D arrays are stacked as-is,
+     just like with `hstack`. 1-D arrays are turned into 2-D columns
+     first.
+
+     Parameters
+     ----------
+     tup : sequence of 1-D or 2-D arrays.
+         Arrays to stack. All of them must have the same first dimension.
+
+     Returns
+     -------
+     stacked : 2-D array
+         The array formed by stacking the given arrays.
+
+     See Also
+     --------
+     stack, hstack, vstack, concatenate
+
+     Examples
+     --------
+     >>> a = np.array((1,2,3))
+     >>> b = np.array((2,3,4))
+     >>> np.column_stack((a,b))
+     array([[1, 2],
+            [2, 3],
+            [3, 4]])
+
+     """
+     arrays = []
+     for v in tup:
+         arr = asanyarray(v)
+         if arr.ndim < 2:
+             arr = array(arr, copy=False, subok=True, ndmin=2).T
+         arrays.append(arr)
+     return _nx.concatenate(arrays, 1)
+
+
+ def _dstack_dispatcher(tup):
+     return _arrays_for_stack_dispatcher(tup)
+
+
+ @array_function_dispatch(_dstack_dispatcher)
+ def dstack(tup):
+     """
+     Stack arrays in sequence depth wise (along third axis).
+
+     This is equivalent to concatenation along the third axis after 2-D arrays
+     of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
+     `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
+     `dsplit`.
+
+     This function makes most sense for arrays with up to 3 dimensions. For
+     instance, for pixel-data with a height (first axis), width (second axis),
+     and r/g/b channels (third axis). The functions `concatenate`, `stack` and
+     `block` provide more general stacking and concatenation operations.
+
+     Parameters
+     ----------
+     tup : sequence of arrays
+         The arrays must have the same shape along all but the third axis.
+         1-D or 2-D arrays must have the same shape.
+
+     Returns
+     -------
+     stacked : ndarray
+         The array formed by stacking the given arrays, will be at least 3-D.
+
+     See Also
+     --------
+     concatenate : Join a sequence of arrays along an existing axis.
+     stack : Join a sequence of arrays along a new axis.
+     block : Assemble an nd-array from nested lists of blocks.
+     vstack : Stack arrays in sequence vertically (row wise).
+     hstack : Stack arrays in sequence horizontally (column wise).
+     column_stack : Stack 1-D arrays as columns into a 2-D array.
+     dsplit : Split array along third axis.
+
+     Examples
+     --------
+     >>> a = np.array((1,2,3))
+     >>> b = np.array((2,3,4))
+     >>> np.dstack((a,b))
+     array([[[1, 2],
+             [2, 3],
+             [3, 4]]])
+
+     >>> a = np.array([[1],[2],[3]])
+     >>> b = np.array([[2],[3],[4]])
+     >>> np.dstack((a,b))
+     array([[[1, 2]],
+            [[2, 3]],
+            [[3, 4]]])
+
+     """
+     arrs = atleast_3d(*tup)
+     if not isinstance(arrs, list):
+         arrs = [arrs]
+     return _nx.concatenate(arrs, 2)
+
+
+ def _replace_zero_by_x_arrays(sub_arys):
+     for i in range(len(sub_arys)):
+         if _nx.ndim(sub_arys[i]) == 0:
+             sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
+         elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):
+             sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
+     return sub_arys
+
+
+ def _array_split_dispatcher(ary, indices_or_sections, axis=None):
+     return (ary, indices_or_sections)
+
+
+ @array_function_dispatch(_array_split_dispatcher)
+ def array_split(ary, indices_or_sections, axis=0):
+     """
+     Split an array into multiple sub-arrays.
+
+     Please refer to the ``split`` documentation. The only difference
+     between these functions is that ``array_split`` allows
+     `indices_or_sections` to be an integer that does *not* equally
+     divide the axis. For an array of length l that should be split
+     into n sections, it returns l % n sub-arrays of size l//n + 1
+     and the rest of size l//n.
+
+     See Also
+     --------
+     split : Split array into multiple sub-arrays of equal size.
+
+     Examples
+     --------
+     >>> x = np.arange(8.0)
+     >>> np.array_split(x, 3)
+     [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
+
+     >>> x = np.arange(9)
+     >>> np.array_split(x, 4)
+     [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])]
+
+     """
+     try:
+         Ntotal = ary.shape[axis]
+     except AttributeError:
+         Ntotal = len(ary)
+     try:
+         # handle array case.
+         Nsections = len(indices_or_sections) + 1
+         div_points = [0] + list(indices_or_sections) + [Ntotal]
+     except TypeError:
+         # indices_or_sections is a scalar, not an array.
+         Nsections = int(indices_or_sections)
+         if Nsections <= 0:
+             raise ValueError('number sections must be larger than 0.') from None
+         Neach_section, extras = divmod(Ntotal, Nsections)
+         section_sizes = ([0] +
+                          extras * [Neach_section+1] +
+                          (Nsections-extras) * [Neach_section])
+         div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum()
+
+     sub_arys = []
+     sary = _nx.swapaxes(ary, axis, 0)
+     for i in range(Nsections):
+         st = div_points[i]
+         end = div_points[i + 1]
+         sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
+
+     return sub_arys
+
+
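+ # Worked sketch of the section sizes above (not part of the library source):
+ # for Ntotal=8 and Nsections=3, divmod gives (2, 2), so the sizes are
+ # [3, 3, 2] and the cumulative division points become [0, 3, 6, 8].
+ #
+ #     >>> Neach_section, extras = divmod(8, 3)
+ #     >>> [0] + extras * [Neach_section + 1] + (3 - extras) * [Neach_section]
+ #     [0, 3, 3, 2]
+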
+ def _split_dispatcher(ary, indices_or_sections, axis=None):
+     return (ary, indices_or_sections)
+
+
+ @array_function_dispatch(_split_dispatcher)
+ def split(ary, indices_or_sections, axis=0):
+     """
+     Split an array into multiple sub-arrays as views into `ary`.
+
+     Parameters
+     ----------
+     ary : ndarray
+         Array to be divided into sub-arrays.
+     indices_or_sections : int or 1-D array
+         If `indices_or_sections` is an integer, N, the array will be divided
+         into N equal arrays along `axis`. If such a split is not possible,
+         an error is raised.
+
+         If `indices_or_sections` is a 1-D array of sorted integers, the
+         entries indicate where along `axis` the array is split. For example,
+         ``[2, 3]`` would, for ``axis=0``, result in
+
+         - ary[:2]
+         - ary[2:3]
+         - ary[3:]
+
+         If an index exceeds the dimension of the array along `axis`,
+         an empty sub-array is returned correspondingly.
+     axis : int, optional
+         The axis along which to split, default is 0.
+
+     Returns
+     -------
+     sub-arrays : list of ndarrays
+         A list of sub-arrays as views into `ary`.
+
+     Raises
+     ------
+     ValueError
+         If `indices_or_sections` is given as an integer, but
+         a split does not result in equal division.
+
+     See Also
+     --------
+     array_split : Split an array into multiple sub-arrays of equal or
+                   near-equal size. Does not raise an exception if
+                   an equal division cannot be made.
+     hsplit : Split array into multiple sub-arrays horizontally (column-wise).
+     vsplit : Split array into multiple sub-arrays vertically (row wise).
+     dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
+     concatenate : Join a sequence of arrays along an existing axis.
+     stack : Join a sequence of arrays along a new axis.
+     hstack : Stack arrays in sequence horizontally (column wise).
+     vstack : Stack arrays in sequence vertically (row wise).
+     dstack : Stack arrays in sequence depth wise (along third dimension).
+
+     Examples
+     --------
+     >>> x = np.arange(9.0)
+     >>> np.split(x, 3)
+     [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
+
+     >>> x = np.arange(8.0)
+     >>> np.split(x, [3, 5, 6, 10])
+     [array([0., 1., 2.]),
+      array([3., 4.]),
+      array([5.]),
+      array([6., 7.]),
+      array([], dtype=float64)]
+
+     """
+     try:
+         len(indices_or_sections)
+     except TypeError:
+         sections = indices_or_sections
+         N = ary.shape[axis]
+         if N % sections:
+             raise ValueError(
+                 'array split does not result in an equal division') from None
+     return array_split(ary, indices_or_sections, axis)
+
+
+ def _hvdsplit_dispatcher(ary, indices_or_sections):
+     return (ary, indices_or_sections)
+
+
+ @array_function_dispatch(_hvdsplit_dispatcher)
+ def hsplit(ary, indices_or_sections):
+     """
+     Split an array into multiple sub-arrays horizontally (column-wise).
+
+     Please refer to the `split` documentation. `hsplit` is equivalent
+     to `split` with ``axis=1``, the array is always split along the second
+     axis except for 1-D arrays, where it is split at ``axis=0``.
+
+     See Also
+     --------
+     split : Split an array into multiple sub-arrays of equal size.
+
+     Examples
+     --------
+     >>> x = np.arange(16.0).reshape(4, 4)
+     >>> x
+     array([[ 0.,  1.,  2.,  3.],
+            [ 4.,  5.,  6.,  7.],
+            [ 8.,  9., 10., 11.],
+            [12., 13., 14., 15.]])
+     >>> np.hsplit(x, 2)
+     [array([[ 0.,  1.],
+             [ 4.,  5.],
+             [ 8.,  9.],
+             [12., 13.]]),
+      array([[ 2.,  3.],
+             [ 6.,  7.],
+             [10., 11.],
+             [14., 15.]])]
+     >>> np.hsplit(x, np.array([3, 6]))
+     [array([[ 0.,  1.,  2.],
+             [ 4.,  5.,  6.],
+             [ 8.,  9., 10.],
+             [12., 13., 14.]]),
+      array([[ 3.],
+             [ 7.],
+             [11.],
+             [15.]]),
+      array([], shape=(4, 0), dtype=float64)]
+
+     With a higher dimensional array the split is still along the second axis.
+
+     >>> x = np.arange(8.0).reshape(2, 2, 2)
+     >>> x
+     array([[[0., 1.],
+             [2., 3.]],
+            [[4., 5.],
+             [6., 7.]]])
+     >>> np.hsplit(x, 2)
+     [array([[[0., 1.]],
+             [[4., 5.]]]),
+      array([[[2., 3.]],
+             [[6., 7.]]])]
+
+     With a 1-D array, the split is along axis 0.
+
+     >>> x = np.array([0, 1, 2, 3, 4, 5])
+     >>> np.hsplit(x, 2)
+     [array([0, 1, 2]), array([3, 4, 5])]
+
+     """
+     if _nx.ndim(ary) == 0:
+         raise ValueError('hsplit only works on arrays of 1 or more dimensions')
+     if ary.ndim > 1:
+         return split(ary, indices_or_sections, 1)
+     else:
+         return split(ary, indices_or_sections, 0)
+
+
+ @array_function_dispatch(_hvdsplit_dispatcher)
+ def vsplit(ary, indices_or_sections):
+     """
+     Split an array into multiple sub-arrays vertically (row-wise).
+
+     Please refer to the ``split`` documentation. ``vsplit`` is equivalent
+     to ``split`` with `axis=0` (default), the array is always split along the
+     first axis regardless of the array dimension.
+
+     See Also
+     --------
+     split : Split an array into multiple sub-arrays of equal size.
+
+     Examples
+     --------
+     >>> x = np.arange(16.0).reshape(4, 4)
+     >>> x
+     array([[ 0.,  1.,  2.,  3.],
+            [ 4.,  5.,  6.,  7.],
+            [ 8.,  9., 10., 11.],
+            [12., 13., 14., 15.]])
+     >>> np.vsplit(x, 2)
+     [array([[0., 1., 2., 3.],
+            [4., 5., 6., 7.]]), array([[ 8.,  9., 10., 11.],
+            [12., 13., 14., 15.]])]
+     >>> np.vsplit(x, np.array([3, 6]))
+     [array([[ 0.,  1.,  2.,  3.],
+            [ 4.,  5.,  6.,  7.],
+            [ 8.,  9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)]
+
+     With a higher dimensional array the split is still along the first axis.
+
+     >>> x = np.arange(8.0).reshape(2, 2, 2)
+     >>> x
+     array([[[0., 1.],
+             [2., 3.]],
+            [[4., 5.],
+             [6., 7.]]])
+     >>> np.vsplit(x, 2)
+     [array([[[0., 1.],
+             [2., 3.]]]), array([[[4., 5.],
+             [6., 7.]]])]
+
+     """
+     if _nx.ndim(ary) < 2:
+         raise ValueError('vsplit only works on arrays of 2 or more dimensions')
+     return split(ary, indices_or_sections, 0)
+
+
992
+ @array_function_dispatch(_hvdsplit_dispatcher)
993
+ def dsplit(ary, indices_or_sections):
994
+ """
995
+ Split array into multiple sub-arrays along the 3rd axis (depth).
996
+
997
+ Please refer to the `split` documentation. `dsplit` is equivalent
998
+ to `split` with ``axis=2``; the array is always split along the third
999
+ axis, provided the array dimension is greater than or equal to 3.
1000
+
1001
+ See Also
1002
+ --------
1003
+ split : Split an array into multiple sub-arrays of equal size.
1004
+
1005
+ Examples
1006
+ --------
1007
+ >>> x = np.arange(16.0).reshape(2, 2, 4)
1008
+ >>> x
1009
+ array([[[ 0., 1., 2., 3.],
1010
+ [ 4., 5., 6., 7.]],
1011
+ [[ 8., 9., 10., 11.],
1012
+ [12., 13., 14., 15.]]])
1013
+ >>> np.dsplit(x, 2)
1014
+ [array([[[ 0., 1.],
1015
+ [ 4., 5.]],
1016
+ [[ 8., 9.],
1017
+ [12., 13.]]]), array([[[ 2., 3.],
1018
+ [ 6., 7.]],
1019
+ [[10., 11.],
1020
+ [14., 15.]]])]
1021
+ >>> np.dsplit(x, np.array([3, 6]))
1022
+ [array([[[ 0., 1., 2.],
1023
+ [ 4., 5., 6.]],
1024
+ [[ 8., 9., 10.],
1025
+ [12., 13., 14.]]]),
1026
+ array([[[ 3.],
1027
+ [ 7.]],
1028
+ [[11.],
1029
+ [15.]]]),
1030
+ array([], shape=(2, 2, 0), dtype=float64)]
1031
+ """
1032
+ if _nx.ndim(ary) < 3:
1033
+ raise ValueError('dsplit only works on arrays of 3 or more dimensions')
1034
+ return split(ary, indices_or_sections, 2)
1035
+
1036
+
1037
+ def get_array_prepare(*args):
1038
+ """Find the wrapper for the array with the highest priority.
1039
+
1040
+ In case of ties, leftmost wins. If no wrapper is found, return None.
1041
+ """
1042
+ wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
1043
+ x.__array_prepare__) for i, x in enumerate(args)
1044
+ if hasattr(x, '__array_prepare__'))
1045
+ if wrappers:
1046
+ return wrappers[-1][-1]
1047
+ return None
1048
+
1049
+
1050
+ def get_array_wrap(*args):
1051
+ """Find the wrapper for the array with the highest priority.
1052
+
1053
+ In case of ties, leftmost wins. If no wrapper is found, return None.
1054
+ """
1055
+ wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
1056
+ x.__array_wrap__) for i, x in enumerate(args)
1057
+ if hasattr(x, '__array_wrap__'))
1058
+ if wrappers:
1059
+ return wrappers[-1][-1]
1060
+ return None
1061
+
1062
+
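+ # --- Illustrative sketch (editor's addition, not part of NumPy source) ---
+ # How get_array_wrap breaks ties: equal __array_priority__ means the
+ # leftmost argument wins. `Wrapped` is a hypothetical class for this demo.
+ #
+ #     class Wrapped:
+ #         __array_priority__ = 10.0
+ #         def __array_wrap__(self, arr, context=None):
+ #             return arr
+ #
+ #     a, b = Wrapped(), Wrapped()
+ #     assert get_array_wrap(a, b) == a.__array_wrap__  # leftmost wins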
1063
+ def _kron_dispatcher(a, b):
1064
+ return (a, b)
1065
+
1066
+
1067
+ @array_function_dispatch(_kron_dispatcher)
1068
+ def kron(a, b):
1069
+ """
1070
+ Kronecker product of two arrays.
1071
+
1072
+ Computes the Kronecker product, a composite array made of blocks of the
1073
+ second array scaled by the first.
1074
+
1075
+ Parameters
1076
+ ----------
1077
+ a, b : array_like
1078
+
1079
+ Returns
1080
+ -------
1081
+ out : ndarray
1082
+
1083
+ See Also
1084
+ --------
1085
+ outer : The outer product
1086
+
1087
+ Notes
1088
+ -----
1089
+ The function assumes that the number of dimensions of `a` and `b`
1090
+ is the same, if necessary prepending the smaller shape with ones.
1091
+ If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``,
1092
+ the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``.
1093
+ The elements are products of elements from `a` and `b`, organized
1094
+ explicitly by::
1095
+
1096
+ kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
1097
+
1098
+ where::
1099
+
1100
+ kt = it * st + jt, t = 0,...,N
1101
+
1102
+ In the common 2-D case (N=1), the block structure can be visualized::
1103
+
1104
+ [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
1105
+ [ ... ... ],
1106
+ [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
1107
+
1108
+
1109
+ Examples
1110
+ --------
1111
+ >>> np.kron([1,10,100], [5,6,7])
1112
+ array([ 5, 6, 7, ..., 500, 600, 700])
1113
+ >>> np.kron([5,6,7], [1,10,100])
1114
+ array([ 5, 50, 500, ..., 7, 70, 700])
1115
+
1116
+ >>> np.kron(np.eye(2), np.ones((2,2)))
1117
+ array([[1., 1., 0., 0.],
1118
+ [1., 1., 0., 0.],
1119
+ [0., 0., 1., 1.],
1120
+ [0., 0., 1., 1.]])
1121
+
1122
+ >>> a = np.arange(100).reshape((2,5,2,5))
1123
+ >>> b = np.arange(24).reshape((2,3,4))
1124
+ >>> c = np.kron(a,b)
1125
+ >>> c.shape
1126
+ (2, 10, 6, 20)
1127
+ >>> I = (1,3,0,2)
1128
+ >>> J = (0,2,1)
1129
+ >>> J1 = (0,) + J # extend to ndim=4
1130
+ >>> S1 = (1,) + b.shape
1131
+ >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
1132
+ >>> c[K] == a[I]*b[J]
1133
+ True
1134
+
1135
+ """
1136
+ # Working:
1137
+ # 1. Equalise the shapes by prepending smaller array with 1s
1138
+ # 2. Expand shapes of both the arrays by adding new axes at
1139
+ # odd positions for 1st array and even positions for 2nd
1140
+ # 3. Compute the product of the modified arrays
1141
+ # 4. The innermost array elements now contain the rows of
1142
+ # the Kronecker product
1143
+ # 5. Reshape the result to kron's shape, which is the same as the
1144
+ # element-wise product of the shapes of the two arrays.
1145
+ b = asanyarray(b)
1146
+ a = array(a, copy=False, subok=True, ndmin=b.ndim)
1147
+ is_any_mat = isinstance(a, matrix) or isinstance(b, matrix)
1148
+ ndb, nda = b.ndim, a.ndim
1149
+ nd = max(ndb, nda)
1150
+
1151
+ if (nda == 0 or ndb == 0):
1152
+ return _nx.multiply(a, b)
1153
+
1154
+ as_ = a.shape
1155
+ bs = b.shape
1156
+ if not a.flags.contiguous:
1157
+ a = reshape(a, as_)
1158
+ if not b.flags.contiguous:
1159
+ b = reshape(b, bs)
1160
+
1161
+ # Equalise the shapes by prepending smaller one with 1s
1162
+ as_ = (1,)*max(0, ndb-nda) + as_
1163
+ bs = (1,)*max(0, nda-ndb) + bs
1164
+
1165
+ # Insert empty dimensions
1166
+ a_arr = expand_dims(a, axis=tuple(range(ndb-nda)))
1167
+ b_arr = expand_dims(b, axis=tuple(range(nda-ndb)))
1168
+
1169
+ # Compute the product
1170
+ a_arr = expand_dims(a_arr, axis=tuple(range(1, nd*2, 2)))
1171
+ b_arr = expand_dims(b_arr, axis=tuple(range(0, nd*2, 2)))
1172
+ # In case of `mat`, convert result to `array`
1173
+ result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat))
1174
+
1175
+ # Reshape back
1176
+ result = result.reshape(_nx.multiply(as_, bs))
1177
+
1178
+ return result if not is_any_mat else matrix(result, copy=False)
1179
+
1180
+
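+ # --- Illustrative sketch (editor's addition, not part of NumPy source) ---
+ # The shape identity from the Notes above, checked numerically: the output
+ # shape is the element-wise product of the (ones-padded) input shapes.
+ #
+ #     >>> np.kron(np.ones((2, 3)), np.ones((4, 5))).shape
+ #     (8, 15)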
1181
+ def _tile_dispatcher(A, reps):
1182
+ return (A, reps)
1183
+
1184
+
1185
+ @array_function_dispatch(_tile_dispatcher)
1186
+ def tile(A, reps):
1187
+ """
1188
+ Construct an array by repeating A the number of times given by reps.
1189
+
1190
+ If `reps` has length ``d``, the result will have dimension of
1191
+ ``max(d, A.ndim)``.
1192
+
1193
+ If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
1194
+ axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
1195
+ or shape (1, 1, 3) for 3-D replication. If this is not the desired
1196
+ behavior, promote `A` to d-dimensions manually before calling this
1197
+ function.
1198
+
1199
+ If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
1200
+ Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
1201
+ (1, 1, 2, 2).
1202
+
1203
+ Note: Although tile may be used for broadcasting, it is strongly
1204
+ recommended to use numpy's broadcasting operations and functions.
1205
+
1206
+ Parameters
1207
+ ----------
1208
+ A : array_like
1209
+ The input array.
1210
+ reps : array_like
1211
+ The number of repetitions of `A` along each axis.
1212
+
1213
+ Returns
1214
+ -------
1215
+ c : ndarray
1216
+ The tiled output array.
1217
+
1218
+ See Also
1219
+ --------
1220
+ repeat : Repeat elements of an array.
1221
+ broadcast_to : Broadcast an array to a new shape
1222
+
1223
+ Examples
1224
+ --------
1225
+ >>> a = np.array([0, 1, 2])
1226
+ >>> np.tile(a, 2)
1227
+ array([0, 1, 2, 0, 1, 2])
1228
+ >>> np.tile(a, (2, 2))
1229
+ array([[0, 1, 2, 0, 1, 2],
1230
+ [0, 1, 2, 0, 1, 2]])
1231
+ >>> np.tile(a, (2, 1, 2))
1232
+ array([[[0, 1, 2, 0, 1, 2]],
1233
+ [[0, 1, 2, 0, 1, 2]]])
1234
+
1235
+ >>> b = np.array([[1, 2], [3, 4]])
1236
+ >>> np.tile(b, 2)
1237
+ array([[1, 2, 1, 2],
1238
+ [3, 4, 3, 4]])
1239
+ >>> np.tile(b, (2, 1))
1240
+ array([[1, 2],
1241
+ [3, 4],
1242
+ [1, 2],
1243
+ [3, 4]])
1244
+
1245
+ >>> c = np.array([1,2,3,4])
1246
+ >>> np.tile(c,(4,1))
1247
+ array([[1, 2, 3, 4],
1248
+ [1, 2, 3, 4],
1249
+ [1, 2, 3, 4],
1250
+ [1, 2, 3, 4]])
1251
+ """
1252
+ try:
1253
+ tup = tuple(reps)
1254
+ except TypeError:
1255
+ tup = (reps,)
1256
+ d = len(tup)
1257
+ if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
1258
+ # Fixes the problem that the function does not make a copy if A is a
1259
+ # numpy array and the repetitions are 1 in all dimensions
1260
+ return _nx.array(A, copy=True, subok=True, ndmin=d)
1261
+ else:
1262
+ # Note that no copy of zero-sized arrays is made. However since they
1263
+ # have no data there is no risk of an inadvertent overwrite.
1264
+ c = _nx.array(A, copy=False, subok=True, ndmin=d)
1265
+ if (d < c.ndim):
1266
+ tup = (1,)*(c.ndim-d) + tup
1267
+ shape_out = tuple(s*t for s, t in zip(c.shape, tup))
1268
+ n = c.size
1269
+ if n > 0:
1270
+ for dim_in, nrep in zip(c.shape, tup):
1271
+ if nrep != 1:
1272
+ c = c.reshape(-1, n).repeat(nrep, 0)
1273
+ n //= dim_in
1274
+ return c.reshape(shape_out)
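+
+ # --- Illustrative sketch (editor's addition, not part of NumPy source) ---
+ # The docstring's note in practice: broadcasting reproduces a common tile
+ # pattern without materialising the repeats eagerly.
+ #
+ #     >>> col = np.arange(3).reshape(3, 1)
+ #     >>> np.array_equal(np.tile(col, (1, 4)), np.broadcast_to(col, (3, 4)))
+ #     True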
venv/lib/python3.10/site-packages/numpy/lib/utils.py ADDED
@@ -0,0 +1,1211 @@
1
+ import os
2
+ import sys
3
+ import textwrap
4
+ import types
5
+ import re
6
+ import warnings
7
+ import functools
8
+ import platform
9
+
10
+ from .._utils import set_module
11
+ from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
12
+ from numpy.core import ndarray, ufunc, asarray
13
+ import numpy as np
14
+
15
+ __all__ = [
16
+ 'issubclass_', 'issubsctype', 'issubdtype', 'deprecate',
17
+ 'deprecate_with_doc', 'get_include', 'info', 'source', 'who',
18
+ 'lookfor', 'byte_bounds', 'safe_eval', 'show_runtime'
19
+ ]
20
+
21
+
22
+ def show_runtime():
23
+ """
24
+ Print information about various resources in the system
25
+ including available intrinsic support and BLAS/LAPACK library
26
+ in use
27
+
28
+ .. versionadded:: 1.24.0
29
+
30
+ See Also
31
+ --------
32
+ show_config : Show libraries in the system on which NumPy was built.
33
+
34
+ Notes
35
+ -----
36
+ 1. Information is derived with the help of the `threadpoolctl <https://pypi.org/project/threadpoolctl/>`_
37
+ library, if available.
38
+ 2. SIMD-related information is derived from ``__cpu_features__``,
39
+ ``__cpu_baseline__`` and ``__cpu_dispatch__``.
40
+
41
+ """
42
+ from numpy.core._multiarray_umath import (
43
+ __cpu_features__, __cpu_baseline__, __cpu_dispatch__
44
+ )
45
+ from pprint import pprint
46
+ config_found = [{
47
+ "numpy_version": np.__version__,
48
+ "python": sys.version,
49
+ "uname": platform.uname(),
50
+ }]
51
+ features_found, features_not_found = [], []
52
+ for feature in __cpu_dispatch__:
53
+ if __cpu_features__[feature]:
54
+ features_found.append(feature)
55
+ else:
56
+ features_not_found.append(feature)
57
+ config_found.append({
58
+ "simd_extensions": {
59
+ "baseline": __cpu_baseline__,
60
+ "found": features_found,
61
+ "not_found": features_not_found
62
+ }
63
+ })
64
+ try:
65
+ from threadpoolctl import threadpool_info
66
+ config_found.extend(threadpool_info())
67
+ except ImportError:
68
+ print("WARNING: `threadpoolctl` not found in system!"
69
+ " Install it by `pip install threadpoolctl`."
70
+ " Once installed, try `np.show_runtime` again"
71
+ " for more detailed build information")
72
+ pprint(config_found)
73
+
74
+
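+ # --- Illustrative sketch (editor's addition, not part of NumPy source) ---
+ # Typical interactive use; the exact entries vary by machine and build.
+ #
+ #     >>> np.show_runtime()  # doctest: +SKIP
+ #     [{'numpy_version': ..., 'python': ..., 'uname': ...},
+ #      {'simd_extensions': {'baseline': [...], 'found': [...],
+ #                           'not_found': [...]}},
+ #      ...]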
75
+ def get_include():
76
+ """
77
+ Return the directory that contains the NumPy \\*.h header files.
78
+
79
+ Extension modules that need to compile against NumPy should use this
80
+ function to locate the appropriate include directory.
81
+
82
+ Notes
83
+ -----
84
+ When using ``distutils``, for example in ``setup.py``::
85
+
86
+ import numpy as np
87
+ ...
88
+ Extension('extension_name', ...
89
+ include_dirs=[np.get_include()])
90
+ ...
91
+
92
+ """
93
+ import numpy
94
+ if numpy.show_config is None:
95
+ # running from numpy source directory
96
+ d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')
97
+ else:
98
+ # using installed numpy core headers
99
+ import numpy.core as core
100
+ d = os.path.join(os.path.dirname(core.__file__), 'include')
101
+ return d
102
+
103
+
104
+ class _Deprecate:
105
+ """
106
+ Decorator class to deprecate old functions.
107
+
108
+ Refer to `deprecate` for details.
109
+
110
+ See Also
111
+ --------
112
+ deprecate
113
+
114
+ """
115
+
116
+ def __init__(self, old_name=None, new_name=None, message=None):
117
+ self.old_name = old_name
118
+ self.new_name = new_name
119
+ self.message = message
120
+
121
+ def __call__(self, func, *args, **kwargs):
122
+ """
123
+ Decorator call. Refer to ``deprecate``.
124
+
125
+ """
126
+ old_name = self.old_name
127
+ new_name = self.new_name
128
+ message = self.message
129
+
130
+ if old_name is None:
131
+ old_name = func.__name__
132
+ if new_name is None:
133
+ depdoc = "`%s` is deprecated!" % old_name
134
+ else:
135
+ depdoc = "`%s` is deprecated, use `%s` instead!" % \
136
+ (old_name, new_name)
137
+
138
+ if message is not None:
139
+ depdoc += "\n" + message
140
+
141
+ @functools.wraps(func)
142
+ def newfunc(*args, **kwds):
143
+ warnings.warn(depdoc, DeprecationWarning, stacklevel=2)
144
+ return func(*args, **kwds)
145
+
146
+ newfunc.__name__ = old_name
147
+ doc = func.__doc__
148
+ if doc is None:
149
+ doc = depdoc
150
+ else:
151
+ lines = doc.expandtabs().split('\n')
152
+ indent = _get_indent(lines[1:])
153
+ if lines[0].lstrip():
154
+ # Indent the original first line to let inspect.cleandoc()
155
+ # dedent the docstring despite the deprecation notice.
156
+ doc = indent * ' ' + doc
157
+ else:
158
+ # Remove the same leading blank lines as cleandoc() would.
159
+ skip = len(lines[0]) + 1
160
+ for line in lines[1:]:
161
+ if len(line) > indent:
162
+ break
163
+ skip += len(line) + 1
164
+ doc = doc[skip:]
165
+ depdoc = textwrap.indent(depdoc, ' ' * indent)
166
+ doc = '\n\n'.join([depdoc, doc])
167
+ newfunc.__doc__ = doc
168
+
169
+ return newfunc
170
+
171
+
172
+ def _get_indent(lines):
173
+ """
174
+ Determines the leading whitespace that could be removed from all the lines.
175
+ """
176
+ indent = sys.maxsize
177
+ for line in lines:
178
+ content = len(line.lstrip())
179
+ if content:
180
+ indent = min(indent, len(line) - content)
181
+ if indent == sys.maxsize:
182
+ indent = 0
183
+ return indent
184
+
185
+
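+ # --- Illustrative sketch (editor's addition, not part of NumPy source) ---
+ # _get_indent reports the common leading whitespace of the non-blank lines:
+ #
+ #     >>> _get_indent(["    alpha", "      beta", "", "    gamma"])
+ #     4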
186
+ def deprecate(*args, **kwargs):
187
+ """
188
+ Issues a DeprecationWarning, adds warning to `old_name`'s
189
+ docstring, rebinds ``old_name.__name__`` and returns the new
190
+ function object.
191
+
192
+ This function may also be used as a decorator.
193
+
194
+ Parameters
195
+ ----------
196
+ func : function
197
+ The function to be deprecated.
198
+ old_name : str, optional
199
+ The name of the function to be deprecated. Default is None, in
200
+ which case the name of `func` is used.
201
+ new_name : str, optional
202
+ The new name for the function. Default is None, in which case the
203
+ deprecation message is that `old_name` is deprecated. If given, the
204
+ deprecation message is that `old_name` is deprecated and `new_name`
205
+ should be used instead.
206
+ message : str, optional
207
+ Additional explanation of the deprecation. Displayed in the
208
+ docstring after the warning.
209
+
210
+ Returns
211
+ -------
212
+ old_func : function
213
+ The deprecated function.
214
+
215
+ Examples
216
+ --------
217
+ Note that ``olduint`` returns a value after printing a
218
+ DeprecationWarning:
219
+
220
+ >>> olduint = np.deprecate(np.uint)
221
+ DeprecationWarning: `uint64` is deprecated! # may vary
222
+ >>> olduint(6)
223
+ 6
224
+
225
+ """
226
+ # Deprecate may be run as a function or as a decorator
227
+ # If run as a function, we initialise the decorator class
228
+ # and execute its __call__ method.
229
+
230
+ if args:
231
+ fn = args[0]
232
+ args = args[1:]
233
+
234
+ return _Deprecate(*args, **kwargs)(fn)
235
+ else:
236
+ return _Deprecate(*args, **kwargs)
237
+
238
+
239
+ def deprecate_with_doc(msg):
240
+ """
241
+ Deprecates a function and includes the deprecation in its docstring.
242
+
243
+ This function is used as a decorator. It returns an object that can be
245
+ called with the to-be-decorated function as its argument to issue a
246
+ DeprecationWarning; this adds the warning to that function's
247
+ docstring and returns the new function object.
247
+
248
+ See Also
249
+ --------
250
+ deprecate : Decorate a function such that it issues a `DeprecationWarning`
251
+
252
+ Parameters
253
+ ----------
254
+ msg : str
255
+ Additional explanation of the deprecation. Displayed in the
256
+ docstring after the warning.
257
+
258
+ Returns
259
+ -------
260
+ obj : object
261
+
262
+ """
263
+ return _Deprecate(message=msg)
264
+
265
+
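+ # --- Illustrative sketch (editor's addition, not part of NumPy source) ---
+ # Decorator usage; `old_sum` is a hypothetical function name.
+ #
+ #     @deprecate_with_doc("Use the builtin ``sum`` instead.")
+ #     def old_sum(values):
+ #         return sum(values)
+ #
+ #     # Calling old_sum now emits a DeprecationWarning, and its docstring
+ #     # begins with "`old_sum` is deprecated!" plus the message above.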
266
+ #--------------------------------------------
267
+ # Determine if two arrays can share memory
268
+ #--------------------------------------------
269
+
270
+ def byte_bounds(a):
271
+ """
272
+ Returns pointers to the end-points of an array.
273
+
274
+ Parameters
275
+ ----------
276
+ a : ndarray
277
+ Input array. It must conform to the Python-side of the array
278
+ interface.
279
+
280
+ Returns
281
+ -------
282
+ (low, high) : tuple of 2 integers
283
+ The first integer is the first byte of the array, the second
284
+ integer is just past the last byte of the array. If `a` is not
285
+ contiguous it will not use every byte between the (`low`, `high`)
286
+ values.
287
+
288
+ Examples
289
+ --------
290
+ >>> I = np.eye(2, dtype='f'); I.dtype
291
+ dtype('float32')
292
+ >>> low, high = np.byte_bounds(I)
293
+ >>> high - low == I.size*I.itemsize
294
+ True
295
+ >>> I = np.eye(2); I.dtype
296
+ dtype('float64')
297
+ >>> low, high = np.byte_bounds(I)
298
+ >>> high - low == I.size*I.itemsize
299
+ True
300
+
301
+ """
302
+ ai = a.__array_interface__
303
+ a_data = ai['data'][0]
304
+ astrides = ai['strides']
305
+ ashape = ai['shape']
306
+ bytes_a = asarray(a).dtype.itemsize
307
+
308
+ a_low = a_high = a_data
309
+ if astrides is None:
310
+ # contiguous case
311
+ a_high += a.size * bytes_a
312
+ else:
313
+ for shape, stride in zip(ashape, astrides):
314
+ if stride < 0:
315
+ a_low += (shape-1)*stride
316
+ else:
317
+ a_high += (shape-1)*stride
318
+ a_high += bytes_a
319
+ return a_low, a_high
320
+
321
+
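+ # --- Illustrative sketch (editor's addition, not part of NumPy source) ---
+ # A common use of byte_bounds: a conservative buffer-overlap test.
+ #
+ #     >>> x = np.arange(10)
+ #     >>> (lo1, hi1), (lo2, hi2) = np.byte_bounds(x), np.byte_bounds(x[3:7])
+ #     >>> max(lo1, lo2) < min(hi1, hi2)  # the byte ranges intersect
+ #     True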
322
+ #-----------------------------------------------------------------------------
323
+ # Function for output and information on the variables used.
324
+ #-----------------------------------------------------------------------------
325
+
326
+
327
+ def who(vardict=None):
328
+ """
329
+ Print the NumPy arrays in the given dictionary.
330
+
331
+ If there is no dictionary passed in or `vardict` is None then it prints
332
+ the NumPy arrays in the globals() dictionary (all NumPy arrays in the
333
+ namespace).
334
+
335
+ Parameters
336
+ ----------
337
+ vardict : dict, optional
338
+ A dictionary possibly containing ndarrays. Default is globals().
339
+
340
+ Returns
341
+ -------
342
+ out : None
343
+ Returns ``None``.
344
+
345
+ Notes
346
+ -----
347
+ Prints out the name, shape, bytes and type of all of the ndarrays
348
+ present in `vardict`.
349
+
350
+ Examples
351
+ --------
352
+ >>> a = np.arange(10)
353
+ >>> b = np.ones(20)
354
+ >>> np.who()
355
+ Name Shape Bytes Type
356
+ ===========================================================
357
+ a 10 80 int64
358
+ b 20 160 float64
359
+ Upper bound on total bytes = 240
360
+
361
+ >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
362
+ ... 'idx':5}
363
+ >>> np.who(d)
364
+ Name Shape Bytes Type
365
+ ===========================================================
366
+ x 2 16 float64
367
+ y 3 24 float64
368
+ Upper bound on total bytes = 40
369
+
370
+ """
371
+ if vardict is None:
372
+ frame = sys._getframe().f_back
373
+ vardict = frame.f_globals
374
+ sta = []
375
+ cache = {}
376
+ for name in vardict.keys():
377
+ if isinstance(vardict[name], ndarray):
378
+ var = vardict[name]
379
+ idv = id(var)
380
+ if idv in cache.keys():
381
+ namestr = name + " (%s)" % cache[idv]
382
+ original = 0
383
+ else:
384
+ cache[idv] = name
385
+ namestr = name
386
+ original = 1
387
+ shapestr = " x ".join(map(str, var.shape))
388
+ bytestr = str(var.nbytes)
389
+ sta.append([namestr, shapestr, bytestr, var.dtype.name,
390
+ original])
391
+
392
+ maxname = 0
393
+ maxshape = 0
394
+ maxbyte = 0
395
+ totalbytes = 0
396
+ for val in sta:
397
+ if maxname < len(val[0]):
398
+ maxname = len(val[0])
399
+ if maxshape < len(val[1]):
400
+ maxshape = len(val[1])
401
+ if maxbyte < len(val[2]):
402
+ maxbyte = len(val[2])
403
+ if val[4]:
404
+ totalbytes += int(val[2])
405
+
406
+ if len(sta) > 0:
407
+ sp1 = max(10, maxname)
408
+ sp2 = max(10, maxshape)
409
+ sp3 = max(10, maxbyte)
410
+ prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
411
+ print(prval + "\n" + "="*(len(prval)+5) + "\n")
412
+
413
+ for val in sta:
414
+ print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
415
+ val[1], ' '*(sp2-len(val[1])+5),
416
+ val[2], ' '*(sp3-len(val[2])+5),
417
+ val[3]))
418
+ print("\nUpper bound on total bytes = %d" % totalbytes)
419
+ return
420
+
421
+ #-----------------------------------------------------------------------------
422
+
423
+
424
+ # NOTE: pydoc defines a help function which works similarly to this
425
+ # except it uses a pager to take over the screen.
426
+
427
+ # Combine name and arguments and split into multiple lines of at most
428
+ # `width` characters. Lines end on a comma, and continuation lines are
429
+ # indented to align with the rest of the argument list.
430
+ def _split_line(name, arguments, width):
431
+ firstwidth = len(name)
432
+ k = firstwidth
433
+ newstr = name
434
+ sepstr = ", "
435
+ arglist = arguments.split(sepstr)
436
+ for argument in arglist:
437
+ if k == firstwidth:
438
+ addstr = ""
439
+ else:
440
+ addstr = sepstr
441
+ k = k + len(argument) + len(addstr)
442
+ if k > width:
443
+ k = firstwidth + 1 + len(argument)
444
+ newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
445
+ else:
446
+ newstr = newstr + addstr + argument
447
+ return newstr
448
+
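+ # --- Illustrative sketch (editor's addition, not part of NumPy source) ---
+ # _split_line wraps an argument list at `width` columns, continuing the
+ # arguments under the opening parenthesis:
+ #
+ #     >>> print(_split_line("func", "(alpha, beta, gamma)", 15))
+ #     func(alpha,
+ #           beta,
+ #           gamma)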
449
+ _namedict = None
450
+ _dictlist = None
451
+
452
+ # Traverse all module directories underneath globals
453
+ # to see if something is defined
454
+ def _makenamedict(module='numpy'):
455
+ module = __import__(module, globals(), locals(), [])
456
+ thedict = {module.__name__:module.__dict__}
457
+ dictlist = [module.__name__]
458
+ totraverse = [module.__dict__]
459
+ while True:
460
+ if len(totraverse) == 0:
461
+ break
462
+ thisdict = totraverse.pop(0)
463
+ for x in thisdict.keys():
464
+ if isinstance(thisdict[x], types.ModuleType):
465
+ modname = thisdict[x].__name__
466
+ if modname not in dictlist:
467
+ moddict = thisdict[x].__dict__
468
+ dictlist.append(modname)
469
+ totraverse.append(moddict)
470
+ thedict[modname] = moddict
471
+ return thedict, dictlist
472
+
473
+
474
+ def _info(obj, output=None):
475
+ """Provide information about ndarray obj.
476
+
477
+ Parameters
478
+ ----------
479
+ obj : ndarray
480
+ Must be ndarray, not checked.
481
+ output
482
+ Where printed output goes.
483
+
484
+ Notes
485
+ -----
486
+ Copied over from the numarray module prior to its removal.
487
+ Adapted somewhat as only numpy is an option now.
488
+
489
+ Called by info.
490
+
491
+ """
492
+ extra = ""
493
+ tic = ""
494
+ bp = lambda x: x
495
+ cls = getattr(obj, '__class__', type(obj))
496
+ nm = getattr(cls, '__name__', cls)
497
+ strides = obj.strides
498
+ endian = obj.dtype.byteorder
499
+
500
+ if output is None:
501
+ output = sys.stdout
502
+
503
+ print("class: ", nm, file=output)
504
+ print("shape: ", obj.shape, file=output)
505
+ print("strides: ", strides, file=output)
506
+ print("itemsize: ", obj.itemsize, file=output)
507
+ print("aligned: ", bp(obj.flags.aligned), file=output)
508
+ print("contiguous: ", bp(obj.flags.contiguous), file=output)
509
+ print("fortran: ", obj.flags.fortran, file=output)
510
+ print(
511
+ "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra),
512
+ file=output
513
+ )
514
+ print("byteorder: ", end=' ', file=output)
515
+ if endian in ['|', '=']:
516
+ print("%s%s%s" % (tic, sys.byteorder, tic), file=output)
517
+ byteswap = False
518
+ elif endian == '>':
519
+ print("%sbig%s" % (tic, tic), file=output)
520
+ byteswap = sys.byteorder != "big"
521
+ else:
522
+ print("%slittle%s" % (tic, tic), file=output)
523
+ byteswap = sys.byteorder != "little"
524
+ print("byteswap: ", bp(byteswap), file=output)
525
+ print("type: %s" % obj.dtype, file=output)
526
+
527
+
528
+ @set_module('numpy')
529
+ def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
530
+ """
531
+ Get help information for an array, function, class, or module.
532
+
533
+ Parameters
534
+ ----------
535
+ object : object or str, optional
536
+ Input object or name to get information about. If `object` is
537
+ an `ndarray` instance, information about the array is printed.
538
+ If `object` is a numpy object, its docstring is given. If it is
539
+ a string, available modules are searched for matching objects.
540
+ If None, information about `info` itself is returned.
541
+ maxwidth : int, optional
542
+ Printing width.
543
+ output : file like object, optional
544
+ File like object that the output is written to, default is
545
+ ``None``, in which case ``sys.stdout`` will be used.
546
+ The object has to be opened in 'w' or 'a' mode.
547
+ toplevel : str, optional
548
+ Start search at this level.
549
+
550
+ See Also
551
+ --------
552
+ source, lookfor
553
+
554
+ Notes
555
+ -----
556
+ When used interactively with an object, ``np.info(obj)`` is equivalent
557
+ to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
558
+ prompt.
559
+
560
+ Examples
561
+ --------
562
+ >>> np.info(np.polyval) # doctest: +SKIP
563
+ polyval(p, x)
564
+ Evaluate the polynomial p at x.
565
+ ...
566
+
567
+ When using a string for `object` it is possible to get multiple results.
568
+
569
+ >>> np.info('fft') # doctest: +SKIP
570
+ *** Found in numpy ***
571
+ Core FFT routines
572
+ ...
573
+ *** Found in numpy.fft ***
574
+ fft(a, n=None, axis=-1)
575
+ ...
576
+ *** Repeat reference found in numpy.fft.fftpack ***
577
+ *** Total of 3 references found. ***
578
+
579
+ When the argument is an array, information about the array is printed.
580
+
581
+ >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64)
582
+ >>> np.info(a)
583
+ class: ndarray
584
+ shape: (2, 3)
585
+ strides: (24, 8)
586
+ itemsize: 8
587
+ aligned: True
588
+ contiguous: True
589
+ fortran: False
590
+ data pointer: 0x562b6e0d2860 # may vary
591
+ byteorder: little
592
+ byteswap: False
593
+ type: complex64
594
+
595
+ """
596
+ global _namedict, _dictlist
597
+ # Local import to speed up numpy's import time.
598
+ import pydoc
599
+ import inspect
600
+
601
+ if (hasattr(object, '_ppimport_importer') or
602
+ hasattr(object, '_ppimport_module')):
603
+ object = object._ppimport_module
604
+ elif hasattr(object, '_ppimport_attr'):
605
+ object = object._ppimport_attr
606
+
607
+ if output is None:
608
+ output = sys.stdout
609
+
610
+ if object is None:
611
+ info(info)
612
+ elif isinstance(object, ndarray):
613
+ _info(object, output=output)
614
+ elif isinstance(object, str):
615
+ if _namedict is None:
616
+ _namedict, _dictlist = _makenamedict(toplevel)
617
+ numfound = 0
618
+ objlist = []
619
+ for namestr in _dictlist:
620
+ try:
621
+ obj = _namedict[namestr][object]
622
+ if id(obj) in objlist:
623
+ print("\n "
624
+ "*** Repeat reference found in %s *** " % namestr,
625
+ file=output
626
+ )
627
+ else:
628
+ objlist.append(id(obj))
629
+ print(" *** Found in %s ***" % namestr, file=output)
630
+ info(obj)
631
+ print("-"*maxwidth, file=output)
632
+ numfound += 1
633
+ except KeyError:
634
+ pass
635
+ if numfound == 0:
636
+ print("Help for %s not found." % object, file=output)
637
+ else:
638
+ print("\n "
639
+ "*** Total of %d references found. ***" % numfound,
640
+ file=output
641
+ )
642
+
643
+ elif inspect.isfunction(object) or inspect.ismethod(object):
644
+ name = object.__name__
645
+ try:
646
+ arguments = str(inspect.signature(object))
647
+ except Exception:
648
+ arguments = "()"
649
+
650
+ if len(name+arguments) > maxwidth:
651
+ argstr = _split_line(name, arguments, maxwidth)
652
+ else:
653
+ argstr = name + arguments
654
+
655
+ print(" " + argstr + "\n", file=output)
656
+ print(inspect.getdoc(object), file=output)
657
+
658
+ elif inspect.isclass(object):
659
+ name = object.__name__
660
+ try:
661
+ arguments = str(inspect.signature(object))
662
+ except Exception:
663
+ arguments = "()"
664
+
665
+ if len(name+arguments) > maxwidth:
666
+ argstr = _split_line(name, arguments, maxwidth)
667
+ else:
668
+ argstr = name + arguments
669
+
670
+ print(" " + argstr + "\n", file=output)
671
+ doc1 = inspect.getdoc(object)
672
+ if doc1 is None:
673
+ if hasattr(object, '__init__'):
674
+ print(inspect.getdoc(object.__init__), file=output)
675
+ else:
676
+ print(inspect.getdoc(object), file=output)
677
+
678
+ methods = pydoc.allmethods(object)
679
+
680
+ public_methods = [meth for meth in methods if meth[0] != '_']
681
+ if public_methods:
682
+ print("\n\nMethods:\n", file=output)
683
+ for meth in public_methods:
684
+ thisobj = getattr(object, meth, None)
685
+ if thisobj is not None:
686
+ methstr, other = pydoc.splitdoc(
687
+ inspect.getdoc(thisobj) or "None"
688
+ )
689
+ print(" %s -- %s" % (meth, methstr), file=output)
690
+
691
+ elif hasattr(object, '__doc__'):
692
+ print(inspect.getdoc(object), file=output)
693
+
694
+
695
+ @set_module('numpy')
696
+ def source(object, output=sys.stdout):
697
+ """
698
+ Print or write to a file the source code for a NumPy object.
699
+
700
+ The source code is only returned for objects written in Python. Many
701
+ functions and classes are defined in C and will therefore not return
702
+ useful information.
703
+
704
+ Parameters
705
+ ----------
706
+ object : numpy object
707
+ Input object. This can be any object (function, class, module,
708
+ ...).
709
+ output : file object, optional
710
+ If `output` not supplied then source code is printed to screen
711
+ (sys.stdout). File object must be created with either write 'w' or
712
+ append 'a' modes.
713
+
714
+ See Also
715
+ --------
716
+ lookfor, info
717
+
718
+ Examples
719
+ --------
720
+ >>> np.source(np.interp) #doctest: +SKIP
721
+ In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
722
+ def interp(x, xp, fp, left=None, right=None):
723
+ \"\"\".... (full docstring printed)\"\"\"
724
+ if isinstance(x, (float, int, number)):
725
+ return compiled_interp([x], xp, fp, left, right).item()
726
+ else:
727
+ return compiled_interp(x, xp, fp, left, right)
728
+
729
+ The source code is only returned for objects written in Python.
730
+
731
+ >>> np.source(np.array) #doctest: +SKIP
732
+ Not available for this object.
733
+
734
+ """
735
+ # Local import to speed up numpy's import time.
736
+ import inspect
737
+ try:
738
+ print("In file: %s\n" % inspect.getsourcefile(object), file=output)
739
+ print(inspect.getsource(object), file=output)
740
+ except Exception:
741
+ print("Not available for this object.", file=output)
742
+
743
+
744
+ # Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
745
+ # where kind: "func", "class", "module", "object"
746
+ # and index: index in breadth-first namespace traversal
747
+ _lookfor_caches = {}
748
+
749
+ # regexp whose match indicates that the string may contain a function
750
+ # signature
751
+ _function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
752
+
753
+
754
+ @set_module('numpy')
755
+ def lookfor(what, module=None, import_modules=True, regenerate=False,
756
+ output=None):
757
+ """
758
+ Do a keyword search on docstrings.
759
+
760
+ A list of objects that matched the search is displayed,
761
+ sorted by relevance. All given keywords need to be found in the
762
+ docstring for an object to be returned as a result, but the order does
763
+ not matter.
764
+
765
+ Parameters
766
+ ----------
767
+ what : str
768
+ String containing words to look for.
769
+ module : str or list, optional
770
+ Name of module(s) whose docstrings to go through.
771
+ import_modules : bool, optional
772
+ Whether to import sub-modules in packages. Default is True.
773
+ regenerate : bool, optional
774
+ Whether to re-generate the docstring cache. Default is False.
775
+ output : file-like, optional
776
+ File-like object to write the output to. If omitted, use a pager.
777
+
778
+ See Also
779
+ --------
780
+ source, info
781
+
782
+ Notes
783
+ -----
784
+ Relevance is determined only roughly, by checking if the keywords occur
785
+ in the function name, at the start of a docstring, etc.
786
+
787
+ Examples
788
+ --------
789
+ >>> np.lookfor('binary representation') # doctest: +SKIP
790
+ Search results for 'binary representation'
791
+ ------------------------------------------
792
+ numpy.binary_repr
793
+ Return the binary representation of the input number as a string.
794
+ numpy.core.setup_common.long_double_representation
795
+ Given a binary dump as given by GNU od -b, look for long double
796
+ numpy.base_repr
797
+ Return a string representation of a number in the given base system.
798
+ ...
799
+
800
+ """
801
+ import pydoc
802
+
803
+ # Cache
804
+ cache = _lookfor_generate_cache(module, import_modules, regenerate)
805
+
806
+ # Search
807
+ # XXX: maybe using a real stemming search engine would be better?
808
+ found = []
809
+ whats = str(what).lower().split()
810
+ if not whats:
811
+ return
812
+
813
+ for name, (docstring, kind, index) in cache.items():
814
+ if kind in ('module', 'object'):
815
+ # don't show modules or objects
816
+ continue
817
+ doc = docstring.lower()
818
+ if all(w in doc for w in whats):
819
+ found.append(name)
820
+
821
+ # Relevance sort
822
+ # XXX: this is full Harrison-Stetson heuristics now,
823
+ # XXX: it probably could be improved
824
+
825
+ kind_relevance = {'func': 1000, 'class': 1000,
826
+ 'module': -1000, 'object': -1000}
827
+
828
+ def relevance(name, docstr, kind, index):
829
+ r = 0
830
+ # do the keywords occur within the start of the docstring?
831
+ first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
832
+ r += sum([200 for w in whats if w in first_doc])
833
+ # do the keywords occur in the function name?
834
+ r += sum([30 for w in whats if w in name])
835
+ # is the full name long?
836
+ r += -len(name) * 5
837
+ # is the object of bad type?
838
+ r += kind_relevance.get(kind, -1000)
839
+ # is the object deep in namespace hierarchy?
840
+ r += -name.count('.') * 10
841
+ r += max(-index / 100, -100)
842
+ return r
843
+
844
+ def relevance_value(a):
845
+ return relevance(a, *cache[a])
846
+ found.sort(key=relevance_value)
847
+
848
+ # Pretty-print
849
+ s = "Search results for '%s'" % (' '.join(whats))
850
+ help_text = [s, "-"*len(s)]
851
+ for name in found[::-1]:
852
+ doc, kind, ix = cache[name]
853
+
854
+ doclines = [line.strip() for line in doc.strip().split("\n")
855
+ if line.strip()]
856
+
857
+ # find a suitable short description
858
+ try:
859
+ first_doc = doclines[0].strip()
860
+ if _function_signature_re.search(first_doc):
861
+ first_doc = doclines[1].strip()
862
+ except IndexError:
863
+ first_doc = ""
864
+ help_text.append("%s\n %s" % (name, first_doc))
865
+
866
+ if not found:
867
+ help_text.append("Nothing found.")
868
+
869
+ # Output
870
+ if output is not None:
871
+ output.write("\n".join(help_text))
872
+ elif len(help_text) > 10:
873
+ pager = pydoc.getpager()
874
+ pager("\n".join(help_text))
875
+ else:
876
+ print("\n".join(help_text))
877
+
878
+ def _lookfor_generate_cache(module, import_modules, regenerate):
879
+ """
880
+ Generate docstring cache for given module.
881
+
882
+ Parameters
883
+ ----------
884
+ module : str, None, module
885
+ Module for which to generate docstring cache
886
+ import_modules : bool
887
+ Whether to import sub-modules in packages.
888
+ regenerate : bool
889
+ Re-generate the docstring cache
890
+
891
+ Returns
892
+ -------
893
+ cache : dict {obj_full_name: (docstring, kind, index), ...}
894
+ Docstring cache for the module, either cached one (regenerate=False)
895
+ or newly generated.
896
+
897
+ """
898
+ # Local import to speed up numpy's import time.
899
+ import inspect
900
+
901
+ from io import StringIO
902
+
903
+ if module is None:
904
+ module = "numpy"
905
+
906
+ if isinstance(module, str):
907
+ try:
908
+ __import__(module)
909
+ except ImportError:
910
+ return {}
911
+ module = sys.modules[module]
912
+ elif isinstance(module, list) or isinstance(module, tuple):
913
+ cache = {}
914
+ for mod in module:
915
+ cache.update(_lookfor_generate_cache(mod, import_modules,
916
+ regenerate))
917
+ return cache
918
+
919
+ if id(module) in _lookfor_caches and not regenerate:
920
+ return _lookfor_caches[id(module)]
921
+
922
+ # walk items and collect docstrings
923
+ cache = {}
924
+ _lookfor_caches[id(module)] = cache
925
+ seen = {}
926
+ index = 0
927
+ stack = [(module.__name__, module)]
928
+ while stack:
929
+ name, item = stack.pop(0)
930
+ if id(item) in seen:
931
+ continue
932
+ seen[id(item)] = True
933
+
934
+ index += 1
935
+ kind = "object"
936
+
937
+ if inspect.ismodule(item):
938
+ kind = "module"
939
+ try:
940
+ _all = item.__all__
941
+ except AttributeError:
942
+ _all = None
943
+
944
+ # import sub-packages
945
+ if import_modules and hasattr(item, '__path__'):
946
+ for pth in item.__path__:
947
+ for mod_path in os.listdir(pth):
948
+ this_py = os.path.join(pth, mod_path)
949
+ init_py = os.path.join(pth, mod_path, '__init__.py')
950
+ if (os.path.isfile(this_py) and
951
+ mod_path.endswith('.py')):
952
+ to_import = mod_path[:-3]
953
+ elif os.path.isfile(init_py):
954
+ to_import = mod_path
955
+ else:
956
+ continue
957
+ if to_import == '__init__':
958
+ continue
959
+
960
+ try:
961
+ old_stdout = sys.stdout
962
+ old_stderr = sys.stderr
963
+ try:
964
+ sys.stdout = StringIO()
965
+ sys.stderr = StringIO()
966
+ __import__("%s.%s" % (name, to_import))
967
+ finally:
968
+ sys.stdout = old_stdout
969
+ sys.stderr = old_stderr
970
+ except KeyboardInterrupt:
971
+ # Assume keyboard interrupt came from a user
972
+ raise
973
+ except BaseException:
974
+ # Ignore also SystemExit and pytests.importorskip
975
+ # `Skipped` (these are BaseExceptions; gh-22345)
976
+ continue
977
+
978
+ for n, v in _getmembers(item):
979
+ try:
980
+ item_name = getattr(v, '__name__', "%s.%s" % (name, n))
981
+ mod_name = getattr(v, '__module__', None)
982
+ except NameError:
983
+ # ref. SWIG's global cvars
984
+ # NameError: Unknown C global variable
985
+ item_name = "%s.%s" % (name, n)
986
+ mod_name = None
987
+ if '.' not in item_name and mod_name:
988
+ item_name = "%s.%s" % (mod_name, item_name)
989
+
990
+ if not item_name.startswith(name + '.'):
991
+ # don't crawl "foreign" objects
992
+ if isinstance(v, ufunc):
993
+ # ... unless they are ufuncs
994
+ pass
995
+ else:
996
+ continue
997
+ elif not (inspect.ismodule(v) or _all is None or n in _all):
998
+ continue
999
+ stack.append(("%s.%s" % (name, n), v))
1000
+ elif inspect.isclass(item):
1001
+ kind = "class"
1002
+ for n, v in _getmembers(item):
1003
+ stack.append(("%s.%s" % (name, n), v))
1004
+ elif hasattr(item, "__call__"):
1005
+ kind = "func"
1006
+
1007
+ try:
1008
+ doc = inspect.getdoc(item)
1009
+ except NameError:
1010
+ # ref SWIG's NameError: Unknown C global variable
1011
+ doc = None
1012
+ if doc is not None:
1013
+ cache[name] = (doc, kind, index)
1014
+
1015
+ return cache
1016
+
1017
+ def _getmembers(item):
1018
+ import inspect
1019
+ try:
1020
+ members = inspect.getmembers(item)
1021
+ except Exception:
1022
+ members = [(x, getattr(item, x)) for x in dir(item)
1023
+ if hasattr(item, x)]
1024
+ return members
1025
+
1026
+
1027
+ def safe_eval(source):
1028
+ """
1029
+ Protected string evaluation.
1030
+
1031
+ Evaluate a string containing a Python literal expression without
1032
+ allowing the execution of arbitrary non-literal code.
1033
+
1034
+ .. warning::
1035
+
1036
+ This function is identical to :py:func:`ast.literal_eval` and
1037
+ has the same security implications. It may not always be safe
1038
+ to evaluate large input strings.
1039
+
1040
+ Parameters
1041
+ ----------
1042
+ source : str
1043
+ The string to evaluate.
1044
+
1045
+ Returns
1046
+ -------
1047
+ obj : object
1048
+ The result of evaluating `source`.
1049
+
1050
+ Raises
1051
+ ------
1052
+ SyntaxError
1053
+ If the code has invalid Python syntax, or if it contains
1054
+ non-literal code.
1055
+
1056
+ Examples
1057
+ --------
1058
+ >>> np.safe_eval('1')
1059
+ 1
1060
+ >>> np.safe_eval('[1, 2, 3]')
1061
+ [1, 2, 3]
1062
+ >>> np.safe_eval('{"foo": ("bar", 10.0)}')
1063
+ {'foo': ('bar', 10.0)}
1064
+
1065
+ >>> np.safe_eval('import os')
1066
+ Traceback (most recent call last):
1067
+ ...
1068
+ SyntaxError: invalid syntax
1069
+
1070
+ >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
1071
+ Traceback (most recent call last):
1072
+ ...
1073
+ ValueError: malformed node or string: <_ast.Call object at 0x...>
1074
+
1075
+ """
1076
+ # Local import to speed up numpy's import time.
1077
+ import ast
1078
+ return ast.literal_eval(source)
1079
+
1080
+
1081
+ def _median_nancheck(data, result, axis):
1082
+ """
1083
+ Utility function to check the median result from `data` for NaN values
1084
+ at the end and return NaN in that case. `result` can also be a MaskedArray.
1085
+
1086
+ Parameters
1087
+ ----------
1088
+ data : array
1089
+ Sorted input data to median function
1090
+ result : Array or MaskedArray
1091
+ Result of median function.
1092
+ axis : int
1093
+ Axis along which the median was computed.
1094
+
1095
+ Returns
1096
+ -------
1097
+ result : scalar or ndarray
1098
+ Median or NaN in axes which contained NaN in the input. If the input
1099
+ was an array, NaN will be inserted in-place. If a scalar, either the
1100
+ input itself or a scalar NaN.
1101
+ """
1102
+ if data.size == 0:
1103
+ return result
1104
+ potential_nans = data.take(-1, axis=axis)
1105
+ n = np.isnan(potential_nans)
1106
+ # masked NaN values are ok, although for masked the copyto may fail for
1107
+ # unmasked ones (this was always broken) when the result is a scalar.
1108
+ if np.ma.isMaskedArray(n):
1109
+ n = n.filled(False)
1110
+
1111
+ if not n.any():
1112
+ return result
1113
+
1114
+ # Without given output, it is possible that the current result is a
1115
+ # numpy scalar, which is not writeable. If so, just return nan.
1116
+ if isinstance(result, np.generic):
1117
+ return potential_nans
1118
+
1119
+ # Otherwise copy NaNs (if there are any)
1120
+ np.copyto(result, potential_nans, where=n)
1121
+ return result
1122
+
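+ # --- Illustrative sketch (editor's addition, not part of NumPy source) ---
+ # Because `data` is sorted, a NaN can only sit at the end of its axis, so
+ # inspecting the last element suffices to poison the affected medians:
+ #
+ #     >>> data = np.sort(np.array([[1.0, 2.0], [3.0, np.nan]]), axis=1)
+ #     >>> result = np.array([1.5, 3.0])  # medians computed elsewhere
+ #     >>> _median_nancheck(data, result, axis=1)
+ #     array([1.5, nan])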
1123
+ def _opt_info():
1124
+ """
1125
+ Returns a string containing the CPU features supported by the current build.
1126
+
1127
+ The string format can be explained as follows:
1128
+ - dispatched features that are supported by the running machine
1129
+ end with `*`.
1130
+ - dispatched features that are "not" supported by the running machine
1131
+ end with `?`.
1132
+ - remaining features represent the baseline.
1133
+ """
1134
+ from numpy.core._multiarray_umath import (
1135
+ __cpu_features__, __cpu_baseline__, __cpu_dispatch__
1136
+ )
1137
+
1138
+ if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0:
1139
+ return ''
1140
+
1141
+ enabled_features = ' '.join(__cpu_baseline__)
1142
+ for feature in __cpu_dispatch__:
1143
+ if __cpu_features__[feature]:
1144
+ enabled_features += f" {feature}*"
1145
+ else:
1146
+ enabled_features += f" {feature}?"
1147
+
1148
+ return enabled_features
1149
+
1150
+
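+ # --- Illustrative sketch (editor's addition, not part of NumPy source) ---
+ # A hypothetical return value on an x86-64 build, showing the markers the
+ # docstring describes (bare = baseline, `*` = dispatched and supported,
+ # `?` = dispatched but unsupported):
+ #
+ #     'SSE SSE2 SSE3 SSSE3* SSE41* AVX2* AVX512F?'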
1151
+ def drop_metadata(dtype, /):
1152
+ """
1153
+ Returns the dtype unchanged if it contained no metadata or a copy of the
1154
+ dtype if it (or any of its structure dtypes) contained metadata.
1155
+
1156
+ This utility is used by `np.save` and `np.savez` to drop metadata before
1157
+ saving.
1158
+
1159
+ .. note::
1160
+
1161
+ Due to its limitation this function may move to a more appropriate
1162
+ home or change in the future and is considered semi-public API only.
1163
+
1164
+ .. warning::
1165
+
1166
+ This function does not preserve exotic things like record dtypes,
1167
+ and for user dtypes it may simply return the wrong thing. If you need to be
1168
+ sure about the latter, check the result with:
1169
+ ``np.can_cast(new_dtype, dtype, casting="no")``.
1170
+
1171
+ """
1172
+ if dtype.fields is not None:
1173
+ found_metadata = dtype.metadata is not None
1174
+
1175
+ names = []
1176
+ formats = []
1177
+ offsets = []
1178
+ titles = []
1179
+ for name, field in dtype.fields.items():
1180
+ field_dt = drop_metadata(field[0])
1181
+ if field_dt is not field[0]:
1182
+ found_metadata = True
1183
+
1184
+ names.append(name)
1185
+ formats.append(field_dt)
1186
+ offsets.append(field[1])
1187
+ titles.append(None if len(field) < 3 else field[2])
1188
+
1189
+ if not found_metadata:
1190
+ return dtype
1191
+
1192
+ structure = dict(
1193
+ names=names, formats=formats, offsets=offsets, titles=titles,
1194
+ itemsize=dtype.itemsize)
1195
+
1196
+ # NOTE: Could pass (dtype.type, structure) to preserve record dtypes...
1197
+ return np.dtype(structure, align=dtype.isalignedstruct)
1198
+ elif dtype.subdtype is not None:
1199
+ # subarray dtype
1200
+ subdtype, shape = dtype.subdtype
1201
+ new_subdtype = drop_metadata(subdtype)
1202
+ if dtype.metadata is None and new_subdtype is subdtype:
1203
+ return dtype
1204
+
1205
+ return np.dtype((new_subdtype, shape))
1206
+ else:
1207
+ # Normal unstructured dtype
1208
+ if dtype.metadata is None:
1209
+ return dtype
1210
+ # Note that `dt.str` doesn't round-trip e.g. for user-dtypes.
1211
+ return np.dtype(dtype.str)
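+
+ # --- Illustrative sketch (editor's addition, not part of NumPy source) ---
+ # Metadata is stripped while the described layout is preserved:
+ #
+ #     >>> dt = np.dtype(np.float64, metadata={'unit': 'm'})
+ #     >>> drop_metadata(dt).metadata is None
+ #     True
+ #     >>> drop_metadata(dt) == dt  # layout still compares equal
+ #     True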
venv/lib/python3.10/site-packages/numpy/testing/__init__.py ADDED
@@ -0,0 +1,22 @@
1
+ """Common test support for all numpy test scripts.
2
+
3
+ This single module should provide all the common functionality for numpy tests
4
+ in a single location, so that test scripts can just import it and work right
5
+ away.
6
+
7
+ """
8
+ from unittest import TestCase
9
+
10
+ from . import _private
11
+ from ._private.utils import *
12
+ from ._private.utils import (_assert_valid_refcount, _gen_alignment_data)
13
+ from ._private import extbuild
14
+ from . import overrides
15
+
16
+ __all__ = (
17
+ _private.utils.__all__ + ['TestCase', 'overrides']
18
+ )
19
+
20
+ from numpy._pytesttester import PytestTester
21
+ test = PytestTester(__name__)
22
+ del PytestTester
venv/lib/python3.10/site-packages/numpy/testing/__init__.pyi ADDED
@@ -0,0 +1,50 @@
1
+ from numpy._pytesttester import PytestTester
2
+
3
+ from unittest import (
4
+ TestCase as TestCase,
5
+ )
6
+
7
+ from numpy.testing._private.utils import (
8
+ assert_equal as assert_equal,
9
+ assert_almost_equal as assert_almost_equal,
10
+ assert_approx_equal as assert_approx_equal,
11
+ assert_array_equal as assert_array_equal,
12
+ assert_array_less as assert_array_less,
13
+ assert_string_equal as assert_string_equal,
14
+ assert_array_almost_equal as assert_array_almost_equal,
15
+ assert_raises as assert_raises,
16
+ build_err_msg as build_err_msg,
17
+ decorate_methods as decorate_methods,
18
+ jiffies as jiffies,
19
+ memusage as memusage,
20
+ print_assert_equal as print_assert_equal,
21
+ rundocs as rundocs,
22
+ runstring as runstring,
23
+ verbose as verbose,
24
+ measure as measure,
25
+ assert_ as assert_,
26
+ assert_array_almost_equal_nulp as assert_array_almost_equal_nulp,
27
+ assert_raises_regex as assert_raises_regex,
28
+ assert_array_max_ulp as assert_array_max_ulp,
29
+ assert_warns as assert_warns,
30
+ assert_no_warnings as assert_no_warnings,
31
+ assert_allclose as assert_allclose,
32
+ IgnoreException as IgnoreException,
33
+ clear_and_catch_warnings as clear_and_catch_warnings,
34
+ SkipTest as SkipTest,
35
+ KnownFailureException as KnownFailureException,
36
+ temppath as temppath,
37
+ tempdir as tempdir,
38
+ IS_PYPY as IS_PYPY,
39
+ IS_PYSTON as IS_PYSTON,
40
+ HAS_REFCOUNT as HAS_REFCOUNT,
41
+ suppress_warnings as suppress_warnings,
42
+ assert_array_compare as assert_array_compare,
43
+ assert_no_gc_cycles as assert_no_gc_cycles,
44
+ break_cycles as break_cycles,
45
+ HAS_LAPACK64 as HAS_LAPACK64,
46
+ )
47
+
48
+ __all__: list[str]
49
+ __path__: list[str]
50
+ test: PytestTester
venv/lib/python3.10/site-packages/numpy/testing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (790 Bytes).
venv/lib/python3.10/site-packages/numpy/testing/__pycache__/overrides.cpython-310.pyc ADDED
Binary file (2.58 kB).
venv/lib/python3.10/site-packages/numpy/testing/__pycache__/print_coercion_tables.cpython-310.pyc ADDED
Binary file (4.81 kB).
venv/lib/python3.10/site-packages/numpy/testing/__pycache__/setup.cpython-310.pyc ADDED
Binary file (834 Bytes).
venv/lib/python3.10/site-packages/numpy/testing/_private/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/numpy/testing/_private/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (190 Bytes).
venv/lib/python3.10/site-packages/numpy/testing/_private/__pycache__/extbuild.cpython-310.pyc ADDED
Binary file (7.29 kB).
venv/lib/python3.10/site-packages/numpy/testing/_private/__pycache__/utils.cpython-310.pyc ADDED
Binary file (70.3 kB).
venv/lib/python3.10/site-packages/numpy/testing/_private/extbuild.py ADDED
@@ -0,0 +1,248 @@
1
+ """
2
+ Build a c-extension module on-the-fly in tests.
3
+ See build_and_import_extension for usage hints.
4
+
5
+ """
6
+
7
+ import os
8
+ import pathlib
9
+ import subprocess
10
+ import sys
11
+ import sysconfig
12
+ import textwrap
13
+
14
+ __all__ = ['build_and_import_extension', 'compile_extension_module']
15
+
16
+
17
+ def build_and_import_extension(
18
+ modname, functions, *, prologue="", build_dir=None,
19
+ include_dirs=[], more_init=""):
20
+ """
21
+ Builds and imports a C-extension module `modname` from a list of function
22
+ fragments `functions`.
23
+
24
+
25
+ Parameters
26
+ ----------
27
+ functions : list of fragments
28
+ Each fragment is a sequence of func_name, calling convention, snippet.
29
+ prologue : string
30
+ Code to precede the rest, usually extra ``#include`` or ``#define``
31
+ macros.
32
+ build_dir : pathlib.Path
33
+ Where to build the module, usually a temporary directory
34
+ include_dirs : list
35
+ Extra directories to find include files when compiling
36
+ more_init : string
37
+ Code to appear in the module PyMODINIT_FUNC
38
+
39
+ Returns
40
+ -------
41
+ out: module
42
+ The module will have been loaded and is ready for use
43
+
44
+ Examples
45
+ --------
46
+ >>> functions = [("test_bytes", "METH_O", \"\"\"
47
+ if ( !PyBytes_Check(args)) {
48
+ Py_RETURN_FALSE;
49
+ }
50
+ Py_RETURN_TRUE;
51
+ \"\"\")]
52
+ >>> mod = build_and_import_extension("testme", functions)
53
+ >>> assert not mod.test_bytes(u'abc')
54
+ >>> assert mod.test_bytes(b'abc')
55
+ """
56
+ body = prologue + _make_methods(functions, modname)
57
+ init = """PyObject *mod = PyModule_Create(&moduledef);
58
+ """
59
+ if not build_dir:
60
+ build_dir = pathlib.Path('.')
61
+ if more_init:
62
+ init += """#define INITERROR return NULL
63
+ """
64
+ init += more_init
65
+ init += "\nreturn mod;"
66
+ source_string = _make_source(modname, init, body)
67
+ try:
68
+ mod_so = compile_extension_module(
69
+ modname, build_dir, include_dirs, source_string)
70
+ except Exception as e:
71
+ # shorten the exception chain
72
+ raise RuntimeError(f"could not compile in {build_dir}:") from e
73
+ import importlib.util
74
+ spec = importlib.util.spec_from_file_location(modname, mod_so)
75
+ foo = importlib.util.module_from_spec(spec)
76
+ spec.loader.exec_module(foo)
77
+ return foo
78
+
79
+
80
+ def compile_extension_module(
81
+ name, builddir, include_dirs,
82
+ source_string, libraries=[], library_dirs=[]):
83
+ """
84
+ Build an extension module and return the filename of the resulting
85
+ native code file.
86
+
87
+ Parameters
88
+ ----------
89
+ name : string
90
+ name of the module, possibly including dots if it is a module inside a
91
+ package.
92
+ builddir : pathlib.Path
93
+ Where to build the module, usually a temporary directory
94
+ include_dirs : list
95
+ Extra directories to find include files when compiling
96
+ libraries : list
97
+ Libraries to link into the extension module
98
+ library_dirs: list
99
+ Where to find the libraries, ``-L`` passed to the linker
100
+ """
101
+ modname = name.split('.')[-1]
102
+ dirname = builddir / name
103
+ dirname.mkdir(exist_ok=True)
104
+ cfile = _convert_str_to_file(source_string, dirname)
105
+ include_dirs = include_dirs + [sysconfig.get_config_var('INCLUDEPY')]
106
+
107
+ return _c_compile(
108
+ cfile, outputfilename=dirname / modname,
109
+ include_dirs=include_dirs, libraries=libraries,
+ library_dirs=library_dirs,
110
+ )
111
+
112
+
113
+ def _convert_str_to_file(source, dirname):
114
+ """Helper function to create a file ``source.c`` in `dirname` that contains
115
+ the string in `source`. Returns the file name
116
+ """
117
+ filename = dirname / 'source.c'
118
+ with filename.open('w') as f:
119
+ f.write(str(source))
120
+ return filename
121
+
122
+
123
+ def _make_methods(functions, modname):
124
+ """ Turns the name, signature, code in functions into complete functions
125
+ and lists them in a methods_table. Then turns the methods_table into a
126
+ ``PyMethodDef`` structure and returns the resulting code fragment ready
127
+ for compilation
128
+ """
129
+ methods_table = []
130
+ codes = []
131
+ for funcname, flags, code in functions:
132
+ cfuncname = "%s_%s" % (modname, funcname)
133
+ if 'METH_KEYWORDS' in flags:
134
+ signature = '(PyObject *self, PyObject *args, PyObject *kwargs)'
135
+ else:
136
+ signature = '(PyObject *self, PyObject *args)'
137
+ methods_table.append(
138
+ "{\"%s\", (PyCFunction)%s, %s}," % (funcname, cfuncname, flags))
139
+ func_code = """
140
+ static PyObject* {cfuncname}{signature}
141
+ {{
142
+ {code}
143
+ }}
144
+ """.format(cfuncname=cfuncname, signature=signature, code=code)
145
+ codes.append(func_code)
146
+
147
+ body = "\n".join(codes) + """
148
+ static PyMethodDef methods[] = {
149
+ %(methods)s
150
+ { NULL }
151
+ };
152
+ static struct PyModuleDef moduledef = {
153
+ PyModuleDef_HEAD_INIT,
154
+ "%(modname)s", /* m_name */
155
+ NULL, /* m_doc */
156
+ -1, /* m_size */
157
+ methods, /* m_methods */
158
+ };
159
+ """ % dict(methods='\n'.join(methods_table), modname=modname)
160
+ return body
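To see the C fragment this helper emits, it can be called directly; a sketch with a hypothetical `greet` fragment (the helper is private, so this is for illustration only):

# Illustrative sketch: print the generated wrapper and PyMethodDef table.
from numpy.testing._private.extbuild import _make_methods

fragment = _make_methods([("greet", "METH_O", "Py_RETURN_NONE;")], "demo")
print(fragment)  # contains "static PyObject* demo_greet(PyObject *self, PyObject *args)"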
+
+
+ def _make_source(name, init, body):
+     """ Combines the code fragments into source code ready to be compiled.
+     """
+     code = """
+     #include <Python.h>
+
+     %(body)s
+
+     PyMODINIT_FUNC
+     PyInit_%(name)s(void) {
+     %(init)s
+     }
+     """ % dict(
+         name=name, init=init, body=body,
+     )
+     return code
+
+
+ def _c_compile(cfile, outputfilename, include_dirs=[], libraries=[],
+                library_dirs=[]):
+     if sys.platform == 'win32':
+         compile_extra = ["/we4013"]
+         link_extra = ["/LIBPATH:" + os.path.join(sys.base_prefix, 'libs')]
+     elif sys.platform.startswith('linux'):
+         compile_extra = [
+             "-O0", "-g", "-Werror=implicit-function-declaration", "-fPIC"]
+         link_extra = []
+     else:
+         compile_extra = link_extra = []
+     if sys.platform == 'win32':
+         link_extra = link_extra + ['/DEBUG']  # generate .pdb file
+     if sys.platform == 'darwin':
+         # support Fink & Darwinports
+         for s in ('/sw/', '/opt/local/'):
+             if (s + 'include' not in include_dirs
+                     and os.path.exists(s + 'include')):
+                 include_dirs.append(s + 'include')
+             if s + 'lib' not in library_dirs and os.path.exists(s + 'lib'):
+                 library_dirs.append(s + 'lib')
+
+     outputfilename = outputfilename.with_suffix(get_so_suffix())
+     build(
+         cfile, outputfilename,
+         compile_extra, link_extra,
+         include_dirs, libraries, library_dirs)
+     return outputfilename
+
+
+ def build(cfile, outputfilename, compile_extra, link_extra,
+           include_dirs, libraries, library_dirs):
+     "use meson to build"
+
+     build_dir = cfile.parent / "build"
+     os.makedirs(build_dir, exist_ok=True)
+     so_name = outputfilename.parts[-1]
+     with open(cfile.parent / "meson.build", "wt") as fid:
+         includes = ['-I' + d for d in include_dirs]
+         link_dirs = ['-L' + d for d in library_dirs]
+         fid.write(textwrap.dedent(f"""\
+             project('foo', 'c')
+             shared_module('{so_name}', '{cfile.parts[-1]}',
+                 c_args: {includes} + {compile_extra},
+                 link_args: {link_dirs} + {link_extra},
+                 link_with: {libraries},
+                 name_prefix: '',
+                 name_suffix: 'dummy',
+             )
+         """))
+     if sys.platform == "win32":
+         subprocess.check_call(["meson", "setup",
+                                "--buildtype=release",
+                                "--vsenv", ".."],
+                               cwd=build_dir,
+                               )
+     else:
+         subprocess.check_call(["meson", "setup", "--vsenv", ".."],
+                               cwd=build_dir
+                               )
+     subprocess.check_call(["meson", "compile"], cwd=build_dir)
+     os.rename(str(build_dir / so_name) + ".dummy", cfile.parent / so_name)
+
+
+ def get_so_suffix():
+     ret = sysconfig.get_config_var('EXT_SUFFIX')
+     assert ret
+     return ret
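The artifact name comes straight from the interpreter's configuration; a quick way to see the suffix `get_so_suffix` returns on the current platform:

# The exact value is platform dependent,
# e.g. '.cpython-310-x86_64-linux-gnu.so' on CPython 3.10 / Linux.
import sysconfig
print(sysconfig.get_config_var('EXT_SUFFIX'))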
venv/lib/python3.10/site-packages/numpy/testing/_private/utils.py ADDED
@@ -0,0 +1,2509 @@
+ """
+ Utility functions to facilitate testing.
+
+ """
+ import os
+ import sys
+ import platform
+ import re
+ import gc
+ import operator
+ import warnings
+ from functools import partial, wraps
+ import shutil
+ import contextlib
+ from tempfile import mkdtemp, mkstemp
+ from unittest.case import SkipTest
+ from warnings import WarningMessage
+ import pprint
+ import sysconfig
+
+ import numpy as np
+ from numpy.core import (
+     intp, float32, empty, arange, array_repr, ndarray, isnat, array)
+ from numpy import isfinite, isnan, isinf
+ import numpy.linalg._umath_linalg
+
+ from io import StringIO
+
+ __all__ = [
+     'assert_equal', 'assert_almost_equal', 'assert_approx_equal',
+     'assert_array_equal', 'assert_array_less', 'assert_string_equal',
+     'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
+     'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
+     'rundocs', 'runstring', 'verbose', 'measure',
+     'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
+     'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
+     'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
+     'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
+     'HAS_REFCOUNT', "IS_WASM", 'suppress_warnings', 'assert_array_compare',
+     'assert_no_gc_cycles', 'break_cycles', 'HAS_LAPACK64', 'IS_PYSTON',
+     '_OLD_PROMOTION', 'IS_MUSL', '_SUPPORTS_SVE'
+     ]
+
+
+ class KnownFailureException(Exception):
+     '''Raise this exception to mark a test as a known failing test.'''
+     pass
+
+
+ KnownFailureTest = KnownFailureException  # backwards compat
+ verbose = 0
+
+ IS_WASM = platform.machine() in ["wasm32", "wasm64"]
+ IS_PYPY = sys.implementation.name == 'pypy'
+ IS_PYSTON = hasattr(sys, "pyston_version_info")
+ HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None and not IS_PYSTON
+ HAS_LAPACK64 = numpy.linalg._umath_linalg._ilp64
+
+ _OLD_PROMOTION = lambda: np._get_promotion_state() == 'legacy'
+
+ IS_MUSL = False
+ # alternate way is
+ # from packaging.tags import sys_tags
+ # _tags = list(sys_tags())
+ # if 'musllinux' in _tags[0].platform:
+ _v = sysconfig.get_config_var('HOST_GNU_TYPE') or ''
+ if 'musl' in _v:
+     IS_MUSL = True
+
+
+ def assert_(val, msg=''):
+     """
+     Assert that works in release mode.
+     Accepts callable msg to allow deferring evaluation until failure.
+
+     The Python built-in ``assert`` does not work when executing code in
+     optimized mode (the ``-O`` flag) - no byte-code is generated for it.
+
+     For documentation on usage, refer to the Python documentation.
+
+     """
+     __tracebackhide__ = True  # Hide traceback for py.test
+     if not val:
+         try:
+             smsg = msg()
+         except TypeError:
+             smsg = msg
+         raise AssertionError(smsg)
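A short sketch of the deferred-message feature of `assert_`: a callable `msg` is only evaluated on failure, so an expensive message costs nothing on the passing path:

# Sketch: the lambda below is never called because the assertion passes.
from numpy.testing import assert_

x = list(range(3))
assert_(len(x) == 3, lambda: f"unexpected value: {x!r}")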
+
+
+ if os.name == 'nt':
+     # Code "stolen" from enthought/debug/memusage.py
+     def GetPerformanceAttributes(object, counter, instance=None,
+                                  inum=-1, format=None, machine=None):
+         # NOTE: Many counters require 2 samples to give accurate results,
+         # including "% Processor Time" (as by definition, at any instant, a
+         # thread's CPU usage is either 0 or 100). To read counters like this,
+         # you should copy this function, but keep the counter open, and call
+         # CollectQueryData() each time you need to know.
+         # See http://msdn.microsoft.com/library/en-us/dnperfmo/html/perfmonpt2.asp (dead link)
+         # My older explanation for this was that the "AddCounter" process
+         # forced the CPU to 100%, but the above makes more sense :)
+         import win32pdh
+         if format is None:
+             format = win32pdh.PDH_FMT_LONG
+         path = win32pdh.MakeCounterPath((machine, object, instance, None,
+                                          inum, counter))
+         hq = win32pdh.OpenQuery()
+         try:
+             hc = win32pdh.AddCounter(hq, path)
+             try:
+                 win32pdh.CollectQueryData(hq)
+                 type, val = win32pdh.GetFormattedCounterValue(hc, format)
+                 return val
+             finally:
+                 win32pdh.RemoveCounter(hc)
+         finally:
+             win32pdh.CloseQuery(hq)
+
+     def memusage(processName="python", instance=0):
+         # from win32pdhutil, part of the win32all package
+         import win32pdh
+         return GetPerformanceAttributes("Process", "Virtual Bytes",
+                                         processName, instance,
+                                         win32pdh.PDH_FMT_LONG, None)
+ elif sys.platform[:5] == 'linux':
+
+     def memusage(_proc_pid_stat=f'/proc/{os.getpid()}/stat'):
+         """
+         Return virtual memory size in bytes of the running python.
+
+         """
+         try:
+             with open(_proc_pid_stat) as f:
+                 l = f.readline().split(' ')
+             return int(l[22])
+         except Exception:
+             return
+ else:
+     def memusage():
+         """
+         Return memory usage of running python. [Not implemented]
+
+         """
+         raise NotImplementedError
+
+
+ if sys.platform[:5] == 'linux':
+     def jiffies(_proc_pid_stat=f'/proc/{os.getpid()}/stat', _load_time=[]):
+         """
+         Return number of jiffies elapsed.
+
+         Return number of jiffies (1/100ths of a second) that this
+         process has been scheduled in user mode. See man 5 proc.
+
+         """
+         import time
+         if not _load_time:
+             _load_time.append(time.time())
+         try:
+             with open(_proc_pid_stat) as f:
+                 l = f.readline().split(' ')
+             return int(l[13])
+         except Exception:
+             return int(100*(time.time()-_load_time[0]))
+ else:
+     # os.getpid is not in all platforms available.
+     # Using time is safe but inaccurate, especially when process
+     # was suspended or sleeping.
+     def jiffies(_load_time=[]):
+         """
+         Return number of jiffies elapsed.
+
+         Return number of jiffies (1/100ths of a second) that this
+         process has been scheduled in user mode. See man 5 proc.
+
+         """
+         import time
+         if not _load_time:
+             _load_time.append(time.time())
+         return int(100*(time.time()-_load_time[0]))
+
+
+ def build_err_msg(arrays, err_msg, header='Items are not equal:',
+                   verbose=True, names=('ACTUAL', 'DESIRED'), precision=8):
+     msg = ['\n' + header]
+     if err_msg:
+         if err_msg.find('\n') == -1 and len(err_msg) < 79-len(header):
+             msg = [msg[0] + ' ' + err_msg]
+         else:
+             msg.append(err_msg)
+     if verbose:
+         for i, a in enumerate(arrays):
+
+             if isinstance(a, ndarray):
+                 # precision argument is only needed if the objects are ndarrays
+                 r_func = partial(array_repr, precision=precision)
+             else:
+                 r_func = repr
+
+             try:
+                 r = r_func(a)
+             except Exception as exc:
+                 r = f'[repr failed for <{type(a).__name__}>: {exc}]'
+             if r.count('\n') > 3:
+                 r = '\n'.join(r.splitlines()[:3])
+                 r += '...'
+             msg.append(f' {names[i]}: {r}')
+     return '\n'.join(msg)
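A sketch of the message layout `build_err_msg` produces (output abbreviated, illustrative only):

import numpy as np
from numpy.testing import build_err_msg

print(build_err_msg([np.array([1., 2.]), np.array([1., 3.])], err_msg=''))
# Items are not equal:
#  ACTUAL: array([1., 2.])
#  DESIRED: array([1., 3.])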
+
+
+ def assert_equal(actual, desired, err_msg='', verbose=True):
+     """
+     Raises an AssertionError if two objects are not equal.
+
+     Given two objects (scalars, lists, tuples, dictionaries or numpy arrays),
+     check that all elements of these objects are equal. An exception is raised
+     at the first conflicting values.
+
+     When one of `actual` and `desired` is a scalar and the other is array_like,
+     the function checks that each element of the array_like object is equal to
+     the scalar.
+
+     This function handles NaN comparisons as if NaN was a "normal" number.
+     That is, AssertionError is not raised if both objects have NaNs in the same
+     positions. This is in contrast to the IEEE standard on NaNs, which says
+     that NaN compared to anything must return False.
+
+     Parameters
+     ----------
+     actual : array_like
+         The object to check.
+     desired : array_like
+         The expected object.
+     err_msg : str, optional
+         The error message to be printed in case of failure.
+     verbose : bool, optional
+         If True, the conflicting values are appended to the error message.
+
+     Raises
+     ------
+     AssertionError
+         If actual and desired are not equal.
+
+     Examples
+     --------
+     >>> np.testing.assert_equal([4,5], [4,6])
+     Traceback (most recent call last):
+         ...
+     AssertionError:
+     Items are not equal:
+     item=1
+      ACTUAL: 5
+      DESIRED: 6
+
+     The following comparison does not raise an exception. There are NaNs
+     in the inputs, but they are in the same positions.
+
+     >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan])
+
+     """
+     __tracebackhide__ = True  # Hide traceback for py.test
+     if isinstance(desired, dict):
+         if not isinstance(actual, dict):
+             raise AssertionError(repr(type(actual)))
+         assert_equal(len(actual), len(desired), err_msg, verbose)
+         for k in desired:
+             if k not in actual:
+                 raise AssertionError(repr(k))
+             assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}',
+                          verbose)
+         return
+     if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
+         assert_equal(len(actual), len(desired), err_msg, verbose)
+         for k in range(len(desired)):
+             assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}',
+                          verbose)
+         return
+     from numpy.core import ndarray, isscalar, signbit
+     from numpy.lib import iscomplexobj, real, imag
+     if isinstance(actual, ndarray) or isinstance(desired, ndarray):
+         return assert_array_equal(actual, desired, err_msg, verbose)
+     msg = build_err_msg([actual, desired], err_msg, verbose=verbose)
+
+     # Handle complex numbers: separate into real/imag to handle
+     # nan/inf/negative zero correctly
+     # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
+     try:
+         usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
+     except (ValueError, TypeError):
+         usecomplex = False
+
+     if usecomplex:
+         if iscomplexobj(actual):
+             actualr = real(actual)
+             actuali = imag(actual)
+         else:
+             actualr = actual
+             actuali = 0
+         if iscomplexobj(desired):
+             desiredr = real(desired)
+             desiredi = imag(desired)
+         else:
+             desiredr = desired
+             desiredi = 0
+         try:
+             assert_equal(actualr, desiredr)
+             assert_equal(actuali, desiredi)
+         except AssertionError:
+             raise AssertionError(msg)
+
+     # isscalar test to check cases such as [np.nan] != np.nan
+     if isscalar(desired) != isscalar(actual):
+         raise AssertionError(msg)
+
+     try:
+         isdesnat = isnat(desired)
+         isactnat = isnat(actual)
+         dtypes_match = (np.asarray(desired).dtype.type ==
+                         np.asarray(actual).dtype.type)
+         if isdesnat and isactnat:
+             # If both are NaT (and have the same dtype -- datetime or
+             # timedelta) they are considered equal.
+             if dtypes_match:
+                 return
+             else:
+                 raise AssertionError(msg)
+
+     except (TypeError, ValueError, NotImplementedError):
+         pass
+
+     # Inf/nan/negative zero handling
+     try:
+         isdesnan = isnan(desired)
+         isactnan = isnan(actual)
+         if isdesnan and isactnan:
+             return  # both nan, so equal
+
+         # handle signed zero specially for floats
+         array_actual = np.asarray(actual)
+         array_desired = np.asarray(desired)
+         if (array_actual.dtype.char in 'Mm' or
+                 array_desired.dtype.char in 'Mm'):
+             # version 1.18
+             # until this version, isnan failed for datetime64 and timedelta64.
+             # Now it succeeds but comparison to scalar with a different type
+             # emits a DeprecationWarning.
+             # Avoid that by skipping the next check
+             raise NotImplementedError('cannot compare to a scalar '
+                                       'with a different type')
+
+         if desired == 0 and actual == 0:
+             if not signbit(desired) == signbit(actual):
+                 raise AssertionError(msg)
+
+     except (TypeError, ValueError, NotImplementedError):
+         pass
+
+     try:
+         # Explicitly use __eq__ for comparison, gh-2552
+         if not (desired == actual):
+             raise AssertionError(msg)
+
+     except (DeprecationWarning, FutureWarning) as e:
+         # this handles the case when the two types are not even comparable
+         if 'elementwise == comparison' in e.args[0]:
+             raise AssertionError(msg)
+         else:
+             raise
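Two consequences of the logic above, shown as a sketch: NaNs compare equal when in matching positions, while the `isscalar` guard keeps a container from matching a bare scalar:

import numpy as np
from numpy.testing import assert_equal

assert_equal(np.nan, np.nan)        # passes: both NaN
try:
    assert_equal([np.nan], np.nan)  # fails: container vs. scalar
except AssertionError:
    pass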
+
+
+ def print_assert_equal(test_string, actual, desired):
+     """
+     Test if two objects are equal, and print an error message if test fails.
+
+     The test is performed with ``actual == desired``.
+
+     Parameters
+     ----------
+     test_string : str
+         The message supplied to AssertionError.
+     actual : object
+         The object to test for equality against `desired`.
+     desired : object
+         The expected result.
+
+     Examples
+     --------
+     >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 1])
+     >>> np.testing.print_assert_equal('Test XYZ of func xyz', [0, 1], [0, 2])
+     Traceback (most recent call last):
+     ...
+     AssertionError: Test XYZ of func xyz failed
+     ACTUAL:
+     [0, 1]
+     DESIRED:
+     [0, 2]
+
+     """
+     __tracebackhide__ = True  # Hide traceback for py.test
+     import pprint
+
+     if not (actual == desired):
+         msg = StringIO()
+         msg.write(test_string)
+         msg.write(' failed\nACTUAL: \n')
+         pprint.pprint(actual, msg)
+         msg.write('DESIRED: \n')
+         pprint.pprint(desired, msg)
+         raise AssertionError(msg.getvalue())
+
+
+ @np._no_nep50_warning()
+ def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
+     """
+     Raises an AssertionError if two items are not equal up to desired
+     precision.
+
+     .. note:: It is recommended to use one of `assert_allclose`,
+               `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+               instead of this function for more consistent floating point
+               comparisons.
+
+     The test verifies that the elements of `actual` and `desired` satisfy:
+
+     ``abs(desired-actual) < float64(1.5 * 10**(-decimal))``
+
+     That is a looser test than originally documented, but agrees with what the
+     actual implementation in `assert_array_almost_equal` did up to rounding
+     vagaries. An exception is raised at conflicting values. For ndarrays this
+     delegates to assert_array_almost_equal.
+
+     Parameters
+     ----------
+     actual : array_like
+         The object to check.
+     desired : array_like
+         The expected object.
+     decimal : int, optional
+         Desired precision, default is 7.
+     err_msg : str, optional
+         The error message to be printed in case of failure.
+     verbose : bool, optional
+         If True, the conflicting values are appended to the error message.
+
+     Raises
+     ------
+     AssertionError
+         If actual and desired are not equal up to specified precision.
+
+     See Also
+     --------
+     assert_allclose: Compare two array_like objects for equality with desired
+                      relative and/or absolute precision.
+     assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+     Examples
+     --------
+     >>> from numpy.testing import assert_almost_equal
+     >>> assert_almost_equal(2.3333333333333, 2.33333334)
+     >>> assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
+     Traceback (most recent call last):
+         ...
+     AssertionError:
+     Arrays are not almost equal to 10 decimals
+      ACTUAL: 2.3333333333333
+      DESIRED: 2.33333334
+
+     >>> assert_almost_equal(np.array([1.0,2.3333333333333]),
+     ...                     np.array([1.0,2.33333334]), decimal=9)
+     Traceback (most recent call last):
+         ...
+     AssertionError:
+     Arrays are not almost equal to 9 decimals
+     <BLANKLINE>
+     Mismatched elements: 1 / 2 (50%)
+     Max absolute difference: 6.66669964e-09
+     Max relative difference: 2.85715698e-09
+      x: array([1.         , 2.333333333])
+      y: array([1.        , 2.33333334])
+
+     """
+     __tracebackhide__ = True  # Hide traceback for py.test
+     from numpy.core import ndarray
+     from numpy.lib import iscomplexobj, real, imag
+
+     # Handle complex numbers: separate into real/imag to handle
+     # nan/inf/negative zero correctly
+     # XXX: catch ValueError for subclasses of ndarray where iscomplex fail
+     try:
+         usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
+     except ValueError:
+         usecomplex = False
+
+     def _build_err_msg():
+         header = ('Arrays are not almost equal to %d decimals' % decimal)
+         return build_err_msg([actual, desired], err_msg, verbose=verbose,
+                              header=header)
+
+     if usecomplex:
+         if iscomplexobj(actual):
+             actualr = real(actual)
+             actuali = imag(actual)
+         else:
+             actualr = actual
+             actuali = 0
+         if iscomplexobj(desired):
+             desiredr = real(desired)
+             desiredi = imag(desired)
+         else:
+             desiredr = desired
+             desiredi = 0
+         try:
+             assert_almost_equal(actualr, desiredr, decimal=decimal)
+             assert_almost_equal(actuali, desiredi, decimal=decimal)
+         except AssertionError:
+             raise AssertionError(_build_err_msg())
+
+     if isinstance(actual, (ndarray, tuple, list)) \
+             or isinstance(desired, (ndarray, tuple, list)):
+         return assert_array_almost_equal(actual, desired, decimal, err_msg)
+     try:
+         # If one of desired/actual is not finite, handle it specially here:
+         # check that both are nan if any is a nan, and test for equality
+         # otherwise
+         if not (isfinite(desired) and isfinite(actual)):
+             if isnan(desired) or isnan(actual):
+                 if not (isnan(desired) and isnan(actual)):
+                     raise AssertionError(_build_err_msg())
+             else:
+                 if not desired == actual:
+                     raise AssertionError(_build_err_msg())
+             return
+     except (NotImplementedError, TypeError):
+         pass
+     if abs(desired - actual) >= np.float64(1.5 * 10.0**(-decimal)):
+         raise AssertionError(_build_err_msg())
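To make the threshold concrete: with ``decimal=7`` the cutoff is ``1.5e-7``, so a difference of ``1e-7`` passes while ``2e-7`` fails; a sketch:

from numpy.testing import assert_almost_equal

assert_almost_equal(1.0, 1.0 + 1e-7, decimal=7)      # 1e-7 < 1.5e-7: passes
try:
    assert_almost_equal(1.0, 1.0 + 2e-7, decimal=7)  # 2e-7 >= 1.5e-7: fails
except AssertionError:
    pass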
+
+
+ @np._no_nep50_warning()
+ def assert_approx_equal(actual, desired, significant=7, err_msg='',
+                         verbose=True):
+     """
+     Raises an AssertionError if two items are not equal up to significant
+     digits.
+
+     .. note:: It is recommended to use one of `assert_allclose`,
+               `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+               instead of this function for more consistent floating point
+               comparisons.
+
+     Given two numbers, check that they are approximately equal.
+     Approximately equal is defined as the number of significant digits
+     that agree.
+
+     Parameters
+     ----------
+     actual : scalar
+         The object to check.
+     desired : scalar
+         The expected object.
+     significant : int, optional
+         Desired precision, default is 7.
+     err_msg : str, optional
+         The error message to be printed in case of failure.
+     verbose : bool, optional
+         If True, the conflicting values are appended to the error message.
+
+     Raises
+     ------
+     AssertionError
+         If actual and desired are not equal up to specified precision.
+
+     See Also
+     --------
+     assert_allclose: Compare two array_like objects for equality with desired
+                      relative and/or absolute precision.
+     assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+     Examples
+     --------
+     >>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
+     >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
+     ...                                significant=8)
+     >>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
+     ...                                significant=8)
+     Traceback (most recent call last):
+         ...
+     AssertionError:
+     Items are not equal to 8 significant digits:
+      ACTUAL: 1.234567e-21
+      DESIRED: 1.2345672e-21
+
+     the evaluated condition that raises the exception is
+
+     >>> abs(0.12345670e-20/1e-21 - 0.12345672e-20/1e-21) >= 10**-(8-1)
+     True
+
+     """
+     __tracebackhide__ = True  # Hide traceback for py.test
+     import numpy as np
+
+     (actual, desired) = map(float, (actual, desired))
+     if desired == actual:
+         return
+     # Normalize the numbers to be in range (-10.0,10.0)
+     # scale = float(pow(10,math.floor(math.log10(0.5*(abs(desired)+abs(actual))))))
+     with np.errstate(invalid='ignore'):
+         scale = 0.5*(np.abs(desired) + np.abs(actual))
+         scale = np.power(10, np.floor(np.log10(scale)))
+     try:
+         sc_desired = desired/scale
+     except ZeroDivisionError:
+         sc_desired = 0.0
+     try:
+         sc_actual = actual/scale
+     except ZeroDivisionError:
+         sc_actual = 0.0
+     msg = build_err_msg(
+         [actual, desired], err_msg,
+         header='Items are not equal to %d significant digits:' % significant,
+         verbose=verbose)
+     try:
+         # If one of desired/actual is not finite, handle it specially here:
+         # check that both are nan if any is a nan, and test for equality
+         # otherwise
+         if not (isfinite(desired) and isfinite(actual)):
+             if isnan(desired) or isnan(actual):
+                 if not (isnan(desired) and isnan(actual)):
+                     raise AssertionError(msg)
+             else:
+                 if not desired == actual:
+                     raise AssertionError(msg)
+             return
+     except (TypeError, NotImplementedError):
+         pass
+     if np.abs(sc_desired - sc_actual) >= np.power(10., -(significant-1)):
+         raise AssertionError(msg)
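The normalization step can be reproduced by hand; a sketch using the docstring's own numbers:

import numpy as np

actual, desired, significant = 1.234567e-21, 1.2345672e-21, 8
scale = np.power(10, np.floor(np.log10(0.5 * (abs(desired) + abs(actual)))))
print(abs(desired/scale - actual/scale) >= np.power(10., -(significant - 1)))
# True -> assert_approx_equal would raise at 8 significant digits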
+
+
+ @np._no_nep50_warning()
+ def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
+                          precision=6, equal_nan=True, equal_inf=True,
+                          *, strict=False):
+     __tracebackhide__ = True  # Hide traceback for py.test
+     from numpy.core import (array2string, isnan, inf, bool_, errstate,
+                             all, max, object_)
+
+     x = np.asanyarray(x)
+     y = np.asanyarray(y)
+
+     # original array for output formatting
+     ox, oy = x, y
+
+     def isnumber(x):
+         return x.dtype.char in '?bhilqpBHILQPefdgFDG'
+
+     def istime(x):
+         return x.dtype.char in "Mm"
+
+     def func_assert_same_pos(x, y, func=isnan, hasval='nan'):
+         """Handling nan/inf.
+
+         Combine results of running func on x and y, checking that they are True
+         at the same locations.
+
+         """
+         __tracebackhide__ = True  # Hide traceback for py.test
+
+         x_id = func(x)
+         y_id = func(y)
+         # We include work-arounds here to handle three types of slightly
+         # pathological ndarray subclasses:
+         # (1) all() on `masked` array scalars can return masked arrays, so we
+         #     use != True
+         # (2) __eq__ on some ndarray subclasses returns Python booleans
+         #     instead of element-wise comparisons, so we cast to bool_() and
+         #     use isinstance(..., bool) checks
+         # (3) subclasses with bare-bones __array_function__ implementations may
+         #     not implement np.all(), so favor using the .all() method
+         # We are not committed to supporting such subclasses, but it's nice to
+         # support them if possible.
+         if bool_(x_id == y_id).all() != True:
+             msg = build_err_msg([x, y],
+                                 err_msg + '\nx and y %s location mismatch:'
+                                 % (hasval), verbose=verbose, header=header,
+                                 names=('x', 'y'), precision=precision)
+             raise AssertionError(msg)
+         # If there is a scalar, then here we know the array has the same
+         # flag as it everywhere, so we should return the scalar flag.
+         if isinstance(x_id, bool) or x_id.ndim == 0:
+             return bool_(x_id)
+         elif isinstance(y_id, bool) or y_id.ndim == 0:
+             return bool_(y_id)
+         else:
+             return y_id
+
+     try:
+         if strict:
+             cond = x.shape == y.shape and x.dtype == y.dtype
+         else:
+             cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
+         if not cond:
+             if x.shape != y.shape:
+                 reason = f'\n(shapes {x.shape}, {y.shape} mismatch)'
+             else:
+                 reason = f'\n(dtypes {x.dtype}, {y.dtype} mismatch)'
+             msg = build_err_msg([x, y],
+                                 err_msg
+                                 + reason,
+                                 verbose=verbose, header=header,
+                                 names=('x', 'y'), precision=precision)
+             raise AssertionError(msg)
+
+         flagged = bool_(False)
+         if isnumber(x) and isnumber(y):
+             if equal_nan:
+                 flagged = func_assert_same_pos(x, y, func=isnan, hasval='nan')
+
+             if equal_inf:
+                 flagged |= func_assert_same_pos(x, y,
+                                                 func=lambda xy: xy == +inf,
+                                                 hasval='+inf')
+                 flagged |= func_assert_same_pos(x, y,
+                                                 func=lambda xy: xy == -inf,
+                                                 hasval='-inf')
+
+         elif istime(x) and istime(y):
+             # If one is datetime64 and the other timedelta64 there is no point
+             if equal_nan and x.dtype.type == y.dtype.type:
+                 flagged = func_assert_same_pos(x, y, func=isnat, hasval="NaT")
+
+         if flagged.ndim > 0:
+             x, y = x[~flagged], y[~flagged]
+             # Only do the comparison if actual values are left
+             if x.size == 0:
+                 return
+         elif flagged:
+             # no sense doing comparison if everything is flagged.
+             return
+
+         val = comparison(x, y)
+
+         if isinstance(val, bool):
+             cond = val
+             reduced = array([val])
+         else:
+             reduced = val.ravel()
+             cond = reduced.all()
+
+         # The below comparison is a hack to ensure that fully masked
+         # results, for which val.ravel().all() returns np.ma.masked,
+         # do not trigger a failure (np.ma.masked != True evaluates as
+         # np.ma.masked, which is falsy).
+         if cond != True:
+             n_mismatch = reduced.size - reduced.sum(dtype=intp)
+             n_elements = flagged.size if flagged.ndim != 0 else reduced.size
+             percent_mismatch = 100 * n_mismatch / n_elements
+             remarks = [
+                 'Mismatched elements: {} / {} ({:.3g}%)'.format(
+                     n_mismatch, n_elements, percent_mismatch)]
+
+             with errstate(all='ignore'):
+                 # ignore errors for non-numeric types
+                 with contextlib.suppress(TypeError):
+                     error = abs(x - y)
+                     if np.issubdtype(x.dtype, np.unsignedinteger):
+                         error2 = abs(y - x)
+                         np.minimum(error, error2, out=error)
+                     max_abs_error = max(error)
+                     if getattr(error, 'dtype', object_) == object_:
+                         remarks.append('Max absolute difference: '
+                                        + str(max_abs_error))
+                     else:
+                         remarks.append('Max absolute difference: '
+                                        + array2string(max_abs_error))
+
+                     # note: this definition of relative error matches that one
+                     # used by assert_allclose (found in np.isclose)
+                     # Filter values where the divisor would be zero
+                     nonzero = bool_(y != 0)
+                     if all(~nonzero):
+                         max_rel_error = array(inf)
+                     else:
+                         max_rel_error = max(error[nonzero] / abs(y[nonzero]))
+                     if getattr(error, 'dtype', object_) == object_:
+                         remarks.append('Max relative difference: '
+                                        + str(max_rel_error))
+                     else:
+                         remarks.append('Max relative difference: '
+                                        + array2string(max_rel_error))
+
+             err_msg += '\n' + '\n'.join(remarks)
+             msg = build_err_msg([ox, oy], err_msg,
+                                 verbose=verbose, header=header,
+                                 names=('x', 'y'), precision=precision)
+             raise AssertionError(msg)
+     except ValueError:
+         import traceback
+         efmt = traceback.format_exc()
+         header = f'error during assertion:\n\n{efmt}\n\n{header}'
+
+         msg = build_err_msg([x, y], err_msg, verbose=verbose, header=header,
+                             names=('x', 'y'), precision=precision)
+         raise ValueError(msg)
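`assert_array_compare` is the engine behind the public array assertions; any elementwise predicate can be plugged in. A sketch building a less-or-equal check (the header string is made up for the example):

import operator
import numpy as np
from numpy.testing import assert_array_compare

assert_array_compare(operator.__le__, np.array([1, 2]), np.array([1, 3]),
                     header='Arrays are not ordered by <=')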
+
+
+ def assert_array_equal(x, y, err_msg='', verbose=True, *, strict=False):
+     """
+     Raises an AssertionError if two array_like objects are not equal.
+
+     Given two array_like objects, check that the shape is equal and all
+     elements of these objects are equal (but see the Notes for the special
+     handling of a scalar). An exception is raised at shape mismatch or
+     conflicting values. In contrast to the standard usage in numpy, NaNs
+     are compared like numbers: no assertion is raised if both objects have
+     NaNs in the same positions.
+
+     The usual caution for verifying equality with floating point numbers is
+     advised.
+
+     Parameters
+     ----------
+     x : array_like
+         The actual object to check.
+     y : array_like
+         The desired, expected object.
+     err_msg : str, optional
+         The error message to be printed in case of failure.
+     verbose : bool, optional
+         If True, the conflicting values are appended to the error message.
+     strict : bool, optional
+         If True, raise an AssertionError when either the shape or the data
+         type of the array_like objects does not match. The special
+         handling for scalars mentioned in the Notes section is disabled.
+
+         .. versionadded:: 1.24.0
+
+     Raises
+     ------
+     AssertionError
+         If actual and desired objects are not equal.
+
+     See Also
+     --------
+     assert_allclose: Compare two array_like objects for equality with desired
+                      relative and/or absolute precision.
+     assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+     Notes
+     -----
+     When one of `x` and `y` is a scalar and the other is array_like, the
+     function checks that each element of the array_like object is equal to
+     the scalar. This behaviour can be disabled with the `strict` parameter.
+
+     Examples
+     --------
+     The first assert does not raise an exception:
+
+     >>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
+     ...                               [np.exp(0),2.33333, np.nan])
+
+     Assert fails with numerical imprecision with floats:
+
+     >>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
+     ...                               [1, np.sqrt(np.pi)**2, np.nan])
+     Traceback (most recent call last):
+         ...
+     AssertionError:
+     Arrays are not equal
+     <BLANKLINE>
+     Mismatched elements: 1 / 3 (33.3%)
+     Max absolute difference: 4.4408921e-16
+     Max relative difference: 1.41357986e-16
+      x: array([1.      , 3.141593,      nan])
+      y: array([1.      , 3.141593,      nan])
+
+     Use `assert_allclose` or one of the nulp (number of floating point values)
+     functions for these cases instead:
+
+     >>> np.testing.assert_allclose([1.0,np.pi,np.nan],
+     ...                            [1, np.sqrt(np.pi)**2, np.nan],
+     ...                            rtol=1e-10, atol=0)
+
+     As mentioned in the Notes section, `assert_array_equal` has special
+     handling for scalars. Here the test checks that each value in `x` is 3:
+
+     >>> x = np.full((2, 5), fill_value=3)
+     >>> np.testing.assert_array_equal(x, 3)
+
+     Use `strict` to raise an AssertionError when comparing a scalar with an
+     array:
+
+     >>> np.testing.assert_array_equal(x, 3, strict=True)
+     Traceback (most recent call last):
+         ...
+     AssertionError:
+     Arrays are not equal
+     <BLANKLINE>
+     (shapes (2, 5), () mismatch)
+      x: array([[3, 3, 3, 3, 3],
+            [3, 3, 3, 3, 3]])
+      y: array(3)
+
+     The `strict` parameter also ensures that the array data types match:
+
+     >>> x = np.array([2, 2, 2])
+     >>> y = np.array([2., 2., 2.], dtype=np.float32)
+     >>> np.testing.assert_array_equal(x, y, strict=True)
+     Traceback (most recent call last):
+         ...
+     AssertionError:
+     Arrays are not equal
+     <BLANKLINE>
+     (dtypes int64, float32 mismatch)
+      x: array([2, 2, 2])
+      y: array([2., 2., 2.], dtype=float32)
+     """
+     __tracebackhide__ = True  # Hide traceback for py.test
+     assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
+                          verbose=verbose, header='Arrays are not equal',
+                          strict=strict)
+
+
+ @np._no_nep50_warning()
+ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
+     """
+     Raises an AssertionError if two objects are not equal up to desired
+     precision.
+
+     .. note:: It is recommended to use one of `assert_allclose`,
+               `assert_array_almost_equal_nulp` or `assert_array_max_ulp`
+               instead of this function for more consistent floating point
+               comparisons.
+
+     The test verifies identical shapes and that the elements of ``actual`` and
+     ``desired`` satisfy:
+
+     ``abs(desired-actual) < 1.5 * 10**(-decimal)``
+
+     That is a looser test than originally documented, but agrees with what the
+     actual implementation did up to rounding vagaries. An exception is raised
+     at shape mismatch or conflicting values. In contrast to the standard usage
+     in numpy, NaNs are compared like numbers: no assertion is raised if both
+     objects have NaNs in the same positions.
+
+     Parameters
+     ----------
+     x : array_like
+         The actual object to check.
+     y : array_like
+         The desired, expected object.
+     decimal : int, optional
+         Desired precision, default is 6.
+     err_msg : str, optional
+         The error message to be printed in case of failure.
+     verbose : bool, optional
+         If True, the conflicting values are appended to the error message.
+
+     Raises
+     ------
+     AssertionError
+         If actual and desired are not equal up to specified precision.
+
+     See Also
+     --------
+     assert_allclose: Compare two array_like objects for equality with desired
+                      relative and/or absolute precision.
+     assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+
+     Examples
+     --------
+     the first assert does not raise an exception
+
+     >>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
+     ...                                      [1.0,2.333,np.nan])
+
+     >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
+     ...                                      [1.0,2.33339,np.nan], decimal=5)
+     Traceback (most recent call last):
+         ...
+     AssertionError:
+     Arrays are not almost equal to 5 decimals
+     <BLANKLINE>
+     Mismatched elements: 1 / 3 (33.3%)
+     Max absolute difference: 6.e-05
+     Max relative difference: 2.57136612e-05
+      x: array([1.     , 2.33333,     nan])
+      y: array([1.     , 2.33339,     nan])
+
+     >>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
+     ...                                      [1.0,2.33333, 5], decimal=5)
+     Traceback (most recent call last):
+         ...
+     AssertionError:
+     Arrays are not almost equal to 5 decimals
+     <BLANKLINE>
+     x and y nan location mismatch:
+      x: array([1.     , 2.33333,     nan])
+      y: array([1.     , 2.33333, 5.     ])
+
+     """
+     __tracebackhide__ = True  # Hide traceback for py.test
+     from numpy.core import number, float_, result_type
+     from numpy.core.numerictypes import issubdtype
+     from numpy.core.fromnumeric import any as npany
+
+     def compare(x, y):
+         try:
+             if npany(isinf(x)) or npany(isinf(y)):
+                 xinfid = isinf(x)
+                 yinfid = isinf(y)
+                 if not (xinfid == yinfid).all():
+                     return False
+                 # if one item, x and y is +- inf
+                 if x.size == y.size == 1:
+                     return x == y
+                 x = x[~xinfid]
+                 y = y[~yinfid]
+         except (TypeError, NotImplementedError):
+             pass
+
+         # make sure y is an inexact type to avoid abs(MIN_INT); will cause
+         # casting of x later.
+         dtype = result_type(y, 1.)
+         y = np.asanyarray(y, dtype)
+         z = abs(x - y)
+
+         if not issubdtype(z.dtype, number):
+             z = z.astype(float_)  # handle object arrays
+
+         return z < 1.5 * 10.0**(-decimal)
+
+     assert_array_compare(compare, x, y, err_msg=err_msg, verbose=verbose,
+                          header=('Arrays are not almost equal to %d decimals'
+                                  % decimal),
+                          precision=decimal)
+
+
+ def assert_array_less(x, y, err_msg='', verbose=True):
+     """
+     Raises an AssertionError if two array_like objects are not ordered by less
+     than.
+
+     Given two array_like objects, check that the shape is equal and all
+     elements of the first object are strictly smaller than those of the
+     second object. An exception is raised at shape mismatch or incorrectly
+     ordered values. Shape mismatch does not raise if an object has zero
+     dimension. In contrast to the standard usage in numpy, NaNs are
+     compared: no assertion is raised if both objects have NaNs in the same
+     positions.
+
+     Parameters
+     ----------
+     x : array_like
+         The smaller object to check.
+     y : array_like
+         The larger object to compare.
+     err_msg : string
+         The error message to be printed in case of failure.
+     verbose : bool
+         If True, the conflicting values are appended to the error message.
+
+     Raises
+     ------
+     AssertionError
+         If x is not strictly smaller than y, element-wise.
+
+     See Also
+     --------
+     assert_array_equal: tests objects for equality
+     assert_array_almost_equal: test objects for equality up to precision
+
+     Examples
+     --------
+     >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
+     >>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
+     Traceback (most recent call last):
+         ...
+     AssertionError:
+     Arrays are not less-ordered
+     <BLANKLINE>
+     Mismatched elements: 1 / 3 (33.3%)
+     Max absolute difference: 1.
+     Max relative difference: 0.5
+      x: array([ 1.,  1., nan])
+      y: array([ 1.,  2., nan])
+
+     >>> np.testing.assert_array_less([1.0, 4.0], 3)
+     Traceback (most recent call last):
+         ...
+     AssertionError:
+     Arrays are not less-ordered
+     <BLANKLINE>
+     Mismatched elements: 1 / 2 (50%)
+     Max absolute difference: 2.
+     Max relative difference: 0.66666667
+      x: array([1., 4.])
+      y: array(3)
+
+     >>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
+     Traceback (most recent call last):
+         ...
+     AssertionError:
+     Arrays are not less-ordered
+     <BLANKLINE>
+     (shapes (3,), (1,) mismatch)
+      x: array([1., 2., 3.])
+      y: array([4])
+
+     """
+     __tracebackhide__ = True  # Hide traceback for py.test
+     assert_array_compare(operator.__lt__, x, y, err_msg=err_msg,
+                          verbose=verbose,
+                          header='Arrays are not less-ordered',
+                          equal_inf=False)
+
+
+ def runstring(astr, dict):
+     exec(astr, dict)
+
+
+ def assert_string_equal(actual, desired):
+     """
+     Test if two strings are equal.
+
+     If the given strings are equal, `assert_string_equal` does nothing.
+     If they are not equal, an AssertionError is raised, and the diff
+     between the strings is shown.
+
+     Parameters
+     ----------
+     actual : str
+         The string to test for equality against the expected string.
+     desired : str
+         The expected string.
+
+     Examples
+     --------
+     >>> np.testing.assert_string_equal('abc', 'abc')
+     >>> np.testing.assert_string_equal('abc', 'abcd')
+     Traceback (most recent call last):
+       File "<stdin>", line 1, in <module>
+     ...
+     AssertionError: Differences in strings:
+     - abc+ abcd?    +
+
+     """
+     # delay import of difflib to reduce startup time
+     __tracebackhide__ = True  # Hide traceback for py.test
+     import difflib
+
+     if not isinstance(actual, str):
+         raise AssertionError(repr(type(actual)))
+     if not isinstance(desired, str):
+         raise AssertionError(repr(type(desired)))
+     if desired == actual:
+         return
+
+     diff = list(difflib.Differ().compare(actual.splitlines(True),
+                                          desired.splitlines(True)))
+     diff_list = []
+     while diff:
+         d1 = diff.pop(0)
+         if d1.startswith('  '):
+             continue
+         if d1.startswith('- '):
+             l = [d1]
+             d2 = diff.pop(0)
+             if d2.startswith('? '):
+                 l.append(d2)
+                 d2 = diff.pop(0)
+             if not d2.startswith('+ '):
+                 raise AssertionError(repr(d2))
+             l.append(d2)
+             if diff:
+                 d3 = diff.pop(0)
+                 if d3.startswith('? '):
+                     l.append(d3)
+                 else:
+                     diff.insert(0, d3)
+             if d2[2:] == d1[2:]:
+                 continue
+             diff_list.extend(l)
+             continue
+         raise AssertionError(repr(d1))
+     if not diff_list:
+         return
+     msg = f"Differences in strings:\n{''.join(diff_list).rstrip()}"
+     if actual != desired:
+         raise AssertionError(msg)
+
+
+ def rundocs(filename=None, raise_on_error=True):
+     """
+     Run doctests found in the given file.
+
+     By default `rundocs` raises an AssertionError on failure.
+
+     Parameters
+     ----------
+     filename : str
+         The path to the file for which the doctests are run.
+     raise_on_error : bool
+         Whether to raise an AssertionError when a doctest fails. Default is
+         True.
+
+     Notes
+     -----
+     The doctests can be run by the user/developer by adding the ``doctests``
+     argument to the ``test()`` call. For example, to run all tests (including
+     doctests) for `numpy.lib`:
+
+     >>> np.lib.test(doctests=True)  # doctest: +SKIP
+     """
+     from numpy.distutils.misc_util import exec_mod_from_location
+     import doctest
+     if filename is None:
+         f = sys._getframe(1)
+         filename = f.f_globals['__file__']
+     name = os.path.splitext(os.path.basename(filename))[0]
+     m = exec_mod_from_location(name, filename)
+
+     tests = doctest.DocTestFinder().find(m)
+     runner = doctest.DocTestRunner(verbose=False)
+
+     msg = []
+     if raise_on_error:
+         out = lambda s: msg.append(s)
+     else:
+         out = None
+
+     for test in tests:
+         runner.run(test, out=out)
+
+     if runner.failures > 0 and raise_on_error:
+         raise AssertionError("Some doctests failed:\n%s" % "\n".join(msg))
+
+
+ def check_support_sve():
+     """
+     Check whether the CPU reports Arm SVE support via ``lscpu`` (gh-22982).
+     """
+
+     import subprocess
+     cmd = 'lscpu'
+     try:
+         output = subprocess.run(cmd, capture_output=True, text=True)
+         return 'sve' in output.stdout
+     except OSError:
+         return False
+
+
+ _SUPPORTS_SVE = check_support_sve()
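The module-level flag is meant to be consumed by test decorators; a sketch of one plausible use with pytest (the test name is hypothetical):

import pytest
from numpy.testing._private.utils import _SUPPORTS_SVE

@pytest.mark.skipif(_SUPPORTS_SVE, reason="gh-22982: known issue on SVE hardware")
def test_example():
    ...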
+
+ #
+ # assert_raises and assert_raises_regex are taken from unittest.
+ #
+ import unittest
+
+
+ class _Dummy(unittest.TestCase):
+     def nop(self):
+         pass
+
+
+ _d = _Dummy('nop')
+
+
+ def assert_raises(*args, **kwargs):
+     """
+     assert_raises(exception_class, callable, *args, **kwargs)
+     assert_raises(exception_class)
+
+     Fail unless an exception of class exception_class is thrown
+     by callable when invoked with arguments args and keyword
+     arguments kwargs. If a different type of exception is
+     thrown, it will not be caught, and the test case will be
+     deemed to have suffered an error, exactly as for an
+     unexpected exception.
+
+     Alternatively, `assert_raises` can be used as a context manager:
+
+     >>> from numpy.testing import assert_raises
+     >>> with assert_raises(ZeroDivisionError):
+     ...     1 / 0
+
+     is equivalent to
+
+     >>> def div(x, y):
+     ...     return x / y
+     >>> assert_raises(ZeroDivisionError, div, 1, 0)
+
+     """
+     __tracebackhide__ = True  # Hide traceback for py.test
+     return _d.assertRaises(*args, **kwargs)
+
+
+ def assert_raises_regex(exception_class, expected_regexp, *args, **kwargs):
+     """
+     assert_raises_regex(exception_class, expected_regexp, callable, *args,
+                         **kwargs)
+     assert_raises_regex(exception_class, expected_regexp)
+
+     Fail unless an exception of class exception_class and with message that
+     matches expected_regexp is thrown by callable when invoked with arguments
+     args and keyword arguments kwargs.
+
+     Alternatively, can be used as a context manager like `assert_raises`.
+
+     Notes
+     -----
+     .. versionadded:: 1.9.0
+
+     """
+     __tracebackhide__ = True  # Hide traceback for py.test
+     return _d.assertRaisesRegex(exception_class, expected_regexp, *args, **kwargs)
+
+
+ def decorate_methods(cls, decorator, testmatch=None):
+     """
+     Apply a decorator to all methods in a class matching a regular expression.
+
+     The given decorator is applied to all public methods of `cls` that are
+     matched by the regular expression `testmatch`
+     (``testmatch.search(methodname)``). Methods that are private, i.e. start
+     with an underscore, are ignored.
+
+     Parameters
+     ----------
+     cls : class
+         Class whose methods to decorate.
+     decorator : function
+         Decorator to apply to methods
+     testmatch : compiled regexp or str, optional
+         The regular expression. Default value is None, in which case the
+         nose default (``re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)``)
+         is used.
+         If `testmatch` is a string, it is compiled to a regular expression
+         first.
+
+     """
+     if testmatch is None:
+         testmatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
+     else:
+         testmatch = re.compile(testmatch)
+     cls_attr = cls.__dict__
+
+     # delayed import to reduce startup time
+     from inspect import isfunction
+
+     methods = [_m for _m in cls_attr.values() if isfunction(_m)]
+     for function in methods:
+         try:
+             if hasattr(function, 'compat_func_name'):
+                 funcname = function.compat_func_name
+             else:
+                 funcname = function.__name__
+         except AttributeError:
+             # not a function
+             continue
+         if testmatch.search(funcname) and not funcname.startswith('_'):
+             setattr(cls, funcname, decorator(function))
+     return
+
+
+ def measure(code_str, times=1, label=None):
+     """
+     Return elapsed time for executing code in the namespace of the caller.
+
+     The supplied code string is compiled with the Python builtin ``compile``.
+     The precision of the timing is 10 milliseconds. If the code will execute
+     fast on this timescale, it can be executed many times to get reasonable
+     timing accuracy.
+
+     Parameters
+     ----------
+     code_str : str
+         The code to be timed.
+     times : int, optional
+         The number of times the code is executed. Default is 1. The code is
+         only compiled once.
+     label : str, optional
+         A label to identify `code_str` with. This is passed into ``compile``
+         as the second argument (for run-time error messages).
+
+     Returns
+     -------
+     elapsed : float
+         Total elapsed time in seconds for executing `code_str` `times` times.
+
+     Examples
+     --------
+     >>> times = 10
+     >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times)
+     >>> print("Time for a single execution : ", etime / times, "s")  # doctest: +SKIP
+     Time for a single execution :  0.005 s
+
+     """
+     frame = sys._getframe(1)
+     locs, globs = frame.f_locals, frame.f_globals
+
+     code = compile(code_str, f'Test name: {label} ', 'exec')
+     i = 0
+     elapsed = jiffies()
+     while i < times:
+         i += 1
+         exec(code, globs, locs)
+     elapsed = jiffies() - elapsed
+     return 0.01*elapsed
1410
+
1411
+
1412
+ def _assert_valid_refcount(op):
1413
+ """
1414
+ Check that ufuncs don't mishandle refcount of object `1`.
1415
+ Used in a few regression tests.
1416
+ """
1417
+ if not HAS_REFCOUNT:
1418
+ return True
1419
+
1420
+ import gc
1421
+ import numpy as np
1422
+
1423
+ b = np.arange(100*100).reshape(100, 100)
1424
+ c = b
1425
+ i = 1
1426
+
1427
+ gc.disable()
1428
+ try:
1429
+ rc = sys.getrefcount(i)
1430
+ for j in range(15):
1431
+ d = op(b, c)
1432
+ assert_(sys.getrefcount(i) >= rc)
1433
+ finally:
1434
+ gc.enable()
1435
+ del d # for pyflakes
1436
+
1437
+
1438
+ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
1439
+ err_msg='', verbose=True):
1440
+ """
1441
+ Raises an AssertionError if two objects are not equal up to desired
1442
+ tolerance.
1443
+
1444
+ Given two array_like objects, check that their shapes and all elements
1445
+ are equal (but see the Notes for the special handling of a scalar). An
1446
+ exception is raised if the shapes mismatch or any values conflict. In
1447
+ contrast to the standard usage in numpy, NaNs are compared like numbers;
1448
+ no assertion is raised if both objects have NaNs in the same positions.
1449
+
1450
+ The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note
1451
+ that ``allclose`` has different default values). It compares the difference
1452
+ between `actual` and `desired` to ``atol + rtol * abs(desired)``.
1453
+
1454
+ .. versionadded:: 1.5.0
1455
+
1456
+ Parameters
1457
+ ----------
1458
+ actual : array_like
1459
+ Array obtained.
1460
+ desired : array_like
1461
+ Array desired.
1462
+ rtol : float, optional
1463
+ Relative tolerance.
1464
+ atol : float, optional
1465
+ Absolute tolerance.
1466
+ equal_nan : bool, optional.
1467
+ If True, NaNs will compare equal.
1468
+ err_msg : str, optional
1469
+ The error message to be printed in case of failure.
1470
+ verbose : bool, optional
1471
+ If True, the conflicting values are appended to the error message.
1472
+
1473
+ Raises
1474
+ ------
1475
+ AssertionError
1476
+ If actual and desired are not equal up to specified precision.
1477
+
1478
+ See Also
1479
+ --------
1480
+ assert_array_almost_equal_nulp, assert_array_max_ulp
1481
+
1482
+ Notes
1483
+ -----
1484
+ When one of `actual` and `desired` is a scalar and the other is
1485
+ array_like, the function checks that each element of the array_like
1486
+ object is equal to the scalar.
1487
+
1488
+ Examples
1489
+ --------
1490
+ >>> x = [1e-5, 1e-3, 1e-1]
1491
+ >>> y = np.arccos(np.cos(x))
1492
+ >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0)
1493
+
1494
+ """
1495
+ __tracebackhide__ = True # Hide traceback for py.test
1496
+ import numpy as np
1497
+
1498
+ def compare(x, y):
1499
+ return np.core.numeric.isclose(x, y, rtol=rtol, atol=atol,
1500
+ equal_nan=equal_nan)
1501
+
1502
+ actual, desired = np.asanyarray(actual), np.asanyarray(desired)
1503
+ header = f'Not equal to tolerance rtol={rtol:g}, atol={atol:g}'
1504
+ assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
1505
+ verbose=verbose, header=header, equal_nan=equal_nan)
1506
+
1507
+
1508
+ def assert_array_almost_equal_nulp(x, y, nulp=1):
1509
+ """
1510
+ Compare two arrays relatively to their spacing.
1511
+
1512
+ This is a relatively robust method to compare two arrays whose amplitude
1513
+ is variable.
1514
+
1515
+ Parameters
1516
+ ----------
1517
+ x, y : array_like
1518
+ Input arrays.
1519
+ nulp : int, optional
1520
+ The maximum number of units in the last place for tolerance (see Notes).
1521
+ Default is 1.
1522
+
1523
+ Returns
1524
+ -------
1525
+ None
1526
+
1527
+ Raises
1528
+ ------
1529
+ AssertionError
1530
+ If the spacing between `x` and `y` for one or more elements is larger
1531
+ than `nulp`.
1532
+
1533
+ See Also
1534
+ --------
1535
+ assert_array_max_ulp : Check that all items of arrays differ in at most
1536
+ N Units in the Last Place.
1537
+ spacing : Return the distance between x and the nearest adjacent number.
1538
+
1539
+ Notes
1540
+ -----
1541
+ An assertion is raised if the following condition is not met::
1542
+
1543
+ abs(x - y) <= nulp * spacing(maximum(abs(x), abs(y)))
1544
+
1545
+ Examples
1546
+ --------
1547
+ >>> x = np.array([1., 1e-10, 1e-20])
1548
+ >>> eps = np.finfo(x.dtype).eps
1549
+ >>> np.testing.assert_array_almost_equal_nulp(x, x*eps/2 + x)
1550
+
1551
+ >>> np.testing.assert_array_almost_equal_nulp(x, x*eps + x)
1552
+ Traceback (most recent call last):
1553
+ ...
1554
+ AssertionError: X and Y are not equal to 1 ULP (max is 2)
1555
+
1556
+ """
1557
+ __tracebackhide__ = True # Hide traceback for py.test
1558
+ import numpy as np
1559
+ ax = np.abs(x)
1560
+ ay = np.abs(y)
1561
+ ref = nulp * np.spacing(np.where(ax > ay, ax, ay))
1562
+ if not np.all(np.abs(x-y) <= ref):
1563
+ if np.iscomplexobj(x) or np.iscomplexobj(y):
1564
+ msg = "X and Y are not equal to %d ULP" % nulp
1565
+ else:
1566
+ max_nulp = np.max(nulp_diff(x, y))
1567
+ msg = "X and Y are not equal to %d ULP (max is %g)" % (nulp, max_nulp)
1568
+ raise AssertionError(msg)
1569
+
1570
+
1571
+ def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
1572
+ """
1573
+ Check that all items of arrays differ in at most N Units in the Last Place.
1574
+
1575
+ Parameters
1576
+ ----------
1577
+ a, b : array_like
1578
+ Input arrays to be compared.
1579
+ maxulp : int, optional
1580
+ The maximum number of units in the last place that elements of `a` and
1581
+ `b` can differ. Default is 1.
1582
+ dtype : dtype, optional
1583
+ Data-type to convert `a` and `b` to if given. Default is None.
1584
+
1585
+ Returns
1586
+ -------
1587
+ ret : ndarray
1588
+ Array containing number of representable floating point numbers between
1589
+ items in `a` and `b`.
1590
+
1591
+ Raises
1592
+ ------
1593
+ AssertionError
1594
+ If one or more elements differ by more than `maxulp`.
1595
+
1596
+ Notes
1597
+ -----
1598
+ For computing the ULP difference, this API does not differentiate between
1599
+ various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
1600
+ is zero).
1601
+
1602
+ See Also
1603
+ --------
1604
+ assert_array_almost_equal_nulp : Compare two arrays relatively to their
1605
+ spacing.
1606
+
1607
+ Examples
1608
+ --------
1609
+ >>> a = np.linspace(0., 1., 100)
1610
+ >>> res = np.testing.assert_array_max_ulp(a, np.arcsin(np.sin(a)))
1611
+
1612
+ """
1613
+ __tracebackhide__ = True # Hide traceback for py.test
1614
+ import numpy as np
1615
+ ret = nulp_diff(a, b, dtype)
1616
+ if not np.all(ret <= maxulp):
1617
+ raise AssertionError("Arrays are not almost equal up to %g "
1618
+ "ULP (max difference is %g ULP)" %
1619
+ (maxulp, np.max(ret)))
1620
+ return ret
1621
+
1622
+
1623
+ def nulp_diff(x, y, dtype=None):
1624
+ """For each item in x and y, return the number of representable floating
1625
+ point numbers between them.
1626
+
1627
+ Parameters
1628
+ ----------
1629
+ x : array_like
1630
+ first input array
1631
+ y : array_like
1632
+ second input array
1633
+ dtype : dtype, optional
1634
+ Data-type to convert `x` and `y` to if given. Default is None.
1635
+
1636
+ Returns
1637
+ -------
1638
+ nulp : array_like
1639
+ number of representable floating point numbers between each item in x
1640
+ and y.
1641
+
1642
+ Notes
1643
+ -----
1644
+ For computing the ULP difference, this API does not differentiate between
1645
+ various representations of NAN (ULP difference between 0x7fc00000 and 0xffc00000
1646
+ is zero).
1647
+
1648
+ Examples
1649
+ --------
1650
+ # By definition, epsilon is the smallest number such that 1 + eps != 1, so
1651
+ # there should be exactly one ULP between 1 and 1 + eps
1652
+ >>> nulp_diff(1, 1 + np.finfo(np.float64).eps)
1653
+ 1.0
1654
+ """
1655
+ import numpy as np
1656
+ if dtype:
1657
+ x = np.asarray(x, dtype=dtype)
1658
+ y = np.asarray(y, dtype=dtype)
1659
+ else:
1660
+ x = np.asarray(x)
1661
+ y = np.asarray(y)
1662
+
1663
+ t = np.common_type(x, y)
1664
+ if np.iscomplexobj(x) or np.iscomplexobj(y):
1665
+ raise NotImplementedError("_nulp not implemented for complex array")
1666
+
1667
+ x = np.array([x], dtype=t)
1668
+ y = np.array([y], dtype=t)
1669
+
1670
+ x[np.isnan(x)] = np.nan
1671
+ y[np.isnan(y)] = np.nan
1672
+
1673
+ if not x.shape == y.shape:
1674
+ raise ValueError("x and y do not have the same shape: %s - %s" %
1675
+ (x.shape, y.shape))
1676
+
1677
+ def _diff(rx, ry, vdt):
1678
+ diff = np.asarray(rx-ry, dtype=vdt)
1679
+ return np.abs(diff)
1680
+
1681
+ rx = integer_repr(x)
1682
+ ry = integer_repr(y)
1683
+ return _diff(rx, ry, t)
1684
+
1685
+
1686
+ def _integer_repr(x, vdt, comp):
1687
+ # Reinterpret binary representation of the float as sign-magnitude:
1688
+ # take into account the two's-complement representation
1689
+ # See also
1690
+ # https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
1691
+ rx = x.view(vdt)
1692
+ if not (rx.size == 1):
1693
+ rx[rx < 0] = comp - rx[rx < 0]
1694
+ else:
1695
+ if rx < 0:
1696
+ rx = comp - rx
1697
+
1698
+ return rx
1699
+
1700
+
1701
+ def integer_repr(x):
1702
+ """Return the signed-magnitude interpretation of the binary representation
1703
+ of x."""
1704
+ import numpy as np
1705
+ if x.dtype == np.float16:
1706
+ return _integer_repr(x, np.int16, np.int16(-2**15))
1707
+ elif x.dtype == np.float32:
1708
+ return _integer_repr(x, np.int32, np.int32(-2**31))
1709
+ elif x.dtype == np.float64:
1710
+ return _integer_repr(x, np.int64, np.int64(-2**63))
1711
+ else:
1712
+ raise ValueError(f'Unsupported dtype {x.dtype}')
1713
+
1714
+
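+ # Example (editor's sketch): under this sign-magnitude reinterpretation,
+ # adjacent representable floats map to adjacent integers, which is what
+ # lets ``nulp_diff`` above compute ULP distances by integer subtraction.
+ #
+ #     >>> import numpy as np
+ #     >>> x = np.array(1.0)
+ #     >>> y = np.array(np.nextafter(1.0, 2.0))    # one ULP above 1.0
+ #     >>> int(integer_repr(y) - integer_repr(x))
+ #     1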
1715
+ @contextlib.contextmanager
1716
+ def _assert_warns_context(warning_class, name=None):
1717
+ __tracebackhide__ = True # Hide traceback for py.test
1718
+ with suppress_warnings() as sup:
1719
+ l = sup.record(warning_class)
1720
+ yield
1721
+ if not len(l) > 0:
1722
+ name_str = f' when calling {name}' if name is not None else ''
1723
+ raise AssertionError("No warning raised" + name_str)
1724
+
1725
+
1726
+ def assert_warns(warning_class, *args, **kwargs):
1727
+ """
1728
+ Fail unless the given callable throws the specified warning.
1729
+
1730
+ A warning of class warning_class should be thrown by the callable when
1731
+ invoked with arguments args and keyword arguments kwargs.
1732
+ If a different type of warning is thrown, it will not be caught.
1733
+
1734
+ If called with all arguments other than the warning class omitted, may be
1735
+ used as a context manager:
1736
+
1737
+ with assert_warns(SomeWarning):
1738
+ do_something()
1739
+
1740
+ The ability to be used as a context manager is new in NumPy v1.11.0.
1741
+
1742
+ .. versionadded:: 1.4.0
1743
+
1744
+ Parameters
1745
+ ----------
1746
+ warning_class : class
1747
+ The class defining the warning that `func` is expected to throw.
1748
+ func : callable, optional
1749
+ Callable to test
1750
+ *args : Arguments
1751
+ Arguments for `func`.
1752
+ **kwargs : Kwargs
1753
+ Keyword arguments for `func`.
1754
+
1755
+ Returns
1756
+ -------
1757
+ The value returned by `func`.
1758
+
1759
+ Examples
1760
+ --------
1761
+ >>> import warnings
1762
+ >>> def deprecated_func(num):
1763
+ ... warnings.warn("Please upgrade", DeprecationWarning)
1764
+ ... return num*num
1765
+ >>> with np.testing.assert_warns(DeprecationWarning):
1766
+ ... assert deprecated_func(4) == 16
1767
+ >>> # or passing a func
1768
+ >>> ret = np.testing.assert_warns(DeprecationWarning, deprecated_func, 4)
1769
+ >>> assert ret == 16
1770
+ """
1771
+ if not args:
1772
+ return _assert_warns_context(warning_class)
1773
+
1774
+ func = args[0]
1775
+ args = args[1:]
1776
+ with _assert_warns_context(warning_class, name=func.__name__):
1777
+ return func(*args, **kwargs)
1778
+
1779
+
1780
+ @contextlib.contextmanager
1781
+ def _assert_no_warnings_context(name=None):
1782
+ __tracebackhide__ = True # Hide traceback for py.test
1783
+ with warnings.catch_warnings(record=True) as l:
1784
+ warnings.simplefilter('always')
1785
+ yield
1786
+ if len(l) > 0:
1787
+ name_str = f' when calling {name}' if name is not None else ''
1788
+ raise AssertionError(f'Got warnings{name_str}: {l}')
1789
+
1790
+
1791
+ def assert_no_warnings(*args, **kwargs):
1792
+ """
1793
+ Fail if the given callable produces any warnings.
1794
+
1795
+ If called with all arguments omitted, may be used as a context manager:
1796
+
1797
+ with assert_no_warnings():
1798
+ do_something()
1799
+
1800
+ The ability to be used as a context manager is new in NumPy v1.11.0.
1801
+
1802
+ .. versionadded:: 1.7.0
1803
+
1804
+ Parameters
1805
+ ----------
1806
+ func : callable
1807
+ The callable to test.
1808
+ \\*args : Arguments
1809
+ Arguments passed to `func`.
1810
+ \\*\\*kwargs : Kwargs
1811
+ Keyword arguments passed to `func`.
1812
+
1813
+ Returns
1814
+ -------
1815
+ The value returned by `func`.
1816
+
1817
+ """
1818
+ if not args:
1819
+ return _assert_no_warnings_context()
1820
+
1821
+ func = args[0]
1822
+ args = args[1:]
1823
+ with _assert_no_warnings_context(name=func.__name__):
1824
+ return func(*args, **kwargs)
1825
+
1826
+
1827
+ def _gen_alignment_data(dtype=float32, type='binary', max_size=24):
1828
+ """
1829
+ generator producing data with different alignment and offsets
1830
+ to test simd vectorization
1831
+
1832
+ Parameters
1833
+ ----------
1834
+ dtype : dtype
1835
+ data type to produce
1836
+ type : string
1837
+ 'unary': create data for unary operations, creates one input
1838
+ and output array
1839
+ 'binary': create data for binary operations, creates two input
1840
+ and output array
1841
+ max_size : integer
1842
+ maximum size of data to produce
1843
+
1844
+ Returns
1845
+ -------
1846
+ if type is 'unary' yields one output array, one input array and a message
1847
+ containing information on the data
1848
+ if type is 'binary' yields one output array, two input arrays and a message
1849
+ containing information on the data
1850
+
1851
+ """
1852
+ ufmt = 'unary offset=(%d, %d), size=%d, dtype=%r, %s'
1853
+ bfmt = 'binary offset=(%d, %d, %d), size=%d, dtype=%r, %s'
1854
+ for o in range(3):
1855
+ for s in range(o + 2, max(o + 3, max_size)):
1856
+ if type == 'unary':
1857
+ inp = lambda: arange(s, dtype=dtype)[o:]
1858
+ out = empty((s,), dtype=dtype)[o:]
1859
+ yield out, inp(), ufmt % (o, o, s, dtype, 'out of place')
1860
+ d = inp()
1861
+ yield d, d, ufmt % (o, o, s, dtype, 'in place')
1862
+ yield out[1:], inp()[:-1], ufmt % \
1863
+ (o + 1, o, s - 1, dtype, 'out of place')
1864
+ yield out[:-1], inp()[1:], ufmt % \
1865
+ (o, o + 1, s - 1, dtype, 'out of place')
1866
+ yield inp()[:-1], inp()[1:], ufmt % \
1867
+ (o, o + 1, s - 1, dtype, 'aliased')
1868
+ yield inp()[1:], inp()[:-1], ufmt % \
1869
+ (o + 1, o, s - 1, dtype, 'aliased')
1870
+ if type == 'binary':
1871
+ inp1 = lambda: arange(s, dtype=dtype)[o:]
1872
+ inp2 = lambda: arange(s, dtype=dtype)[o:]
1873
+ out = empty((s,), dtype=dtype)[o:]
1874
+ yield out, inp1(), inp2(), bfmt % \
1875
+ (o, o, o, s, dtype, 'out of place')
1876
+ d = inp1()
1877
+ yield d, d, inp2(), bfmt % \
1878
+ (o, o, o, s, dtype, 'in place1')
1879
+ d = inp2()
1880
+ yield d, inp1(), d, bfmt % \
1881
+ (o, o, o, s, dtype, 'in place2')
1882
+ yield out[1:], inp1()[:-1], inp2()[:-1], bfmt % \
1883
+ (o + 1, o, o, s - 1, dtype, 'out of place')
1884
+ yield out[:-1], inp1()[1:], inp2()[:-1], bfmt % \
1885
+ (o, o + 1, o, s - 1, dtype, 'out of place')
1886
+ yield out[:-1], inp1()[:-1], inp2()[1:], bfmt % \
1887
+ (o, o, o + 1, s - 1, dtype, 'out of place')
1888
+ yield inp1()[1:], inp1()[:-1], inp2()[:-1], bfmt % \
1889
+ (o + 1, o, o, s - 1, dtype, 'aliased')
1890
+ yield inp1()[:-1], inp1()[1:], inp2()[:-1], bfmt % \
1891
+ (o, o + 1, o, s - 1, dtype, 'aliased')
1892
+ yield inp1()[:-1], inp1()[:-1], inp2()[1:], bfmt % \
1893
+ (o, o, o + 1, s - 1, dtype, 'aliased')
1894
+
1895
+
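+ # Example (editor's sketch): exercising the differently offset and aliased
+ # layouts of a unary ufunc with the generator above.
+ #
+ #     >>> import numpy as np
+ #     >>> for out, inp, msg in _gen_alignment_data(dtype=np.float64,
+ #     ...                                          type='unary',
+ #     ...                                          max_size=8):
+ #     ...     _ = np.negative(inp, out=out)       # msg describes the layout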
1896
+ class IgnoreException(Exception):
1897
+ "Ignoring this exception due to disabled feature"
1898
+ pass
1899
+
1900
+
1901
+ @contextlib.contextmanager
1902
+ def tempdir(*args, **kwargs):
1903
+ """Context manager to provide a temporary test folder.
1904
+
1905
+ All arguments are passed as-is to the underlying tempfile.mkdtemp
1906
+ function.
1907
+
1908
+ """
1909
+ tmpdir = mkdtemp(*args, **kwargs)
1910
+ try:
1911
+ yield tmpdir
1912
+ finally:
1913
+ shutil.rmtree(tmpdir)
1914
+
1915
+
1916
+ @contextlib.contextmanager
1917
+ def temppath(*args, **kwargs):
1918
+ """Context manager for temporary files.
1919
+
1920
+ Context manager that returns the path to a closed temporary file. Its
1921
+ parameters are the same as for tempfile.mkstemp and are passed directly
1922
+ to that function. The underlying file is removed when the context is
1923
+ exited, so it should be closed at that time.
1924
+
1925
+ Windows does not allow a temporary file to be opened while it is
1926
+ already open elsewhere, which is why the file is closed here right
1927
+ after creation; callers must likewise close it before reopening it.
1928
+
1929
+ """
1930
+ fd, path = mkstemp(*args, **kwargs)
1931
+ os.close(fd)
1932
+ try:
1933
+ yield path
1934
+ finally:
1935
+ os.remove(path)
1936
+
1937
+
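+ # Example (editor's sketch): ``temppath`` yields the path of a closed file
+ # that the caller may reopen; the file is removed when the block exits.
+ #
+ #     >>> import os
+ #     >>> with temppath(suffix='.txt') as path:
+ #     ...     with open(path, 'w') as f:
+ #     ...         _ = f.write('hello')
+ #     >>> os.path.exists(path)
+ #     False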
1938
+ class clear_and_catch_warnings(warnings.catch_warnings):
1939
+ """ Context manager that resets warning registry for catching warnings
1940
+
1941
+ Warnings can be slippery, because, whenever a warning is triggered, Python
1942
+ adds a ``__warningregistry__`` member to the *calling* module. This makes
1943
+ it impossible to retrigger the warning in this module, whatever you put in
1944
+ the warnings filters. This context manager accepts a sequence of `modules`
1945
+ as a keyword argument to its constructor and:
1946
+
1947
+ * stores and removes any ``__warningregistry__`` entries in given `modules`
1948
+ on entry;
1949
+ * resets ``__warningregistry__`` to its previous state on exit.
1950
+
1951
+ This makes it possible to trigger any warning afresh inside the context
1952
+ manager without disturbing the state of warnings outside.
1953
+
1954
+ For compatibility with Python 3.0, please consider all arguments to be
1955
+ keyword-only.
1956
+
1957
+ Parameters
1958
+ ----------
1959
+ record : bool, optional
1960
+ Specifies whether warnings should be captured by a custom
1961
+ implementation of ``warnings.showwarning()`` and be appended to a list
1962
+ returned by the context manager. Otherwise None is returned by the
1963
+ context manager. The objects appended to the list are arguments whose
1964
+ attributes mirror the arguments to ``showwarning()``.
1965
+ modules : sequence, optional
1966
+ Sequence of modules for which to reset warnings registry on entry and
1967
+ restore on exit. To work correctly, all 'ignore' filters should
1968
+ filter by one of these modules.
1969
+
1970
+ Examples
1971
+ --------
1972
+ >>> import warnings
1973
+ >>> with np.testing.clear_and_catch_warnings(
1974
+ ... modules=[np.core.fromnumeric]):
1975
+ ... warnings.simplefilter('always')
1976
+ ... warnings.filterwarnings('ignore', module='np.core.fromnumeric')
1977
+ ... # do something that raises a warning but ignore those in
1978
+ ... # np.core.fromnumeric
1979
+ """
1980
+ class_modules = ()
1981
+
1982
+ def __init__(self, record=False, modules=()):
1983
+ self.modules = set(modules).union(self.class_modules)
1984
+ self._warnreg_copies = {}
1985
+ super().__init__(record=record)
1986
+
1987
+ def __enter__(self):
1988
+ for mod in self.modules:
1989
+ if hasattr(mod, '__warningregistry__'):
1990
+ mod_reg = mod.__warningregistry__
1991
+ self._warnreg_copies[mod] = mod_reg.copy()
1992
+ mod_reg.clear()
1993
+ return super().__enter__()
1994
+
1995
+ def __exit__(self, *exc_info):
1996
+ super().__exit__(*exc_info)
1997
+ for mod in self.modules:
1998
+ if hasattr(mod, '__warningregistry__'):
1999
+ mod.__warningregistry__.clear()
2000
+ if mod in self._warnreg_copies:
2001
+ mod.__warningregistry__.update(self._warnreg_copies[mod])
2002
+
2003
+
2004
+ class suppress_warnings:
2005
+ """
2006
+ Context manager and decorator doing much the same as
2007
+ ``warnings.catch_warnings``.
2008
+
2009
+ However, it also provides a filter mechanism to work around
2010
+ https://bugs.python.org/issue4180.
2011
+
2012
+ This bug causes Python before 3.4 to not reliably show warnings again
2013
+ after they have been ignored once (even within catch_warnings). It
2014
+ means that no "ignore" filter can be used easily, since following
2015
+ tests might need to see the warning. Additionally, this class allows easier
2016
+ specificity for testing warnings and can be nested.
2017
+
2018
+ Parameters
2019
+ ----------
2020
+ forwarding_rule : str, optional
2021
+ One of "always", "once", "module", or "location". Analogous to
2022
+ the usual warnings module filter mode, it is useful to reduce
2023
+ noise mostly on the outermost level. Unsuppressed and unrecorded
2024
+ warnings will be forwarded based on this rule. Defaults to "always".
2025
+ "location" is equivalent to the warnings "default", match by exact
2026
+ location the warning originated from.
2027
+
2028
+ Notes
2029
+ -----
2030
+ Filters added inside the context manager will be discarded again
2031
+ when leaving it. Upon entering all filters defined outside a
2032
+ context will be applied automatically.
2033
+
2034
+ When a recording filter is added, matching warnings are stored in the
2035
+ ``log`` attribute as well as in the list returned by ``record``.
2036
+
2037
+ If filters are added and the ``module`` keyword is given, the
2038
+ warning registry of this module will additionally be cleared when
2039
+ applying it, entering the context, or exiting it. This could cause
2040
+ warnings to appear a second time after leaving the context if they
2041
+ were configured to be printed once (default) and were already
2042
+ printed before the context was entered.
2043
+
2044
+ Nesting this context manager will work as expected when the
2045
+ forwarding rule is "always" (default). Unfiltered and unrecorded
2046
+ warnings will be passed out and be matched by the outer level.
2047
+ On the outermost level they will be printed (or caught by another
2048
+ warnings context). The forwarding rule argument can modify this
2049
+ behaviour.
2050
+
2051
+ Like ``catch_warnings`` this context manager is not threadsafe.
2052
+
2053
+ Examples
2054
+ --------
2055
+
2056
+ With a context manager::
2057
+
2058
+ with np.testing.suppress_warnings() as sup:
2059
+ sup.filter(DeprecationWarning, "Some text")
2060
+ sup.filter(module=np.ma.core)
2061
+ log = sup.record(FutureWarning, "Does this occur?")
2062
+ command_giving_warnings()
2063
+ # The FutureWarning was given once, the filtered warnings were
2064
+ # ignored. All other warnings abide outside settings (may be
2065
+ # printed/error)
2066
+ assert_(len(log) == 1)
2067
+ assert_(len(sup.log) == 1) # also stored in log attribute
2068
+
2069
+ Or as a decorator::
2070
+
2071
+ sup = np.testing.suppress_warnings()
2072
+ sup.filter(module=np.ma.core) # module must match exactly
2073
+ @sup
2074
+ def some_function():
2075
+ # do something which causes a warning in np.ma.core
2076
+ pass
2077
+ """
2078
+ def __init__(self, forwarding_rule="always"):
2079
+ self._entered = False
2080
+
2081
+ # Suppressions are either instance or defined inside one with block:
2082
+ self._suppressions = []
2083
+
2084
+ if forwarding_rule not in {"always", "module", "once", "location"}:
2085
+ raise ValueError("unsupported forwarding rule.")
2086
+ self._forwarding_rule = forwarding_rule
2087
+
2088
+ def _clear_registries(self):
2089
+ if hasattr(warnings, "_filters_mutated"):
2090
+ # clearing the registry should not be necessary on new pythons,
2091
+ # instead the filters should be mutated.
2092
+ warnings._filters_mutated()
2093
+ return
2094
+ # Simply clear the registry, this should normally be harmless,
2095
+ # note that on new pythons it would be invalidated anyway.
2096
+ for module in self._tmp_modules:
2097
+ if hasattr(module, "__warningregistry__"):
2098
+ module.__warningregistry__.clear()
2099
+
2100
+ def _filter(self, category=Warning, message="", module=None, record=False):
2101
+ if record:
2102
+ record = [] # The log where to store warnings
2103
+ else:
2104
+ record = None
2105
+ if self._entered:
2106
+ if module is None:
2107
+ warnings.filterwarnings(
2108
+ "always", category=category, message=message)
2109
+ else:
2110
+ module_regex = module.__name__.replace('.', r'\.') + '$'
2111
+ warnings.filterwarnings(
2112
+ "always", category=category, message=message,
2113
+ module=module_regex)
2114
+ self._tmp_modules.add(module)
2115
+ self._clear_registries()
2116
+
2117
+ self._tmp_suppressions.append(
2118
+ (category, message, re.compile(message, re.I), module, record))
2119
+ else:
2120
+ self._suppressions.append(
2121
+ (category, message, re.compile(message, re.I), module, record))
2122
+
2123
+ return record
2124
+
2125
+ def filter(self, category=Warning, message="", module=None):
2126
+ """
2127
+ Add a new suppressing filter or apply it if the state is entered.
2128
+
2129
+ Parameters
2130
+ ----------
2131
+ category : class, optional
2132
+ Warning class to filter
2133
+ message : string, optional
2134
+ Regular expression matching the warning message.
2135
+ module : module, optional
2136
+ Module to filter for. Note that the module (and its file)
2137
+ must match exactly and cannot be a submodule. This may make
2138
+ it unreliable for external modules.
2139
+
2140
+ Notes
2141
+ -----
2142
+ When added within a context, filters are only added inside
2143
+ the context and will be forgotten when the context is exited.
2144
+ """
2145
+ self._filter(category=category, message=message, module=module,
2146
+ record=False)
2147
+
2148
+ def record(self, category=Warning, message="", module=None):
2149
+ """
2150
+ Append a new recording filter or apply it if the state is entered.
2151
+
2152
+ All warnings matching will be appended to the ``log`` attribute.
2153
+
2154
+ Parameters
2155
+ ----------
2156
+ category : class, optional
2157
+ Warning class to filter
2158
+ message : string, optional
2159
+ Regular expression matching the warning message.
2160
+ module : module, optional
2161
+ Module to filter for. Note that the module (and its file)
2162
+ must match exactly and cannot be a submodule. This may make
2163
+ it unreliable for external modules.
2164
+
2165
+ Returns
2166
+ -------
2167
+ log : list
2168
+ A list which will be filled with all matched warnings.
2169
+
2170
+ Notes
2171
+ -----
2172
+ When added within a context, filters are only added inside
2173
+ the context and will be forgotten when the context is exited.
2174
+ """
2175
+ return self._filter(category=category, message=message, module=module,
2176
+ record=True)
2177
+
2178
+ def __enter__(self):
2179
+ if self._entered:
2180
+ raise RuntimeError("cannot enter suppress_warnings twice.")
2181
+
2182
+ self._orig_show = warnings.showwarning
2183
+ self._filters = warnings.filters
2184
+ warnings.filters = self._filters[:]
2185
+
2186
+ self._entered = True
2187
+ self._tmp_suppressions = []
2188
+ self._tmp_modules = set()
2189
+ self._forwarded = set()
2190
+
2191
+ self.log = [] # reset global log (no need to keep same list)
2192
+
2193
+ for cat, mess, _, mod, log in self._suppressions:
2194
+ if log is not None:
2195
+ del log[:] # clear the log
2196
+ if mod is None:
2197
+ warnings.filterwarnings(
2198
+ "always", category=cat, message=mess)
2199
+ else:
2200
+ module_regex = mod.__name__.replace('.', r'\.') + '$'
2201
+ warnings.filterwarnings(
2202
+ "always", category=cat, message=mess,
2203
+ module=module_regex)
2204
+ self._tmp_modules.add(mod)
2205
+ warnings.showwarning = self._showwarning
2206
+ self._clear_registries()
2207
+
2208
+ return self
2209
+
2210
+ def __exit__(self, *exc_info):
2211
+ warnings.showwarning = self._orig_show
2212
+ warnings.filters = self._filters
2213
+ self._clear_registries()
2214
+ self._entered = False
2215
+ del self._orig_show
2216
+ del self._filters
2217
+
2218
+ def _showwarning(self, message, category, filename, lineno,
2219
+ *args, use_warnmsg=None, **kwargs):
2220
+ for cat, _, pattern, mod, rec in (
2221
+ self._suppressions + self._tmp_suppressions)[::-1]:
2222
+ if (issubclass(category, cat) and
2223
+ pattern.match(message.args[0]) is not None):
2224
+ if mod is None:
2225
+ # Message and category match, either recorded or ignored
2226
+ if rec is not None:
2227
+ msg = WarningMessage(message, category, filename,
2228
+ lineno, **kwargs)
2229
+ self.log.append(msg)
2230
+ rec.append(msg)
2231
+ return
2232
+ # Use startswith, because warnings strips the c or o from
2233
+ # .pyc/.pyo files.
2234
+ elif mod.__file__.startswith(filename):
2235
+ # The message and module (filename) match
2236
+ if rec is not None:
2237
+ msg = WarningMessage(message, category, filename,
2238
+ lineno, **kwargs)
2239
+ self.log.append(msg)
2240
+ rec.append(msg)
2241
+ return
2242
+
2243
+ # There is no filter in place, so pass to the outside handler
2244
+ # unless we should only pass it once
2245
+ if self._forwarding_rule == "always":
2246
+ if use_warnmsg is None:
2247
+ self._orig_show(message, category, filename, lineno,
2248
+ *args, **kwargs)
2249
+ else:
2250
+ self._orig_showmsg(use_warnmsg)
2251
+ return
2252
+
2253
+ if self._forwarding_rule == "once":
2254
+ signature = (message.args, category)
2255
+ elif self._forwarding_rule == "module":
2256
+ signature = (message.args, category, filename)
2257
+ elif self._forwarding_rule == "location":
2258
+ signature = (message.args, category, filename, lineno)
2259
+
2260
+ if signature in self._forwarded:
2261
+ return
2262
+ self._forwarded.add(signature)
2263
+ if use_warnmsg is None:
2264
+ self._orig_show(message, category, filename, lineno, *args,
2265
+ **kwargs)
2266
+ else:
2267
+ self._orig_showmsg(use_warnmsg)
2268
+
2269
+ def __call__(self, func):
2270
+ """
2271
+ Function decorator to apply certain suppressions to a whole
2272
+ function.
2273
+ """
2274
+ @wraps(func)
2275
+ def new_func(*args, **kwargs):
2276
+ with self:
2277
+ return func(*args, **kwargs)
2278
+
2279
+ return new_func
2280
+
2281
+
2282
+ @contextlib.contextmanager
2283
+ def _assert_no_gc_cycles_context(name=None):
2284
+ __tracebackhide__ = True # Hide traceback for py.test
2285
+
2286
+ # not meaningful to test if there is no refcounting
2287
+ if not HAS_REFCOUNT:
2288
+ yield
2289
+ return
2290
+
2291
+ assert_(gc.isenabled())
2292
+ gc.disable()
2293
+ gc_debug = gc.get_debug()
2294
+ try:
2295
+ for i in range(100):
2296
+ if gc.collect() == 0:
2297
+ break
2298
+ else:
2299
+ raise RuntimeError(
2300
+ "Unable to fully collect garbage - perhaps a __del__ method "
2301
+ "is creating more reference cycles?")
2302
+
2303
+ gc.set_debug(gc.DEBUG_SAVEALL)
2304
+ yield
2305
+ # gc.collect returns the number of unreachable objects in cycles that
2306
+ # were found -- we are checking that no cycles were created in the context
2307
+ n_objects_in_cycles = gc.collect()
2308
+ objects_in_cycles = gc.garbage[:]
2309
+ finally:
2310
+ del gc.garbage[:]
2311
+ gc.set_debug(gc_debug)
2312
+ gc.enable()
2313
+
2314
+ if n_objects_in_cycles:
2315
+ name_str = f' when calling {name}' if name is not None else ''
2316
+ raise AssertionError(
2317
+ "Reference cycles were found{}: {} objects were collected, "
2318
+ "of which {} are shown below:{}"
2319
+ .format(
2320
+ name_str,
2321
+ n_objects_in_cycles,
2322
+ len(objects_in_cycles),
2323
+ ''.join(
2324
+ "\n {} object with id={}:\n {}".format(
2325
+ type(o).__name__,
2326
+ id(o),
2327
+ pprint.pformat(o).replace('\n', '\n ')
2328
+ ) for o in objects_in_cycles
2329
+ )
2330
+ )
2331
+ )
2332
+
2333
+
2334
+ def assert_no_gc_cycles(*args, **kwargs):
2335
+ """
2336
+ Fail if the given callable produces any reference cycles.
2337
+
2338
+ If called with all arguments omitted, may be used as a context manager:
2339
+
2340
+ with assert_no_gc_cycles():
2341
+ do_something()
2342
+
2343
+ .. versionadded:: 1.15.0
2344
+
2345
+ Parameters
2346
+ ----------
2347
+ func : callable
2348
+ The callable to test.
2349
+ \\*args : Arguments
2350
+ Arguments passed to `func`.
2351
+ \\*\\*kwargs : Kwargs
2352
+ Keyword arguments passed to `func`.
2353
+
2354
+ Returns
2355
+ -------
2356
+ Nothing. The result is deliberately discarded to ensure that all cycles
2357
+ are found.
2358
+
2359
+ """
2360
+ if not args:
2361
+ return _assert_no_gc_cycles_context()
2362
+
2363
+ func = args[0]
2364
+ args = args[1:]
2365
+ with _assert_no_gc_cycles_context(name=func.__name__):
2366
+ func(*args, **kwargs)
2367
+
2368
+
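+ # Example (editor's sketch): a self-referencing object forms a cycle that
+ # only the garbage collector can reclaim, so the check below fails.
+ #
+ #     >>> assert_no_gc_cycles(list, range(10))    # no cycles: passes
+ #     >>> class Node:                             # hypothetical class
+ #     ...     def __init__(self):
+ #     ...         self.me = self                  # reference cycle
+ #     >>> assert_no_gc_cycles(Node)               # raises AssertionError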
2369
+ def break_cycles():
2370
+ """
2371
+ Break reference cycles by calling gc.collect.
2372
+ Objects can call other objects' methods (for instance, another object's
2373
+ __del__) inside their own __del__. On PyPy, the interpreter only runs
2374
+ between calls to gc.collect, so multiple calls are needed to completely
2375
+ release all cycles.
2376
+ """
2377
+
2378
+ gc.collect()
2379
+ if IS_PYPY:
2380
+ # a few more, just to make sure all the finalizers are called
2381
+ gc.collect()
2382
+ gc.collect()
2383
+ gc.collect()
2384
+ gc.collect()
2385
+
2386
+
2387
+ def requires_memory(free_bytes):
2388
+ """Decorator to skip a test if not enough memory is available"""
2389
+ import pytest
2390
+
2391
+ def decorator(func):
2392
+ @wraps(func)
2393
+ def wrapper(*a, **kw):
2394
+ msg = check_free_memory(free_bytes)
2395
+ if msg is not None:
2396
+ pytest.skip(msg)
2397
+
2398
+ try:
2399
+ return func(*a, **kw)
2400
+ except MemoryError:
2401
+ # Probably ran out of memory regardless: don't regard as failure
2402
+ pytest.xfail("MemoryError raised")
2403
+
2404
+ return wrapper
2405
+
2406
+ return decorator
2407
+
2408
+
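+ # Example (editor's sketch): skip a test unless roughly 2 GB are free,
+ # and xfail it if the allocation still runs out of memory.
+ #
+ #     >>> import numpy as np
+ #     >>> @requires_memory(free_bytes=2e9)
+ #     ... def test_huge_buffer():                 # hypothetical test
+ #     ...     buf = np.zeros(int(1e9), dtype=np.uint8)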
2409
+ def check_free_memory(free_bytes):
2410
+ """
2411
+ Check whether `free_bytes` amount of memory is currently free.
2412
+ Returns: None if enough memory available, otherwise error message
2413
+ """
2414
+ env_var = 'NPY_AVAILABLE_MEM'
2415
+ env_value = os.environ.get(env_var)
2416
+ if env_value is not None:
2417
+ try:
2418
+ mem_free = _parse_size(env_value)
2419
+ except ValueError as exc:
2420
+ raise ValueError(f'Invalid environment variable {env_var}: {exc}')
2421
+
2422
+ msg = (f'{free_bytes/1e9} GB memory required, but environment variable '
2423
+ f'NPY_AVAILABLE_MEM={env_value} set')
2424
+ else:
2425
+ mem_free = _get_mem_available()
2426
+
2427
+ if mem_free is None:
2428
+ msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM "
2429
+ "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
2430
+ "the test.")
2431
+ mem_free = -1
2432
+ else:
2433
+ msg = f'{free_bytes/1e9} GB memory required, but {mem_free/1e9} GB available'
2434
+
2435
+ return msg if mem_free < free_bytes else None
2436
+
2437
+
2438
+ def _parse_size(size_str):
2439
+ """Convert memory size strings ('12 GB' etc.) to float"""
2440
+ suffixes = {'': 1, 'b': 1,
2441
+ 'k': 1000, 'm': 1000**2, 'g': 1000**3, 't': 1000**4,
2442
+ 'kb': 1000, 'mb': 1000**2, 'gb': 1000**3, 'tb': 1000**4,
2443
+ 'kib': 1024, 'mib': 1024**2, 'gib': 1024**3, 'tib': 1024**4}
2444
+
2445
+ size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format(
2446
+ '|'.join(suffixes.keys())), re.I)
2447
+
2448
+ m = size_re.match(size_str.lower())
2449
+ if not m or m.group(2) not in suffixes:
2450
+ raise ValueError(f'value {size_str!r} not a valid size')
2451
+ return int(float(m.group(1)) * suffixes[m.group(2)])
2452
+
2453
+
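+ # Example (editor's sketch): decimal suffixes are powers of 1000, binary
+ # ("..ib") suffixes are powers of 1024, and matching is case-insensitive.
+ #
+ #     >>> _parse_size('12 GB')
+ #     12000000000
+ #     >>> _parse_size('1 GiB')
+ #     1073741824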
2454
+ def _get_mem_available():
2455
+ """Return available memory in bytes, or None if unknown."""
2456
+ try:
2457
+ import psutil
2458
+ return psutil.virtual_memory().available
2459
+ except (ImportError, AttributeError):
2460
+ pass
2461
+
2462
+ if sys.platform.startswith('linux'):
2463
+ info = {}
2464
+ with open('/proc/meminfo') as f:
2465
+ for line in f:
2466
+ p = line.split()
2467
+ info[p[0].strip(':').lower()] = int(p[1]) * 1024
2468
+
2469
+ if 'memavailable' in info:
2470
+ # Linux >= 3.14
2471
+ return info['memavailable']
2472
+ else:
2473
+ return info['memfree'] + info['cached']
2474
+
2475
+ return None
2476
+
2477
+
2478
+ def _no_tracing(func):
2479
+ """
2480
+ Decorator to temporarily turn off tracing for the duration of a test.
2481
+ Needed in tests that check refcounting, otherwise the tracing itself
2482
+ influences the refcounts
2483
+ """
2484
+ if not hasattr(sys, 'gettrace'):
2485
+ return func
2486
+ else:
2487
+ @wraps(func)
2488
+ def wrapper(*args, **kwargs):
2489
+ original_trace = sys.gettrace()
2490
+ try:
2491
+ sys.settrace(None)
2492
+ return func(*args, **kwargs)
2493
+ finally:
2494
+ sys.settrace(original_trace)
2495
+ return wrapper
2496
+
2497
+
2498
+ def _get_glibc_version():
2499
+ try:
2500
+ ver = os.confstr('CS_GNU_LIBC_VERSION').rsplit(' ')[1]
2501
+ except Exception:
2502
+ ver = '0.0'
2503
+
2504
+ return ver
2505
+
2506
+
2507
+ _glibcver = _get_glibc_version()
2508
+ _glibc_older_than = lambda x: (_glibcver != '0.0' and _glibcver < x)
2509
+
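+ # Editor's note: the lexicographic comparison above is an approximation;
+ # for example '2.9' compares greater than '2.17' even though glibc 2.9 is
+ # the older release. It is adequate for the two-digit minor versions
+ # (e.g. '2.17') that are typically checked.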
venv/lib/python3.10/site-packages/numpy/testing/_private/utils.pyi ADDED
@@ -0,0 +1,402 @@
1
+ import os
2
+ import sys
3
+ import ast
4
+ import types
5
+ import warnings
6
+ import unittest
7
+ import contextlib
8
+ from re import Pattern
9
+ from collections.abc import Callable, Iterable, Sequence
10
+ from typing import (
11
+ Literal as L,
12
+ Any,
13
+ AnyStr,
14
+ ClassVar,
15
+ NoReturn,
16
+ overload,
17
+ type_check_only,
18
+ TypeVar,
19
+ Union,
20
+ Final,
21
+ SupportsIndex,
22
+ )
23
+ if sys.version_info >= (3, 10):
24
+ from typing import ParamSpec
25
+ else:
26
+ from typing_extensions import ParamSpec
27
+
28
+ from numpy import generic, dtype, number, object_, bool_, _FloatValue
29
+ from numpy._typing import (
30
+ NDArray,
31
+ ArrayLike,
32
+ DTypeLike,
33
+ _ArrayLikeNumber_co,
34
+ _ArrayLikeObject_co,
35
+ _ArrayLikeTD64_co,
36
+ _ArrayLikeDT64_co,
37
+ )
38
+
39
+ from unittest.case import (
40
+ SkipTest as SkipTest,
41
+ )
42
+
43
+ _P = ParamSpec("_P")
44
+ _T = TypeVar("_T")
45
+ _ET = TypeVar("_ET", bound=BaseException)
46
+ _FT = TypeVar("_FT", bound=Callable[..., Any])
47
+
48
+ # Must return a bool or an ndarray/generic type
49
+ # that is supported by `np.logical_and.reduce`
50
+ _ComparisonFunc = Callable[
51
+ [NDArray[Any], NDArray[Any]],
52
+ Union[
53
+ bool,
54
+ bool_,
55
+ number[Any],
56
+ NDArray[Union[bool_, number[Any], object_]],
57
+ ],
58
+ ]
59
+
60
+ __all__: list[str]
61
+
62
+ class KnownFailureException(Exception): ...
63
+ class IgnoreException(Exception): ...
64
+
65
+ class clear_and_catch_warnings(warnings.catch_warnings):
66
+ class_modules: ClassVar[tuple[types.ModuleType, ...]]
67
+ modules: set[types.ModuleType]
68
+ @overload
69
+ def __new__(
70
+ cls,
71
+ record: L[False] = ...,
72
+ modules: Iterable[types.ModuleType] = ...,
73
+ ) -> _clear_and_catch_warnings_without_records: ...
74
+ @overload
75
+ def __new__(
76
+ cls,
77
+ record: L[True],
78
+ modules: Iterable[types.ModuleType] = ...,
79
+ ) -> _clear_and_catch_warnings_with_records: ...
80
+ @overload
81
+ def __new__(
82
+ cls,
83
+ record: bool,
84
+ modules: Iterable[types.ModuleType] = ...,
85
+ ) -> clear_and_catch_warnings: ...
86
+ def __enter__(self) -> None | list[warnings.WarningMessage]: ...
87
+ def __exit__(
88
+ self,
89
+ __exc_type: None | type[BaseException] = ...,
90
+ __exc_val: None | BaseException = ...,
91
+ __exc_tb: None | types.TracebackType = ...,
92
+ ) -> None: ...
93
+
94
+ # Type-check only `clear_and_catch_warnings` subclasses for both values of the
95
+ # `record` parameter. Copied from the stdlib `warnings` stubs.
96
+
97
+ @type_check_only
98
+ class _clear_and_catch_warnings_with_records(clear_and_catch_warnings):
99
+ def __enter__(self) -> list[warnings.WarningMessage]: ...
100
+
101
+ @type_check_only
102
+ class _clear_and_catch_warnings_without_records(clear_and_catch_warnings):
103
+ def __enter__(self) -> None: ...
104
+
105
+ class suppress_warnings:
106
+ log: list[warnings.WarningMessage]
107
+ def __init__(
108
+ self,
109
+ forwarding_rule: L["always", "module", "once", "location"] = ...,
110
+ ) -> None: ...
111
+ def filter(
112
+ self,
113
+ category: type[Warning] = ...,
114
+ message: str = ...,
115
+ module: None | types.ModuleType = ...,
116
+ ) -> None: ...
117
+ def record(
118
+ self,
119
+ category: type[Warning] = ...,
120
+ message: str = ...,
121
+ module: None | types.ModuleType = ...,
122
+ ) -> list[warnings.WarningMessage]: ...
123
+ def __enter__(self: _T) -> _T: ...
124
+ def __exit__(
125
+ self,
126
+ __exc_type: None | type[BaseException] = ...,
127
+ __exc_val: None | BaseException = ...,
128
+ __exc_tb: None | types.TracebackType = ...,
129
+ ) -> None: ...
130
+ def __call__(self, func: _FT) -> _FT: ...
131
+
132
+ verbose: int
133
+ IS_PYPY: Final[bool]
134
+ IS_PYSTON: Final[bool]
135
+ HAS_REFCOUNT: Final[bool]
136
+ HAS_LAPACK64: Final[bool]
137
+
138
+ def assert_(val: object, msg: str | Callable[[], str] = ...) -> None: ...
139
+
140
+ # Contrary to runtime we can't do `os.name` checks while type checking,
141
+ # only `sys.platform` checks
142
+ if sys.platform == "win32" or sys.platform == "cygwin":
143
+ def memusage(processName: str = ..., instance: int = ...) -> int: ...
144
+ elif sys.platform == "linux":
145
+ def memusage(_proc_pid_stat: str | bytes | os.PathLike[Any] = ...) -> None | int: ...
146
+ else:
147
+ def memusage() -> NoReturn: ...
148
+
149
+ if sys.platform == "linux":
150
+ def jiffies(
151
+ _proc_pid_stat: str | bytes | os.PathLike[Any] = ...,
152
+ _load_time: list[float] = ...,
153
+ ) -> int: ...
154
+ else:
155
+ def jiffies(_load_time: list[float] = ...) -> int: ...
156
+
157
+ def build_err_msg(
158
+ arrays: Iterable[object],
159
+ err_msg: str,
160
+ header: str = ...,
161
+ verbose: bool = ...,
162
+ names: Sequence[str] = ...,
163
+ precision: None | SupportsIndex = ...,
164
+ ) -> str: ...
165
+
166
+ def assert_equal(
167
+ actual: object,
168
+ desired: object,
169
+ err_msg: str = ...,
170
+ verbose: bool = ...,
171
+ ) -> None: ...
172
+
173
+ def print_assert_equal(
174
+ test_string: str,
175
+ actual: object,
176
+ desired: object,
177
+ ) -> None: ...
178
+
179
+ def assert_almost_equal(
180
+ actual: _ArrayLikeNumber_co | _ArrayLikeObject_co,
181
+ desired: _ArrayLikeNumber_co | _ArrayLikeObject_co,
182
+ decimal: int = ...,
183
+ err_msg: str = ...,
184
+ verbose: bool = ...,
185
+ ) -> None: ...
186
+
187
+ # Anything that can be coerced into `builtins.float`
188
+ def assert_approx_equal(
189
+ actual: _FloatValue,
190
+ desired: _FloatValue,
191
+ significant: int = ...,
192
+ err_msg: str = ...,
193
+ verbose: bool = ...,
194
+ ) -> None: ...
195
+
196
+ def assert_array_compare(
197
+ comparison: _ComparisonFunc,
198
+ x: ArrayLike,
199
+ y: ArrayLike,
200
+ err_msg: str = ...,
201
+ verbose: bool = ...,
202
+ header: str = ...,
203
+ precision: SupportsIndex = ...,
204
+ equal_nan: bool = ...,
205
+ equal_inf: bool = ...,
206
+ *,
207
+ strict: bool = ...
208
+ ) -> None: ...
209
+
210
+ def assert_array_equal(
211
+ x: ArrayLike,
212
+ y: ArrayLike,
213
+ err_msg: str = ...,
214
+ verbose: bool = ...,
215
+ *,
216
+ strict: bool = ...
217
+ ) -> None: ...
218
+
219
+ def assert_array_almost_equal(
220
+ x: _ArrayLikeNumber_co | _ArrayLikeObject_co,
221
+ y: _ArrayLikeNumber_co | _ArrayLikeObject_co,
222
+ decimal: float = ...,
223
+ err_msg: str = ...,
224
+ verbose: bool = ...,
225
+ ) -> None: ...
226
+
227
+ @overload
228
+ def assert_array_less(
229
+ x: _ArrayLikeNumber_co | _ArrayLikeObject_co,
230
+ y: _ArrayLikeNumber_co | _ArrayLikeObject_co,
231
+ err_msg: str = ...,
232
+ verbose: bool = ...,
233
+ ) -> None: ...
234
+ @overload
235
+ def assert_array_less(
236
+ x: _ArrayLikeTD64_co,
237
+ y: _ArrayLikeTD64_co,
238
+ err_msg: str = ...,
239
+ verbose: bool = ...,
240
+ ) -> None: ...
241
+ @overload
242
+ def assert_array_less(
243
+ x: _ArrayLikeDT64_co,
244
+ y: _ArrayLikeDT64_co,
245
+ err_msg: str = ...,
246
+ verbose: bool = ...,
247
+ ) -> None: ...
248
+
249
+ def runstring(
250
+ astr: str | bytes | types.CodeType,
251
+ dict: None | dict[str, Any],
252
+ ) -> Any: ...
253
+
254
+ def assert_string_equal(actual: str, desired: str) -> None: ...
255
+
256
+ def rundocs(
257
+ filename: None | str | os.PathLike[str] = ...,
258
+ raise_on_error: bool = ...,
259
+ ) -> None: ...
260
+
261
+ def raises(*args: type[BaseException]) -> Callable[[_FT], _FT]: ...
262
+
263
+ @overload
264
+ def assert_raises( # type: ignore
265
+ expected_exception: type[BaseException] | tuple[type[BaseException], ...],
266
+ callable: Callable[_P, Any],
267
+ /,
268
+ *args: _P.args,
269
+ **kwargs: _P.kwargs,
270
+ ) -> None: ...
271
+ @overload
272
+ def assert_raises(
273
+ expected_exception: type[_ET] | tuple[type[_ET], ...],
274
+ *,
275
+ msg: None | str = ...,
276
+ ) -> unittest.case._AssertRaisesContext[_ET]: ...
277
+
278
+ @overload
279
+ def assert_raises_regex(
280
+ expected_exception: type[BaseException] | tuple[type[BaseException], ...],
281
+ expected_regex: str | bytes | Pattern[Any],
282
+ callable: Callable[_P, Any],
283
+ /,
284
+ *args: _P.args,
285
+ **kwargs: _P.kwargs,
286
+ ) -> None: ...
287
+ @overload
288
+ def assert_raises_regex(
289
+ expected_exception: type[_ET] | tuple[type[_ET], ...],
290
+ expected_regex: str | bytes | Pattern[Any],
291
+ *,
292
+ msg: None | str = ...,
293
+ ) -> unittest.case._AssertRaisesContext[_ET]: ...
294
+
295
+ def decorate_methods(
296
+ cls: type[Any],
297
+ decorator: Callable[[Callable[..., Any]], Any],
298
+ testmatch: None | str | bytes | Pattern[Any] = ...,
299
+ ) -> None: ...
300
+
301
+ def measure(
302
+ code_str: str | bytes | ast.mod | ast.AST,
303
+ times: int = ...,
304
+ label: None | str = ...,
305
+ ) -> float: ...
306
+
307
+ @overload
308
+ def assert_allclose(
309
+ actual: _ArrayLikeNumber_co | _ArrayLikeObject_co,
310
+ desired: _ArrayLikeNumber_co | _ArrayLikeObject_co,
311
+ rtol: float = ...,
312
+ atol: float = ...,
313
+ equal_nan: bool = ...,
314
+ err_msg: str = ...,
315
+ verbose: bool = ...,
316
+ ) -> None: ...
317
+ @overload
318
+ def assert_allclose(
319
+ actual: _ArrayLikeTD64_co,
320
+ desired: _ArrayLikeTD64_co,
321
+ rtol: float = ...,
322
+ atol: float = ...,
323
+ equal_nan: bool = ...,
324
+ err_msg: str = ...,
325
+ verbose: bool = ...,
326
+ ) -> None: ...
327
+
328
+ def assert_array_almost_equal_nulp(
329
+ x: _ArrayLikeNumber_co,
330
+ y: _ArrayLikeNumber_co,
331
+ nulp: float = ...,
332
+ ) -> None: ...
333
+
334
+ def assert_array_max_ulp(
335
+ a: _ArrayLikeNumber_co,
336
+ b: _ArrayLikeNumber_co,
337
+ maxulp: float = ...,
338
+ dtype: DTypeLike = ...,
339
+ ) -> NDArray[Any]: ...
340
+
341
+ @overload
342
+ def assert_warns(
343
+ warning_class: type[Warning],
344
+ ) -> contextlib._GeneratorContextManager[None]: ...
345
+ @overload
346
+ def assert_warns(
347
+ warning_class: type[Warning],
348
+ func: Callable[_P, _T],
349
+ /,
350
+ *args: _P.args,
351
+ **kwargs: _P.kwargs,
352
+ ) -> _T: ...
353
+
354
+ @overload
355
+ def assert_no_warnings() -> contextlib._GeneratorContextManager[None]: ...
356
+ @overload
357
+ def assert_no_warnings(
358
+ func: Callable[_P, _T],
359
+ /,
360
+ *args: _P.args,
361
+ **kwargs: _P.kwargs,
362
+ ) -> _T: ...
363
+
364
+ @overload
365
+ def tempdir(
366
+ suffix: None = ...,
367
+ prefix: None = ...,
368
+ dir: None = ...,
369
+ ) -> contextlib._GeneratorContextManager[str]: ...
370
+ @overload
371
+ def tempdir(
372
+ suffix: None | AnyStr = ...,
373
+ prefix: None | AnyStr = ...,
374
+ dir: None | AnyStr | os.PathLike[AnyStr] = ...,
375
+ ) -> contextlib._GeneratorContextManager[AnyStr]: ...
376
+
377
+ @overload
378
+ def temppath(
379
+ suffix: None = ...,
380
+ prefix: None = ...,
381
+ dir: None = ...,
382
+ text: bool = ...,
383
+ ) -> contextlib._GeneratorContextManager[str]: ...
384
+ @overload
385
+ def temppath(
386
+ suffix: None | AnyStr = ...,
387
+ prefix: None | AnyStr = ...,
388
+ dir: None | AnyStr | os.PathLike[AnyStr] = ...,
389
+ text: bool = ...,
390
+ ) -> contextlib._GeneratorContextManager[AnyStr]: ...
391
+
392
+ @overload
393
+ def assert_no_gc_cycles() -> contextlib._GeneratorContextManager[None]: ...
394
+ @overload
395
+ def assert_no_gc_cycles(
396
+ func: Callable[_P, Any],
397
+ /,
398
+ *args: _P.args,
399
+ **kwargs: _P.kwargs,
400
+ ) -> None: ...
401
+
402
+ def break_cycles() -> None: ...
venv/lib/python3.10/site-packages/numpy/testing/overrides.py ADDED
@@ -0,0 +1,83 @@
1
+ """Tools for testing implementations of __array_function__ and ufunc overrides
2
+
3
+
4
+ """
5
+
6
+ from numpy.core.overrides import ARRAY_FUNCTIONS as _array_functions
7
+ from numpy import ufunc as _ufunc
8
+ import numpy.core.umath as _umath
9
+
10
+ def get_overridable_numpy_ufuncs():
11
+ """List all numpy ufuncs overridable via `__array_ufunc__`
12
+
13
+ Parameters
14
+ ----------
15
+ None
16
+
17
+ Returns
18
+ -------
19
+ set
20
+ A set containing all overridable ufuncs in the public numpy API.
21
+ """
22
+ ufuncs = {obj for obj in _umath.__dict__.values()
23
+ if isinstance(obj, _ufunc)}
24
+ return ufuncs
25
+
26
+
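+ # Example (editor's sketch):
+ #
+ #     >>> import numpy as np
+ #     >>> np.add in get_overridable_numpy_ufuncs()
+ #     True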
27
+ def allows_array_ufunc_override(func):
28
+ """Determine if a function can be overridden via `__array_ufunc__`
29
+
30
+ Parameters
31
+ ----------
32
+ func : callable
33
+ Function that may be overridable via `__array_ufunc__`
34
+
35
+ Returns
36
+ -------
37
+ bool
38
+ `True` if `func` is overridable via `__array_ufunc__` and
39
+ `False` otherwise.
40
+
41
+ Notes
42
+ -----
43
+ This function is equivalent to ``isinstance(func, np.ufunc)`` and
44
+ will work correctly for ufuncs defined outside of Numpy.
45
+
46
+ """
47
+ return isinstance(func, _ufunc)
48
+
49
+
50
+ def get_overridable_numpy_array_functions():
51
+ """List all numpy functions overridable via `__array_function__`
52
+
53
+ Parameters
54
+ ----------
55
+ None
56
+
57
+ Returns
58
+ -------
59
+ set
60
+ A set containing all functions in the public numpy API that are
61
+ overridable via `__array_function__`.
62
+
63
+ """
64
+ # 'import numpy' doesn't import recfunctions, so make sure it's imported
65
+ # so the array functions defined there show up in the listing
66
+ from numpy.lib import recfunctions
67
+ return _array_functions.copy()
68
+
69
+ def allows_array_function_override(func):
70
+ """Determine if a Numpy function can be overridden via `__array_function__`
71
+
72
+ Parameters
73
+ ----------
74
+ func : callable
75
+ Function that may be overridable via `__array_function__`
76
+
77
+ Returns
78
+ -------
79
+ bool
80
+ `True` if `func` is a function in the Numpy API that is
81
+ overridable via `__array_function__` and `False` otherwise.
82
+ """
83
+ return func in _array_functions
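+
+
+ # Example (editor's sketch):
+ #
+ #     >>> import numpy as np
+ #     >>> allows_array_function_override(np.mean)
+ #     True
+ #     >>> allows_array_function_override(len)     # not part of the numpy API
+ #     False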
venv/lib/python3.10/site-packages/numpy/testing/print_coercion_tables.py ADDED
@@ -0,0 +1,200 @@
1
+ #!/usr/bin/env python3
2
+ """Prints type-coercion tables for the built-in NumPy types
3
+
4
+ """
5
+ import numpy as np
6
+ from collections import namedtuple
7
+
8
+ # Generic object that can be added, but doesn't do anything else
9
+ class GenericObject:
10
+ def __init__(self, v):
11
+ self.v = v
12
+
13
+ def __add__(self, other):
14
+ return self
15
+
16
+ def __radd__(self, other):
17
+ return self
18
+
19
+ dtype = np.dtype('O')
20
+
21
+ def print_cancast_table(ntypes):
22
+ print('X', end=' ')
23
+ for char in ntypes:
24
+ print(char, end=' ')
25
+ print()
26
+ for row in ntypes:
27
+ print(row, end=' ')
28
+ for col in ntypes:
29
+ if np.can_cast(row, col, "equiv"):
30
+ cast = "#"
31
+ elif np.can_cast(row, col, "safe"):
32
+ cast = "="
33
+ elif np.can_cast(row, col, "same_kind"):
34
+ cast = "~"
35
+ elif np.can_cast(row, col, "unsafe"):
36
+ cast = "."
37
+ else:
38
+ cast = " "
39
+ print(cast, end=' ')
40
+ print()
41
+
42
+ def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray, use_promote_types=False):
43
+ print('+', end=' ')
44
+ for char in ntypes:
45
+ print(char, end=' ')
46
+ print()
47
+ for row in ntypes:
48
+ if row == 'O':
49
+ rowtype = GenericObject
50
+ else:
51
+ rowtype = np.obj2sctype(row)
52
+
53
+ print(row, end=' ')
54
+ for col in ntypes:
55
+ if col == 'O':
56
+ coltype = GenericObject
57
+ else:
58
+ coltype = np.obj2sctype(col)
59
+ try:
60
+ if firstarray:
61
+ rowvalue = np.array([rowtype(inputfirstvalue)], dtype=rowtype)
62
+ else:
63
+ rowvalue = rowtype(inputfirstvalue)
64
+ colvalue = coltype(inputsecondvalue)
65
+ if use_promote_types:
66
+ char = np.promote_types(rowvalue.dtype, colvalue.dtype).char
67
+ else:
68
+ value = np.add(rowvalue, colvalue)
69
+ if isinstance(value, np.ndarray):
70
+ char = value.dtype.char
71
+ else:
72
+ char = np.dtype(type(value)).char
73
+ except ValueError:
74
+ char = '!'
75
+ except OverflowError:
76
+ char = '@'
77
+ except TypeError:
78
+ char = '#'
79
+ print(char, end=' ')
80
+ print()
81
+
82
+
83
+ def print_new_cast_table(*, can_cast=True, legacy=False, flags=False):
84
+ """Prints new casts, the values given are default "can-cast" values, not
85
+ actual ones.
86
+ """
87
+ from numpy.core._multiarray_tests import get_all_cast_information
88
+
89
+ cast_table = {
90
+ -1: " ",
91
+ 0: "#", # No cast (classify as equivalent here)
92
+ 1: "#", # equivalent casting
93
+ 2: "=", # safe casting
94
+ 3: "~", # same-kind casting
95
+ 4: ".", # unsafe casting
96
+ }
97
+ flags_table = {
98
+ 0 : "▗", 7: "█",
99
+ 1: "▚", 2: "▐", 4: "▄",
100
+ 3: "▜", 5: "▙",
101
+ 6: "▟",
102
+ }
103
+
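+ # (Editor's note: the integer keys of ``flags_table`` are bit masks filled
+ # in below: bit 0 = requires_pyapi, bit 1 = supports_unaligned, and
+ # bit 2 = no_floatingpoint_errors. Each combination gets its own block
+ # glyph so that all three flags fit in a single table cell.)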
104
+ cast_info = namedtuple("cast_info", ["can_cast", "legacy", "flags"])
105
+ no_cast_info = cast_info(" ", " ", " ")
106
+
107
+ casts = get_all_cast_information()
108
+ table = {}
109
+ dtypes = set()
110
+ for cast in casts:
111
+ dtypes.add(cast["from"])
112
+ dtypes.add(cast["to"])
113
+
114
+ if cast["from"] not in table:
115
+ table[cast["from"]] = {}
116
+ to_dict = table[cast["from"]]
117
+
118
+ can_cast = cast_table[cast["casting"]]
119
+ legacy = "L" if cast["legacy"] else "."
120
+ flags = 0
121
+ if cast["requires_pyapi"]:
122
+ flags |= 1
123
+ if cast["supports_unaligned"]:
124
+ flags |= 2
125
+ if cast["no_floatingpoint_errors"]:
126
+ flags |= 4
127
+
128
+ flags = flags_table[flags]
129
+ to_dict[cast["to"]] = cast_info(can_cast=can_cast, legacy=legacy, flags=flags)
130
+
131
+ # The np.dtype(x.type) is a bit strange, because dtype classes do
132
+ # not expose much yet.
133
+ types = np.typecodes["All"]
134
+ def sorter(x):
135
+ # This is a bit weird hack, to get a table as close as possible to
136
+ # the one printing all typecodes (but expecting user-dtypes).
137
+ dtype = np.dtype(x.type)
138
+ try:
139
+ indx = types.index(dtype.char)
140
+ except ValueError:
141
+ indx = np.inf
142
+ return (indx, dtype.char)
143
+
144
+ dtypes = sorted(dtypes, key=sorter)
145
+
146
+ def print_table(field="can_cast"):
147
+ print('X', end=' ')
148
+ for dt in dtypes:
149
+ print(np.dtype(dt.type).char, end=' ')
150
+ print()
151
+ for from_dt in dtypes:
152
+ print(np.dtype(from_dt.type).char, end=' ')
153
+ row = table.get(from_dt, {})
154
+ for to_dt in dtypes:
155
+ print(getattr(row.get(to_dt, no_cast_info), field), end=' ')
156
+ print()
157
+
158
+ if can_cast:
159
+ # Print the actual table:
160
+ print()
161
+ print("Casting: # is equivalent, = is safe, ~ is same-kind, and . is unsafe")
162
+ print()
163
+ print_table("can_cast")
164
+
165
+ if legacy:
166
+ print()
167
+ print("L denotes a legacy cast . a non-legacy one.")
168
+ print()
169
+ print_table("legacy")
170
+
171
+ if flags:
172
+ print()
173
+ print(f"{flags_table[0]}: no flags, {flags_table[1]}: PyAPI, "
174
+ f"{flags_table[2]}: supports unaligned, {flags_table[4]}: no-float-errors")
175
+ print()
176
+ print_table("flags")
177
+
178
+
179
+ if __name__ == '__main__':
180
+ print("can cast")
181
+ print_cancast_table(np.typecodes['All'])
182
+ print()
183
+ print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
184
+ print()
185
+ print("scalar + scalar")
186
+ print_coercion_table(np.typecodes['All'], 0, 0, False)
187
+ print()
188
+ print("scalar + neg scalar")
189
+ print_coercion_table(np.typecodes['All'], 0, -1, False)
190
+ print()
191
+ print("array + scalar")
192
+ print_coercion_table(np.typecodes['All'], 0, 0, True)
193
+ print()
194
+ print("array + neg scalar")
195
+ print_coercion_table(np.typecodes['All'], 0, -1, True)
196
+ print()
197
+ print("promote_types")
198
+ print_coercion_table(np.typecodes['All'], 0, 0, False, True)
199
+ print("New casting type promotion:")
200
+ print_new_cast_table(can_cast=True, legacy=True, flags=True)
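+
+ # Illustrative sketch (added, not part of the original file): each cell of
+ # the can-cast table corresponds to one np.can_cast query, e.g.:
+ #
+ #     np.can_cast(np.int8, np.int16, "safe")      # True, printed as "="
+ #     np.can_cast(np.float64, np.int8, "unsafe")  # True, printed as "."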
venv/lib/python3.10/site-packages/numpy/testing/setup.py ADDED
@@ -0,0 +1,21 @@
1
+ #!/usr/bin/env python3
2
+
3
+ def configuration(parent_package='',top_path=None):
4
+ from numpy.distutils.misc_util import Configuration
5
+ config = Configuration('testing', parent_package, top_path)
6
+
7
+ config.add_subpackage('_private')
8
+ config.add_subpackage('tests')
9
+ config.add_data_files('*.pyi')
10
+ config.add_data_files('_private/*.pyi')
11
+ return config
12
+
13
+ if __name__ == '__main__':
14
+ from numpy.distutils.core import setup
15
+ setup(maintainer="NumPy Developers",
16
+ maintainer_email="[email protected]",
17
+ description="NumPy test module",
18
+ url="https://www.numpy.org",
19
+ license="NumPy License (BSD Style)",
20
+ configuration=configuration,
21
+ )
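+
+ # Usage note (added, not part of the original file): as a numpy.distutils
+ # based setup script, this is normally driven by the parent package build,
+ # or run manually as `python setup.py build` from this directory.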
venv/lib/python3.10/site-packages/numpy/testing/tests/__init__.py ADDED
File without changes
venv/lib/python3.10/site-packages/numpy/testing/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (187 Bytes). View file
 
venv/lib/python3.10/site-packages/numpy/testing/tests/__pycache__/test_utils.cpython-310.pyc ADDED
Binary file (53.8 kB). View file
 
venv/lib/python3.10/site-packages/numpy/testing/tests/test_utils.py ADDED
@@ -0,0 +1,1626 @@
1
+ import warnings
2
+ import sys
3
+ import os
4
+ import itertools
5
+ import pytest
6
+ import weakref
7
+
8
+ import numpy as np
9
+ from numpy.testing import (
10
+ assert_equal, assert_array_equal, assert_almost_equal,
11
+ assert_array_almost_equal, assert_array_less, build_err_msg,
12
+ assert_raises, assert_warns, assert_no_warnings, assert_allclose,
13
+ assert_approx_equal, assert_array_almost_equal_nulp, assert_array_max_ulp,
14
+ clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
15
+ tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
16
+ )
17
+
18
+
19
+ class _GenericTest:
20
+
21
+ def _test_equal(self, a, b):
22
+ self._assert_func(a, b)
23
+
24
+ def _test_not_equal(self, a, b):
25
+ with assert_raises(AssertionError):
26
+ self._assert_func(a, b)
27
+
28
+ def test_array_rank1_eq(self):
29
+ """Test two equal array of rank 1 are found equal."""
30
+ a = np.array([1, 2])
31
+ b = np.array([1, 2])
32
+
33
+ self._test_equal(a, b)
34
+
35
+ def test_array_rank1_noteq(self):
36
+ """Test two different array of rank 1 are found not equal."""
37
+ a = np.array([1, 2])
38
+ b = np.array([2, 2])
39
+
40
+ self._test_not_equal(a, b)
41
+
42
+ def test_array_rank2_eq(self):
43
+ """Test two equal array of rank 2 are found equal."""
44
+ a = np.array([[1, 2], [3, 4]])
45
+ b = np.array([[1, 2], [3, 4]])
46
+
47
+ self._test_equal(a, b)
48
+
49
+ def test_array_diffshape(self):
50
+ """Test two arrays with different shapes are found not equal."""
51
+ a = np.array([1, 2])
52
+ b = np.array([[1, 2], [1, 2]])
53
+
54
+ self._test_not_equal(a, b)
55
+
56
+ def test_objarray(self):
57
+ """Test object arrays."""
58
+ a = np.array([1, 1], dtype=object)
59
+ self._test_equal(a, 1)
60
+
61
+ def test_array_likes(self):
62
+ self._test_equal([1, 2, 3], (1, 2, 3))
63
+
64
+
65
+ class TestArrayEqual(_GenericTest):
66
+
67
+ def setup_method(self):
68
+ self._assert_func = assert_array_equal
69
+
70
+ def test_generic_rank1(self):
71
+ """Test rank 1 array for all dtypes."""
72
+ def foo(t):
73
+ a = np.empty(2, t)
74
+ a.fill(1)
75
+ b = a.copy()
76
+ c = a.copy()
77
+ c.fill(0)
78
+ self._test_equal(a, b)
79
+ self._test_not_equal(c, b)
80
+
81
+ # Test numeric types and object
82
+ for t in '?bhilqpBHILQPfdgFDG':
83
+ foo(t)
84
+
85
+ # Test strings
86
+ for t in ['S1', 'U1']:
87
+ foo(t)
88
+
89
+ def test_0_ndim_array(self):
90
+ x = np.array(473963742225900817127911193656584771)
91
+ y = np.array(18535119325151578301457182298393896)
92
+ assert_raises(AssertionError, self._assert_func, x, y)
93
+
94
+ y = x
95
+ self._assert_func(x, y)
96
+
97
+ x = np.array(43)
98
+ y = np.array(10)
99
+ assert_raises(AssertionError, self._assert_func, x, y)
100
+
101
+ y = x
102
+ self._assert_func(x, y)
103
+
104
+ def test_generic_rank3(self):
105
+ """Test rank 3 array for all dtypes."""
106
+ def foo(t):
107
+ a = np.empty((4, 2, 3), t)
108
+ a.fill(1)
109
+ b = a.copy()
110
+ c = a.copy()
111
+ c.fill(0)
112
+ self._test_equal(a, b)
113
+ self._test_not_equal(c, b)
114
+
115
+ # Test numeric types and object
116
+ for t in '?bhilqpBHILQPfdgFDG':
117
+ foo(t)
118
+
119
+ # Test strings
120
+ for t in ['S1', 'U1']:
121
+ foo(t)
122
+
123
+ def test_nan_array(self):
124
+ """Test arrays with nan values in them."""
125
+ a = np.array([1, 2, np.nan])
126
+ b = np.array([1, 2, np.nan])
127
+
128
+ self._test_equal(a, b)
129
+
130
+ c = np.array([1, 2, 3])
131
+ self._test_not_equal(c, b)
132
+
133
+ def test_string_arrays(self):
134
+ """Test two arrays with different shapes are found not equal."""
135
+ a = np.array(['floupi', 'floupa'])
136
+ b = np.array(['floupi', 'floupa'])
137
+
138
+ self._test_equal(a, b)
139
+
140
+ c = np.array(['floupipi', 'floupa'])
141
+
142
+ self._test_not_equal(c, b)
143
+
144
+ def test_recarrays(self):
145
+ """Test record arrays."""
146
+ a = np.empty(2, [('floupi', float), ('floupa', float)])
147
+ a['floupi'] = [1, 2]
148
+ a['floupa'] = [1, 2]
149
+ b = a.copy()
150
+
151
+ self._test_equal(a, b)
152
+
153
+ c = np.empty(2, [('floupipi', float),
154
+ ('floupi', float), ('floupa', float)])
155
+ c['floupipi'] = a['floupi'].copy()
156
+ c['floupa'] = a['floupa'].copy()
157
+
158
+ with pytest.raises(TypeError):
159
+ self._test_not_equal(c, b)
160
+
161
+ def test_masked_nan_inf(self):
162
+ # Regression test for gh-11121
163
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[False, True, False])
164
+ b = np.array([3., np.nan, 6.5])
165
+ self._test_equal(a, b)
166
+ self._test_equal(b, a)
167
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, False, False])
168
+ b = np.array([np.inf, 4., 6.5])
169
+ self._test_equal(a, b)
170
+ self._test_equal(b, a)
171
+
172
+ def test_subclass_that_overrides_eq(self):
173
+ # While we cannot guarantee testing functions will always work for
174
+ # subclasses, the tests should ideally rely only on subclasses having
175
+ # comparison operators, not on them being able to store booleans
176
+ # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
177
+ class MyArray(np.ndarray):
178
+ def __eq__(self, other):
179
+ return bool(np.equal(self, other).all())
180
+
181
+ def __ne__(self, other):
182
+ return not self == other
183
+
184
+ a = np.array([1., 2.]).view(MyArray)
185
+ b = np.array([2., 3.]).view(MyArray)
186
+ assert_(type(a == a) is bool)
187
+ assert_(a == a)
188
+ assert_(a != b)
189
+ self._test_equal(a, a)
190
+ self._test_not_equal(a, b)
191
+ self._test_not_equal(b, a)
192
+
193
+ def test_subclass_that_does_not_implement_npall(self):
194
+ class MyArray(np.ndarray):
195
+ def __array_function__(self, *args, **kwargs):
196
+ return NotImplemented
197
+
198
+ a = np.array([1., 2.]).view(MyArray)
199
+ b = np.array([2., 3.]).view(MyArray)
200
+ with assert_raises(TypeError):
201
+ np.all(a)
202
+ self._test_equal(a, a)
203
+ self._test_not_equal(a, b)
204
+ self._test_not_equal(b, a)
205
+
206
+ def test_suppress_overflow_warnings(self):
207
+ # Based on issue #18992
208
+ with pytest.raises(AssertionError):
209
+ with np.errstate(all="raise"):
210
+ np.testing.assert_array_equal(
211
+ np.array([1, 2, 3], np.float32),
212
+ np.array([1, 1e-40, 3], np.float32))
213
+
214
+ def test_array_vs_scalar_is_equal(self):
215
+ """Test comparing an array with a scalar when all values are equal."""
216
+ a = np.array([1., 1., 1.])
217
+ b = 1.
218
+
219
+ self._test_equal(a, b)
220
+
221
+ def test_array_vs_scalar_not_equal(self):
222
+ """Test comparing an array with a scalar when not all values equal."""
223
+ a = np.array([1., 2., 3.])
224
+ b = 1.
225
+
226
+ self._test_not_equal(a, b)
227
+
228
+ def test_array_vs_scalar_strict(self):
229
+ """Test comparing an array with a scalar with strict option."""
230
+ a = np.array([1., 1., 1.])
231
+ b = 1.
232
+
233
+ with pytest.raises(AssertionError):
234
+ assert_array_equal(a, b, strict=True)
235
+
236
+ def test_array_vs_array_strict(self):
237
+ """Test comparing two arrays with strict option."""
238
+ a = np.array([1., 1., 1.])
239
+ b = np.array([1., 1., 1.])
240
+
241
+ assert_array_equal(a, b, strict=True)
242
+
243
+ def test_array_vs_float_array_strict(self):
244
+ """Test comparing two arrays with strict option."""
245
+ a = np.array([1, 1, 1])
246
+ b = np.array([1., 1., 1.])
247
+
248
+ with pytest.raises(AssertionError):
249
+ assert_array_equal(a, b, strict=True)
250
+
251
+
252
+ class TestBuildErrorMessage:
253
+
254
+ def test_build_err_msg_defaults(self):
255
+ x = np.array([1.00001, 2.00002, 3.00003])
256
+ y = np.array([1.00002, 2.00003, 3.00004])
257
+ err_msg = 'There is a mismatch'
258
+
259
+ a = build_err_msg([x, y], err_msg)
260
+ b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
261
+ '1.00001, 2.00002, 3.00003])\n DESIRED: array([1.00002, '
262
+ '2.00003, 3.00004])')
263
+ assert_equal(a, b)
264
+
265
+ def test_build_err_msg_no_verbose(self):
266
+ x = np.array([1.00001, 2.00002, 3.00003])
267
+ y = np.array([1.00002, 2.00003, 3.00004])
268
+ err_msg = 'There is a mismatch'
269
+
270
+ a = build_err_msg([x, y], err_msg, verbose=False)
271
+ b = '\nItems are not equal: There is a mismatch'
272
+ assert_equal(a, b)
273
+
274
+ def test_build_err_msg_custom_names(self):
275
+ x = np.array([1.00001, 2.00002, 3.00003])
276
+ y = np.array([1.00002, 2.00003, 3.00004])
277
+ err_msg = 'There is a mismatch'
278
+
279
+ a = build_err_msg([x, y], err_msg, names=('FOO', 'BAR'))
280
+ b = ('\nItems are not equal: There is a mismatch\n FOO: array(['
281
+ '1.00001, 2.00002, 3.00003])\n BAR: array([1.00002, 2.00003, '
282
+ '3.00004])')
283
+ assert_equal(a, b)
284
+
285
+ def test_build_err_msg_custom_precision(self):
286
+ x = np.array([1.000000001, 2.00002, 3.00003])
287
+ y = np.array([1.000000002, 2.00003, 3.00004])
288
+ err_msg = 'There is a mismatch'
289
+
290
+ a = build_err_msg([x, y], err_msg, precision=10)
291
+ b = ('\nItems are not equal: There is a mismatch\n ACTUAL: array(['
292
+ '1.000000001, 2.00002 , 3.00003 ])\n DESIRED: array(['
293
+ '1.000000002, 2.00003 , 3.00004 ])')
294
+ assert_equal(a, b)
295
+
296
+
297
+ class TestEqual(TestArrayEqual):
298
+
299
+ def setup_method(self):
300
+ self._assert_func = assert_equal
301
+
302
+ def test_nan_items(self):
303
+ self._assert_func(np.nan, np.nan)
304
+ self._assert_func([np.nan], [np.nan])
305
+ self._test_not_equal(np.nan, [np.nan])
306
+ self._test_not_equal(np.nan, 1)
307
+
308
+ def test_inf_items(self):
309
+ self._assert_func(np.inf, np.inf)
310
+ self._assert_func([np.inf], [np.inf])
311
+ self._test_not_equal(np.inf, [np.inf])
312
+
313
+ def test_datetime(self):
314
+ self._test_equal(
315
+ np.datetime64("2017-01-01", "s"),
316
+ np.datetime64("2017-01-01", "s")
317
+ )
318
+ self._test_equal(
319
+ np.datetime64("2017-01-01", "s"),
320
+ np.datetime64("2017-01-01", "m")
321
+ )
322
+
323
+ # gh-10081
324
+ self._test_not_equal(
325
+ np.datetime64("2017-01-01", "s"),
326
+ np.datetime64("2017-01-02", "s")
327
+ )
328
+ self._test_not_equal(
329
+ np.datetime64("2017-01-01", "s"),
330
+ np.datetime64("2017-01-02", "m")
331
+ )
332
+
333
+ def test_nat_items(self):
334
+ # not a datetime
335
+ nadt_no_unit = np.datetime64("NaT")
336
+ nadt_s = np.datetime64("NaT", "s")
337
+ nadt_d = np.datetime64("NaT", "ns")
338
+ # not a timedelta
339
+ natd_no_unit = np.timedelta64("NaT")
340
+ natd_s = np.timedelta64("NaT", "s")
341
+ natd_d = np.timedelta64("NaT", "ns")
342
+
343
+ dts = [nadt_no_unit, nadt_s, nadt_d]
344
+ tds = [natd_no_unit, natd_s, natd_d]
345
+ for a, b in itertools.product(dts, dts):
346
+ self._assert_func(a, b)
347
+ self._assert_func([a], [b])
348
+ self._test_not_equal([a], b)
349
+
350
+ for a, b in itertools.product(tds, tds):
351
+ self._assert_func(a, b)
352
+ self._assert_func([a], [b])
353
+ self._test_not_equal([a], b)
354
+
355
+ for a, b in itertools.product(tds, dts):
356
+ self._test_not_equal(a, b)
357
+ self._test_not_equal(a, [b])
358
+ self._test_not_equal([a], [b])
359
+ self._test_not_equal([a], np.datetime64("2017-01-01", "s"))
360
+ self._test_not_equal([b], np.datetime64("2017-01-01", "s"))
361
+ self._test_not_equal([a], np.timedelta64(123, "s"))
362
+ self._test_not_equal([b], np.timedelta64(123, "s"))
363
+
364
+ def test_non_numeric(self):
365
+ self._assert_func('ab', 'ab')
366
+ self._test_not_equal('ab', 'abb')
367
+
368
+ def test_complex_item(self):
369
+ self._assert_func(complex(1, 2), complex(1, 2))
370
+ self._assert_func(complex(1, np.nan), complex(1, np.nan))
371
+ self._test_not_equal(complex(1, np.nan), complex(1, 2))
372
+ self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
373
+ self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
374
+
375
+ def test_negative_zero(self):
376
+ self._test_not_equal(np.PZERO, np.NZERO)
377
+
378
+ def test_complex(self):
379
+ x = np.array([complex(1, 2), complex(1, np.nan)])
380
+ y = np.array([complex(1, 2), complex(1, 2)])
381
+ self._assert_func(x, x)
382
+ self._test_not_equal(x, y)
383
+
384
+ def test_object(self):
385
+ #gh-12942
386
+ import datetime
387
+ a = np.array([datetime.datetime(2000, 1, 1),
388
+ datetime.datetime(2000, 1, 2)])
389
+ self._test_not_equal(a, a[::-1])
390
+
391
+
392
+ class TestArrayAlmostEqual(_GenericTest):
393
+
394
+ def setup_method(self):
395
+ self._assert_func = assert_array_almost_equal
396
+
397
+ def test_closeness(self):
398
+ # Note that in the course of time we ended up with
399
+ # `abs(x - y) < 1.5 * 10**(-decimal)`
400
+ # instead of the previously documented
401
+ # `abs(x - y) < 0.5 * 10**(-decimal)`
402
+ # so this check serves to preserve the wrongness.
403
+
404
+ # test scalars
405
+ self._assert_func(1.499999, 0.0, decimal=0)
406
+ assert_raises(AssertionError,
407
+ lambda: self._assert_func(1.5, 0.0, decimal=0))
408
+
409
+ # test arrays
410
+ self._assert_func([1.499999], [0.0], decimal=0)
411
+ assert_raises(AssertionError,
412
+ lambda: self._assert_func([1.5], [0.0], decimal=0))
413
+
414
+ def test_simple(self):
415
+ x = np.array([1234.2222])
416
+ y = np.array([1234.2223])
417
+
418
+ self._assert_func(x, y, decimal=3)
419
+ self._assert_func(x, y, decimal=4)
420
+ assert_raises(AssertionError,
421
+ lambda: self._assert_func(x, y, decimal=5))
422
+
423
+ def test_nan(self):
424
+ anan = np.array([np.nan])
425
+ aone = np.array([1])
426
+ ainf = np.array([np.inf])
427
+ self._assert_func(anan, anan)
428
+ assert_raises(AssertionError,
429
+ lambda: self._assert_func(anan, aone))
430
+ assert_raises(AssertionError,
431
+ lambda: self._assert_func(anan, ainf))
432
+ assert_raises(AssertionError,
433
+ lambda: self._assert_func(ainf, anan))
434
+
435
+ def test_inf(self):
436
+ a = np.array([[1., 2.], [3., 4.]])
437
+ b = a.copy()
438
+ a[0, 0] = np.inf
439
+ assert_raises(AssertionError,
440
+ lambda: self._assert_func(a, b))
441
+ b[0, 0] = -np.inf
442
+ assert_raises(AssertionError,
443
+ lambda: self._assert_func(a, b))
444
+
445
+ def test_subclass(self):
446
+ a = np.array([[1., 2.], [3., 4.]])
447
+ b = np.ma.masked_array([[1., 2.], [0., 4.]],
448
+ [[False, False], [True, False]])
449
+ self._assert_func(a, b)
450
+ self._assert_func(b, a)
451
+ self._assert_func(b, b)
452
+
453
+ # Test fully masked as well (see gh-11123).
454
+ a = np.ma.MaskedArray(3.5, mask=True)
455
+ b = np.array([3., 4., 6.5])
456
+ self._test_equal(a, b)
457
+ self._test_equal(b, a)
458
+ a = np.ma.masked
459
+ b = np.array([3., 4., 6.5])
460
+ self._test_equal(a, b)
461
+ self._test_equal(b, a)
462
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
463
+ b = np.array([1., 2., 3.])
464
+ self._test_equal(a, b)
465
+ self._test_equal(b, a)
466
+ a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
467
+ b = np.array(1.)
468
+ self._test_equal(a, b)
469
+ self._test_equal(b, a)
470
+
471
+ def test_subclass_that_cannot_be_bool(self):
472
+ # While we cannot guarantee testing functions will always work for
473
+ # subclasses, the tests should ideally rely only on subclasses having
474
+ # comparison operators, not on them being able to store booleans
475
+ # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
476
+ class MyArray(np.ndarray):
477
+ def __eq__(self, other):
478
+ return super().__eq__(other).view(np.ndarray)
479
+
480
+ def __lt__(self, other):
481
+ return super().__lt__(other).view(np.ndarray)
482
+
483
+ def all(self, *args, **kwargs):
484
+ raise NotImplementedError
485
+
486
+ a = np.array([1., 2.]).view(MyArray)
487
+ self._assert_func(a, a)
488
+
489
+
490
+ class TestAlmostEqual(_GenericTest):
491
+
492
+ def setup_method(self):
493
+ self._assert_func = assert_almost_equal
494
+
495
+ def test_closeness(self):
496
+ # Note that in the course of time we ended up with
497
+ # `abs(x - y) < 1.5 * 10**(-decimal)`
498
+ # instead of the previously documented
499
+ # `abs(x - y) < 0.5 * 10**(-decimal)`
500
+ # so this check serves to preserve the wrongness.
501
+
502
+ # test scalars
503
+ self._assert_func(1.499999, 0.0, decimal=0)
504
+ assert_raises(AssertionError,
505
+ lambda: self._assert_func(1.5, 0.0, decimal=0))
506
+
507
+ # test arrays
508
+ self._assert_func([1.499999], [0.0], decimal=0)
509
+ assert_raises(AssertionError,
510
+ lambda: self._assert_func([1.5], [0.0], decimal=0))
511
+
512
+ def test_nan_item(self):
513
+ self._assert_func(np.nan, np.nan)
514
+ assert_raises(AssertionError,
515
+ lambda: self._assert_func(np.nan, 1))
516
+ assert_raises(AssertionError,
517
+ lambda: self._assert_func(np.nan, np.inf))
518
+ assert_raises(AssertionError,
519
+ lambda: self._assert_func(np.inf, np.nan))
520
+
521
+ def test_inf_item(self):
522
+ self._assert_func(np.inf, np.inf)
523
+ self._assert_func(-np.inf, -np.inf)
524
+ assert_raises(AssertionError,
525
+ lambda: self._assert_func(np.inf, 1))
526
+ assert_raises(AssertionError,
527
+ lambda: self._assert_func(-np.inf, np.inf))
528
+
529
+ def test_simple_item(self):
530
+ self._test_not_equal(1, 2)
531
+
532
+ def test_complex_item(self):
533
+ self._assert_func(complex(1, 2), complex(1, 2))
534
+ self._assert_func(complex(1, np.nan), complex(1, np.nan))
535
+ self._assert_func(complex(np.inf, np.nan), complex(np.inf, np.nan))
536
+ self._test_not_equal(complex(1, np.nan), complex(1, 2))
537
+ self._test_not_equal(complex(np.nan, 1), complex(1, np.nan))
538
+ self._test_not_equal(complex(np.nan, np.inf), complex(np.nan, 2))
539
+
540
+ def test_complex(self):
541
+ x = np.array([complex(1, 2), complex(1, np.nan)])
542
+ z = np.array([complex(1, 2), complex(np.nan, 1)])
543
+ y = np.array([complex(1, 2), complex(1, 2)])
544
+ self._assert_func(x, x)
545
+ self._test_not_equal(x, y)
546
+ self._test_not_equal(x, z)
547
+
548
+ def test_error_message(self):
549
+ """Check the message is formatted correctly for the decimal value.
550
+ Also check the message when input includes inf or nan (gh12200)"""
551
+ x = np.array([1.00000000001, 2.00000000002, 3.00003])
552
+ y = np.array([1.00000000002, 2.00000000003, 3.00004])
553
+
554
+ # Test with a different amount of decimal digits
555
+ with pytest.raises(AssertionError) as exc_info:
556
+ self._assert_func(x, y, decimal=12)
557
+ msgs = str(exc_info.value).split('\n')
558
+ assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)')
559
+ assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
560
+ assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
561
+ assert_equal(
562
+ msgs[6],
563
+ ' x: array([1.00000000001, 2.00000000002, 3.00003 ])')
564
+ assert_equal(
565
+ msgs[7],
566
+ ' y: array([1.00000000002, 2.00000000003, 3.00004 ])')
567
+
568
+ # With the default value of decimal digits, only the 3rd element
569
+ # differs. Note that we only check for the formatting of the arrays
570
+ # themselves.
571
+ with pytest.raises(AssertionError) as exc_info:
572
+ self._assert_func(x, y)
573
+ msgs = str(exc_info.value).split('\n')
574
+ assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)')
575
+ assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
576
+ assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
577
+ assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])')
578
+ assert_equal(msgs[7], ' y: array([1. , 2. , 3.00004])')
579
+
580
+ # Check the error message when input includes inf
581
+ x = np.array([np.inf, 0])
582
+ y = np.array([np.inf, 1])
583
+ with pytest.raises(AssertionError) as exc_info:
584
+ self._assert_func(x, y)
585
+ msgs = str(exc_info.value).split('\n')
586
+ assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)')
587
+ assert_equal(msgs[4], 'Max absolute difference: 1.')
588
+ assert_equal(msgs[5], 'Max relative difference: 1.')
589
+ assert_equal(msgs[6], ' x: array([inf, 0.])')
590
+ assert_equal(msgs[7], ' y: array([inf, 1.])')
591
+
592
+ # Check the error message when dividing by zero
593
+ x = np.array([1, 2])
594
+ y = np.array([0, 0])
595
+ with pytest.raises(AssertionError) as exc_info:
596
+ self._assert_func(x, y)
597
+ msgs = str(exc_info.value).split('\n')
598
+ assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)')
599
+ assert_equal(msgs[4], 'Max absolute difference: 2')
600
+ assert_equal(msgs[5], 'Max relative difference: inf')
601
+
602
+ def test_error_message_2(self):
603
+ """Check the message is formatted correctly when either x or y is a scalar."""
604
+ x = 2
605
+ y = np.ones(20)
606
+ with pytest.raises(AssertionError) as exc_info:
607
+ self._assert_func(x, y)
608
+ msgs = str(exc_info.value).split('\n')
609
+ assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
610
+ assert_equal(msgs[4], 'Max absolute difference: 1.')
611
+ assert_equal(msgs[5], 'Max relative difference: 1.')
612
+
613
+ y = 2
614
+ x = np.ones(20)
615
+ with pytest.raises(AssertionError) as exc_info:
616
+ self._assert_func(x, y)
617
+ msgs = str(exc_info.value).split('\n')
618
+ assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
619
+ assert_equal(msgs[4], 'Max absolute difference: 1.')
620
+ assert_equal(msgs[5], 'Max relative difference: 0.5')
621
+
622
+ def test_subclass_that_cannot_be_bool(self):
623
+ # While we cannot guarantee testing functions will always work for
624
+ # subclasses, the tests should ideally rely only on subclasses having
625
+ # comparison operators, not on them being able to store booleans
626
+ # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
627
+ class MyArray(np.ndarray):
628
+ def __eq__(self, other):
629
+ return super().__eq__(other).view(np.ndarray)
630
+
631
+ def __lt__(self, other):
632
+ return super().__lt__(other).view(np.ndarray)
633
+
634
+ def all(self, *args, **kwargs):
635
+ raise NotImplementedError
636
+
637
+ a = np.array([1., 2.]).view(MyArray)
638
+ self._assert_func(a, a)
639
+
640
+
641
+ class TestApproxEqual:
642
+
643
+ def setup_method(self):
644
+ self._assert_func = assert_approx_equal
645
+
646
+ def test_simple_0d_arrays(self):
647
+ x = np.array(1234.22)
648
+ y = np.array(1234.23)
649
+
650
+ self._assert_func(x, y, significant=5)
651
+ self._assert_func(x, y, significant=6)
652
+ assert_raises(AssertionError,
653
+ lambda: self._assert_func(x, y, significant=7))
654
+
655
+ def test_simple_items(self):
656
+ x = 1234.22
657
+ y = 1234.23
658
+
659
+ self._assert_func(x, y, significant=4)
660
+ self._assert_func(x, y, significant=5)
661
+ self._assert_func(x, y, significant=6)
662
+ assert_raises(AssertionError,
663
+ lambda: self._assert_func(x, y, significant=7))
664
+
665
+ def test_nan_array(self):
666
+ anan = np.array(np.nan)
667
+ aone = np.array(1)
668
+ ainf = np.array(np.inf)
669
+ self._assert_func(anan, anan)
670
+ assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
671
+ assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
672
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
673
+
674
+ def test_nan_items(self):
675
+ anan = np.array(np.nan)
676
+ aone = np.array(1)
677
+ ainf = np.array(np.inf)
678
+ self._assert_func(anan, anan)
679
+ assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
680
+ assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
681
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
682
+
683
+
684
+ class TestArrayAssertLess:
685
+
686
+ def setup_method(self):
687
+ self._assert_func = assert_array_less
688
+
689
+ def test_simple_arrays(self):
690
+ x = np.array([1.1, 2.2])
691
+ y = np.array([1.2, 2.3])
692
+
693
+ self._assert_func(x, y)
694
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
695
+
696
+ y = np.array([1.0, 2.3])
697
+
698
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
699
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
700
+
701
+ def test_rank2(self):
702
+ x = np.array([[1.1, 2.2], [3.3, 4.4]])
703
+ y = np.array([[1.2, 2.3], [3.4, 4.5]])
704
+
705
+ self._assert_func(x, y)
706
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
707
+
708
+ y = np.array([[1.0, 2.3], [3.4, 4.5]])
709
+
710
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
711
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
712
+
713
+ def test_rank3(self):
714
+ x = np.ones(shape=(2, 2, 2))
715
+ y = np.ones(shape=(2, 2, 2))+1
716
+
717
+ self._assert_func(x, y)
718
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
719
+
720
+ y[0, 0, 0] = 0
721
+
722
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
723
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
724
+
725
+ def test_simple_items(self):
726
+ x = 1.1
727
+ y = 2.2
728
+
729
+ self._assert_func(x, y)
730
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
731
+
732
+ y = np.array([2.2, 3.3])
733
+
734
+ self._assert_func(x, y)
735
+ assert_raises(AssertionError, lambda: self._assert_func(y, x))
736
+
737
+ y = np.array([1.0, 3.3])
738
+
739
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
740
+
741
+ def test_nan_noncompare(self):
742
+ anan = np.array(np.nan)
743
+ aone = np.array(1)
744
+ ainf = np.array(np.inf)
745
+ self._assert_func(anan, anan)
746
+ assert_raises(AssertionError, lambda: self._assert_func(aone, anan))
747
+ assert_raises(AssertionError, lambda: self._assert_func(anan, aone))
748
+ assert_raises(AssertionError, lambda: self._assert_func(anan, ainf))
749
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, anan))
750
+
751
+ def test_nan_noncompare_array(self):
752
+ x = np.array([1.1, 2.2, 3.3])
753
+ anan = np.array(np.nan)
754
+
755
+ assert_raises(AssertionError, lambda: self._assert_func(x, anan))
756
+ assert_raises(AssertionError, lambda: self._assert_func(anan, x))
757
+
758
+ x = np.array([1.1, 2.2, np.nan])
759
+
760
+ assert_raises(AssertionError, lambda: self._assert_func(x, anan))
761
+ assert_raises(AssertionError, lambda: self._assert_func(anan, x))
762
+
763
+ y = np.array([1.0, 2.0, np.nan])
764
+
765
+ self._assert_func(y, x)
766
+ assert_raises(AssertionError, lambda: self._assert_func(x, y))
767
+
768
+ def test_inf_compare(self):
769
+ aone = np.array(1)
770
+ ainf = np.array(np.inf)
771
+
772
+ self._assert_func(aone, ainf)
773
+ self._assert_func(-ainf, aone)
774
+ self._assert_func(-ainf, ainf)
775
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, aone))
776
+ assert_raises(AssertionError, lambda: self._assert_func(aone, -ainf))
777
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, ainf))
778
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, -ainf))
779
+ assert_raises(AssertionError, lambda: self._assert_func(-ainf, -ainf))
780
+
781
+ def test_inf_compare_array(self):
782
+ x = np.array([1.1, 2.2, np.inf])
783
+ ainf = np.array(np.inf)
784
+
785
+ assert_raises(AssertionError, lambda: self._assert_func(x, ainf))
786
+ assert_raises(AssertionError, lambda: self._assert_func(ainf, x))
787
+ assert_raises(AssertionError, lambda: self._assert_func(x, -ainf))
788
+ assert_raises(AssertionError, lambda: self._assert_func(-x, -ainf))
789
+ assert_raises(AssertionError, lambda: self._assert_func(-ainf, -x))
790
+ self._assert_func(-ainf, x)
791
+
792
+
793
+ class TestWarns:
794
+
795
+ def test_warn(self):
796
+ def f():
797
+ warnings.warn("yo")
798
+ return 3
799
+
800
+ before_filters = sys.modules['warnings'].filters[:]
801
+ assert_equal(assert_warns(UserWarning, f), 3)
802
+ after_filters = sys.modules['warnings'].filters
803
+
804
+ assert_raises(AssertionError, assert_no_warnings, f)
805
+ assert_equal(assert_no_warnings(lambda x: x, 1), 1)
806
+
807
+ # Check that the warnings state is unchanged
808
+ assert_equal(before_filters, after_filters,
809
+ "assert_warns does not preserver warnings state")
810
+
811
+ def test_context_manager(self):
812
+
813
+ before_filters = sys.modules['warnings'].filters[:]
814
+ with assert_warns(UserWarning):
815
+ warnings.warn("yo")
816
+ after_filters = sys.modules['warnings'].filters
817
+
818
+ def no_warnings():
819
+ with assert_no_warnings():
820
+ warnings.warn("yo")
821
+
822
+ assert_raises(AssertionError, no_warnings)
823
+ assert_equal(before_filters, after_filters,
824
+ "assert_warns does not preserver warnings state")
825
+
826
+ def test_warn_wrong_warning(self):
827
+ def f():
828
+ warnings.warn("yo", DeprecationWarning)
829
+
830
+ failed = False
831
+ with warnings.catch_warnings():
832
+ warnings.simplefilter("error", DeprecationWarning)
833
+ try:
834
+ # Should raise a DeprecationWarning
835
+ assert_warns(UserWarning, f)
836
+ failed = True
837
+ except DeprecationWarning:
838
+ pass
839
+
840
+ if failed:
841
+ raise AssertionError("wrong warning caught by assert_warn")
842
+
843
+
844
+ class TestAssertAllclose:
845
+
846
+ def test_simple(self):
847
+ x = 1e-3
848
+ y = 1e-9
849
+
850
+ assert_allclose(x, y, atol=1)
851
+ assert_raises(AssertionError, assert_allclose, x, y)
852
+
853
+ a = np.array([x, y, x, y])
854
+ b = np.array([x, y, x, x])
855
+
856
+ assert_allclose(a, b, atol=1)
857
+ assert_raises(AssertionError, assert_allclose, a, b)
858
+
859
+ b[-1] = y * (1 + 1e-8)
860
+ assert_allclose(a, b)
861
+ assert_raises(AssertionError, assert_allclose, a, b, rtol=1e-9)
862
+
863
+ assert_allclose(6, 10, rtol=0.5)
864
+ assert_raises(AssertionError, assert_allclose, 10, 6, rtol=0.5)
865
+
866
+ def test_min_int(self):
867
+ a = np.array([np.iinfo(np.int_).min], dtype=np.int_)
868
+ # Should not raise:
869
+ assert_allclose(a, a)
870
+
871
+ def test_report_fail_percentage(self):
872
+ a = np.array([1, 1, 1, 1])
873
+ b = np.array([1, 1, 1, 2])
874
+
875
+ with pytest.raises(AssertionError) as exc_info:
876
+ assert_allclose(a, b)
877
+ msg = str(exc_info.value)
878
+ assert_('Mismatched elements: 1 / 4 (25%)\n'
879
+ 'Max absolute difference: 1\n'
880
+ 'Max relative difference: 0.5' in msg)
881
+
882
+ def test_equal_nan(self):
883
+ a = np.array([np.nan])
884
+ b = np.array([np.nan])
885
+ # Should not raise:
886
+ assert_allclose(a, b, equal_nan=True)
887
+
888
+ def test_not_equal_nan(self):
889
+ a = np.array([np.nan])
890
+ b = np.array([np.nan])
891
+ assert_raises(AssertionError, assert_allclose, a, b, equal_nan=False)
892
+
893
+ def test_equal_nan_default(self):
894
+ # Make sure equal_nan default behavior remains unchanged. (All
895
+ # of these functions use assert_array_compare under the hood.)
896
+ # None of these should raise.
897
+ a = np.array([np.nan])
898
+ b = np.array([np.nan])
899
+ assert_array_equal(a, b)
900
+ assert_array_almost_equal(a, b)
901
+ assert_array_less(a, b)
902
+ assert_allclose(a, b)
903
+
904
+ def test_report_max_relative_error(self):
905
+ a = np.array([0, 1])
906
+ b = np.array([0, 2])
907
+
908
+ with pytest.raises(AssertionError) as exc_info:
909
+ assert_allclose(a, b)
910
+ msg = str(exc_info.value)
911
+ assert_('Max relative difference: 0.5' in msg)
912
+
913
+ def test_timedelta(self):
914
+ # see gh-18286
915
+ a = np.array([[1, 2, 3, "NaT"]], dtype="m8[ns]")
916
+ assert_allclose(a, a)
917
+
918
+ def test_error_message_unsigned(self):
919
+ """Check the the message is formatted correctly when overflow can occur
920
+ (gh21768)"""
921
+ # Ensure to test for potential overflow in the case of:
922
+ # x - y
923
+ # and
924
+ # y - x
925
+ x = np.asarray([0, 1, 8], dtype='uint8')
926
+ y = np.asarray([4, 4, 4], dtype='uint8')
927
+ with pytest.raises(AssertionError) as exc_info:
928
+ assert_allclose(x, y, atol=3)
929
+ msgs = str(exc_info.value).split('\n')
930
+ assert_equal(msgs[4], 'Max absolute difference: 4')
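+ # Worked arithmetic (added illustration): with uint8, a naive x - y
+ # wraps around (0 - 4 == 252 modulo 256), which is why both x - y and
+ # y - x above matter; the true maximum difference is 4.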
931
+
932
+
933
+ class TestArrayAlmostEqualNulp:
934
+
935
+ def test_float64_pass(self):
936
+ # The number of units of least precision
937
+ # In this case, use a few places above the lowest level (i.e. nulp=1)
938
+ nulp = 5
939
+ x = np.linspace(-20, 20, 50, dtype=np.float64)
940
+ x = 10**x
941
+ x = np.r_[-x, x]
942
+
943
+ # Addition
944
+ eps = np.finfo(x.dtype).eps
945
+ y = x + x*eps*nulp/2.
946
+ assert_array_almost_equal_nulp(x, y, nulp)
947
+
948
+ # Subtraction
949
+ epsneg = np.finfo(x.dtype).epsneg
950
+ y = x - x*epsneg*nulp/2.
951
+ assert_array_almost_equal_nulp(x, y, nulp)
952
+
953
+ def test_float64_fail(self):
954
+ nulp = 5
955
+ x = np.linspace(-20, 20, 50, dtype=np.float64)
956
+ x = 10**x
957
+ x = np.r_[-x, x]
958
+
959
+ eps = np.finfo(x.dtype).eps
960
+ y = x + x*eps*nulp*2.
961
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
962
+ x, y, nulp)
963
+
964
+ epsneg = np.finfo(x.dtype).epsneg
965
+ y = x - x*epsneg*nulp*2.
966
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
967
+ x, y, nulp)
968
+
969
+ def test_float64_ignore_nan(self):
970
+ # Ignore ULP differences between various NAN's
971
+ # Note that MIPS may reverse quiet and signaling nans
972
+ # so we use the builtin version as a base.
973
+ offset = np.uint64(0xffffffff)
974
+ nan1_i64 = np.array(np.nan, dtype=np.float64).view(np.uint64)
975
+ nan2_i64 = nan1_i64 ^ offset # nan payload on MIPS is all ones.
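+ # (Added note: XOR-ing the low 32 payload bits yields a different NaN
+ # bit pattern; both values still compare as NaN, so they are treated
+ # as 0 ULP apart.)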
976
+ nan1_f64 = nan1_i64.view(np.float64)
977
+ nan2_f64 = nan2_i64.view(np.float64)
978
+ assert_array_max_ulp(nan1_f64, nan2_f64, 0)
979
+
980
+ def test_float32_pass(self):
981
+ nulp = 5
982
+ x = np.linspace(-20, 20, 50, dtype=np.float32)
983
+ x = 10**x
984
+ x = np.r_[-x, x]
985
+
986
+ eps = np.finfo(x.dtype).eps
987
+ y = x + x*eps*nulp/2.
988
+ assert_array_almost_equal_nulp(x, y, nulp)
989
+
990
+ epsneg = np.finfo(x.dtype).epsneg
991
+ y = x - x*epsneg*nulp/2.
992
+ assert_array_almost_equal_nulp(x, y, nulp)
993
+
994
+ def test_float32_fail(self):
995
+ nulp = 5
996
+ x = np.linspace(-20, 20, 50, dtype=np.float32)
997
+ x = 10**x
998
+ x = np.r_[-x, x]
999
+
1000
+ eps = np.finfo(x.dtype).eps
1001
+ y = x + x*eps*nulp*2.
1002
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1003
+ x, y, nulp)
1004
+
1005
+ epsneg = np.finfo(x.dtype).epsneg
1006
+ y = x - x*epsneg*nulp*2.
1007
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1008
+ x, y, nulp)
1009
+
1010
+ def test_float32_ignore_nan(self):
1011
+ # Ignore ULP differences between various NAN's
1012
+ # Note that MIPS may reverse quiet and signaling nans
1013
+ # so we use the builtin version as a base.
1014
+ offset = np.uint32(0xffff)
1015
+ nan1_i32 = np.array(np.nan, dtype=np.float32).view(np.uint32)
1016
+ nan2_i32 = nan1_i32 ^ offset # nan payload on MIPS is all ones.
1017
+ nan1_f32 = nan1_i32.view(np.float32)
1018
+ nan2_f32 = nan2_i32.view(np.float32)
1019
+ assert_array_max_ulp(nan1_f32, nan2_f32, 0)
1020
+
1021
+ def test_float16_pass(self):
1022
+ nulp = 5
1023
+ x = np.linspace(-4, 4, 10, dtype=np.float16)
1024
+ x = 10**x
1025
+ x = np.r_[-x, x]
1026
+
1027
+ eps = np.finfo(x.dtype).eps
1028
+ y = x + x*eps*nulp/2.
1029
+ assert_array_almost_equal_nulp(x, y, nulp)
1030
+
1031
+ epsneg = np.finfo(x.dtype).epsneg
1032
+ y = x - x*epsneg*nulp/2.
1033
+ assert_array_almost_equal_nulp(x, y, nulp)
1034
+
1035
+ def test_float16_fail(self):
1036
+ nulp = 5
1037
+ x = np.linspace(-4, 4, 10, dtype=np.float16)
1038
+ x = 10**x
1039
+ x = np.r_[-x, x]
1040
+
1041
+ eps = np.finfo(x.dtype).eps
1042
+ y = x + x*eps*nulp*2.
1043
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1044
+ x, y, nulp)
1045
+
1046
+ epsneg = np.finfo(x.dtype).epsneg
1047
+ y = x - x*epsneg*nulp*2.
1048
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1049
+ x, y, nulp)
1050
+
1051
+ def test_float16_ignore_nan(self):
1052
+ # Ignore ULP differences between various NAN's
1053
+ # Note that MIPS may reverse quiet and signaling nans
1054
+ # so we use the builtin version as a base.
1055
+ offset = np.uint16(0xff)
1056
+ nan1_i16 = np.array(np.nan, dtype=np.float16).view(np.uint16)
1057
+ nan2_i16 = nan1_i16 ^ offset # nan payload on MIPS is all ones.
1058
+ nan1_f16 = nan1_i16.view(np.float16)
1059
+ nan2_f16 = nan2_i16.view(np.float16)
1060
+ assert_array_max_ulp(nan1_f16, nan2_f16, 0)
1061
+
1062
+ def test_complex128_pass(self):
1063
+ nulp = 5
1064
+ x = np.linspace(-20, 20, 50, dtype=np.float64)
1065
+ x = 10**x
1066
+ x = np.r_[-x, x]
1067
+ xi = x + x*1j
1068
+
1069
+ eps = np.finfo(x.dtype).eps
1070
+ y = x + x*eps*nulp/2.
1071
+ assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
1072
+ assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
1073
+ # The test condition needs to be at least a factor of sqrt(2) smaller
1074
+ # because the real and imaginary parts both change
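+ # (Added derivation: if both parts move by d, the complex value moves
+ # by |d| * sqrt(2); using nulp/4 leaves a comfortable margin below the
+ # nulp/2 bound used above.)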
1075
+ y = x + x*eps*nulp/4.
1076
+ assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
1077
+
1078
+ epsneg = np.finfo(x.dtype).epsneg
1079
+ y = x - x*epsneg*nulp/2.
1080
+ assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
1081
+ assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
1082
+ y = x - x*epsneg*nulp/4.
1083
+ assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
1084
+
1085
+ def test_complex128_fail(self):
1086
+ nulp = 5
1087
+ x = np.linspace(-20, 20, 50, dtype=np.float64)
1088
+ x = 10**x
1089
+ x = np.r_[-x, x]
1090
+ xi = x + x*1j
1091
+
1092
+ eps = np.finfo(x.dtype).eps
1093
+ y = x + x*eps*nulp*2.
1094
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1095
+ xi, x + y*1j, nulp)
1096
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1097
+ xi, y + x*1j, nulp)
1098
+ # The test condition needs to be at least a factor of sqrt(2) smaller
1099
+ # because the real and imaginary parts both change
1100
+ y = x + x*eps*nulp
1101
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1102
+ xi, y + y*1j, nulp)
1103
+
1104
+ epsneg = np.finfo(x.dtype).epsneg
1105
+ y = x - x*epsneg*nulp*2.
1106
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1107
+ xi, x + y*1j, nulp)
1108
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1109
+ xi, y + x*1j, nulp)
1110
+ y = x - x*epsneg*nulp
1111
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1112
+ xi, y + y*1j, nulp)
1113
+
1114
+ def test_complex64_pass(self):
1115
+ nulp = 5
1116
+ x = np.linspace(-20, 20, 50, dtype=np.float32)
1117
+ x = 10**x
1118
+ x = np.r_[-x, x]
1119
+ xi = x + x*1j
1120
+
1121
+ eps = np.finfo(x.dtype).eps
1122
+ y = x + x*eps*nulp/2.
1123
+ assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
1124
+ assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
1125
+ y = x + x*eps*nulp/4.
1126
+ assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
1127
+
1128
+ epsneg = np.finfo(x.dtype).epsneg
1129
+ y = x - x*epsneg*nulp/2.
1130
+ assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
1131
+ assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
1132
+ y = x - x*epsneg*nulp/4.
1133
+ assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
1134
+
1135
+ def test_complex64_fail(self):
1136
+ nulp = 5
1137
+ x = np.linspace(-20, 20, 50, dtype=np.float32)
1138
+ x = 10**x
1139
+ x = np.r_[-x, x]
1140
+ xi = x + x*1j
1141
+
1142
+ eps = np.finfo(x.dtype).eps
1143
+ y = x + x*eps*nulp*2.
1144
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1145
+ xi, x + y*1j, nulp)
1146
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1147
+ xi, y + x*1j, nulp)
1148
+ y = x + x*eps*nulp
1149
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1150
+ xi, y + y*1j, nulp)
1151
+
1152
+ epsneg = np.finfo(x.dtype).epsneg
1153
+ y = x - x*epsneg*nulp*2.
1154
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1155
+ xi, x + y*1j, nulp)
1156
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1157
+ xi, y + x*1j, nulp)
1158
+ y = x - x*epsneg*nulp
1159
+ assert_raises(AssertionError, assert_array_almost_equal_nulp,
1160
+ xi, y + y*1j, nulp)
1161
+
1162
+
1163
+ class TestULP:
1164
+
1165
+ def test_equal(self):
1166
+ x = np.random.randn(10)
1167
+ assert_array_max_ulp(x, x, maxulp=0)
1168
+
1169
+ def test_single(self):
1170
+ # Generate 1 + small deviation, check that adding eps gives a few ULP
1171
+ x = np.ones(10).astype(np.float32)
1172
+ x += 0.01 * np.random.randn(10).astype(np.float32)
1173
+ eps = np.finfo(np.float32).eps
1174
+ assert_array_max_ulp(x, x+eps, maxulp=20)
1175
+
1176
+ def test_double(self):
1177
+ # Generate 1 + small deviation, check that adding eps gives a few ULP
1178
+ x = np.ones(10).astype(np.float64)
1179
+ x += 0.01 * np.random.randn(10).astype(np.float64)
1180
+ eps = np.finfo(np.float64).eps
1181
+ assert_array_max_ulp(x, x+eps, maxulp=200)
1182
+
1183
+ def test_inf(self):
1184
+ for dt in [np.float32, np.float64]:
1185
+ inf = np.array([np.inf]).astype(dt)
1186
+ big = np.array([np.finfo(dt).max])
1187
+ assert_array_max_ulp(inf, big, maxulp=200)
1188
+
1189
+ def test_nan(self):
1190
+ # Test that nan is 'far' from small, tiny, inf, max and min
1191
+ for dt in [np.float32, np.float64]:
1192
+ if dt == np.float32:
1193
+ maxulp = 1e6
1194
+ else:
1195
+ maxulp = 1e12
1196
+ inf = np.array([np.inf]).astype(dt)
1197
+ nan = np.array([np.nan]).astype(dt)
1198
+ big = np.array([np.finfo(dt).max])
1199
+ tiny = np.array([np.finfo(dt).tiny])
1200
+ zero = np.array([np.PZERO]).astype(dt)
1201
+ nzero = np.array([np.NZERO]).astype(dt)
1202
+ assert_raises(AssertionError,
1203
+ lambda: assert_array_max_ulp(nan, inf,
1204
+ maxulp=maxulp))
1205
+ assert_raises(AssertionError,
1206
+ lambda: assert_array_max_ulp(nan, big,
1207
+ maxulp=maxulp))
1208
+ assert_raises(AssertionError,
1209
+ lambda: assert_array_max_ulp(nan, tiny,
1210
+ maxulp=maxulp))
1211
+ assert_raises(AssertionError,
1212
+ lambda: assert_array_max_ulp(nan, zero,
1213
+ maxulp=maxulp))
1214
+ assert_raises(AssertionError,
1215
+ lambda: assert_array_max_ulp(nan, nzero,
1216
+ maxulp=maxulp))
1217
+
1218
+
1219
+ class TestStringEqual:
1220
+ def test_simple(self):
1221
+ assert_string_equal("hello", "hello")
1222
+ assert_string_equal("hello\nmultiline", "hello\nmultiline")
1223
+
1224
+ with pytest.raises(AssertionError) as exc_info:
1225
+ assert_string_equal("foo\nbar", "hello\nbar")
1226
+ msg = str(exc_info.value)
1227
+ assert_equal(msg, "Differences in strings:\n- foo\n+ hello")
1228
+
1229
+ assert_raises(AssertionError,
1230
+ lambda: assert_string_equal("foo", "hello"))
1231
+
1232
+ def test_regex(self):
1233
+ assert_string_equal("a+*b", "a+*b")
1234
+
1235
+ assert_raises(AssertionError,
1236
+ lambda: assert_string_equal("aaa", "a+b"))
1237
+
1238
+
1239
+ def assert_warn_len_equal(mod, n_in_context):
1240
+ try:
1241
+ mod_warns = mod.__warningregistry__
1242
+ except AttributeError:
1243
+ # the lack of a __warningregistry__
1244
+ # attribute means that no warning has
1245
+ # occurred; this can be triggered in
1246
+ # a parallel test scenario, while in
1247
+ # a serial test scenario an initial
1248
+ # warning (and therefore the attribute)
1249
+ # are always created first
1250
+ mod_warns = {}
1251
+
1252
+ num_warns = len(mod_warns)
1253
+
1254
+ if 'version' in mod_warns:
1255
+ # Python 3 adds a 'version' entry to the registry,
1256
+ # do not count it.
1257
+ num_warns -= 1
1258
+
1259
+ assert_equal(num_warns, n_in_context)
1260
+
1261
+
1262
+ def test_warn_len_equal_call_scenarios():
1263
+ # assert_warn_len_equal is called under
1264
+ # varying circumstances depending on serial
1265
+ # vs. parallel test scenarios; this test
1266
+ # simply aims to probe both code paths and
1267
+ # check that no assertion is uncaught
1268
+
1269
+ # parallel scenario -- no warning issued yet
1270
+ class mod:
1271
+ pass
1272
+
1273
+ mod_inst = mod()
1274
+
1275
+ assert_warn_len_equal(mod=mod_inst,
1276
+ n_in_context=0)
1277
+
1278
+ # serial test scenario -- the __warningregistry__
1279
+ # attribute should be present
1280
+ class mod:
1281
+ def __init__(self):
1282
+ self.__warningregistry__ = {'warning1':1,
1283
+ 'warning2':2}
1284
+
1285
+ mod_inst = mod()
1286
+ assert_warn_len_equal(mod=mod_inst,
1287
+ n_in_context=2)
1288
+
1289
+
1290
+ def _get_fresh_mod():
1291
+ # Get this module, with warning registry empty
1292
+ my_mod = sys.modules[__name__]
1293
+ try:
1294
+ my_mod.__warningregistry__.clear()
1295
+ except AttributeError:
1296
+ # will not have a __warningregistry__ unless warning has been
1297
+ # raised in the module at some point
1298
+ pass
1299
+ return my_mod
1300
+
1301
+
1302
+ def test_clear_and_catch_warnings():
1303
+ # Initial state of module, no warnings
1304
+ my_mod = _get_fresh_mod()
1305
+ assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
1306
+ with clear_and_catch_warnings(modules=[my_mod]):
1307
+ warnings.simplefilter('ignore')
1308
+ warnings.warn('Some warning')
1309
+ assert_equal(my_mod.__warningregistry__, {})
1310
+ # Without specified modules, don't clear warnings during context.
1311
+ # catch_warnings doesn't make an entry for 'ignore'.
1312
+ with clear_and_catch_warnings():
1313
+ warnings.simplefilter('ignore')
1314
+ warnings.warn('Some warning')
1315
+ assert_warn_len_equal(my_mod, 0)
1316
+
1317
+ # Manually adding two warnings to the registry:
1318
+ my_mod.__warningregistry__ = {'warning1': 1,
1319
+ 'warning2': 2}
1320
+
1321
+ # Confirm that specifying module keeps old warning, does not add new
1322
+ with clear_and_catch_warnings(modules=[my_mod]):
1323
+ warnings.simplefilter('ignore')
1324
+ warnings.warn('Another warning')
1325
+ assert_warn_len_equal(my_mod, 2)
1326
+
1327
+ # Another warning; with no module spec, the registry is cleared
1328
+ with clear_and_catch_warnings():
1329
+ warnings.simplefilter('ignore')
1330
+ warnings.warn('Another warning')
1331
+ assert_warn_len_equal(my_mod, 0)
1332
+
1333
+
1334
+ def test_suppress_warnings_module():
1335
+ # Initial state of module, no warnings
1336
+ my_mod = _get_fresh_mod()
1337
+ assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
1338
+
1339
+ def warn_other_module():
1340
+ # Apply along axis is implemented in python; stacklevel=2 means
1341
+ # we end up inside its module, not ours.
1342
+ def warn(arr):
1343
+ warnings.warn("Some warning 2", stacklevel=2)
1344
+ return arr
1345
+ np.apply_along_axis(warn, 0, [0])
1346
+
1347
+ # Test module based warning suppression:
1348
+ assert_warn_len_equal(my_mod, 0)
1349
+ with suppress_warnings() as sup:
1350
+ sup.record(UserWarning)
1351
+ # suppress warning from other module (may have .pyc ending),
1352
+ # if apply_along_axis is moved, had to be changed.
1353
+ sup.filter(module=np.lib.shape_base)
1354
+ warnings.warn("Some warning")
1355
+ warn_other_module()
1356
+ # Check that the suppression did test the file correctly (this module
1357
+ # got filtered)
1358
+ assert_equal(len(sup.log), 1)
1359
+ assert_equal(sup.log[0].message.args[0], "Some warning")
1360
+ assert_warn_len_equal(my_mod, 0)
1361
+ sup = suppress_warnings()
1362
+ # Will have to be changed if apply_along_axis is moved:
1363
+ sup.filter(module=my_mod)
1364
+ with sup:
1365
+ warnings.warn('Some warning')
1366
+ assert_warn_len_equal(my_mod, 0)
1367
+ # And test repeat works:
1368
+ sup.filter(module=my_mod)
1369
+ with sup:
1370
+ warnings.warn('Some warning')
1371
+ assert_warn_len_equal(my_mod, 0)
1372
+
1373
+ # Without specified modules
1374
+ with suppress_warnings():
1375
+ warnings.simplefilter('ignore')
1376
+ warnings.warn('Some warning')
1377
+ assert_warn_len_equal(my_mod, 0)
1378
+
1379
+
1380
+ def test_suppress_warnings_type():
1381
+ # Initial state of module, no warnings
1382
+ my_mod = _get_fresh_mod()
1383
+ assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
1384
+
1385
+ # Test module based warning suppression:
1386
+ with suppress_warnings() as sup:
1387
+ sup.filter(UserWarning)
1388
+ warnings.warn('Some warning')
1389
+ assert_warn_len_equal(my_mod, 0)
1390
+ sup = suppress_warnings()
1391
+ sup.filter(UserWarning)
1392
+ with sup:
1393
+ warnings.warn('Some warning')
1394
+ assert_warn_len_equal(my_mod, 0)
1395
+ # And test repeat works:
1396
+ sup.filter(module=my_mod)
1397
+ with sup:
1398
+ warnings.warn('Some warning')
1399
+ assert_warn_len_equal(my_mod, 0)
1400
+
1401
+ # Without specified modules
1402
+ with suppress_warnings():
1403
+ warnings.simplefilter('ignore')
1404
+ warnings.warn('Some warning')
1405
+ assert_warn_len_equal(my_mod, 0)
1406
+
1407
+
1408
+ def test_suppress_warnings_decorate_no_record():
1409
+ sup = suppress_warnings()
1410
+ sup.filter(UserWarning)
1411
+
1412
+ @sup
1413
+ def warn(category):
1414
+ warnings.warn('Some warning', category)
1415
+
1416
+ with warnings.catch_warnings(record=True) as w:
1417
+ warnings.simplefilter("always")
1418
+ warn(UserWarning) # should be suppressed
1419
+ warn(RuntimeWarning)
1420
+ assert_equal(len(w), 1)
1421
+
1422
+
1423
+ def test_suppress_warnings_record():
1424
+ sup = suppress_warnings()
1425
+ log1 = sup.record()
1426
+
1427
+ with sup:
1428
+ log2 = sup.record(message='Some other warning 2')
1429
+ sup.filter(message='Some warning')
1430
+ warnings.warn('Some warning')
1431
+ warnings.warn('Some other warning')
1432
+ warnings.warn('Some other warning 2')
1433
+
1434
+ assert_equal(len(sup.log), 2)
1435
+ assert_equal(len(log1), 1)
1436
+ assert_equal(len(log2),1)
1437
+ assert_equal(log2[0].message.args[0], 'Some other warning 2')
1438
+
1439
+ # Do it again, with the same context to see if some warnings survived:
1440
+ with sup:
1441
+ log2 = sup.record(message='Some other warning 2')
1442
+ sup.filter(message='Some warning')
1443
+ warnings.warn('Some warning')
1444
+ warnings.warn('Some other warning')
1445
+ warnings.warn('Some other warning 2')
1446
+
1447
+ assert_equal(len(sup.log), 2)
1448
+ assert_equal(len(log1), 1)
1449
+ assert_equal(len(log2), 1)
1450
+ assert_equal(log2[0].message.args[0], 'Some other warning 2')
1451
+
1452
+ # Test nested:
1453
+ with suppress_warnings() as sup:
1454
+ sup.record()
1455
+ with suppress_warnings() as sup2:
1456
+ sup2.record(message='Some warning')
1457
+ warnings.warn('Some warning')
1458
+ warnings.warn('Some other warning')
1459
+ assert_equal(len(sup2.log), 1)
1460
+ assert_equal(len(sup.log), 1)
1461
+
1462
+
1463
+ def test_suppress_warnings_forwarding():
1464
+ def warn_other_module():
1465
+ # Apply along axis is implemented in python; stacklevel=2 means
1466
+ # we end up inside its module, not ours.
1467
+ def warn(arr):
1468
+ warnings.warn("Some warning", stacklevel=2)
1469
+ return arr
1470
+ np.apply_along_axis(warn, 0, [0])
1471
+
1472
+ with suppress_warnings() as sup:
1473
+ sup.record()
1474
+ with suppress_warnings("always"):
1475
+ for i in range(2):
1476
+ warnings.warn("Some warning")
1477
+
1478
+ assert_equal(len(sup.log), 2)
1479
+
1480
+ with suppress_warnings() as sup:
1481
+ sup.record()
1482
+ with suppress_warnings("location"):
1483
+ for i in range(2):
1484
+ warnings.warn("Some warning")
1485
+ warnings.warn("Some warning")
1486
+
1487
+ assert_equal(len(sup.log), 2)
1488
+
1489
+ with suppress_warnings() as sup:
1490
+ sup.record()
1491
+ with suppress_warnings("module"):
1492
+ for i in range(2):
1493
+ warnings.warn("Some warning")
1494
+ warnings.warn("Some warning")
1495
+ warn_other_module()
1496
+
1497
+ assert_equal(len(sup.log), 2)
1498
+
1499
+ with suppress_warnings() as sup:
1500
+ sup.record()
1501
+ with suppress_warnings("once"):
1502
+ for i in range(2):
1503
+ warnings.warn("Some warning")
1504
+ warnings.warn("Some other warning")
1505
+ warn_other_module()
1506
+
1507
+ assert_equal(len(sup.log), 2)
1508
+
1509
+
1510
+ def test_tempdir():
1511
+ with tempdir() as tdir:
1512
+ fpath = os.path.join(tdir, 'tmp')
1513
+ with open(fpath, 'w'):
1514
+ pass
1515
+ assert_(not os.path.isdir(tdir))
1516
+
1517
+ raised = False
1518
+ try:
1519
+ with tempdir() as tdir:
1520
+ raise ValueError()
1521
+ except ValueError:
1522
+ raised = True
1523
+ assert_(raised)
1524
+ assert_(not os.path.isdir(tdir))
1525
+
1526
+
1527
+ def test_temppath():
1528
+ with temppath() as fpath:
1529
+ with open(fpath, 'w'):
1530
+ pass
1531
+ assert_(not os.path.isfile(fpath))
1532
+
1533
+ raised = False
1534
+ try:
1535
+ with temppath() as fpath:
1536
+ raise ValueError()
1537
+ except ValueError:
1538
+ raised = True
1539
+ assert_(raised)
1540
+ assert_(not os.path.isfile(fpath))
1541
+
1542
+
1543
+ class my_cacw(clear_and_catch_warnings):
1544
+
1545
+ class_modules = (sys.modules[__name__],)
1546
+
1547
+
1548
+ def test_clear_and_catch_warnings_inherit():
1549
+ # Test can subclass and add default modules
1550
+ my_mod = _get_fresh_mod()
1551
+ with my_cacw():
1552
+ warnings.simplefilter('ignore')
1553
+ warnings.warn('Some warning')
1554
+ assert_equal(my_mod.__warningregistry__, {})
1555
+
1556
+
1557
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
1558
+ class TestAssertNoGcCycles:
1559
+ """ Test assert_no_gc_cycles """
1560
+ def test_passes(self):
1561
+ def no_cycle():
1562
+ b = []
1563
+ b.append([])
1564
+ return b
1565
+
1566
+ with assert_no_gc_cycles():
1567
+ no_cycle()
1568
+
1569
+ assert_no_gc_cycles(no_cycle)
1570
+
1571
+ def test_asserts(self):
1572
+ def make_cycle():
1573
+ a = []
1574
+ a.append(a)
1575
+ a.append(a)
1576
+ return a
1577
+
1578
+ with assert_raises(AssertionError):
1579
+ with assert_no_gc_cycles():
1580
+ make_cycle()
1581
+
1582
+ with assert_raises(AssertionError):
1583
+ assert_no_gc_cycles(make_cycle)
1584
+
1585
+ @pytest.mark.slow
1586
+ def test_fails(self):
1587
+ """
1588
+ Test that in cases where the garbage cannot be collected, we raise an
1589
+ error, instead of hanging forever trying to clear it.
1590
+ """
1591
+
1592
+ class ReferenceCycleInDel:
1593
+ """
1594
+ An object that not only contains a reference cycle, but creates new
1595
+ cycles whenever it's garbage-collected and its __del__ runs
1596
+ """
1597
+ make_cycle = True
1598
+
1599
+ def __init__(self):
1600
+ self.cycle = self
1601
+
1602
+ def __del__(self):
1603
+ # break the current cycle so that `self` can be freed
1604
+ self.cycle = None
1605
+
1606
+ if ReferenceCycleInDel.make_cycle:
1607
+ # but create a new one so that the garbage collector has more
1608
+ # work to do.
1609
+ ReferenceCycleInDel()
1610
+
1611
+ try:
1612
+ w = weakref.ref(ReferenceCycleInDel())
1613
+ try:
1614
+ with assert_raises(RuntimeError):
1615
+ # this will be unable to get a baseline empty garbage
1616
+ assert_no_gc_cycles(lambda: None)
1617
+ except AssertionError:
1618
+ # the above test is only necessary if the GC actually tried to free
1619
+ # our object anyway, which python 2.7 does not.
1620
+ if w() is not None:
1621
+ pytest.skip("GC does not call __del__ on cyclic objects")
1622
+ raise
1623
+
1624
+ finally:
1625
+ # make sure that we stop creating reference cycles
1626
+ ReferenceCycleInDel.make_cycle = False
venv/lib/python3.10/site-packages/numpy/typing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (5.25 kB).
 
venv/lib/python3.10/site-packages/numpy/typing/__pycache__/mypy_plugin.cpython-310.pyc ADDED
Binary file (6.6 kB).
 
venv/lib/python3.10/site-packages/numpy/typing/__pycache__/setup.cpython-310.pyc ADDED
Binary file (598 Bytes).
 
venv/lib/python3.10/site-packages/numpy/typing/tests/data/misc/extended_precision.pyi ADDED
@@ -0,0 +1,25 @@
+ import sys
+
+ import numpy as np
+ from numpy._typing import _80Bit, _96Bit, _128Bit, _256Bit
+
+ if sys.version_info >= (3, 11):
+     from typing import assert_type
+ else:
+     from typing_extensions import assert_type
+
+ assert_type(np.uint128(), np.unsignedinteger[_128Bit])
+ assert_type(np.uint256(), np.unsignedinteger[_256Bit])
+
+ assert_type(np.int128(), np.signedinteger[_128Bit])
+ assert_type(np.int256(), np.signedinteger[_256Bit])
+
+ assert_type(np.float80(), np.floating[_80Bit])
+ assert_type(np.float96(), np.floating[_96Bit])
+ assert_type(np.float128(), np.floating[_128Bit])
+ assert_type(np.float256(), np.floating[_256Bit])
+
+ assert_type(np.complex160(), np.complexfloating[_80Bit, _80Bit])
+ assert_type(np.complex192(), np.complexfloating[_96Bit, _96Bit])
+ assert_type(np.complex256(), np.complexfloating[_128Bit, _128Bit])
+ assert_type(np.complex512(), np.complexfloating[_256Bit, _256Bit])
venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/arithmetic.pyi ADDED
@@ -0,0 +1,516 @@
+ import sys
+ from typing import Any
+
+ import numpy as np
+ import numpy.typing as npt
+ from numpy._typing import _32Bit, _64Bit, _128Bit
+
+ if sys.version_info >= (3, 11):
+     from typing import assert_type
+ else:
+     from typing_extensions import assert_type
+
+ # Can't directly import `np.float128` as it is not available on all platforms
+ f16: np.floating[_128Bit]
+
+ c16 = np.complex128()
+ f8 = np.float64()
+ i8 = np.int64()
+ u8 = np.uint64()
+
+ c8 = np.complex64()
+ f4 = np.float32()
+ i4 = np.int32()
+ u4 = np.uint32()
+
+ dt = np.datetime64(0, "D")
+ td = np.timedelta64(0, "D")
+
+ b_ = np.bool_()
+
+ b = bool()
+ c = complex()
+ f = float()
+ i = int()
+
+ AR_b: npt.NDArray[np.bool_]
+ AR_u: npt.NDArray[np.uint32]
+ AR_i: npt.NDArray[np.int64]
+ AR_f: npt.NDArray[np.float64]
+ AR_c: npt.NDArray[np.complex128]
+ AR_m: npt.NDArray[np.timedelta64]
+ AR_M: npt.NDArray[np.datetime64]
+ AR_O: npt.NDArray[np.object_]
+ AR_number: npt.NDArray[np.number[Any]]
+
+ AR_LIKE_b: list[bool]
+ AR_LIKE_u: list[np.uint32]
+ AR_LIKE_i: list[int]
+ AR_LIKE_f: list[float]
+ AR_LIKE_c: list[complex]
+ AR_LIKE_m: list[np.timedelta64]
+ AR_LIKE_M: list[np.datetime64]
+ AR_LIKE_O: list[np.object_]
+
+ # Array subtraction
+
+ assert_type(AR_number - AR_number, npt.NDArray[np.number[Any]])
+
+ assert_type(AR_b - AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]])
+ assert_type(AR_b - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_b - AR_LIKE_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_b - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_b - AR_LIKE_m, npt.NDArray[np.timedelta64])
+ assert_type(AR_b - AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_u - AR_b, npt.NDArray[np.unsignedinteger[Any]])
+ assert_type(AR_LIKE_i - AR_b, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_LIKE_f - AR_b, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_c - AR_b, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_LIKE_m - AR_b, npt.NDArray[np.timedelta64])
+ assert_type(AR_LIKE_M - AR_b, npt.NDArray[np.datetime64])
+ assert_type(AR_LIKE_O - AR_b, Any)
+
+ assert_type(AR_u - AR_LIKE_b, npt.NDArray[np.unsignedinteger[Any]])
+ assert_type(AR_u - AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]])
+ assert_type(AR_u - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_u - AR_LIKE_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_u - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_u - AR_LIKE_m, npt.NDArray[np.timedelta64])
+ assert_type(AR_u - AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_b - AR_u, npt.NDArray[np.unsignedinteger[Any]])
+ assert_type(AR_LIKE_u - AR_u, npt.NDArray[np.unsignedinteger[Any]])
+ assert_type(AR_LIKE_i - AR_u, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_LIKE_f - AR_u, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_c - AR_u, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_LIKE_m - AR_u, npt.NDArray[np.timedelta64])
+ assert_type(AR_LIKE_M - AR_u, npt.NDArray[np.datetime64])
+ assert_type(AR_LIKE_O - AR_u, Any)
+
+ assert_type(AR_i - AR_LIKE_b, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_i - AR_LIKE_u, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_i - AR_LIKE_i, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_i - AR_LIKE_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_i - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_i - AR_LIKE_m, npt.NDArray[np.timedelta64])
+ assert_type(AR_i - AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_b - AR_i, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_LIKE_u - AR_i, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_LIKE_i - AR_i, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_LIKE_f - AR_i, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_c - AR_i, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_LIKE_m - AR_i, npt.NDArray[np.timedelta64])
+ assert_type(AR_LIKE_M - AR_i, npt.NDArray[np.datetime64])
+ assert_type(AR_LIKE_O - AR_i, Any)
+
+ assert_type(AR_f - AR_LIKE_b, npt.NDArray[np.floating[Any]])
+ assert_type(AR_f - AR_LIKE_u, npt.NDArray[np.floating[Any]])
+ assert_type(AR_f - AR_LIKE_i, npt.NDArray[np.floating[Any]])
+ assert_type(AR_f - AR_LIKE_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_f - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_f - AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_b - AR_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_u - AR_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_i - AR_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_f - AR_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_c - AR_f, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_LIKE_O - AR_f, Any)
+
+ assert_type(AR_c - AR_LIKE_b, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_c - AR_LIKE_u, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_c - AR_LIKE_i, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_c - AR_LIKE_f, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_c - AR_LIKE_c, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_c - AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_b - AR_c, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_LIKE_u - AR_c, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_LIKE_i - AR_c, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_LIKE_f - AR_c, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_LIKE_c - AR_c, npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(AR_LIKE_O - AR_c, Any)
+
+ assert_type(AR_m - AR_LIKE_b, npt.NDArray[np.timedelta64])
+ assert_type(AR_m - AR_LIKE_u, npt.NDArray[np.timedelta64])
+ assert_type(AR_m - AR_LIKE_i, npt.NDArray[np.timedelta64])
+ assert_type(AR_m - AR_LIKE_m, npt.NDArray[np.timedelta64])
+ assert_type(AR_m - AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_b - AR_m, npt.NDArray[np.timedelta64])
+ assert_type(AR_LIKE_u - AR_m, npt.NDArray[np.timedelta64])
+ assert_type(AR_LIKE_i - AR_m, npt.NDArray[np.timedelta64])
+ assert_type(AR_LIKE_m - AR_m, npt.NDArray[np.timedelta64])
+ assert_type(AR_LIKE_M - AR_m, npt.NDArray[np.datetime64])
+ assert_type(AR_LIKE_O - AR_m, Any)
+
+ assert_type(AR_M - AR_LIKE_b, npt.NDArray[np.datetime64])
+ assert_type(AR_M - AR_LIKE_u, npt.NDArray[np.datetime64])
+ assert_type(AR_M - AR_LIKE_i, npt.NDArray[np.datetime64])
+ assert_type(AR_M - AR_LIKE_m, npt.NDArray[np.datetime64])
+ assert_type(AR_M - AR_LIKE_M, npt.NDArray[np.timedelta64])
+ assert_type(AR_M - AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_M - AR_M, npt.NDArray[np.timedelta64])
+ assert_type(AR_LIKE_O - AR_M, Any)
+
+ assert_type(AR_O - AR_LIKE_b, Any)
+ assert_type(AR_O - AR_LIKE_u, Any)
+ assert_type(AR_O - AR_LIKE_i, Any)
+ assert_type(AR_O - AR_LIKE_f, Any)
+ assert_type(AR_O - AR_LIKE_c, Any)
+ assert_type(AR_O - AR_LIKE_m, Any)
+ assert_type(AR_O - AR_LIKE_M, Any)
+ assert_type(AR_O - AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_b - AR_O, Any)
+ assert_type(AR_LIKE_u - AR_O, Any)
+ assert_type(AR_LIKE_i - AR_O, Any)
+ assert_type(AR_LIKE_f - AR_O, Any)
+ assert_type(AR_LIKE_c - AR_O, Any)
+ assert_type(AR_LIKE_m - AR_O, Any)
+ assert_type(AR_LIKE_M - AR_O, Any)
+ assert_type(AR_LIKE_O - AR_O, Any)
+
+ # Array floor division
+
+ assert_type(AR_b // AR_LIKE_b, npt.NDArray[np.int8])
+ assert_type(AR_b // AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]])
+ assert_type(AR_b // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_b // AR_LIKE_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_b // AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_b // AR_b, npt.NDArray[np.int8])
+ assert_type(AR_LIKE_u // AR_b, npt.NDArray[np.unsignedinteger[Any]])
+ assert_type(AR_LIKE_i // AR_b, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_LIKE_f // AR_b, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_O // AR_b, Any)
+
+ assert_type(AR_u // AR_LIKE_b, npt.NDArray[np.unsignedinteger[Any]])
+ assert_type(AR_u // AR_LIKE_u, npt.NDArray[np.unsignedinteger[Any]])
+ assert_type(AR_u // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_u // AR_LIKE_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_u // AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_b // AR_u, npt.NDArray[np.unsignedinteger[Any]])
+ assert_type(AR_LIKE_u // AR_u, npt.NDArray[np.unsignedinteger[Any]])
+ assert_type(AR_LIKE_i // AR_u, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_LIKE_f // AR_u, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_m // AR_u, npt.NDArray[np.timedelta64])
+ assert_type(AR_LIKE_O // AR_u, Any)
+
+ assert_type(AR_i // AR_LIKE_b, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_i // AR_LIKE_u, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_i // AR_LIKE_i, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_i // AR_LIKE_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_i // AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_b // AR_i, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_LIKE_u // AR_i, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_LIKE_i // AR_i, npt.NDArray[np.signedinteger[Any]])
+ assert_type(AR_LIKE_f // AR_i, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_m // AR_i, npt.NDArray[np.timedelta64])
+ assert_type(AR_LIKE_O // AR_i, Any)
+
+ assert_type(AR_f // AR_LIKE_b, npt.NDArray[np.floating[Any]])
+ assert_type(AR_f // AR_LIKE_u, npt.NDArray[np.floating[Any]])
+ assert_type(AR_f // AR_LIKE_i, npt.NDArray[np.floating[Any]])
+ assert_type(AR_f // AR_LIKE_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_f // AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_b // AR_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_u // AR_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_i // AR_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_f // AR_f, npt.NDArray[np.floating[Any]])
+ assert_type(AR_LIKE_m // AR_f, npt.NDArray[np.timedelta64])
+ assert_type(AR_LIKE_O // AR_f, Any)
+
+ assert_type(AR_m // AR_LIKE_u, npt.NDArray[np.timedelta64])
+ assert_type(AR_m // AR_LIKE_i, npt.NDArray[np.timedelta64])
+ assert_type(AR_m // AR_LIKE_f, npt.NDArray[np.timedelta64])
+ assert_type(AR_m // AR_LIKE_m, npt.NDArray[np.int64])
+ assert_type(AR_m // AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_m // AR_m, npt.NDArray[np.int64])
+ assert_type(AR_LIKE_O // AR_m, Any)
+
+ assert_type(AR_O // AR_LIKE_b, Any)
+ assert_type(AR_O // AR_LIKE_u, Any)
+ assert_type(AR_O // AR_LIKE_i, Any)
+ assert_type(AR_O // AR_LIKE_f, Any)
+ assert_type(AR_O // AR_LIKE_m, Any)
+ assert_type(AR_O // AR_LIKE_M, Any)
+ assert_type(AR_O // AR_LIKE_O, Any)
+
+ assert_type(AR_LIKE_b // AR_O, Any)
+ assert_type(AR_LIKE_u // AR_O, Any)
+ assert_type(AR_LIKE_i // AR_O, Any)
+ assert_type(AR_LIKE_f // AR_O, Any)
+ assert_type(AR_LIKE_m // AR_O, Any)
+ assert_type(AR_LIKE_M // AR_O, Any)
+ assert_type(AR_LIKE_O // AR_O, Any)
+
+ # unary ops
+
+ assert_type(-f16, np.floating[_128Bit])
+ assert_type(-c16, np.complex128)
+ assert_type(-c8, np.complex64)
+ assert_type(-f8, np.float64)
+ assert_type(-f4, np.float32)
+ assert_type(-i8, np.int64)
+ assert_type(-i4, np.int32)
+ assert_type(-u8, np.uint64)
+ assert_type(-u4, np.uint32)
+ assert_type(-td, np.timedelta64)
+ assert_type(-AR_f, npt.NDArray[np.float64])
+
+ assert_type(+f16, np.floating[_128Bit])
+ assert_type(+c16, np.complex128)
+ assert_type(+c8, np.complex64)
+ assert_type(+f8, np.float64)
+ assert_type(+f4, np.float32)
+ assert_type(+i8, np.int64)
+ assert_type(+i4, np.int32)
+ assert_type(+u8, np.uint64)
+ assert_type(+u4, np.uint32)
+ assert_type(+td, np.timedelta64)
+ assert_type(+AR_f, npt.NDArray[np.float64])
+
+ assert_type(abs(f16), np.floating[_128Bit])
+ assert_type(abs(c16), np.float64)
+ assert_type(abs(c8), np.float32)
+ assert_type(abs(f8), np.float64)
+ assert_type(abs(f4), np.float32)
+ assert_type(abs(i8), np.int64)
+ assert_type(abs(i4), np.int32)
+ assert_type(abs(u8), np.uint64)
+ assert_type(abs(u4), np.uint32)
+ assert_type(abs(td), np.timedelta64)
+ assert_type(abs(b_), np.bool_)
+
+ # Time structures
+
+ assert_type(dt + td, np.datetime64)
+ assert_type(dt + i, np.datetime64)
+ assert_type(dt + i4, np.datetime64)
+ assert_type(dt + i8, np.datetime64)
+ assert_type(dt - dt, np.timedelta64)
+ assert_type(dt - i, np.datetime64)
+ assert_type(dt - i4, np.datetime64)
+ assert_type(dt - i8, np.datetime64)
+
+ assert_type(td + td, np.timedelta64)
+ assert_type(td + i, np.timedelta64)
+ assert_type(td + i4, np.timedelta64)
+ assert_type(td + i8, np.timedelta64)
+ assert_type(td - td, np.timedelta64)
+ assert_type(td - i, np.timedelta64)
+ assert_type(td - i4, np.timedelta64)
+ assert_type(td - i8, np.timedelta64)
+ assert_type(td / f, np.timedelta64)
+ assert_type(td / f4, np.timedelta64)
+ assert_type(td / f8, np.timedelta64)
+ assert_type(td / td, np.float64)
+ assert_type(td // td, np.int64)
+
+ # boolean
+
+ assert_type(b_ / b, np.float64)
+ assert_type(b_ / b_, np.float64)
+ assert_type(b_ / i, np.float64)
+ assert_type(b_ / i8, np.float64)
+ assert_type(b_ / i4, np.float64)
+ assert_type(b_ / u8, np.float64)
+ assert_type(b_ / u4, np.float64)
+ assert_type(b_ / f, np.float64)
+ assert_type(b_ / f16, np.floating[_128Bit])
+ assert_type(b_ / f8, np.float64)
+ assert_type(b_ / f4, np.float32)
+ assert_type(b_ / c, np.complex128)
+ assert_type(b_ / c16, np.complex128)
+ assert_type(b_ / c8, np.complex64)
+
+ assert_type(b / b_, np.float64)
+ assert_type(b_ / b_, np.float64)
+ assert_type(i / b_, np.float64)
+ assert_type(i8 / b_, np.float64)
+ assert_type(i4 / b_, np.float64)
+ assert_type(u8 / b_, np.float64)
+ assert_type(u4 / b_, np.float64)
+ assert_type(f / b_, np.float64)
+ assert_type(f16 / b_, np.floating[_128Bit])
+ assert_type(f8 / b_, np.float64)
+ assert_type(f4 / b_, np.float32)
+ assert_type(c / b_, np.complex128)
+ assert_type(c16 / b_, np.complex128)
+ assert_type(c8 / b_, np.complex64)
+
+ # Complex
+
+ assert_type(c16 + f16, np.complexfloating[_64Bit | _128Bit, _64Bit | _128Bit])
+ assert_type(c16 + c16, np.complex128)
+ assert_type(c16 + f8, np.complex128)
+ assert_type(c16 + i8, np.complex128)
+ assert_type(c16 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(c16 + f4, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(c16 + i4, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(c16 + b_, np.complex128)
+ assert_type(c16 + b, np.complex128)
+ assert_type(c16 + c, np.complex128)
+ assert_type(c16 + f, np.complex128)
+ assert_type(c16 + AR_f, npt.NDArray[np.complexfloating[Any, Any]])
+
+ assert_type(f16 + c16, np.complexfloating[_64Bit | _128Bit, _64Bit | _128Bit])
+ assert_type(c16 + c16, np.complex128)
+ assert_type(f8 + c16, np.complex128)
+ assert_type(i8 + c16, np.complex128)
+ assert_type(c8 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(f4 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(i4 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(b_ + c16, np.complex128)
+ assert_type(b + c16, np.complex128)
+ assert_type(c + c16, np.complex128)
+ assert_type(f + c16, np.complex128)
+ assert_type(AR_f + c16, npt.NDArray[np.complexfloating[Any, Any]])
+
+ assert_type(c8 + f16, np.complexfloating[_32Bit | _128Bit, _32Bit | _128Bit])
+ assert_type(c8 + c16, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(c8 + f8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(c8 + i8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(c8 + c8, np.complex64)
+ assert_type(c8 + f4, np.complex64)
+ assert_type(c8 + i4, np.complex64)
+ assert_type(c8 + b_, np.complex64)
+ assert_type(c8 + b, np.complex64)
+ assert_type(c8 + c, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(c8 + f, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(c8 + AR_f, npt.NDArray[np.complexfloating[Any, Any]])
+
+ assert_type(f16 + c8, np.complexfloating[_32Bit | _128Bit, _32Bit | _128Bit])
+ assert_type(c16 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(f8 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(i8 + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(c8 + c8, np.complex64)
+ assert_type(f4 + c8, np.complex64)
+ assert_type(i4 + c8, np.complex64)
+ assert_type(b_ + c8, np.complex64)
+ assert_type(b + c8, np.complex64)
+ assert_type(c + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(f + c8, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(AR_f + c8, npt.NDArray[np.complexfloating[Any, Any]])
+
+ # Float
+
+ assert_type(f8 + f16, np.floating[_64Bit | _128Bit])
+ assert_type(f8 + f8, np.float64)
+ assert_type(f8 + i8, np.float64)
+ assert_type(f8 + f4, np.floating[_32Bit | _64Bit])
+ assert_type(f8 + i4, np.floating[_32Bit | _64Bit])
+ assert_type(f8 + b_, np.float64)
+ assert_type(f8 + b, np.float64)
+ assert_type(f8 + c, np.complex128)
+ assert_type(f8 + f, np.float64)
+ assert_type(f8 + AR_f, npt.NDArray[np.floating[Any]])
+
+ assert_type(f16 + f8, np.floating[_64Bit | _128Bit])
+ assert_type(f8 + f8, np.float64)
+ assert_type(i8 + f8, np.float64)
+ assert_type(f4 + f8, np.floating[_32Bit | _64Bit])
+ assert_type(i4 + f8, np.floating[_32Bit | _64Bit])
+ assert_type(b_ + f8, np.float64)
+ assert_type(b + f8, np.float64)
+ assert_type(c + f8, np.complex128)
+ assert_type(f + f8, np.float64)
+ assert_type(AR_f + f8, npt.NDArray[np.floating[Any]])
+
+ assert_type(f4 + f16, np.floating[_32Bit | _128Bit])
+ assert_type(f4 + f8, np.floating[_32Bit | _64Bit])
+ assert_type(f4 + i8, np.floating[_32Bit | _64Bit])
+ assert_type(f4 + f4, np.float32)
+ assert_type(f4 + i4, np.float32)
+ assert_type(f4 + b_, np.float32)
+ assert_type(f4 + b, np.float32)
+ assert_type(f4 + c, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(f4 + f, np.floating[_32Bit | _64Bit])
+ assert_type(f4 + AR_f, npt.NDArray[np.floating[Any]])
+
+ assert_type(f16 + f4, np.floating[_32Bit | _128Bit])
+ assert_type(f8 + f4, np.floating[_32Bit | _64Bit])
+ assert_type(i8 + f4, np.floating[_32Bit | _64Bit])
+ assert_type(f4 + f4, np.float32)
+ assert_type(i4 + f4, np.float32)
+ assert_type(b_ + f4, np.float32)
+ assert_type(b + f4, np.float32)
+ assert_type(c + f4, np.complexfloating[_32Bit | _64Bit, _32Bit | _64Bit])
+ assert_type(f + f4, np.floating[_32Bit | _64Bit])
+ assert_type(AR_f + f4, npt.NDArray[np.floating[Any]])
+
+ # Int
+
+ assert_type(i8 + i8, np.int64)
+ assert_type(i8 + u8, Any)
+ assert_type(i8 + i4, np.signedinteger[_32Bit | _64Bit])
+ assert_type(i8 + u4, Any)
+ assert_type(i8 + b_, np.int64)
+ assert_type(i8 + b, np.int64)
+ assert_type(i8 + c, np.complex128)
+ assert_type(i8 + f, np.float64)
+ assert_type(i8 + AR_f, npt.NDArray[np.floating[Any]])
+
+ assert_type(u8 + u8, np.uint64)
+ assert_type(u8 + i4, Any)
+ assert_type(u8 + u4, np.unsignedinteger[_32Bit | _64Bit])
+ assert_type(u8 + b_, np.uint64)
+ assert_type(u8 + b, np.uint64)
+ assert_type(u8 + c, np.complex128)
+ assert_type(u8 + f, np.float64)
+ assert_type(u8 + AR_f, npt.NDArray[np.floating[Any]])
+
+ assert_type(i8 + i8, np.int64)
+ assert_type(u8 + i8, Any)
+ assert_type(i4 + i8, np.signedinteger[_32Bit | _64Bit])
+ assert_type(u4 + i8, Any)
+ assert_type(b_ + i8, np.int64)
+ assert_type(b + i8, np.int64)
+ assert_type(c + i8, np.complex128)
+ assert_type(f + i8, np.float64)
+ assert_type(AR_f + i8, npt.NDArray[np.floating[Any]])
+
+ assert_type(u8 + u8, np.uint64)
+ assert_type(i4 + u8, Any)
+ assert_type(u4 + u8, np.unsignedinteger[_32Bit | _64Bit])
+ assert_type(b_ + u8, np.uint64)
+ assert_type(b + u8, np.uint64)
+ assert_type(c + u8, np.complex128)
+ assert_type(f + u8, np.float64)
+ assert_type(AR_f + u8, npt.NDArray[np.floating[Any]])
+
+ assert_type(i4 + i8, np.signedinteger[_32Bit | _64Bit])
+ assert_type(i4 + i4, np.int32)
+ assert_type(i4 + b_, np.int32)
+ assert_type(i4 + b, np.int32)
+ assert_type(i4 + AR_f, npt.NDArray[np.floating[Any]])
+
+ assert_type(u4 + i8, Any)
+ assert_type(u4 + i4, Any)
+ assert_type(u4 + u8, np.unsignedinteger[_32Bit | _64Bit])
+ assert_type(u4 + u4, np.uint32)
+ assert_type(u4 + b_, np.uint32)
+ assert_type(u4 + b, np.uint32)
+ assert_type(u4 + AR_f, npt.NDArray[np.floating[Any]])
+
+ assert_type(i8 + i4, np.signedinteger[_32Bit | _64Bit])
+ assert_type(i4 + i4, np.int32)
+ assert_type(b_ + i4, np.int32)
+ assert_type(b + i4, np.int32)
+ assert_type(AR_f + i4, npt.NDArray[np.floating[Any]])
+
+ assert_type(i8 + u4, Any)
+ assert_type(i4 + u4, Any)
+ assert_type(u8 + u4, np.unsignedinteger[_32Bit | _64Bit])
+ assert_type(u4 + u4, np.uint32)
+ assert_type(b_ + u4, np.uint32)
+ assert_type(b + u4, np.uint32)
+ assert_type(AR_f + u4, npt.NDArray[np.floating[Any]])
venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/array_constructors.pyi ADDED
@@ -0,0 +1,221 @@
+ import sys
+ from typing import Any, TypeVar
+ from pathlib import Path
+ from collections import deque
+
+ import numpy as np
+ import numpy.typing as npt
+
+ if sys.version_info >= (3, 11):
+     from typing import assert_type
+ else:
+     from typing_extensions import assert_type
+
+ _SCT = TypeVar("_SCT", bound=np.generic, covariant=True)
+
+ class SubClass(np.ndarray[Any, np.dtype[_SCT]]): ...
+
+ i8: np.int64
+
+ A: npt.NDArray[np.float64]
+ B: SubClass[np.float64]
+ C: list[int]
+
+ def func(i: int, j: int, **kwargs: Any) -> SubClass[np.float64]: ...
+
+ assert_type(np.empty_like(A), npt.NDArray[np.float64])
+ assert_type(np.empty_like(B), SubClass[np.float64])
+ assert_type(np.empty_like([1, 1.0]), npt.NDArray[Any])
+ assert_type(np.empty_like(A, dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.empty_like(A, dtype='c16'), npt.NDArray[Any])
+
+ assert_type(np.array(A), npt.NDArray[np.float64])
+ assert_type(np.array(B), npt.NDArray[np.float64])
+ assert_type(np.array(B, subok=True), SubClass[np.float64])
+ assert_type(np.array([1, 1.0]), npt.NDArray[Any])
+ assert_type(np.array(deque([1, 2, 3])), npt.NDArray[Any])
+ assert_type(np.array(A, dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.array(A, dtype='c16'), npt.NDArray[Any])
+ assert_type(np.array(A, like=A), npt.NDArray[np.float64])
+
+ assert_type(np.zeros([1, 5, 6]), npt.NDArray[np.float64])
+ assert_type(np.zeros([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.zeros([1, 5, 6], dtype='c16'), npt.NDArray[Any])
+
+ assert_type(np.empty([1, 5, 6]), npt.NDArray[np.float64])
+ assert_type(np.empty([1, 5, 6], dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.empty([1, 5, 6], dtype='c16'), npt.NDArray[Any])
+
+ assert_type(np.concatenate(A), npt.NDArray[np.float64])
+ assert_type(np.concatenate([A, A]), Any)
+ assert_type(np.concatenate([[1], A]), npt.NDArray[Any])
+ assert_type(np.concatenate([[1], [1]]), npt.NDArray[Any])
+ assert_type(np.concatenate((A, A)), npt.NDArray[np.float64])
+ assert_type(np.concatenate(([1], [1])), npt.NDArray[Any])
+ assert_type(np.concatenate([1, 1.0]), npt.NDArray[Any])
+ assert_type(np.concatenate(A, dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.concatenate(A, dtype='c16'), npt.NDArray[Any])
+ assert_type(np.concatenate([1, 1.0], out=A), npt.NDArray[np.float64])
+
+ assert_type(np.asarray(A), npt.NDArray[np.float64])
+ assert_type(np.asarray(B), npt.NDArray[np.float64])
+ assert_type(np.asarray([1, 1.0]), npt.NDArray[Any])
+ assert_type(np.asarray(A, dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.asarray(A, dtype='c16'), npt.NDArray[Any])
+
+ assert_type(np.asanyarray(A), npt.NDArray[np.float64])
+ assert_type(np.asanyarray(B), SubClass[np.float64])
+ assert_type(np.asanyarray([1, 1.0]), npt.NDArray[Any])
+ assert_type(np.asanyarray(A, dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.asanyarray(A, dtype='c16'), npt.NDArray[Any])
+
+ assert_type(np.ascontiguousarray(A), npt.NDArray[np.float64])
+ assert_type(np.ascontiguousarray(B), npt.NDArray[np.float64])
+ assert_type(np.ascontiguousarray([1, 1.0]), npt.NDArray[Any])
+ assert_type(np.ascontiguousarray(A, dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.ascontiguousarray(A, dtype='c16'), npt.NDArray[Any])
+
+ assert_type(np.asfortranarray(A), npt.NDArray[np.float64])
+ assert_type(np.asfortranarray(B), npt.NDArray[np.float64])
+ assert_type(np.asfortranarray([1, 1.0]), npt.NDArray[Any])
+ assert_type(np.asfortranarray(A, dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.asfortranarray(A, dtype='c16'), npt.NDArray[Any])
+
+ assert_type(np.fromstring("1 1 1", sep=" "), npt.NDArray[np.float64])
+ assert_type(np.fromstring(b"1 1 1", sep=" "), npt.NDArray[np.float64])
+ assert_type(np.fromstring("1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64])
+ assert_type(np.fromstring(b"1 1 1", dtype=np.int64, sep=" "), npt.NDArray[np.int64])
+ assert_type(np.fromstring("1 1 1", dtype="c16", sep=" "), npt.NDArray[Any])
+ assert_type(np.fromstring(b"1 1 1", dtype="c16", sep=" "), npt.NDArray[Any])
+
+ assert_type(np.fromfile("test.txt", sep=" "), npt.NDArray[np.float64])
+ assert_type(np.fromfile("test.txt", dtype=np.int64, sep=" "), npt.NDArray[np.int64])
+ assert_type(np.fromfile("test.txt", dtype="c16", sep=" "), npt.NDArray[Any])
+ with open("test.txt") as f:
+     assert_type(np.fromfile(f, sep=" "), npt.NDArray[np.float64])
+ assert_type(np.fromfile(b"test.txt", sep=" "), npt.NDArray[np.float64])
+ assert_type(np.fromfile(Path("test.txt"), sep=" "), npt.NDArray[np.float64])
+
+ assert_type(np.fromiter("12345", np.float64), npt.NDArray[np.float64])
+ assert_type(np.fromiter("12345", float), npt.NDArray[Any])
+
+ assert_type(np.frombuffer(A), npt.NDArray[np.float64])
+ assert_type(np.frombuffer(A, dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.frombuffer(A, dtype="c16"), npt.NDArray[Any])
+
+ assert_type(np.arange(False, True), npt.NDArray[np.signedinteger[Any]])
+ assert_type(np.arange(10), npt.NDArray[np.signedinteger[Any]])
+ assert_type(np.arange(0, 10, step=2), npt.NDArray[np.signedinteger[Any]])
+ assert_type(np.arange(10.0), npt.NDArray[np.floating[Any]])
+ assert_type(np.arange(start=0, stop=10.0), npt.NDArray[np.floating[Any]])
+ assert_type(np.arange(np.timedelta64(0)), npt.NDArray[np.timedelta64])
+ assert_type(np.arange(0, np.timedelta64(10)), npt.NDArray[np.timedelta64])
+ assert_type(np.arange(np.datetime64("0"), np.datetime64("10")), npt.NDArray[np.datetime64])
+ assert_type(np.arange(10, dtype=np.float64), npt.NDArray[np.float64])
+ assert_type(np.arange(0, 10, step=2, dtype=np.int16), npt.NDArray[np.int16])
+ assert_type(np.arange(10, dtype=int), npt.NDArray[Any])
+ assert_type(np.arange(0, 10, dtype="f8"), npt.NDArray[Any])
+
+ assert_type(np.require(A), npt.NDArray[np.float64])
+ assert_type(np.require(B), SubClass[np.float64])
+ assert_type(np.require(B, requirements=None), SubClass[np.float64])
+ assert_type(np.require(B, dtype=int), np.ndarray[Any, Any])
+ assert_type(np.require(B, requirements="E"), np.ndarray[Any, Any])
+ assert_type(np.require(B, requirements=["ENSUREARRAY"]), np.ndarray[Any, Any])
+ assert_type(np.require(B, requirements={"F", "E"}), np.ndarray[Any, Any])
+ assert_type(np.require(B, requirements=["C", "OWNDATA"]), SubClass[np.float64])
+ assert_type(np.require(B, requirements="W"), SubClass[np.float64])
+ assert_type(np.require(B, requirements="A"), SubClass[np.float64])
+ assert_type(np.require(C), np.ndarray[Any, Any])
+
+ assert_type(np.linspace(0, 10), npt.NDArray[np.floating[Any]])
+ assert_type(np.linspace(0, 10j), npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(np.linspace(0, 10, dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.linspace(0, 10, dtype=int), npt.NDArray[Any])
+ assert_type(np.linspace(0, 10, retstep=True), tuple[npt.NDArray[np.floating[Any]], np.floating[Any]])
+ assert_type(np.linspace(0j, 10, retstep=True), tuple[npt.NDArray[np.complexfloating[Any, Any]], np.complexfloating[Any, Any]])
+ assert_type(np.linspace(0, 10, retstep=True, dtype=np.int64), tuple[npt.NDArray[np.int64], np.int64])
+ assert_type(np.linspace(0j, 10, retstep=True, dtype=int), tuple[npt.NDArray[Any], Any])
+
+ assert_type(np.logspace(0, 10), npt.NDArray[np.floating[Any]])
+ assert_type(np.logspace(0, 10j), npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(np.logspace(0, 10, dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.logspace(0, 10, dtype=int), npt.NDArray[Any])
+
+ assert_type(np.geomspace(0, 10), npt.NDArray[np.floating[Any]])
+ assert_type(np.geomspace(0, 10j), npt.NDArray[np.complexfloating[Any, Any]])
+ assert_type(np.geomspace(0, 10, dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.geomspace(0, 10, dtype=int), npt.NDArray[Any])
+
+ assert_type(np.zeros_like(A), npt.NDArray[np.float64])
+ assert_type(np.zeros_like(C), npt.NDArray[Any])
+ assert_type(np.zeros_like(A, dtype=float), npt.NDArray[Any])
+ assert_type(np.zeros_like(B), SubClass[np.float64])
+ assert_type(np.zeros_like(B, dtype=np.int64), npt.NDArray[np.int64])
+
+ assert_type(np.ones_like(A), npt.NDArray[np.float64])
+ assert_type(np.ones_like(C), npt.NDArray[Any])
+ assert_type(np.ones_like(A, dtype=float), npt.NDArray[Any])
+ assert_type(np.ones_like(B), SubClass[np.float64])
+ assert_type(np.ones_like(B, dtype=np.int64), npt.NDArray[np.int64])
+
+ assert_type(np.full_like(A, i8), npt.NDArray[np.float64])
+ assert_type(np.full_like(C, i8), npt.NDArray[Any])
+ assert_type(np.full_like(A, i8, dtype=int), npt.NDArray[Any])
+ assert_type(np.full_like(B, i8), SubClass[np.float64])
+ assert_type(np.full_like(B, i8, dtype=np.int64), npt.NDArray[np.int64])
+
+ assert_type(np.ones(1), npt.NDArray[np.float64])
+ assert_type(np.ones([1, 1, 1]), npt.NDArray[np.float64])
+ assert_type(np.ones(5, dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.ones(5, dtype=int), npt.NDArray[Any])
+
+ assert_type(np.full(1, i8), npt.NDArray[Any])
+ assert_type(np.full([1, 1, 1], i8), npt.NDArray[Any])
+ assert_type(np.full(1, i8, dtype=np.float64), npt.NDArray[np.float64])
+ assert_type(np.full(1, i8, dtype=float), npt.NDArray[Any])
+
+ assert_type(np.indices([1, 2, 3]), npt.NDArray[np.int_])
+ assert_type(np.indices([1, 2, 3], sparse=True), tuple[npt.NDArray[np.int_], ...])
+
+ assert_type(np.fromfunction(func, (3, 5)), SubClass[np.float64])
+
+ assert_type(np.identity(10), npt.NDArray[np.float64])
+ assert_type(np.identity(10, dtype=np.int64), npt.NDArray[np.int64])
+ assert_type(np.identity(10, dtype=int), npt.NDArray[Any])
+
+ assert_type(np.atleast_1d(A), npt.NDArray[np.float64])
+ assert_type(np.atleast_1d(C), npt.NDArray[Any])
+ assert_type(np.atleast_1d(A, A), list[npt.NDArray[Any]])
+ assert_type(np.atleast_1d(A, C), list[npt.NDArray[Any]])
+ assert_type(np.atleast_1d(C, C), list[npt.NDArray[Any]])
+
+ assert_type(np.atleast_2d(A), npt.NDArray[np.float64])
+
+ assert_type(np.atleast_3d(A), npt.NDArray[np.float64])
+
+ assert_type(np.vstack([A, A]), np.ndarray[Any, Any])
+ assert_type(np.vstack([A, A], dtype=np.float64), npt.NDArray[np.float64])
+ assert_type(np.vstack([A, C]), npt.NDArray[Any])
+ assert_type(np.vstack([C, C]), npt.NDArray[Any])
+
+ assert_type(np.hstack([A, A]), np.ndarray[Any, Any])
+ assert_type(np.hstack([A, A], dtype=np.float64), npt.NDArray[np.float64])
+
+ assert_type(np.stack([A, A]), Any)
+ assert_type(np.stack([A, A], dtype=np.float64), npt.NDArray[np.float64])
+ assert_type(np.stack([A, C]), npt.NDArray[Any])
+ assert_type(np.stack([C, C]), npt.NDArray[Any])
+ assert_type(np.stack([A, A], axis=0), Any)
+ assert_type(np.stack([A, A], out=B), SubClass[np.float64])
+
+ assert_type(np.block([[A, A], [A, A]]), npt.NDArray[Any])
+ assert_type(np.block(C), npt.NDArray[Any])
+
+ if sys.version_info >= (3, 12):
+     from collections.abc import Buffer
+
+     def create_array(obj: npt.ArrayLike) -> npt.NDArray[Any]: ...
+
+     buffer: Buffer
+     assert_type(create_array(buffer), npt.NDArray[Any])
venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/arraypad.pyi ADDED
@@ -0,0 +1,28 @@
+ import sys
+ from collections.abc import Mapping
+ from typing import Any, SupportsIndex
+
+ import numpy as np
+ import numpy.typing as npt
+
+ if sys.version_info >= (3, 11):
+     from typing import assert_type
+ else:
+     from typing_extensions import assert_type
+
+ def mode_func(
+     ar: npt.NDArray[np.number[Any]],
+     width: tuple[int, int],
+     iaxis: SupportsIndex,
+     kwargs: Mapping[str, Any],
+ ) -> None: ...
+
+ AR_i8: npt.NDArray[np.int64]
+ AR_f8: npt.NDArray[np.float64]
+ AR_LIKE: list[int]
+
+ assert_type(np.pad(AR_i8, (2, 3), "constant"), npt.NDArray[np.int64])
+ assert_type(np.pad(AR_LIKE, (2, 3), "constant"), npt.NDArray[Any])
+
+ assert_type(np.pad(AR_f8, (2, 3), mode_func), npt.NDArray[np.float64])
+ assert_type(np.pad(AR_f8, (2, 3), mode_func, a=1, b=2), npt.NDArray[np.float64])
venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/arraysetops.pyi ADDED
@@ -0,0 +1,68 @@
+ import sys
+ from typing import Any
+
+ import numpy as np
+ import numpy.typing as npt
+
+ if sys.version_info >= (3, 11):
+     from typing import assert_type
+ else:
+     from typing_extensions import assert_type
+
+ AR_b: npt.NDArray[np.bool_]
+ AR_i8: npt.NDArray[np.int64]
+ AR_f8: npt.NDArray[np.float64]
+ AR_M: npt.NDArray[np.datetime64]
+ AR_O: npt.NDArray[np.object_]
+
+ AR_LIKE_f8: list[float]
+
+ assert_type(np.ediff1d(AR_b), npt.NDArray[np.int8])
+ assert_type(np.ediff1d(AR_i8, to_end=[1, 2, 3]), npt.NDArray[np.int64])
+ assert_type(np.ediff1d(AR_M), npt.NDArray[np.timedelta64])
+ assert_type(np.ediff1d(AR_O), npt.NDArray[np.object_])
+ assert_type(np.ediff1d(AR_LIKE_f8, to_begin=[1, 1.5]), npt.NDArray[Any])
+
+ assert_type(np.intersect1d(AR_i8, AR_i8), npt.NDArray[np.int64])
+ assert_type(np.intersect1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64])
+ assert_type(np.intersect1d(AR_f8, AR_i8), npt.NDArray[Any])
+ assert_type(np.intersect1d(AR_f8, AR_f8, return_indices=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]])
+
+ assert_type(np.setxor1d(AR_i8, AR_i8), npt.NDArray[np.int64])
+ assert_type(np.setxor1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64])
+ assert_type(np.setxor1d(AR_f8, AR_i8), npt.NDArray[Any])
+
+ assert_type(np.in1d(AR_i8, AR_i8), npt.NDArray[np.bool_])
+ assert_type(np.in1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.bool_])
+ assert_type(np.in1d(AR_f8, AR_i8), npt.NDArray[np.bool_])
+ assert_type(np.in1d(AR_f8, AR_LIKE_f8, invert=True), npt.NDArray[np.bool_])
+
+ assert_type(np.isin(AR_i8, AR_i8), npt.NDArray[np.bool_])
+ assert_type(np.isin(AR_M, AR_M, assume_unique=True), npt.NDArray[np.bool_])
+ assert_type(np.isin(AR_f8, AR_i8), npt.NDArray[np.bool_])
+ assert_type(np.isin(AR_f8, AR_LIKE_f8, invert=True), npt.NDArray[np.bool_])
+
+ assert_type(np.union1d(AR_i8, AR_i8), npt.NDArray[np.int64])
+ assert_type(np.union1d(AR_M, AR_M), npt.NDArray[np.datetime64])
+ assert_type(np.union1d(AR_f8, AR_i8), npt.NDArray[Any])
+
+ assert_type(np.setdiff1d(AR_i8, AR_i8), npt.NDArray[np.int64])
+ assert_type(np.setdiff1d(AR_M, AR_M, assume_unique=True), npt.NDArray[np.datetime64])
+ assert_type(np.setdiff1d(AR_f8, AR_i8), npt.NDArray[Any])
+
+ assert_type(np.unique(AR_f8), npt.NDArray[np.float64])
+ assert_type(np.unique(AR_LIKE_f8, axis=0), npt.NDArray[Any])
+ assert_type(np.unique(AR_f8, return_index=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]])
+ assert_type(np.unique(AR_LIKE_f8, return_index=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]])
+ assert_type(np.unique(AR_f8, return_inverse=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]])
+ assert_type(np.unique(AR_LIKE_f8, return_inverse=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]])
+ assert_type(np.unique(AR_f8, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp]])
+ assert_type(np.unique(AR_LIKE_f8, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp]])
+ assert_type(np.unique(AR_f8, return_index=True, return_inverse=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]])
+ assert_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]])
+ assert_type(np.unique(AR_f8, return_index=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]])
+ assert_type(np.unique(AR_LIKE_f8, return_index=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]])
+ assert_type(np.unique(AR_f8, return_inverse=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp]])
+ assert_type(np.unique(AR_LIKE_f8, return_inverse=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp]])
+ assert_type(np.unique(AR_f8, return_index=True, return_inverse=True, return_counts=True), tuple[npt.NDArray[np.float64], npt.NDArray[np.intp], npt.NDArray[np.intp], npt.NDArray[np.intp]])
+ assert_type(np.unique(AR_LIKE_f8, return_index=True, return_inverse=True, return_counts=True), tuple[npt.NDArray[Any], npt.NDArray[np.intp], npt.NDArray[np.intp], npt.NDArray[np.intp]])
venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/arrayterator.pyi ADDED
@@ -0,0 +1,33 @@
+ import sys
+ from typing import Any
+ from collections.abc import Generator
+
+ import numpy as np
+ import numpy.typing as npt
+
+ if sys.version_info >= (3, 11):
+     from typing import assert_type
+ else:
+     from typing_extensions import assert_type
+
+ AR_i8: np.ndarray[Any, np.dtype[np.int64]]
+ ar_iter = np.lib.Arrayterator(AR_i8)
+
+ assert_type(ar_iter.var, npt.NDArray[np.int64])
+ assert_type(ar_iter.buf_size, None | int)
+ assert_type(ar_iter.start, list[int])
+ assert_type(ar_iter.stop, list[int])
+ assert_type(ar_iter.step, list[int])
+ assert_type(ar_iter.shape, tuple[int, ...])
+ assert_type(ar_iter.flat, Generator[np.int64, None, None])
+
+ assert_type(ar_iter.__array__(), npt.NDArray[np.int64])
+
+ for i in ar_iter:
+     assert_type(i, npt.NDArray[np.int64])
+
+ assert_type(ar_iter[0], np.lib.Arrayterator[Any, np.dtype[np.int64]])
+ assert_type(ar_iter[...], np.lib.Arrayterator[Any, np.dtype[np.int64]])
+ assert_type(ar_iter[:], np.lib.Arrayterator[Any, np.dtype[np.int64]])
+ assert_type(ar_iter[0, 0, 0], np.lib.Arrayterator[Any, np.dtype[np.int64]])
+ assert_type(ar_iter[..., 0, :], np.lib.Arrayterator[Any, np.dtype[np.int64]])
venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/bitwise_ops.pyi ADDED
@@ -0,0 +1,135 @@
+ import sys
+ from typing import Any
+
+ import numpy as np
+ import numpy.typing as npt
+ from numpy._typing import _64Bit, _32Bit
+
+ if sys.version_info >= (3, 11):
+     from typing import assert_type
+ else:
+     from typing_extensions import assert_type
+
+ i8 = np.int64(1)
+ u8 = np.uint64(1)
+
+ i4 = np.int32(1)
+ u4 = np.uint32(1)
+
+ b_ = np.bool_(1)
+
+ b = bool(1)
+ i = int(1)
+
+ AR = np.array([0, 1, 2], dtype=np.int32)
+ AR.setflags(write=False)
+
+
+ assert_type(i8 << i8, np.int64)
+ assert_type(i8 >> i8, np.int64)
+ assert_type(i8 | i8, np.int64)
+ assert_type(i8 ^ i8, np.int64)
+ assert_type(i8 & i8, np.int64)
+
+ assert_type(i8 << AR, npt.NDArray[np.signedinteger[Any]])
+ assert_type(i8 >> AR, npt.NDArray[np.signedinteger[Any]])
+ assert_type(i8 | AR, npt.NDArray[np.signedinteger[Any]])
+ assert_type(i8 ^ AR, npt.NDArray[np.signedinteger[Any]])
+ assert_type(i8 & AR, npt.NDArray[np.signedinteger[Any]])
+
+ assert_type(i4 << i4, np.int32)
+ assert_type(i4 >> i4, np.int32)
+ assert_type(i4 | i4, np.int32)
+ assert_type(i4 ^ i4, np.int32)
+ assert_type(i4 & i4, np.int32)
+
+ assert_type(i8 << i4, np.signedinteger[_32Bit | _64Bit])
+ assert_type(i8 >> i4, np.signedinteger[_32Bit | _64Bit])
+ assert_type(i8 | i4, np.signedinteger[_32Bit | _64Bit])
+ assert_type(i8 ^ i4, np.signedinteger[_32Bit | _64Bit])
+ assert_type(i8 & i4, np.signedinteger[_32Bit | _64Bit])
+
+ assert_type(i8 << b_, np.int64)
+ assert_type(i8 >> b_, np.int64)
+ assert_type(i8 | b_, np.int64)
+ assert_type(i8 ^ b_, np.int64)
+ assert_type(i8 & b_, np.int64)
+
+ assert_type(i8 << b, np.int64)
+ assert_type(i8 >> b, np.int64)
+ assert_type(i8 | b, np.int64)
+ assert_type(i8 ^ b, np.int64)
+ assert_type(i8 & b, np.int64)
+
+ assert_type(u8 << u8, np.uint64)
+ assert_type(u8 >> u8, np.uint64)
+ assert_type(u8 | u8, np.uint64)
+ assert_type(u8 ^ u8, np.uint64)
+ assert_type(u8 & u8, np.uint64)
+
+ assert_type(u8 << AR, npt.NDArray[np.signedinteger[Any]])
+ assert_type(u8 >> AR, npt.NDArray[np.signedinteger[Any]])
+ assert_type(u8 | AR, npt.NDArray[np.signedinteger[Any]])
+ assert_type(u8 ^ AR, npt.NDArray[np.signedinteger[Any]])
+ assert_type(u8 & AR, npt.NDArray[np.signedinteger[Any]])
+
+ assert_type(u4 << u4, np.uint32)
+ assert_type(u4 >> u4, np.uint32)
+ assert_type(u4 | u4, np.uint32)
+ assert_type(u4 ^ u4, np.uint32)
+ assert_type(u4 & u4, np.uint32)
+
+ assert_type(u4 << i4, np.signedinteger[Any])
+ assert_type(u4 >> i4, np.signedinteger[Any])
+ assert_type(u4 | i4, np.signedinteger[Any])
+ assert_type(u4 ^ i4, np.signedinteger[Any])
+ assert_type(u4 & i4, np.signedinteger[Any])
+
+ assert_type(u4 << i, np.signedinteger[Any])
+ assert_type(u4 >> i, np.signedinteger[Any])
+ assert_type(u4 | i, np.signedinteger[Any])
+ assert_type(u4 ^ i, np.signedinteger[Any])
+ assert_type(u4 & i, np.signedinteger[Any])
+
+ assert_type(u8 << b_, np.uint64)
+ assert_type(u8 >> b_, np.uint64)
+ assert_type(u8 | b_, np.uint64)
+ assert_type(u8 ^ b_, np.uint64)
+ assert_type(u8 & b_, np.uint64)
+
+ assert_type(u8 << b, np.uint64)
+ assert_type(u8 >> b, np.uint64)
+ assert_type(u8 | b, np.uint64)
+ assert_type(u8 ^ b, np.uint64)
+ assert_type(u8 & b, np.uint64)
+
+ assert_type(b_ << b_, np.int8)
+ assert_type(b_ >> b_, np.int8)
+ assert_type(b_ | b_, np.bool_)
+ assert_type(b_ ^ b_, np.bool_)
+ assert_type(b_ & b_, np.bool_)
+
+ assert_type(b_ << AR, npt.NDArray[np.signedinteger[Any]])
+ assert_type(b_ >> AR, npt.NDArray[np.signedinteger[Any]])
+ assert_type(b_ | AR, npt.NDArray[np.signedinteger[Any]])
+ assert_type(b_ ^ AR, npt.NDArray[np.signedinteger[Any]])
+ assert_type(b_ & AR, npt.NDArray[np.signedinteger[Any]])
+
+ assert_type(b_ << b, np.int8)
+ assert_type(b_ >> b, np.int8)
+ assert_type(b_ | b, np.bool_)
+ assert_type(b_ ^ b, np.bool_)
+ assert_type(b_ & b, np.bool_)
+
+ assert_type(b_ << i, np.int_)
+ assert_type(b_ >> i, np.int_)
+ assert_type(b_ | i, np.int_)
+ assert_type(b_ ^ i, np.int_)
+ assert_type(b_ & i, np.int_)
+
+ assert_type(~i8, np.int64)
+ assert_type(~i4, np.int32)
+ assert_type(~u8, np.uint64)
+ assert_type(~u4, np.uint32)
+ assert_type(~b_, np.bool_)
+ assert_type(~AR, npt.NDArray[np.int32])
venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/chararray.pyi ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
+ import sys
+ from typing import Any
+
+ import numpy as np
+ import numpy.typing as npt
+
+ if sys.version_info >= (3, 11):
+     from typing import assert_type
+ else:
+     from typing_extensions import assert_type
+
+ AR_U: np.chararray[Any, np.dtype[np.str_]]
+ AR_S: np.chararray[Any, np.dtype[np.bytes_]]
+
+ assert_type(AR_U == AR_U, npt.NDArray[np.bool_])
+ assert_type(AR_S == AR_S, npt.NDArray[np.bool_])
+
+ assert_type(AR_U != AR_U, npt.NDArray[np.bool_])
+ assert_type(AR_S != AR_S, npt.NDArray[np.bool_])
+
+ assert_type(AR_U >= AR_U, npt.NDArray[np.bool_])
+ assert_type(AR_S >= AR_S, npt.NDArray[np.bool_])
+
+ assert_type(AR_U <= AR_U, npt.NDArray[np.bool_])
+ assert_type(AR_S <= AR_S, npt.NDArray[np.bool_])
+
+ assert_type(AR_U > AR_U, npt.NDArray[np.bool_])
+ assert_type(AR_S > AR_S, npt.NDArray[np.bool_])
+
+ assert_type(AR_U < AR_U, npt.NDArray[np.bool_])
+ assert_type(AR_S < AR_S, npt.NDArray[np.bool_])
+
+ assert_type(AR_U * 5, np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S * [5], np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U % "test", np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S % b"test", np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U.capitalize(), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.capitalize(), np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U.center(5), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.center([2, 3, 4], b"a"), np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U.encode(), np.chararray[Any, np.dtype[np.bytes_]])
+ assert_type(AR_S.decode(), np.chararray[Any, np.dtype[np.str_]])
+
+ assert_type(AR_U.expandtabs(), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.expandtabs(tabsize=4), np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U.join("_"), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.join([b"_", b""]), np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U.ljust(5), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.ljust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.chararray[Any, np.dtype[np.bytes_]])
+ assert_type(AR_U.rjust(5), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.rjust([4, 3, 1], fillchar=[b"a", b"b", b"c"]), np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U.lstrip(), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.lstrip(chars=b"_"), np.chararray[Any, np.dtype[np.bytes_]])
+ assert_type(AR_U.rstrip(), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.rstrip(chars=b"_"), np.chararray[Any, np.dtype[np.bytes_]])
+ assert_type(AR_U.strip(), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.strip(chars=b"_"), np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U.partition("\n"), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.partition([b"a", b"b", b"c"]), np.chararray[Any, np.dtype[np.bytes_]])
+ assert_type(AR_U.rpartition("\n"), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.rpartition([b"a", b"b", b"c"]), np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U.replace("_", "-"), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.replace([b"_", b""], [b"a", b"b"]), np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U.split("_"), npt.NDArray[np.object_])
+ assert_type(AR_S.split(maxsplit=[1, 2, 3]), npt.NDArray[np.object_])
+ assert_type(AR_U.rsplit("_"), npt.NDArray[np.object_])
+ assert_type(AR_S.rsplit(maxsplit=[1, 2, 3]), npt.NDArray[np.object_])
+
+ assert_type(AR_U.splitlines(), npt.NDArray[np.object_])
+ assert_type(AR_S.splitlines(keepends=[True, True, False]), npt.NDArray[np.object_])
+
+ assert_type(AR_U.swapcase(), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.swapcase(), np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U.title(), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.title(), np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U.upper(), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.upper(), np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U.zfill(5), np.chararray[Any, np.dtype[np.str_]])
+ assert_type(AR_S.zfill([2, 3, 4]), np.chararray[Any, np.dtype[np.bytes_]])
+
+ assert_type(AR_U.count("a", start=[1, 2, 3]), npt.NDArray[np.int_])
+ assert_type(AR_S.count([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+
+ assert_type(AR_U.endswith("a", start=[1, 2, 3]), npt.NDArray[np.bool_])
+ assert_type(AR_S.endswith([b"a", b"b", b"c"], end=9), npt.NDArray[np.bool_])
+ assert_type(AR_U.startswith("a", start=[1, 2, 3]), npt.NDArray[np.bool_])
+ assert_type(AR_S.startswith([b"a", b"b", b"c"], end=9), npt.NDArray[np.bool_])
+
+ assert_type(AR_U.find("a", start=[1, 2, 3]), npt.NDArray[np.int_])
+ assert_type(AR_S.find([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+ assert_type(AR_U.rfind("a", start=[1, 2, 3]), npt.NDArray[np.int_])
+ assert_type(AR_S.rfind([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+
+ assert_type(AR_U.index("a", start=[1, 2, 3]), npt.NDArray[np.int_])
+ assert_type(AR_S.index([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+ assert_type(AR_U.rindex("a", start=[1, 2, 3]), npt.NDArray[np.int_])
+ assert_type(AR_S.rindex([b"a", b"b", b"c"], end=9), npt.NDArray[np.int_])
+
+ assert_type(AR_U.isalpha(), npt.NDArray[np.bool_])
+ assert_type(AR_S.isalpha(), npt.NDArray[np.bool_])
+
+ assert_type(AR_U.isalnum(), npt.NDArray[np.bool_])
+ assert_type(AR_S.isalnum(), npt.NDArray[np.bool_])
+
+ assert_type(AR_U.isdecimal(), npt.NDArray[np.bool_])
+ assert_type(AR_S.isdecimal(), npt.NDArray[np.bool_])
+
+ assert_type(AR_U.isdigit(), npt.NDArray[np.bool_])
+ assert_type(AR_S.isdigit(), npt.NDArray[np.bool_])
+
+ assert_type(AR_U.islower(), npt.NDArray[np.bool_])
+ assert_type(AR_S.islower(), npt.NDArray[np.bool_])
+
+ assert_type(AR_U.isnumeric(), npt.NDArray[np.bool_])
+ assert_type(AR_S.isnumeric(), npt.NDArray[np.bool_])
+
+ assert_type(AR_U.isspace(), npt.NDArray[np.bool_])
+ assert_type(AR_S.isspace(), npt.NDArray[np.bool_])
+
+ assert_type(AR_U.istitle(), npt.NDArray[np.bool_])
+ assert_type(AR_S.istitle(), npt.NDArray[np.bool_])
+
+ assert_type(AR_U.isupper(), npt.NDArray[np.bool_])
+ assert_type(AR_S.isupper(), npt.NDArray[np.bool_])
+
+ assert_type(AR_U.__array_finalize__(object()), None)
+ assert_type(AR_S.__array_finalize__(object()), None)
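chararray is a legacy subclass, but the typing behavior exercised above is easy to observe at runtime: string methods stay within the chararray type, while comparisons drop to a plain boolean ndarray. A minimal sketch, assuming only that NumPy is installed (the sample values are illustrative):

    import numpy as np

    AR_U = np.char.array(["ab", "cd"])   # chararray with a str_ dtype
    print(AR_U.upper())                  # stays a chararray: ['AB' 'CD']
    print(type(AR_U.encode()))           # still chararray, now bytes_-typed
    print(AR_U == AR_U)                  # plain boolean ndarray, as asserted above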
venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/comparisons.pyi ADDED
@@ -0,0 +1,270 @@
+ import sys
+ import fractions
+ import decimal
+ from typing import Any
+
+ import numpy as np
+ import numpy.typing as npt
+
+ if sys.version_info >= (3, 11):
+     from typing import assert_type
+ else:
+     from typing_extensions import assert_type
+
+ c16 = np.complex128()
+ f8 = np.float64()
+ i8 = np.int64()
+ u8 = np.uint64()
+
+ c8 = np.complex64()
+ f4 = np.float32()
+ i4 = np.int32()
+ u4 = np.uint32()
+
+ dt = np.datetime64(0, "D")
+ td = np.timedelta64(0, "D")
+
+ b_ = np.bool_()
+
+ b = bool()
+ c = complex()
+ f = float()
+ i = int()
+
+ AR = np.array([0], dtype=np.int64)
+ AR.setflags(write=False)
+
+ SEQ = (0, 1, 2, 3, 4)
+
+ # object-like comparisons
+
+ assert_type(i8 > fractions.Fraction(1, 5), Any)
+ assert_type(i8 > [fractions.Fraction(1, 5)], Any)
+ assert_type(i8 > decimal.Decimal("1.5"), Any)
+ assert_type(i8 > [decimal.Decimal("1.5")], Any)
+
+ # Time structures
+
+ assert_type(dt > dt, np.bool_)
+
+ assert_type(td > td, np.bool_)
+ assert_type(td > i, np.bool_)
+ assert_type(td > i4, np.bool_)
+ assert_type(td > i8, np.bool_)
+
+ assert_type(td > AR, npt.NDArray[np.bool_])
+ assert_type(td > SEQ, npt.NDArray[np.bool_])
+ assert_type(AR > SEQ, npt.NDArray[np.bool_])
+ assert_type(AR > td, npt.NDArray[np.bool_])
+ assert_type(SEQ > td, npt.NDArray[np.bool_])
+ assert_type(SEQ > AR, npt.NDArray[np.bool_])
+
+ # boolean
+
+ assert_type(b_ > b, np.bool_)
+ assert_type(b_ > b_, np.bool_)
+ assert_type(b_ > i, np.bool_)
+ assert_type(b_ > i8, np.bool_)
+ assert_type(b_ > i4, np.bool_)
+ assert_type(b_ > u8, np.bool_)
+ assert_type(b_ > u4, np.bool_)
+ assert_type(b_ > f, np.bool_)
+ assert_type(b_ > f8, np.bool_)
+ assert_type(b_ > f4, np.bool_)
+ assert_type(b_ > c, np.bool_)
+ assert_type(b_ > c16, np.bool_)
+ assert_type(b_ > c8, np.bool_)
+ assert_type(b_ > AR, npt.NDArray[np.bool_])
+ assert_type(b_ > SEQ, npt.NDArray[np.bool_])
+
+ # Complex
+
+ assert_type(c16 > c16, np.bool_)
+ assert_type(c16 > f8, np.bool_)
+ assert_type(c16 > i8, np.bool_)
+ assert_type(c16 > c8, np.bool_)
+ assert_type(c16 > f4, np.bool_)
+ assert_type(c16 > i4, np.bool_)
+ assert_type(c16 > b_, np.bool_)
+ assert_type(c16 > b, np.bool_)
+ assert_type(c16 > c, np.bool_)
+ assert_type(c16 > f, np.bool_)
+ assert_type(c16 > i, np.bool_)
+ assert_type(c16 > AR, npt.NDArray[np.bool_])
+ assert_type(c16 > SEQ, npt.NDArray[np.bool_])
+
+ assert_type(c16 > c16, np.bool_)
+ assert_type(f8 > c16, np.bool_)
+ assert_type(i8 > c16, np.bool_)
+ assert_type(c8 > c16, np.bool_)
+ assert_type(f4 > c16, np.bool_)
+ assert_type(i4 > c16, np.bool_)
+ assert_type(b_ > c16, np.bool_)
+ assert_type(b > c16, np.bool_)
+ assert_type(c > c16, np.bool_)
+ assert_type(f > c16, np.bool_)
+ assert_type(i > c16, np.bool_)
+ assert_type(AR > c16, npt.NDArray[np.bool_])
+ assert_type(SEQ > c16, npt.NDArray[np.bool_])
+
+ assert_type(c8 > c16, np.bool_)
+ assert_type(c8 > f8, np.bool_)
+ assert_type(c8 > i8, np.bool_)
+ assert_type(c8 > c8, np.bool_)
+ assert_type(c8 > f4, np.bool_)
+ assert_type(c8 > i4, np.bool_)
+ assert_type(c8 > b_, np.bool_)
+ assert_type(c8 > b, np.bool_)
+ assert_type(c8 > c, np.bool_)
+ assert_type(c8 > f, np.bool_)
+ assert_type(c8 > i, np.bool_)
+ assert_type(c8 > AR, npt.NDArray[np.bool_])
+ assert_type(c8 > SEQ, npt.NDArray[np.bool_])
+
+ assert_type(c16 > c8, np.bool_)
+ assert_type(f8 > c8, np.bool_)
+ assert_type(i8 > c8, np.bool_)
+ assert_type(c8 > c8, np.bool_)
+ assert_type(f4 > c8, np.bool_)
+ assert_type(i4 > c8, np.bool_)
+ assert_type(b_ > c8, np.bool_)
+ assert_type(b > c8, np.bool_)
+ assert_type(c > c8, np.bool_)
+ assert_type(f > c8, np.bool_)
+ assert_type(i > c8, np.bool_)
+ assert_type(AR > c8, npt.NDArray[np.bool_])
+ assert_type(SEQ > c8, npt.NDArray[np.bool_])
+
+ # Float
+
+ assert_type(f8 > f8, np.bool_)
+ assert_type(f8 > i8, np.bool_)
+ assert_type(f8 > f4, np.bool_)
+ assert_type(f8 > i4, np.bool_)
+ assert_type(f8 > b_, np.bool_)
+ assert_type(f8 > b, np.bool_)
+ assert_type(f8 > c, np.bool_)
+ assert_type(f8 > f, np.bool_)
+ assert_type(f8 > i, np.bool_)
+ assert_type(f8 > AR, npt.NDArray[np.bool_])
+ assert_type(f8 > SEQ, npt.NDArray[np.bool_])
+
+ assert_type(f8 > f8, np.bool_)
+ assert_type(i8 > f8, np.bool_)
+ assert_type(f4 > f8, np.bool_)
+ assert_type(i4 > f8, np.bool_)
+ assert_type(b_ > f8, np.bool_)
+ assert_type(b > f8, np.bool_)
+ assert_type(c > f8, np.bool_)
+ assert_type(f > f8, np.bool_)
+ assert_type(i > f8, np.bool_)
+ assert_type(AR > f8, npt.NDArray[np.bool_])
+ assert_type(SEQ > f8, npt.NDArray[np.bool_])
+
+ assert_type(f4 > f8, np.bool_)
+ assert_type(f4 > i8, np.bool_)
+ assert_type(f4 > f4, np.bool_)
+ assert_type(f4 > i4, np.bool_)
+ assert_type(f4 > b_, np.bool_)
+ assert_type(f4 > b, np.bool_)
+ assert_type(f4 > c, np.bool_)
+ assert_type(f4 > f, np.bool_)
+ assert_type(f4 > i, np.bool_)
+ assert_type(f4 > AR, npt.NDArray[np.bool_])
+ assert_type(f4 > SEQ, npt.NDArray[np.bool_])
+
+ assert_type(f8 > f4, np.bool_)
+ assert_type(i8 > f4, np.bool_)
+ assert_type(f4 > f4, np.bool_)
+ assert_type(i4 > f4, np.bool_)
+ assert_type(b_ > f4, np.bool_)
+ assert_type(b > f4, np.bool_)
+ assert_type(c > f4, np.bool_)
+ assert_type(f > f4, np.bool_)
+ assert_type(i > f4, np.bool_)
+ assert_type(AR > f4, npt.NDArray[np.bool_])
+ assert_type(SEQ > f4, npt.NDArray[np.bool_])
+
+ # Int
+
+ assert_type(i8 > i8, np.bool_)
+ assert_type(i8 > u8, np.bool_)
+ assert_type(i8 > i4, np.bool_)
+ assert_type(i8 > u4, np.bool_)
+ assert_type(i8 > b_, np.bool_)
+ assert_type(i8 > b, np.bool_)
+ assert_type(i8 > c, np.bool_)
+ assert_type(i8 > f, np.bool_)
+ assert_type(i8 > i, np.bool_)
+ assert_type(i8 > AR, npt.NDArray[np.bool_])
+ assert_type(i8 > SEQ, npt.NDArray[np.bool_])
+
+ assert_type(u8 > u8, np.bool_)
+ assert_type(u8 > i4, np.bool_)
+ assert_type(u8 > u4, np.bool_)
+ assert_type(u8 > b_, np.bool_)
+ assert_type(u8 > b, np.bool_)
+ assert_type(u8 > c, np.bool_)
+ assert_type(u8 > f, np.bool_)
+ assert_type(u8 > i, np.bool_)
+ assert_type(u8 > AR, npt.NDArray[np.bool_])
+ assert_type(u8 > SEQ, npt.NDArray[np.bool_])
+
+ assert_type(i8 > i8, np.bool_)
+ assert_type(u8 > i8, np.bool_)
+ assert_type(i4 > i8, np.bool_)
+ assert_type(u4 > i8, np.bool_)
+ assert_type(b_ > i8, np.bool_)
+ assert_type(b > i8, np.bool_)
+ assert_type(c > i8, np.bool_)
+ assert_type(f > i8, np.bool_)
+ assert_type(i > i8, np.bool_)
+ assert_type(AR > i8, npt.NDArray[np.bool_])
+ assert_type(SEQ > i8, npt.NDArray[np.bool_])
+
+ assert_type(u8 > u8, np.bool_)
+ assert_type(i4 > u8, np.bool_)
+ assert_type(u4 > u8, np.bool_)
+ assert_type(b_ > u8, np.bool_)
+ assert_type(b > u8, np.bool_)
+ assert_type(c > u8, np.bool_)
+ assert_type(f > u8, np.bool_)
+ assert_type(i > u8, np.bool_)
+ assert_type(AR > u8, npt.NDArray[np.bool_])
+ assert_type(SEQ > u8, npt.NDArray[np.bool_])
+
+ assert_type(i4 > i8, np.bool_)
+ assert_type(i4 > i4, np.bool_)
+ assert_type(i4 > i, np.bool_)
+ assert_type(i4 > b_, np.bool_)
+ assert_type(i4 > b, np.bool_)
+ assert_type(i4 > AR, npt.NDArray[np.bool_])
+ assert_type(i4 > SEQ, npt.NDArray[np.bool_])
+
+ assert_type(u4 > i8, np.bool_)
+ assert_type(u4 > i4, np.bool_)
+ assert_type(u4 > u8, np.bool_)
+ assert_type(u4 > u4, np.bool_)
+ assert_type(u4 > i, np.bool_)
+ assert_type(u4 > b_, np.bool_)
+ assert_type(u4 > b, np.bool_)
+ assert_type(u4 > AR, npt.NDArray[np.bool_])
+ assert_type(u4 > SEQ, npt.NDArray[np.bool_])
+
+ assert_type(i8 > i4, np.bool_)
+ assert_type(i4 > i4, np.bool_)
+ assert_type(i > i4, np.bool_)
+ assert_type(b_ > i4, np.bool_)
+ assert_type(b > i4, np.bool_)
+ assert_type(AR > i4, npt.NDArray[np.bool_])
+ assert_type(SEQ > i4, npt.NDArray[np.bool_])
+
+ assert_type(i8 > u4, np.bool_)
+ assert_type(i4 > u4, np.bool_)
+ assert_type(u8 > u4, np.bool_)
+ assert_type(u4 > u4, np.bool_)
+ assert_type(b_ > u4, np.bool_)
+ assert_type(b > u4, np.bool_)
+ assert_type(i > u4, np.bool_)
+ assert_type(AR > u4, npt.NDArray[np.bool_])
+ assert_type(SEQ > u4, npt.NDArray[np.bool_])
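The pattern throughout this file: scalar-vs-scalar comparisons reveal np.bool_, while any operand that is (or broadcasts against) an array reveals npt.NDArray[np.bool_]. A small runtime sketch of that split (the values are illustrative):

    import numpy as np

    i8 = np.int64(2)
    f4 = np.float32(1.5)
    AR = np.array([0, 1, 2], dtype=np.int64)

    print(type(f4 > i8))    # <class 'numpy.bool_'>: scalar comparison
    print((AR > i8).dtype)  # bool: array comparison broadcasts elementwise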
venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/dtype.pyi ADDED
@@ -0,0 +1,85 @@
+ import sys
+ import ctypes as ct
+ from typing import Any
+
+ import numpy as np
+
+ if sys.version_info >= (3, 11):
+     from typing import assert_type
+ else:
+     from typing_extensions import assert_type
+
+ dtype_U: np.dtype[np.str_]
+ dtype_V: np.dtype[np.void]
+ dtype_i8: np.dtype[np.int64]
+
+ assert_type(np.dtype(np.float64), np.dtype[np.float64])
+ assert_type(np.dtype(np.float64, metadata={"test": "test"}), np.dtype[np.float64])
+ assert_type(np.dtype(np.int64), np.dtype[np.int64])
+
+ # String aliases
+ assert_type(np.dtype("float64"), np.dtype[np.float64])
+ assert_type(np.dtype("float32"), np.dtype[np.float32])
+ assert_type(np.dtype("int64"), np.dtype[np.int64])
+ assert_type(np.dtype("int32"), np.dtype[np.int32])
+ assert_type(np.dtype("bool"), np.dtype[np.bool_])
+ assert_type(np.dtype("bytes"), np.dtype[np.bytes_])
+ assert_type(np.dtype("str"), np.dtype[np.str_])
+
+ # Python types
+ assert_type(np.dtype(complex), np.dtype[np.cdouble])
+ assert_type(np.dtype(float), np.dtype[np.double])
+ assert_type(np.dtype(int), np.dtype[np.int_])
+ assert_type(np.dtype(bool), np.dtype[np.bool_])
+ assert_type(np.dtype(str), np.dtype[np.str_])
+ assert_type(np.dtype(bytes), np.dtype[np.bytes_])
+ assert_type(np.dtype(object), np.dtype[np.object_])
+
+ # ctypes
+ assert_type(np.dtype(ct.c_double), np.dtype[np.double])
+ assert_type(np.dtype(ct.c_longlong), np.dtype[np.longlong])
+ assert_type(np.dtype(ct.c_uint32), np.dtype[np.uint32])
+ assert_type(np.dtype(ct.c_bool), np.dtype[np.bool_])
+ assert_type(np.dtype(ct.c_char), np.dtype[np.bytes_])
+ assert_type(np.dtype(ct.py_object), np.dtype[np.object_])
+
+ # Special case for None
+ assert_type(np.dtype(None), np.dtype[np.double])
+
+ # Dtypes of dtypes
+ assert_type(np.dtype(np.dtype(np.float64)), np.dtype[np.float64])
+
+ # Parameterized dtypes
+ assert_type(np.dtype("S8"), np.dtype)
+
+ # Void
+ assert_type(np.dtype(("U", 10)), np.dtype[np.void])
+
+ # Methods and attributes
+ assert_type(dtype_U.base, np.dtype[Any])
+ assert_type(dtype_U.subdtype, None | tuple[np.dtype[Any], tuple[int, ...]])
+ assert_type(dtype_U.newbyteorder(), np.dtype[np.str_])
+ assert_type(dtype_U.type, type[np.str_])
+ assert_type(dtype_U.name, str)
+ assert_type(dtype_U.names, None | tuple[str, ...])
+
+ assert_type(dtype_U * 0, np.dtype[np.str_])
+ assert_type(dtype_U * 1, np.dtype[np.str_])
+ assert_type(dtype_U * 2, np.dtype[np.str_])
+
+ assert_type(dtype_i8 * 0, np.dtype[np.void])
+ assert_type(dtype_i8 * 1, np.dtype[np.int64])
+ assert_type(dtype_i8 * 2, np.dtype[np.void])
+
+ assert_type(0 * dtype_U, np.dtype[np.str_])
+ assert_type(1 * dtype_U, np.dtype[np.str_])
+ assert_type(2 * dtype_U, np.dtype[np.str_])
+
+ assert_type(0 * dtype_i8, np.dtype[Any])
+ assert_type(1 * dtype_i8, np.dtype[Any])
+ assert_type(2 * dtype_i8, np.dtype[Any])
+
+ assert_type(dtype_V["f0"], np.dtype[Any])
+ assert_type(dtype_V[0], np.dtype[Any])
+ assert_type(dtype_V[["f0", "f1"]], np.dtype[np.void])
+ assert_type(dtype_V[["f0"]], np.dtype[np.void])
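The construction paths asserted above (NumPy scalar types, string aliases, Python builtins, ctypes types) all normalize to the same np.dtype instances at runtime. A brief sketch of that equivalence:

    import ctypes as ct
    import numpy as np

    # All three spellings produce the float64 dtype
    print(np.dtype(np.float64) == np.dtype("float64") == np.dtype(ct.c_double))  # True
    print(np.dtype(None))  # float64, the documented default for None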
venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/einsumfunc.pyi ADDED
@@ -0,0 +1,45 @@
+ import sys
+ from typing import Any
+
+ import numpy as np
+ import numpy.typing as npt
+
+ if sys.version_info >= (3, 11):
+     from typing import assert_type
+ else:
+     from typing_extensions import assert_type
+
+ AR_LIKE_b: list[bool]
+ AR_LIKE_u: list[np.uint32]
+ AR_LIKE_i: list[int]
+ AR_LIKE_f: list[float]
+ AR_LIKE_c: list[complex]
+ AR_LIKE_U: list[str]
+ AR_o: npt.NDArray[np.object_]
+
+ OUT_f: npt.NDArray[np.float64]
+
+ assert_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_b), Any)
+ assert_type(np.einsum("i,i->i", AR_o, AR_o), Any)
+ assert_type(np.einsum("i,i->i", AR_LIKE_u, AR_LIKE_u), Any)
+ assert_type(np.einsum("i,i->i", AR_LIKE_i, AR_LIKE_i), Any)
+ assert_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f), Any)
+ assert_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c), Any)
+ assert_type(np.einsum("i,i->i", AR_LIKE_b, AR_LIKE_i), Any)
+ assert_type(np.einsum("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c), Any)
+
+ assert_type(np.einsum("i,i->i", AR_LIKE_c, AR_LIKE_c, out=OUT_f), npt.NDArray[np.float64])
+ assert_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe", out=OUT_f), npt.NDArray[np.float64])
+ assert_type(np.einsum("i,i->i", AR_LIKE_f, AR_LIKE_f, dtype="c16"), Any)
+ assert_type(np.einsum("i,i->i", AR_LIKE_U, AR_LIKE_U, dtype=bool, casting="unsafe"), Any)
+
+ assert_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_b), tuple[list[Any], str])
+ assert_type(np.einsum_path("i,i->i", AR_LIKE_u, AR_LIKE_u), tuple[list[Any], str])
+ assert_type(np.einsum_path("i,i->i", AR_LIKE_i, AR_LIKE_i), tuple[list[Any], str])
+ assert_type(np.einsum_path("i,i->i", AR_LIKE_f, AR_LIKE_f), tuple[list[Any], str])
+ assert_type(np.einsum_path("i,i->i", AR_LIKE_c, AR_LIKE_c), tuple[list[Any], str])
+ assert_type(np.einsum_path("i,i->i", AR_LIKE_b, AR_LIKE_i), tuple[list[Any], str])
+ assert_type(np.einsum_path("i,i,i,i->i", AR_LIKE_b, AR_LIKE_u, AR_LIKE_i, AR_LIKE_c), tuple[list[Any], str])
+
+ assert_type(np.einsum([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i), Any)
+ assert_type(np.einsum_path([[1, 1], [1, 1]], AR_LIKE_i, AR_LIKE_i), tuple[list[Any], str])
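np.einsum reveals Any here because the result type depends on the subscript string, which the stubs cannot inspect (only an explicit out= array pins the type); np.einsum_path always reveals the (path, report) pair. A runnable sketch (operands are illustrative):

    import numpy as np

    a = [1, 2, 3]
    b = [4, 5, 6]
    print(np.einsum("i,i->i", a, b))   # [ 4 10 18]: elementwise product
    path, report = np.einsum_path("i,i->i", a, b)
    print(path[0])                     # 'einsum_path'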
venv/lib/python3.10/site-packages/numpy/typing/tests/data/reveal/flatiter.pyi ADDED
@@ -0,0 +1,31 @@
+ import sys
+ from typing import Any
+
+ import numpy as np
+ import numpy.typing as npt
+
+ if sys.version_info >= (3, 11):
+     from typing import assert_type
+ else:
+     from typing_extensions import assert_type
+
+ a: np.flatiter[npt.NDArray[np.str_]]
+
+ assert_type(a.base, npt.NDArray[np.str_])
+ assert_type(a.copy(), npt.NDArray[np.str_])
+ assert_type(a.coords, tuple[int, ...])
+ assert_type(a.index, int)
+ assert_type(iter(a), np.flatiter[npt.NDArray[np.str_]])
+ assert_type(next(a), np.str_)
+ assert_type(a[0], np.str_)
+ assert_type(a[[0, 1, 2]], npt.NDArray[np.str_])
+ assert_type(a[...], npt.NDArray[np.str_])
+ assert_type(a[:], npt.NDArray[np.str_])
+ assert_type(a[(...,)], npt.NDArray[np.str_])
+ assert_type(a[(0,)], np.str_)
+ assert_type(a.__array__(), npt.NDArray[np.str_])
+ assert_type(a.__array__(np.dtype(np.float64)), npt.NDArray[np.float64])
+ a[0] = "a"
+ a[:5] = "a"
+ a[...] = "a"
+ a[(...,)] = "a"
+ a[(...,)] = "a"