applied-ai-018 committed (verified)
Commit 69ef83b
1 Parent(s): 4a88805

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. env-llmeval/lib/python3.10/site-packages/numpy/_typing/__pycache__/_array_like.cpython-310.pyc +0 -0
  2. env-llmeval/lib/python3.10/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-310.pyc +0 -0
  3. env-llmeval/lib/python3.10/site-packages/numpy/_typing/__pycache__/_nbit.cpython-310.pyc +0 -0
  4. env-llmeval/lib/python3.10/site-packages/numpy/_typing/__pycache__/_scalars.cpython-310.pyc +0 -0
  5. env-llmeval/lib/python3.10/site-packages/numpy/_typing/__pycache__/_shape.cpython-310.pyc +0 -0
  6. env-llmeval/lib/python3.10/site-packages/numpy/lib/__init__.py +92 -0
  7. env-llmeval/lib/python3.10/site-packages/numpy/lib/__init__.pyi +245 -0
  8. env-llmeval/lib/python3.10/site-packages/numpy/lib/_datasource.py +704 -0
  9. env-llmeval/lib/python3.10/site-packages/numpy/lib/_iotools.py +897 -0
  10. env-llmeval/lib/python3.10/site-packages/numpy/lib/_version.py +155 -0
  11. env-llmeval/lib/python3.10/site-packages/numpy/lib/_version.pyi +17 -0
  12. env-llmeval/lib/python3.10/site-packages/numpy/lib/arraypad.py +882 -0
  13. env-llmeval/lib/python3.10/site-packages/numpy/lib/arraypad.pyi +85 -0
  14. env-llmeval/lib/python3.10/site-packages/numpy/lib/arraysetops.py +981 -0
  15. env-llmeval/lib/python3.10/site-packages/numpy/lib/arraysetops.pyi +362 -0
  16. env-llmeval/lib/python3.10/site-packages/numpy/lib/arrayterator.py +219 -0
  17. env-llmeval/lib/python3.10/site-packages/numpy/lib/format.py +976 -0
  18. env-llmeval/lib/python3.10/site-packages/numpy/lib/format.pyi +22 -0
  19. env-llmeval/lib/python3.10/site-packages/numpy/lib/function_base.py +0 -0
  20. env-llmeval/lib/python3.10/site-packages/numpy/lib/function_base.pyi +697 -0
  21. env-llmeval/lib/python3.10/site-packages/numpy/lib/histograms.py +1072 -0
  22. env-llmeval/lib/python3.10/site-packages/numpy/lib/index_tricks.py +1046 -0
  23. env-llmeval/lib/python3.10/site-packages/numpy/lib/index_tricks.pyi +162 -0
  24. env-llmeval/lib/python3.10/site-packages/numpy/lib/mixins.py +177 -0
  25. env-llmeval/lib/python3.10/site-packages/numpy/lib/mixins.pyi +74 -0
  26. env-llmeval/lib/python3.10/site-packages/numpy/lib/nanfunctions.py +1887 -0
  27. env-llmeval/lib/python3.10/site-packages/numpy/lib/nanfunctions.pyi +38 -0
  28. env-llmeval/lib/python3.10/site-packages/numpy/lib/npyio.py +2547 -0
  29. env-llmeval/lib/python3.10/site-packages/numpy/lib/polynomial.py +1453 -0
  30. env-llmeval/lib/python3.10/site-packages/numpy/lib/polynomial.pyi +303 -0
  31. env-llmeval/lib/python3.10/site-packages/numpy/lib/recfunctions.py +1673 -0
  32. env-llmeval/lib/python3.10/site-packages/numpy/lib/scimath.py +625 -0
  33. env-llmeval/lib/python3.10/site-packages/numpy/lib/scimath.pyi +94 -0
  34. env-llmeval/lib/python3.10/site-packages/numpy/lib/shape_base.py +1274 -0
  35. env-llmeval/lib/python3.10/site-packages/numpy/lib/twodim_base.py +1183 -0
  36. env-llmeval/lib/python3.10/site-packages/numpy/lib/twodim_base.pyi +239 -0
  37. env-llmeval/lib/python3.10/site-packages/numpy/lib/type_check.py +735 -0
  38. env-llmeval/lib/python3.10/site-packages/numpy/lib/ufunclike.pyi +66 -0
  39. env-llmeval/lib/python3.10/site-packages/numpy/lib/user_array.py +286 -0
  40. env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/__init__.py +11 -0
  41. env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/__init__.pyi +15 -0
  42. env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/defmatrix.py +1114 -0
  43. env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/defmatrix.pyi +16 -0
  44. env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/setup.py +12 -0
  45. env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/tests/__init__.py +0 -0
  46. env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/tests/__pycache__/test_numeric.cpython-310.pyc +0 -0
  47. env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/tests/test_defmatrix.py +453 -0
  48. env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/tests/test_interaction.py +354 -0
  49. env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/tests/test_masked_matrix.py +231 -0
  50. env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py +93 -0
env-llmeval/lib/python3.10/site-packages/numpy/_typing/__pycache__/_array_like.cpython-310.pyc ADDED
Binary file (3.34 kB).
 
env-llmeval/lib/python3.10/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-310.pyc ADDED
Binary file (3.69 kB).
 
env-llmeval/lib/python3.10/site-packages/numpy/_typing/__pycache__/_nbit.cpython-310.pyc ADDED
Binary file (477 Bytes).
 
env-llmeval/lib/python3.10/site-packages/numpy/_typing/__pycache__/_scalars.cpython-310.pyc ADDED
Binary file (767 Bytes).
 
env-llmeval/lib/python3.10/site-packages/numpy/_typing/__pycache__/_shape.cpython-310.pyc ADDED
Binary file (349 Bytes).
 
env-llmeval/lib/python3.10/site-packages/numpy/lib/__init__.py ADDED
@@ -0,0 +1,92 @@
+"""
+**Note:** almost all functions in the ``numpy.lib`` namespace
+are also present in the main ``numpy`` namespace. Please use the
+functions as ``np.<funcname>`` where possible.
+
+``numpy.lib`` is mostly a space for implementing functions that don't
+belong in core or in another NumPy submodule with a clear purpose
+(e.g. ``random``, ``fft``, ``linalg``, ``ma``).
+
+It mostly contains basic functions that are used by several submodules
+and are useful to have in the main name-space.
+
+"""
+
+# Public submodules
+# Note: recfunctions and (maybe) format are public too, but not imported
+from . import mixins
+from . import scimath as emath
+
+# Private submodules
+# load module names. See https://github.com/networkx/networkx/issues/5838
+from . import type_check
+from . import index_tricks
+from . import function_base
+from . import nanfunctions
+from . import shape_base
+from . import stride_tricks
+from . import twodim_base
+from . import ufunclike
+from . import histograms
+from . import polynomial
+from . import utils
+from . import arraysetops
+from . import npyio
+from . import arrayterator
+from . import arraypad
+from . import _version
+
+from .type_check import *
+from .index_tricks import *
+from .function_base import *
+from .nanfunctions import *
+from .shape_base import *
+from .stride_tricks import *
+from .twodim_base import *
+from .ufunclike import *
+from .histograms import *
+
+from .polynomial import *
+from .utils import *
+from .arraysetops import *
+from .npyio import *
+from .arrayterator import Arrayterator
+from .arraypad import *
+from ._version import *
+from numpy.core._multiarray_umath import tracemalloc_domain
+
+__all__ = ['emath', 'tracemalloc_domain', 'Arrayterator']
+__all__ += type_check.__all__
+__all__ += index_tricks.__all__
+__all__ += function_base.__all__
+__all__ += shape_base.__all__
+__all__ += stride_tricks.__all__
+__all__ += twodim_base.__all__
+__all__ += ufunclike.__all__
+__all__ += arraypad.__all__
+__all__ += polynomial.__all__
+__all__ += utils.__all__
+__all__ += arraysetops.__all__
+__all__ += npyio.__all__
+__all__ += nanfunctions.__all__
+__all__ += histograms.__all__
+
+from numpy._pytesttester import PytestTester
+test = PytestTester(__name__)
+del PytestTester
+
+def __getattr__(attr):
+    # Warn for deprecated attributes
+    import math
+    import warnings
+
+    if attr == 'math':
+        warnings.warn(
+            "`np.lib.math` is a deprecated alias for the standard library "
+            "`math` module (Deprecated Numpy 1.25). Replace usages of "
+            "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2)
+        return math
+    else:
+        raise AttributeError("module {!r} has no attribute "
+                             "{!r}".format(__name__, attr))
+
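The `__getattr__` hook at the bottom of this file is the PEP 562 module-level attribute hook: `np.lib.math` is resolved lazily, emits a `DeprecationWarning`, and returns the stdlib `math` module, while unknown names still raise `AttributeError`. A minimal sketch of the consumer-visible behavior (not part of the diff; assumes a NumPy 1.25/1.26 environment, where this shim ships):

    import math
    import warnings
    import numpy.lib

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        m = numpy.lib.math              # dispatched to numpy.lib.__getattr__
    assert m is math                    # the alias is the stdlib math module
    assert caught[0].category is DeprecationWarning
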
env-llmeval/lib/python3.10/site-packages/numpy/lib/__init__.pyi ADDED
@@ -0,0 +1,245 @@
+import math as math
+from typing import Any
+
+from numpy._pytesttester import PytestTester
+
+from numpy import (
+    ndenumerate as ndenumerate,
+    ndindex as ndindex,
+)
+
+from numpy.version import version
+
+from numpy.lib import (
+    format as format,
+    mixins as mixins,
+    scimath as scimath,
+    stride_tricks as stride_tricks,
+)
+
+from numpy.lib._version import (
+    NumpyVersion as NumpyVersion,
+)
+
+from numpy.lib.arraypad import (
+    pad as pad,
+)
+
+from numpy.lib.arraysetops import (
+    ediff1d as ediff1d,
+    intersect1d as intersect1d,
+    setxor1d as setxor1d,
+    union1d as union1d,
+    setdiff1d as setdiff1d,
+    unique as unique,
+    in1d as in1d,
+    isin as isin,
+)
+
+from numpy.lib.arrayterator import (
+    Arrayterator as Arrayterator,
+)
+
+from numpy.lib.function_base import (
+    select as select,
+    piecewise as piecewise,
+    trim_zeros as trim_zeros,
+    copy as copy,
+    iterable as iterable,
+    percentile as percentile,
+    diff as diff,
+    gradient as gradient,
+    angle as angle,
+    unwrap as unwrap,
+    sort_complex as sort_complex,
+    disp as disp,
+    flip as flip,
+    rot90 as rot90,
+    extract as extract,
+    place as place,
+    vectorize as vectorize,
+    asarray_chkfinite as asarray_chkfinite,
+    average as average,
+    bincount as bincount,
+    digitize as digitize,
+    cov as cov,
+    corrcoef as corrcoef,
+    median as median,
+    sinc as sinc,
+    hamming as hamming,
+    hanning as hanning,
+    bartlett as bartlett,
+    blackman as blackman,
+    kaiser as kaiser,
+    trapz as trapz,
+    i0 as i0,
+    add_newdoc as add_newdoc,
+    add_docstring as add_docstring,
+    meshgrid as meshgrid,
+    delete as delete,
+    insert as insert,
+    append as append,
+    interp as interp,
+    add_newdoc_ufunc as add_newdoc_ufunc,
+    quantile as quantile,
+)
+
+from numpy.lib.histograms import (
+    histogram_bin_edges as histogram_bin_edges,
+    histogram as histogram,
+    histogramdd as histogramdd,
+)
+
+from numpy.lib.index_tricks import (
+    ravel_multi_index as ravel_multi_index,
+    unravel_index as unravel_index,
+    mgrid as mgrid,
+    ogrid as ogrid,
+    r_ as r_,
+    c_ as c_,
+    s_ as s_,
+    index_exp as index_exp,
+    ix_ as ix_,
+    fill_diagonal as fill_diagonal,
+    diag_indices as diag_indices,
+    diag_indices_from as diag_indices_from,
+)
+
+from numpy.lib.nanfunctions import (
+    nansum as nansum,
+    nanmax as nanmax,
+    nanmin as nanmin,
+    nanargmax as nanargmax,
+    nanargmin as nanargmin,
+    nanmean as nanmean,
+    nanmedian as nanmedian,
+    nanpercentile as nanpercentile,
+    nanvar as nanvar,
+    nanstd as nanstd,
+    nanprod as nanprod,
+    nancumsum as nancumsum,
+    nancumprod as nancumprod,
+    nanquantile as nanquantile,
+)
+
+from numpy.lib.npyio import (
+    savetxt as savetxt,
+    loadtxt as loadtxt,
+    genfromtxt as genfromtxt,
+    recfromtxt as recfromtxt,
+    recfromcsv as recfromcsv,
+    load as load,
+    save as save,
+    savez as savez,
+    savez_compressed as savez_compressed,
+    packbits as packbits,
+    unpackbits as unpackbits,
+    fromregex as fromregex,
+    DataSource as DataSource,
+)
+
+from numpy.lib.polynomial import (
+    poly as poly,
+    roots as roots,
+    polyint as polyint,
+    polyder as polyder,
+    polyadd as polyadd,
+    polysub as polysub,
+    polymul as polymul,
+    polydiv as polydiv,
+    polyval as polyval,
+    polyfit as polyfit,
+    RankWarning as RankWarning,
+    poly1d as poly1d,
+)
+
+from numpy.lib.shape_base import (
+    column_stack as column_stack,
+    row_stack as row_stack,
+    dstack as dstack,
+    array_split as array_split,
+    split as split,
+    hsplit as hsplit,
+    vsplit as vsplit,
+    dsplit as dsplit,
+    apply_over_axes as apply_over_axes,
+    expand_dims as expand_dims,
+    apply_along_axis as apply_along_axis,
+    kron as kron,
+    tile as tile,
+    get_array_wrap as get_array_wrap,
+    take_along_axis as take_along_axis,
+    put_along_axis as put_along_axis,
+)
+
+from numpy.lib.stride_tricks import (
+    broadcast_to as broadcast_to,
+    broadcast_arrays as broadcast_arrays,
+    broadcast_shapes as broadcast_shapes,
+)
+
+from numpy.lib.twodim_base import (
+    diag as diag,
+    diagflat as diagflat,
+    eye as eye,
+    fliplr as fliplr,
+    flipud as flipud,
+    tri as tri,
+    triu as triu,
+    tril as tril,
+    vander as vander,
+    histogram2d as histogram2d,
+    mask_indices as mask_indices,
+    tril_indices as tril_indices,
+    tril_indices_from as tril_indices_from,
+    triu_indices as triu_indices,
+    triu_indices_from as triu_indices_from,
+)
+
+from numpy.lib.type_check import (
+    mintypecode as mintypecode,
+    asfarray as asfarray,
+    real as real,
+    imag as imag,
+    iscomplex as iscomplex,
+    isreal as isreal,
+    iscomplexobj as iscomplexobj,
+    isrealobj as isrealobj,
+    nan_to_num as nan_to_num,
+    real_if_close as real_if_close,
+    typename as typename,
+    common_type as common_type,
+)
+
+from numpy.lib.ufunclike import (
+    fix as fix,
+    isposinf as isposinf,
+    isneginf as isneginf,
+)
+
+from numpy.lib.utils import (
+    issubclass_ as issubclass_,
+    issubsctype as issubsctype,
+    issubdtype as issubdtype,
+    deprecate as deprecate,
+    deprecate_with_doc as deprecate_with_doc,
+    get_include as get_include,
+    info as info,
+    source as source,
+    who as who,
+    lookfor as lookfor,
+    byte_bounds as byte_bounds,
+    safe_eval as safe_eval,
+    show_runtime as show_runtime,
+)
+
+from numpy.core.multiarray import (
+    tracemalloc_domain as tracemalloc_domain,
+)
+
+__all__: list[str]
+__path__: list[str]
+test: PytestTester
+
+__version__ = version
+emath = scimath
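Every name in this stub is re-exported with the `from m import x as x` idiom; in `.pyi` files the redundant-looking alias marks an explicit re-export, which strict type checkers (e.g. mypy with `--no-implicit-reexport`) require before treating the name as public. The closing aliases also mirror runtime behavior, which can be checked directly; a small sketch (not part of the diff):

    import numpy as np
    from numpy.lib import scimath

    assert np.lib.emath is scimath   # `emath = scimath` holds at runtime too
    print(np.lib.emath.sqrt(-1))     # 1j -- scimath.sqrt handles negative reals
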
env-llmeval/lib/python3.10/site-packages/numpy/lib/_datasource.py ADDED
@@ -0,0 +1,704 @@
+"""A file interface for handling local and remote data files.
+
+The goal of datasource is to abstract some of the file system operations
+when dealing with data files so the researcher doesn't have to know all the
+low-level details. Through datasource, a researcher can obtain and use a
+file with one function call, regardless of location of the file.
+
+DataSource is meant to augment standard python libraries, not replace them.
+It should work seamlessly with standard file IO operations and the os
+module.
+
+DataSource files can originate locally or remotely:
+
+- local files : '/home/guido/src/local/data.txt'
+- URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
+
+DataSource files can also be compressed or uncompressed. Currently only
+gzip, bz2 and xz are supported.
+
+Example::
+
+    >>> # Create a DataSource, use os.curdir (default) for local storage.
+    >>> from numpy import DataSource
+    >>> ds = DataSource()
+    >>>
+    >>> # Open a remote file.
+    >>> # DataSource downloads the file, stores it locally in:
+    >>> #     './www.google.com/index.html'
+    >>> # opens the file and returns a file object.
+    >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP
+    >>>
+    >>> # Use the file as you normally would
+    >>> fp.read() # doctest: +SKIP
+    >>> fp.close() # doctest: +SKIP
+
+"""
+import os
+import io
+
+from .._utils import set_module
+
+
+_open = open
+
+
+def _check_mode(mode, encoding, newline):
+    """Check mode and that encoding and newline are compatible.
+
+    Parameters
+    ----------
+    mode : str
+        File open mode.
+    encoding : str
+        File encoding.
+    newline : str
+        Newline for text files.
+
+    """
+    if "t" in mode:
+        if "b" in mode:
+            raise ValueError("Invalid mode: %r" % (mode,))
+    else:
+        if encoding is not None:
+            raise ValueError("Argument 'encoding' not supported in binary mode")
+        if newline is not None:
+            raise ValueError("Argument 'newline' not supported in binary mode")
+
+
+# Using a class instead of a module-level dictionary
+# to reduce the initial 'import numpy' overhead by
+# deferring the import of lzma, bz2 and gzip until needed
+
+# TODO: .zip support, .tar support?
+class _FileOpeners:
+    """
+    Container for different methods to open (un-)compressed files.
+
+    `_FileOpeners` contains a dictionary that holds one method for each
+    supported file format. Attribute lookup is implemented in such a way
+    that an instance of `_FileOpeners` itself can be indexed with the keys
+    of that dictionary. Currently uncompressed files as well as files
+    compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported.
+
+    Notes
+    -----
+    `_file_openers`, an instance of `_FileOpeners`, is made available for
+    use in the `_datasource` module.
+
+    Examples
+    --------
+    >>> import gzip
+    >>> np.lib._datasource._file_openers.keys()
+    [None, '.bz2', '.gz', '.xz', '.lzma']
+    >>> np.lib._datasource._file_openers['.gz'] is gzip.open
+    True
+
+    """
+
+    def __init__(self):
+        self._loaded = False
+        self._file_openers = {None: io.open}
+
+    def _load(self):
+        if self._loaded:
+            return
+
+        try:
+            import bz2
+            self._file_openers[".bz2"] = bz2.open
+        except ImportError:
+            pass
+
+        try:
+            import gzip
+            self._file_openers[".gz"] = gzip.open
+        except ImportError:
+            pass
+
+        try:
+            import lzma
+            self._file_openers[".xz"] = lzma.open
+            self._file_openers[".lzma"] = lzma.open
+        except (ImportError, AttributeError):
+            # There are incompatible backports of lzma that do not have the
+            # lzma.open attribute, so catch that as well as ImportError.
+            pass
+
+        self._loaded = True
+
+    def keys(self):
+        """
+        Return the keys of currently supported file openers.
+
+        Parameters
+        ----------
+        None
+
+        Returns
+        -------
+        keys : list
+            The keys are None for uncompressed files and the file extension
+            strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression
+            methods.
+
+        """
+        self._load()
+        return list(self._file_openers.keys())
+
+    def __getitem__(self, key):
+        self._load()
+        return self._file_openers[key]
+
+
+_file_openers = _FileOpeners()
+
+
+def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
+    """
+    Open `path` with `mode` and return the file object.
+
+    If ``path`` is an URL, it will be downloaded, stored in the
+    `DataSource` `destpath` directory and opened from there.
+
+    Parameters
+    ----------
+    path : str
+        Local file path or URL to open.
+    mode : str, optional
+        Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
+        append. Available modes depend on the type of object specified by
+        path. Default is 'r'.
+    destpath : str, optional
+        Path to the directory where the source file gets downloaded to for
+        use. If `destpath` is None, a temporary directory will be created.
+        The default path is the current directory.
+    encoding : {None, str}, optional
+        Open text file with given encoding. The default encoding will be
+        what `io.open` uses.
+    newline : {None, str}, optional
+        Newline to use when reading text file.
+
+    Returns
+    -------
+    out : file object
+        The opened file.
+
+    Notes
+    -----
+    This is a convenience function that instantiates a `DataSource` and
+    returns the file object from ``DataSource.open(path)``.
+
+    """
+
+    ds = DataSource(destpath)
+    return ds.open(path, mode, encoding=encoding, newline=newline)
+
+
+@set_module('numpy')
+class DataSource:
+    """
+    DataSource(destpath='.')
+
+    A generic data source file (file, http, ftp, ...).
+
+    DataSources can be local files or remote files/URLs. The files may
+    also be compressed or uncompressed. DataSource hides some of the
+    low-level details of downloading the file, allowing you to simply pass
+    in a valid file path (or URL) and obtain a file object.
+
+    Parameters
+    ----------
+    destpath : str or None, optional
+        Path to the directory where the source file gets downloaded to for
+        use. If `destpath` is None, a temporary directory will be created.
+        The default path is the current directory.
+
+    Notes
+    -----
+    URLs require a scheme string (``http://``) to be used, without it they
+    will fail::
+
+        >>> repos = np.DataSource()
+        >>> repos.exists('www.google.com/index.html')
+        False
+        >>> repos.exists('http://www.google.com/index.html')
+        True
+
+    Temporary directories are deleted when the DataSource is deleted.
+
+    Examples
+    --------
+    ::
+
+        >>> ds = np.DataSource('/home/guido')
+        >>> urlname = 'http://www.google.com/'
+        >>> gfile = ds.open('http://www.google.com/')
+        >>> ds.abspath(urlname)
+        '/home/guido/www.google.com/index.html'
+
+        >>> ds = np.DataSource(None)  # use with temporary file
+        >>> ds.open('/home/guido/foobar.txt')
+        <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
+        >>> ds.abspath('/home/guido/foobar.txt')
+        '/tmp/.../home/guido/foobar.txt'
+
+    """
+
+    def __init__(self, destpath=os.curdir):
+        """Create a DataSource with a local path at destpath."""
+        if destpath:
+            self._destpath = os.path.abspath(destpath)
+            self._istmpdest = False
+        else:
+            import tempfile  # deferring import to improve startup time
+            self._destpath = tempfile.mkdtemp()
+            self._istmpdest = True
+
+    def __del__(self):
+        # Remove temp directories
+        if hasattr(self, '_istmpdest') and self._istmpdest:
+            import shutil
+
+            shutil.rmtree(self._destpath)
+
+    def _iszip(self, filename):
+        """Test if the filename is a zip file by looking at the file extension.
+
+        """
+        fname, ext = os.path.splitext(filename)
+        return ext in _file_openers.keys()
+
+    def _iswritemode(self, mode):
+        """Test if the given mode will open a file for writing."""
+
+        # Currently only used to test the bz2 files.
+        _writemodes = ("w", "+")
+        for c in mode:
+            if c in _writemodes:
+                return True
+        return False
+
+    def _splitzipext(self, filename):
+        """Split zip extension from filename and return filename.
+
+        Returns
+        -------
+        base, zip_ext : {tuple}
+
+        """
+
+        if self._iszip(filename):
+            return os.path.splitext(filename)
+        else:
+            return filename, None
+
+    def _possible_names(self, filename):
+        """Return a tuple containing compressed filename variations."""
+        names = [filename]
+        if not self._iszip(filename):
+            for zipext in _file_openers.keys():
+                if zipext:
+                    names.append(filename+zipext)
+        return names
+
+    def _isurl(self, path):
+        """Test if path is a net location. Tests the scheme and netloc."""
+
+        # We do this here to reduce the 'import numpy' initial import time.
+        from urllib.parse import urlparse
+
+        # BUG : URLs require a scheme string ('http://') to be used.
+        #       www.google.com will fail.
+        #       Should we prepend the scheme for those that don't have it and
+        #       test that also? Similar to the way we append .gz and test
+        #       for compressed versions of files.
+
+        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
+        return bool(scheme and netloc)
+
+    def _cache(self, path):
+        """Cache the file specified by path.
+
+        Creates a copy of the file in the datasource cache.
+
+        """
+        # We import these here because importing them is slow and
+        # a significant fraction of numpy's total import time.
+        import shutil
+        from urllib.request import urlopen
+
+        upath = self.abspath(path)
+
+        # ensure directory exists
+        if not os.path.exists(os.path.dirname(upath)):
+            os.makedirs(os.path.dirname(upath))
+
+        # TODO: Doesn't handle compressed files!
+        if self._isurl(path):
+            with urlopen(path) as openedurl:
+                with _open(upath, 'wb') as f:
+                    shutil.copyfileobj(openedurl, f)
+        else:
+            shutil.copyfile(path, upath)
+        return upath
+
+    def _findfile(self, path):
+        """Searches for ``path`` and returns full path if found.
+
+        If path is an URL, _findfile will cache a local copy and return the
+        path to the cached file. If path is a local file, _findfile will
+        return a path to that local file.
+
+        The search will include possible compressed versions of the file
+        and return the first occurrence found.
+
+        """
+
+        # Build list of possible local file paths
+        if not self._isurl(path):
+            # Valid local paths
+            filelist = self._possible_names(path)
+            # Paths in self._destpath
+            filelist += self._possible_names(self.abspath(path))
+        else:
+            # Cached URLs in self._destpath
+            filelist = self._possible_names(self.abspath(path))
+            # Remote URLs
+            filelist = filelist + self._possible_names(path)
+
+        for name in filelist:
+            if self.exists(name):
+                if self._isurl(name):
+                    name = self._cache(name)
+                return name
+        return None
+
+    def abspath(self, path):
+        """
+        Return absolute path of file in the DataSource directory.
+
+        If `path` is an URL, then `abspath` will return either the location
+        the file exists locally or the location it would exist when opened
+        using the `open` method.
+
+        Parameters
+        ----------
+        path : str
+            Can be a local file or a remote URL.
+
+        Returns
+        -------
+        out : str
+            Complete path, including the `DataSource` destination directory.
+
+        Notes
+        -----
+        The functionality is based on `os.path.abspath`.
+
+        """
+        # We do this here to reduce the 'import numpy' initial import time.
+        from urllib.parse import urlparse
+
+        # TODO: This should be more robust. Handles case where path includes
+        #       the destpath, but not other sub-paths. Failing case:
+        #       path = /home/guido/datafile.txt
+        #       destpath = /home/alex/
+        #       upath = self.abspath(path)
+        #       upath == '/home/alex/home/guido/datafile.txt'
+
+        # handle case where path includes self._destpath
+        splitpath = path.split(self._destpath, 2)
+        if len(splitpath) > 1:
+            path = splitpath[1]
+        scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
+        netloc = self._sanitize_relative_path(netloc)
+        upath = self._sanitize_relative_path(upath)
+        return os.path.join(self._destpath, netloc, upath)
+
+    def _sanitize_relative_path(self, path):
+        """Return a sanitised relative path for which
+        os.path.abspath(os.path.join(base, path)).startswith(base)
+        """
+        last = None
+        path = os.path.normpath(path)
+        while path != last:
+            last = path
+            # Note: os.path.join treats '/' as os.sep on Windows
+            path = path.lstrip(os.sep).lstrip('/')
+            path = path.lstrip(os.pardir).lstrip('..')
+            drive, path = os.path.splitdrive(path)  # for Windows
+        return path
+
+    def exists(self, path):
+        """
+        Test if path exists.
+
+        Test if `path` exists as (and in this order):
+
+        - a local file.
+        - a remote URL that has been downloaded and stored locally in the
+          `DataSource` directory.
+        - a remote URL that has not been downloaded, but is valid and
+          accessible.
+
+        Parameters
+        ----------
+        path : str
+            Can be a local file or a remote URL.
+
+        Returns
+        -------
+        out : bool
+            True if `path` exists.
+
+        Notes
+        -----
+        When `path` is an URL, `exists` will return True if it's either
+        stored locally in the `DataSource` directory, or is a valid remote
+        URL. `DataSource` does not discriminate between the two, the file
+        is accessible if it exists in either location.
+
+        """
+
+        # First test for local path
+        if os.path.exists(path):
+            return True
+
+        # We import this here because importing urllib is slow and
+        # a significant fraction of numpy's total import time.
+        from urllib.request import urlopen
+        from urllib.error import URLError
+
+        # Test cached url
+        upath = self.abspath(path)
+        if os.path.exists(upath):
+            return True
+
+        # Test remote url
+        if self._isurl(path):
+            try:
+                netfile = urlopen(path)
+                netfile.close()
+                del(netfile)
+                return True
+            except URLError:
+                return False
+        return False
+
+    def open(self, path, mode='r', encoding=None, newline=None):
+        """
+        Open and return file-like object.
+
+        If `path` is an URL, it will be downloaded, stored in the
+        `DataSource` directory and opened from there.
+
+        Parameters
+        ----------
+        path : str
+            Local file path or URL to open.
+        mode : {'r', 'w', 'a'}, optional
+            Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+            'a' to append. Available modes depend on the type of object
+            specified by `path`. Default is 'r'.
+        encoding : {None, str}, optional
+            Open text file with given encoding. The default encoding will be
+            what `io.open` uses.
+        newline : {None, str}, optional
+            Newline to use when reading text file.
+
+        Returns
+        -------
+        out : file object
+            File object.
+
+        """
+
+        # TODO: There is no support for opening a file for writing which
+        #       doesn't exist yet (creating a file). Should there be?
+
+        # TODO: Add a ``subdir`` parameter for specifying the subdirectory
+        #       used to store URLs in self._destpath.
+
+        if self._isurl(path) and self._iswritemode(mode):
+            raise ValueError("URLs are not writeable")
+
+        # NOTE: _findfile will fail on a new file opened for writing.
+        found = self._findfile(path)
+        if found:
+            _fname, ext = self._splitzipext(found)
+            if ext == '.bz2':
+                # bz2 does not support '+' modes; strip it before opening.
+                mode = mode.replace("+", "")
+            return _file_openers[ext](found, mode=mode,
+                                      encoding=encoding, newline=newline)
+        else:
+            raise FileNotFoundError(f"{path} not found.")
+
+
+class Repository(DataSource):
+    """
+    Repository(baseurl, destpath='.')
+
+    A data repository where multiple DataSource's share a base
+    URL/directory.
+
+    `Repository` extends `DataSource` by prepending a base URL (or
+    directory) to all the files it handles. Use `Repository` when you will
+    be working with multiple files from one base URL. Initialize
+    `Repository` with the base URL, then refer to each file by its filename
+    only.
+
+    Parameters
+    ----------
+    baseurl : str
+        Path to the local directory or remote location that contains the
+        data files.
+    destpath : str or None, optional
+        Path to the directory where the source file gets downloaded to for
+        use. If `destpath` is None, a temporary directory will be created.
+        The default path is the current directory.
+
+    Examples
+    --------
+    To analyze all files in the repository, do something like this
+    (note: this is not self-contained code)::
+
+        >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
+        >>> for filename in filelist:
+        ...     fp = repos.open(filename)
+        ...     fp.analyze()
+        ...     fp.close()
+
+    Similarly you could use a URL for a repository::
+
+        >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
+
+    """
+
+    def __init__(self, baseurl, destpath=os.curdir):
+        """Create a Repository with a shared url or directory of baseurl."""
+        DataSource.__init__(self, destpath=destpath)
+        self._baseurl = baseurl
+
+    def __del__(self):
+        DataSource.__del__(self)
+
+    def _fullpath(self, path):
+        """Return complete path for path. Prepends baseurl if necessary."""
+        splitpath = path.split(self._baseurl, 2)
+        if len(splitpath) == 1:
+            result = os.path.join(self._baseurl, path)
+        else:
+            result = path  # path contains baseurl already
+        return result
+
+    def _findfile(self, path):
+        """Extend DataSource method to prepend baseurl to ``path``."""
+        return DataSource._findfile(self, self._fullpath(path))
+
+    def abspath(self, path):
+        """
+        Return absolute path of file in the Repository directory.
+
+        If `path` is an URL, then `abspath` will return either the location
+        the file exists locally or the location it would exist when opened
+        using the `open` method.
+
+        Parameters
+        ----------
+        path : str
+            Can be a local file or a remote URL. This may, but does not
+            have to, include the `baseurl` with which the `Repository` was
+            initialized.
+
+        Returns
+        -------
+        out : str
+            Complete path, including the `DataSource` destination directory.
+
+        """
+        return DataSource.abspath(self, self._fullpath(path))
+
+    def exists(self, path):
+        """
+        Test if path exists prepending Repository base URL to path.
+
+        Test if `path` exists as (and in this order):
+
+        - a local file.
+        - a remote URL that has been downloaded and stored locally in the
+          `DataSource` directory.
+        - a remote URL that has not been downloaded, but is valid and
+          accessible.
+
+        Parameters
+        ----------
+        path : str
+            Can be a local file or a remote URL. This may, but does not
+            have to, include the `baseurl` with which the `Repository` was
+            initialized.
+
+        Returns
+        -------
+        out : bool
+            True if `path` exists.
+
+        Notes
+        -----
+        When `path` is an URL, `exists` will return True if it's either
+        stored locally in the `DataSource` directory, or is a valid remote
+        URL. `DataSource` does not discriminate between the two, the file
+        is accessible if it exists in either location.
+
+        """
+        return DataSource.exists(self, self._fullpath(path))
+
+    def open(self, path, mode='r', encoding=None, newline=None):
+        """
+        Open and return file-like object prepending Repository base URL.
+
+        If `path` is an URL, it will be downloaded, stored in the
+        DataSource directory and opened from there.
+
+        Parameters
+        ----------
+        path : str
+            Local file path or URL to open. This may, but does not have to,
+            include the `baseurl` with which the `Repository` was
+            initialized.
+        mode : {'r', 'w', 'a'}, optional
+            Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+            'a' to append. Available modes depend on the type of object
+            specified by `path`. Default is 'r'.
+        encoding : {None, str}, optional
+            Open text file with given encoding. The default encoding will be
+            what `io.open` uses.
+        newline : {None, str}, optional
+            Newline to use when reading text file.
+
+        Returns
+        -------
+        out : file object
+            File object.
+
+        """
+        return DataSource.open(self, self._fullpath(path), mode,
+                               encoding=encoding, newline=newline)
+
+    def listdir(self):
+        """
+        List files in the source Repository.
+
+        Returns
+        -------
+        files : list of str
+            List of file names (not containing a directory part).
+
+        Notes
+        -----
+        Does not currently work for remote repositories.
+
+        """
+        if self._isurl(self._baseurl):
+            raise NotImplementedError(
+                "Directory listing of URLs, not supported yet.")
+        else:
+            return os.listdir(self._baseurl)
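A small usage sketch of the machinery above (not part of the diff; paths are illustrative): `_findfile` tries the compressed variants produced by `_possible_names`, and `_file_openers` maps the extension to `gzip.open`, so decompression is transparent to the caller. `Repository` applies the same search after prepending its base directory.

    import gzip
    import os
    import numpy.lib._datasource as datasource

    os.makedirs('/tmp/ds-demo', exist_ok=True)
    with gzip.open('/tmp/ds-demo/data.txt.gz', 'wt') as f:
        f.write('1 2 3\n')

    ds = datasource.DataSource('/tmp/ds-demo')     # destpath for cached files
    with ds.open('/tmp/ds-demo/data.txt', 'rt') as fp:
        print(fp.read())            # '1 2 3\n' -- the '.gz' variant was found

    repo = datasource.Repository('/tmp/ds-demo')   # shared base directory
    with repo.open('data.txt', 'rt') as fp:        # baseurl is prepended
        print(fp.read())

Note that the variant search happens in `_findfile` (via `_possible_names`), not in `exists`, which only checks the literal path locally, in the cache, and as a remote URL.
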
env-llmeval/lib/python3.10/site-packages/numpy/lib/_iotools.py ADDED
@@ -0,0 +1,897 @@
+"""A collection of functions designed to help I/O with ascii files.
+
+"""
+__docformat__ = "restructuredtext en"
+
+import numpy as np
+import numpy.core.numeric as nx
+from numpy.compat import asbytes, asunicode
+
+
+def _decode_line(line, encoding=None):
+    """Decode bytes from binary input streams.
+
+    Defaults to decoding from 'latin1'. That differs from the behavior of
+    np.compat.asunicode that decodes from 'ascii'.
+
+    Parameters
+    ----------
+    line : str or bytes
+        Line to be decoded.
+    encoding : str
+        Encoding used to decode `line`.
+
+    Returns
+    -------
+    decoded_line : str
+
+    """
+    if type(line) is bytes:
+        if encoding is None:
+            encoding = "latin1"
+        line = line.decode(encoding)
+
+    return line
+
+
+def _is_string_like(obj):
+    """
+    Check whether obj behaves like a string.
+    """
+    try:
+        obj + ''
+    except (TypeError, ValueError):
+        return False
+    return True
+
+
+def _is_bytes_like(obj):
+    """
+    Check whether obj behaves like a bytes object.
+    """
+    try:
+        obj + b''
+    except (TypeError, ValueError):
+        return False
+    return True
+
+
+def has_nested_fields(ndtype):
+    """
+    Returns whether one or several fields of a dtype are nested.
+
+    Parameters
+    ----------
+    ndtype : dtype
+        Data-type of a structured array.
+
+    Raises
+    ------
+    AttributeError
+        If `ndtype` does not have a `names` attribute.
+
+    Examples
+    --------
+    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
+    >>> np.lib._iotools.has_nested_fields(dt)
+    False
+
+    """
+    for name in ndtype.names or ():
+        if ndtype[name].names is not None:
+            return True
+    return False
+
+
+def flatten_dtype(ndtype, flatten_base=False):
+    """
+    Unpack a structured data-type by collapsing nested fields and/or fields
+    with a shape.
+
+    Note that the field names are lost.
+
+    Parameters
+    ----------
+    ndtype : dtype
+        The datatype to collapse
+    flatten_base : bool, optional
+        If True, transform a field with a shape into several fields.
+        Default is False.
+
+    Examples
+    --------
+    >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
+    ...                ('block', int, (2, 3))])
+    >>> np.lib._iotools.flatten_dtype(dt)
+    [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')]
+    >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
+    [dtype('S4'),
+     dtype('float64'),
+     dtype('float64'),
+     dtype('int64'),
+     dtype('int64'),
+     dtype('int64'),
+     dtype('int64'),
+     dtype('int64'),
+     dtype('int64')]
+
+    """
+    names = ndtype.names
+    if names is None:
+        if flatten_base:
+            return [ndtype.base] * int(np.prod(ndtype.shape))
+        return [ndtype.base]
+    else:
+        types = []
+        for field in names:
+            info = ndtype.fields[field]
+            flat_dt = flatten_dtype(info[0], flatten_base)
+            types.extend(flat_dt)
+        return types
+
+
+class LineSplitter:
+    """
+    Object to split a string at a given delimiter or at given places.
+
+    Parameters
+    ----------
+    delimiter : str, int, or sequence of ints, optional
+        If a string, character used to delimit consecutive fields.
+        If an integer or a sequence of integers, width(s) of each field.
+    comments : str, optional
+        Character used to mark the beginning of a comment. Default is '#'.
+    autostrip : bool, optional
+        Whether to strip each individual field. Default is True.
+
+    """
+
+    def autostrip(self, method):
+        """
+        Wrapper to strip each member of the output of `method`.
+
+        Parameters
+        ----------
+        method : function
+            Function that takes a single argument and returns a sequence of
+            strings.
+
+        Returns
+        -------
+        wrapped : function
+            The result of wrapping `method`. `wrapped` takes a single input
+            argument and returns a list of strings that are stripped of
+            white-space.
+
+        """
+        return lambda input: [_.strip() for _ in method(input)]
+
+    def __init__(self, delimiter=None, comments='#', autostrip=True,
+                 encoding=None):
+        delimiter = _decode_line(delimiter)
+        comments = _decode_line(comments)
+
+        self.comments = comments
+
+        # Delimiter is a character
+        if (delimiter is None) or isinstance(delimiter, str):
+            delimiter = delimiter or None
+            _handyman = self._delimited_splitter
+        # Delimiter is a list of field widths
+        elif hasattr(delimiter, '__iter__'):
+            _handyman = self._variablewidth_splitter
+            idx = np.cumsum([0] + list(delimiter))
+            delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
+        # Delimiter is a single integer
+        elif int(delimiter):
+            (_handyman, delimiter) = (
+                self._fixedwidth_splitter, int(delimiter))
+        else:
+            (_handyman, delimiter) = (self._delimited_splitter, None)
+        self.delimiter = delimiter
+        if autostrip:
+            self._handyman = self.autostrip(_handyman)
+        else:
+            self._handyman = _handyman
+        self.encoding = encoding
+
+    def _delimited_splitter(self, line):
+        """Chop off comments, strip, and split at delimiter. """
+        if self.comments is not None:
+            line = line.split(self.comments)[0]
+        line = line.strip(" \r\n")
+        if not line:
+            return []
+        return line.split(self.delimiter)
+
+    def _fixedwidth_splitter(self, line):
+        if self.comments is not None:
+            line = line.split(self.comments)[0]
+        line = line.strip("\r\n")
+        if not line:
+            return []
+        fixed = self.delimiter
+        slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
+        return [line[s] for s in slices]
+
+    def _variablewidth_splitter(self, line):
+        if self.comments is not None:
+            line = line.split(self.comments)[0]
+        if not line:
+            return []
+        slices = self.delimiter
+        return [line[s] for s in slices]
+
+    def __call__(self, line):
+        return self._handyman(_decode_line(line, self.encoding))
+
+
+class NameValidator:
+    """
+    Object to validate a list of strings to use as field names.
+
+    The strings are stripped of any non-alphanumeric character, and spaces
+    are replaced by '_'. During instantiation, the user can define a list
+    of names to exclude, as well as a list of invalid characters. Names in
+    the exclusion list have a '_' character appended.
+
+    Once an instance has been created, it can be called with a list of
+    names, and a list of valid names will be created. The `__call__`
+    method accepts an optional keyword "default" that sets the default name
+    in case of ambiguity. By default this is 'f', so that names will
+    default to `f0`, `f1`, etc.
+
+    Parameters
+    ----------
+    excludelist : sequence, optional
+        A list of names to exclude. This list is appended to the default
+        list ['return', 'file', 'print']. Excluded names are appended an
+        underscore: for example, `file` becomes `file_` if supplied.
+    deletechars : str, optional
+        A string combining invalid characters that must be deleted from the
+        names.
+    case_sensitive : {True, False, 'upper', 'lower'}, optional
+        * If True, field names are case-sensitive.
+        * If False or 'upper', field names are converted to upper case.
+        * If 'lower', field names are converted to lower case.
+
+        The default value is True.
+    replace_space : '_', optional
+        Character(s) used in replacement of white spaces.
+
+    Notes
+    -----
+    Calling an instance of `NameValidator` is the same as calling its
+    method `validate`.
+
+    Examples
+    --------
+    >>> validator = np.lib._iotools.NameValidator()
+    >>> validator(['file', 'field2', 'with space', 'CaSe'])
+    ('file_', 'field2', 'with_space', 'CaSe')
+
+    >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
+    ...                                           deletechars='q',
+    ...                                           case_sensitive=False)
+    >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
+    ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')
+
+    """
+
+    defaultexcludelist = ['return', 'file', 'print']
+    defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
+
+    def __init__(self, excludelist=None, deletechars=None,
+                 case_sensitive=None, replace_space='_'):
+        # Process the exclusion list ..
+        if excludelist is None:
+            excludelist = []
+        excludelist.extend(self.defaultexcludelist)
+        self.excludelist = excludelist
+        # Process the list of characters to delete
+        if deletechars is None:
+            delete = self.defaultdeletechars
+        else:
+            delete = set(deletechars)
+        delete.add('"')
+        self.deletechars = delete
+        # Process the case option .....
+        if (case_sensitive is None) or (case_sensitive is True):
+            self.case_converter = lambda x: x
+        elif (case_sensitive is False) or case_sensitive.startswith('u'):
+            self.case_converter = lambda x: x.upper()
+        elif case_sensitive.startswith('l'):
+            self.case_converter = lambda x: x.lower()
+        else:
+            msg = 'unrecognized case_sensitive value %s.' % case_sensitive
+            raise ValueError(msg)
+
+        self.replace_space = replace_space
+
+    def validate(self, names, defaultfmt="f%i", nbfields=None):
+        """
+        Validate a list of strings as field names for a structured array.
+
+        Parameters
+        ----------
+        names : sequence of str
+            Strings to be validated.
+        defaultfmt : str, optional
+            Default format string, used if validating a given string
+            reduces its length to zero.
+        nbfields : integer, optional
+            Final number of validated names, used to expand or shrink the
+            initial list of names.
+
+        Returns
+        -------
+        validatednames : list of str
+            The list of validated field names.
+
+        Notes
+        -----
+        A `NameValidator` instance can be called directly, which is the
+        same as calling `validate`. For examples, see `NameValidator`.
+
+        """
+        # Initial checks ..............
+        if (names is None):
+            if (nbfields is None):
+                return None
+            names = []
+        if isinstance(names, str):
+            names = [names, ]
+        if nbfields is not None:
+            nbnames = len(names)
+            if (nbnames < nbfields):
+                names = list(names) + [''] * (nbfields - nbnames)
+            elif (nbnames > nbfields):
+                names = names[:nbfields]
+        # Set some shortcuts ...........
+        deletechars = self.deletechars
+        excludelist = self.excludelist
+        case_converter = self.case_converter
+        replace_space = self.replace_space
+        # Initializes some variables ...
+        validatednames = []
+        seen = dict()
+        nbempty = 0
+
+        for item in names:
+            item = case_converter(item).strip()
+            if replace_space:
+                item = item.replace(' ', replace_space)
+            item = ''.join([c for c in item if c not in deletechars])
+            if item == '':
+                item = defaultfmt % nbempty
+                while item in names:
+                    nbempty += 1
+                    item = defaultfmt % nbempty
+                nbempty += 1
+            elif item in excludelist:
+                item += '_'
+            cnt = seen.get(item, 0)
+            if cnt > 0:
+                validatednames.append(item + '_%d' % cnt)
+            else:
+                validatednames.append(item)
+            seen[item] = cnt + 1
+        return tuple(validatednames)
+
+    def __call__(self, names, defaultfmt="f%i", nbfields=None):
+        return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
+
+
+def str2bool(value):
+    """
+    Tries to transform a string supposed to represent a boolean to a boolean.
+
+    Parameters
+    ----------
+    value : str
+        The string that is transformed to a boolean.
+
+    Returns
+    -------
+    boolval : bool
+        The boolean representation of `value`.
+
+    Raises
+    ------
+    ValueError
+        If the string is not 'True' or 'False' (case independent)
+
+    Examples
+    --------
+    >>> np.lib._iotools.str2bool('TRUE')
+    True
+    >>> np.lib._iotools.str2bool('false')
+    False
+
+    """
+    value = value.upper()
+    if value == 'TRUE':
+        return True
+    elif value == 'FALSE':
+        return False
+    else:
+        raise ValueError("Invalid boolean")
+
+
+class ConverterError(Exception):
+    """
+    Exception raised when an error occurs in a converter for string values.
+
+    """
+    pass
+
+
+class ConverterLockError(ConverterError):
+    """
+    Exception raised when an attempt is made to upgrade a locked converter.
+
+    """
+    pass
+
+
+class ConversionWarning(UserWarning):
+    """
+    Warning issued when a string converter has a problem.
+
+    Notes
+    -----
+    In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
+    is explicitly suppressed with the "invalid_raise" keyword.
+
+    """
+    pass
+
+
+class StringConverter:
+    """
+    Factory class for functions transforming a string into another object
+    (int, float).
+
+    After initialization, an instance can be called to transform a string
+    into another object. If the string is recognized as representing a
+    missing value, a default value is returned.
+
+    Attributes
+    ----------
+    func : function
+        Function used for the conversion.
+    default : any
+        Default value to return when the input corresponds to a missing
+        value.
+    type : type
+        Type of the output.
+    _status : int
+        Integer representing the order of the conversion.
+    _mapper : sequence of tuples
+        Sequence of tuples (dtype, function, default value) to evaluate in
+        order.
+    _locked : bool
+        Holds `locked` parameter.
+
+    Parameters
+    ----------
+    dtype_or_func : {None, dtype, function}, optional
+        If a `dtype`, specifies the input data type, used to define a basic
+        function and a default value for missing data. For example, when
+        `dtype` is float, the `func` attribute is set to `float` and the
+        default value to `np.nan`. If a function, this function is used to
+        convert a string to another object. In this case, it is recommended
+        to give an associated default value as input.
+    default : any, optional
+        Value to return by default, that is, when the string to be
+        converted is flagged as missing. If not given, `StringConverter`
+        tries to supply a reasonable default value.
+    missing_values : {None, sequence of str}, optional
+        ``None`` or sequence of strings indicating a missing value. If
+        ``None`` then missing values are indicated by empty entries. The
+        default is ``None``.
+    locked : bool, optional
+        Whether the StringConverter should be locked to prevent automatic
+        upgrade or not. Default is False.
+
+    """
+    _mapper = [(nx.bool_, str2bool, False),
+               (nx.int_, int, -1),]
+
+    # On 32-bit systems, we need to make sure that we explicitly include
+    # nx.int64 since nx.int_ is nx.int32.
+    if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
+        _mapper.append((nx.int64, int, -1))
+
+    _mapper.extend([(nx.float64, float, nx.nan),
+                    (nx.complex128, complex, nx.nan + 0j),
+                    (nx.longdouble, nx.longdouble, nx.nan),
+                    # If a non-default dtype is passed, fall back to generic
+                    # ones (should only be used for the converter)
+                    (nx.integer, int, -1),
+                    (nx.floating, float, nx.nan),
+                    (nx.complexfloating, complex, nx.nan + 0j),
+                    # Last, try with the string types (must be last, because
+                    # `_mapper[-1]` is used as default in some cases)
+                    (nx.str_, asunicode, '???'),
+                    (nx.bytes_, asbytes, '???'),
+                    ])
+
+    @classmethod
+    def _getdtype(cls, val):
+        """Returns the dtype of the input variable."""
+        return np.array(val).dtype
+
+    @classmethod
+    def _getsubdtype(cls, val):
+        """Returns the type of the dtype of the input variable."""
+        return np.array(val).dtype.type
+
+    @classmethod
+    def _dtypeortype(cls, dtype):
+        """Returns dtype for datetime64 and type of dtype otherwise."""
+
+        # This is a bit annoying. We want to return the "general" type in
+        # most cases (ie. "string" rather than "S10"), but we want to return
+        # the specific type for datetime64 (ie. "datetime64[us]" rather than
+        # "datetime64").
+        if dtype.type == np.datetime64:
+            return dtype
+        return dtype.type
+
+    @classmethod
+    def upgrade_mapper(cls, func, default=None):
+        """
+        Upgrade the mapper of a StringConverter by adding a new function and
+        its corresponding default.
+
+        The input function (or sequence of functions) and its associated
+        default value (if any) is inserted in penultimate position of the
+        mapper. The corresponding type is estimated from the dtype of the
+        default value.
+
+        Parameters
+        ----------
+        func : var
+            Function, or sequence of functions
+
+        Examples
+        --------
+        >>> import dateutil.parser
+        >>> import datetime
+        >>> dateparser = dateutil.parser.parse
+        >>> defaultdate = datetime.date(2000, 1, 1)
+        >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
+        """
+        # Func is a single function
+        if hasattr(func, '__call__'):
+            cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
+            return
+        elif hasattr(func, '__iter__'):
+            if isinstance(func[0], (tuple, list)):
+                for _ in func:
+                    cls._mapper.insert(-1, _)
+                return
+            if default is None:
+                default = [None] * len(func)
+            else:
+                default = list(default)
+                # Pad the defaults with None so they pair up with each func.
+                default.extend([None] * (len(func) - len(default)))
+            for fct, dft in zip(func, default):
+                cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
+
+    @classmethod
+    def _find_map_entry(cls, dtype):
+        # if a converter for the specific dtype is available use that
+        for i, (deftype, func, default_def) in enumerate(cls._mapper):
+            if dtype.type == deftype:
+                return i, (deftype, func, default_def)
+
+        # otherwise find an inexact match
+        for i, (deftype, func, default_def) in enumerate(cls._mapper):
+            if np.issubdtype(dtype.type, deftype):
+                return i, (deftype, func, default_def)
+
+        raise LookupError
+
+    def __init__(self, dtype_or_func=None, default=None, missing_values=None,
+                 locked=False):
+        # Defines a lock for upgrade
+        self._locked = bool(locked)
+        # No input dtype: minimal initialization
+        if dtype_or_func is None:
+            self.func = str2bool
+            self._status = 0
+            self.default = default or False
+            dtype = np.dtype('bool')
+        else:
+            # Is the input a np.dtype ?
+            try:
+                self.func = None
+                dtype = np.dtype(dtype_or_func)
+            except TypeError:
+                # dtype_or_func must be a function, then
+                if not hasattr(dtype_or_func, '__call__'):
+                    errmsg = ("The input argument `dtype` is neither a"
+                              " function nor a dtype (got '%s' instead)")
+                    raise TypeError(errmsg % type(dtype_or_func))
+                # Set the function
+                self.func = dtype_or_func
+                # If we don't have a default, try to guess it or set it to
621
+ # None
622
+ if default is None:
623
+ try:
624
+ default = self.func('0')
625
+ except ValueError:
626
+ default = None
627
+ dtype = self._getdtype(default)
628
+
629
+ # find the best match in our mapper
630
+ try:
631
+ self._status, (_, func, default_def) = self._find_map_entry(dtype)
632
+ except LookupError:
633
+ # no match
634
+ self.default = default
635
+ _, func, _ = self._mapper[-1]
636
+ self._status = 0
637
+ else:
638
+ # use the found default only if we did not already have one
639
+ if default is None:
640
+ self.default = default_def
641
+ else:
642
+ self.default = default
643
+
644
+ # If the input was a dtype, set the function to the last we saw
645
+ if self.func is None:
646
+ self.func = func
647
+
648
+ # If the status is 1 (int), change the function to
649
+ # something more robust.
650
+ if self.func == self._mapper[1][1]:
651
+ if issubclass(dtype.type, np.uint64):
652
+ self.func = np.uint64
653
+ elif issubclass(dtype.type, np.int64):
654
+ self.func = np.int64
655
+ else:
656
+ self.func = lambda x: int(float(x))
657
+ # Store the list of strings corresponding to missing values.
658
+ if missing_values is None:
659
+ self.missing_values = {''}
660
+ else:
661
+ if isinstance(missing_values, str):
662
+ missing_values = missing_values.split(",")
663
+ self.missing_values = set(list(missing_values) + [''])
664
+
665
+ self._callingfunction = self._strict_call
666
+ self.type = self._dtypeortype(dtype)
667
+ self._checked = False
668
+ self._initial_default = default
669
+
670
+ def _loose_call(self, value):
671
+ try:
672
+ return self.func(value)
673
+ except ValueError:
674
+ return self.default
675
+
676
+ def _strict_call(self, value):
677
+ try:
678
+
679
+ # We check if we can convert the value using the current function
680
+ new_value = self.func(value)
681
+
682
+ # In addition to having to check whether func can convert the
683
+ # value, we also have to make sure that we don't get overflow
684
+ # errors for integers.
685
+ if self.func is int:
686
+ try:
687
+ np.array(value, dtype=self.type)
688
+ except OverflowError:
689
+ raise ValueError
690
+
691
+ # We're still here so we can now return the new value
692
+ return new_value
693
+
694
+ except ValueError:
695
+ if value.strip() in self.missing_values:
696
+ if not self._status:
697
+ self._checked = False
698
+ return self.default
699
+ raise ValueError("Cannot convert string '%s'" % value)
700
+
701
+ def __call__(self, value):
702
+ return self._callingfunction(value)
703
+
704
+ def _do_upgrade(self):
705
+ # Raise an exception if we locked the converter...
706
+ if self._locked:
707
+ errmsg = "Converter is locked and cannot be upgraded"
708
+ raise ConverterLockError(errmsg)
709
+ _statusmax = len(self._mapper)
710
+ # Complains if we try to upgrade by the maximum
711
+ _status = self._status
712
+ if _status == _statusmax:
713
+ errmsg = "Could not find a valid conversion function"
714
+ raise ConverterError(errmsg)
715
+ elif _status < _statusmax - 1:
716
+ _status += 1
717
+ self.type, self.func, default = self._mapper[_status]
718
+ self._status = _status
719
+ if self._initial_default is not None:
720
+ self.default = self._initial_default
721
+ else:
722
+ self.default = default
723
+
724
+ def upgrade(self, value):
725
+ """
726
+ Find the best converter for a given string, and return the result.
727
+
728
+ The supplied string `value` is converted by testing different
729
+ converters in order. First the `func` method of the
730
+ `StringConverter` instance is tried, if this fails other available
731
+ converters are tried. The order in which these other converters
732
+ are tried is determined by the `_status` attribute of the instance.
733
+
734
+ Parameters
735
+ ----------
736
+ value : str
737
+ The string to convert.
738
+
739
+ Returns
740
+ -------
741
+ out : any
742
+ The result of converting `value` with the appropriate converter.
743
+
744
+ """
745
+ self._checked = True
746
+ try:
747
+ return self._strict_call(value)
748
+ except ValueError:
749
+ self._do_upgrade()
750
+ return self.upgrade(value)
751
+
752
+ def iterupgrade(self, value):
753
+ self._checked = True
754
+ if not hasattr(value, '__iter__'):
755
+ value = (value,)
756
+ _strict_call = self._strict_call
757
+ try:
758
+ for _m in value:
759
+ _strict_call(_m)
760
+ except ValueError:
761
+ self._do_upgrade()
762
+ self.iterupgrade(value)
763
+
764
+ def update(self, func, default=None, testing_value=None,
765
+ missing_values='', locked=False):
766
+ """
767
+ Set StringConverter attributes directly.
768
+
769
+ Parameters
770
+ ----------
771
+ func : function
772
+ Conversion function.
773
+ default : any, optional
774
+ Value to return by default, that is, when the string to be
775
+ converted is flagged as missing. If not given,
776
+ `StringConverter` tries to supply a reasonable default value.
777
+ testing_value : str, optional
778
+ A string representing a standard input value of the converter.
779
+ This string is used to help defining a reasonable default
780
+ value.
781
+ missing_values : {sequence of str, None}, optional
782
+ Sequence of strings indicating a missing value. If ``None``, then
783
+ the existing `missing_values` are cleared. The default is `''`.
784
+ locked : bool, optional
785
+ Whether the StringConverter should be locked to prevent
786
+ automatic upgrade or not. Default is False.
787
+
788
+ Notes
789
+ -----
790
+ `update` takes the same parameters as the constructor of
791
+ `StringConverter`, except that `func` does not accept a `dtype`
792
+ whereas `dtype_or_func` in the constructor does.
793
+
794
+ """
795
+ self.func = func
796
+ self._locked = locked
797
+
798
+ # Don't reset the default to None if we can avoid it
799
+ if default is not None:
800
+ self.default = default
801
+ self.type = self._dtypeortype(self._getdtype(default))
802
+ else:
803
+ try:
804
+ tester = func(testing_value or '1')
805
+ except (TypeError, ValueError):
806
+ tester = None
807
+ self.type = self._dtypeortype(self._getdtype(tester))
808
+
809
+ # Add the missing values to the existing set or clear it.
810
+ if missing_values is None:
811
+ # Clear all missing values even though the ctor initializes it to
812
+ # set(['']) when the argument is None.
813
+ self.missing_values = set()
814
+ else:
815
+ if not np.iterable(missing_values):
816
+ missing_values = [missing_values]
817
+ if not all(isinstance(v, str) for v in missing_values):
818
+ raise TypeError("missing_values must be strings or unicode")
819
+ self.missing_values.update(missing_values)
820
+
821
+
822
+ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
823
+ """
824
+ Convenience function to create a `np.dtype` object.
825
+
826
+ The function processes the input `dtype` and matches it with the given
827
+ names.
828
+
829
+ Parameters
830
+ ----------
831
+ ndtype : var
832
+ Definition of the dtype. Can be any string or dictionary recognized
833
+ by the `np.dtype` function, or a sequence of types.
834
+ names : str or sequence, optional
835
+ Sequence of strings to use as field names for a structured dtype.
836
+ For convenience, `names` can be a string of a comma-separated list
837
+ of names.
838
+ defaultfmt : str, optional
839
+ Format string used to define missing names, such as ``"f%i"``
840
+ (default) or ``"fields_%02i"``.
841
+ validationargs : optional
842
+ A series of optional arguments used to initialize a
843
+ `NameValidator`.
844
+
845
+ Examples
846
+ --------
847
+ >>> np.lib._iotools.easy_dtype(float)
848
+ dtype('float64')
849
+ >>> np.lib._iotools.easy_dtype("i4, f8")
850
+ dtype([('f0', '<i4'), ('f1', '<f8')])
851
+ >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
852
+ dtype([('field_000', '<i4'), ('field_001', '<f8')])
853
+
854
+ >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
855
+ dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
856
+ >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
857
+ dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
858
+
859
+ """
860
+ try:
861
+ ndtype = np.dtype(ndtype)
862
+ except TypeError:
863
+ validate = NameValidator(**validationargs)
864
+ nbfields = len(ndtype)
865
+ if names is None:
866
+ names = [''] * len(ndtype)
867
+ elif isinstance(names, str):
868
+ names = names.split(",")
869
+ names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
870
+ ndtype = np.dtype(dict(formats=ndtype, names=names))
871
+ else:
872
+ # Explicit names
873
+ if names is not None:
874
+ validate = NameValidator(**validationargs)
875
+ if isinstance(names, str):
876
+ names = names.split(",")
877
+ # Simple dtype: repeat to match the nb of names
878
+ if ndtype.names is None:
879
+ formats = tuple([ndtype.type] * len(names))
880
+ names = validate(names, defaultfmt=defaultfmt)
881
+ ndtype = np.dtype(list(zip(names, formats)))
882
+ # Structured dtype: just validate the names as needed
883
+ else:
884
+ ndtype.names = validate(names, nbfields=len(ndtype.names),
885
+ defaultfmt=defaultfmt)
886
+ # No implicit names
887
+ elif ndtype.names is not None:
888
+ validate = NameValidator(**validationargs)
889
+ # Default initial names : should we change the format ?
890
+ numbered_names = tuple("f%i" % i for i in range(len(ndtype.names)))
891
+ if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
892
+ ndtype.names = validate([''] * len(ndtype.names),
893
+ defaultfmt=defaultfmt)
894
+ # Explicit initial names : just validate
895
+ else:
896
+ ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
897
+ return ndtype
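
A minimal usage sketch (editorial note, not part of the diff) of the `StringConverter` upgrade mechanism shown above. The exact integer converter chosen depends on the platform's default integer width; the comments assume a 64-bit build:

    conv = StringConverter(int, default=-1)
    conv('42')           # -> 42
    conv('')             # flagged as missing -> returns the default, -1
    conv.upgrade('1.5')  # int conversion fails; the mapper upgrades to float -> 1.5

`easy_dtype` usage is already illustrated in its docstring above.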
env-llmeval/lib/python3.10/site-packages/numpy/lib/_version.py ADDED
@@ -0,0 +1,155 @@
+ """Utility to compare (NumPy) version strings.
+
+ The NumpyVersion class allows properly comparing numpy version strings.
+ The LooseVersion and StrictVersion classes that distutils provides don't
+ work; they don't recognize anything like alpha/beta/rc/dev versions.
+
+ """
+ import re
+
+
+ __all__ = ['NumpyVersion']
+
+
+ class NumpyVersion():
+     """Parse and compare numpy version strings.
+
+     NumPy has the following versioning scheme (numbers given are examples; they
+     can be > 9 in principle):
+
+     - Released version: '1.8.0', '1.8.1', etc.
+     - Alpha: '1.8.0a1', '1.8.0a2', etc.
+     - Beta: '1.8.0b1', '1.8.0b2', etc.
+     - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
+     - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
+     - Development versions after a1: '1.8.0a1.dev-f1234afa',
+                                      '1.8.0b2.dev-f1234afa',
+                                      '1.8.1rc1.dev-f1234afa', etc.
+     - Development versions (no git hash available): '1.8.0.dev-Unknown'
+
+     Comparing needs to be done against a valid version string or other
+     `NumpyVersion` instance. Note that all development versions of the same
+     (pre-)release compare equal.
+
+     .. versionadded:: 1.9.0
+
+     Parameters
+     ----------
+     vstring : str
+         NumPy version string (``np.__version__``).
+
+     Examples
+     --------
+     >>> from numpy.lib import NumpyVersion
+     >>> if NumpyVersion(np.__version__) < '1.7.0':
+     ...     print('skip')
+     >>> # skip
+
+     >>> NumpyVersion('1.7')  # raises ValueError, add ".0"
+     Traceback (most recent call last):
+         ...
+     ValueError: Not a valid numpy version string
+
+     """
+
+     def __init__(self, vstring):
+         self.vstring = vstring
+         ver_main = re.match(r'\d+\.\d+\.\d+', vstring)
+         if not ver_main:
+             raise ValueError("Not a valid numpy version string")
+
+         self.version = ver_main.group()
+         self.major, self.minor, self.bugfix = [int(x) for x in
+                                                self.version.split('.')]
+         if len(vstring) == ver_main.end():
+             self.pre_release = 'final'
+         else:
+             alpha = re.match(r'a\d', vstring[ver_main.end():])
+             beta = re.match(r'b\d', vstring[ver_main.end():])
+             rc = re.match(r'rc\d', vstring[ver_main.end():])
+             pre_rel = [m for m in [alpha, beta, rc] if m is not None]
+             if pre_rel:
+                 self.pre_release = pre_rel[0].group()
+             else:
+                 self.pre_release = ''
+
+         self.is_devversion = bool(re.search(r'.dev', vstring))
+
+     def _compare_version(self, other):
+         """Compare major.minor.bugfix"""
+         if self.major == other.major:
+             if self.minor == other.minor:
+                 if self.bugfix == other.bugfix:
+                     vercmp = 0
+                 elif self.bugfix > other.bugfix:
+                     vercmp = 1
+                 else:
+                     vercmp = -1
+             elif self.minor > other.minor:
+                 vercmp = 1
+             else:
+                 vercmp = -1
+         elif self.major > other.major:
+             vercmp = 1
+         else:
+             vercmp = -1
+
+         return vercmp
+
+     def _compare_pre_release(self, other):
+         """Compare alpha/beta/rc/final."""
+         if self.pre_release == other.pre_release:
+             vercmp = 0
+         elif self.pre_release == 'final':
+             vercmp = 1
+         elif other.pre_release == 'final':
+             vercmp = -1
+         elif self.pre_release > other.pre_release:
+             vercmp = 1
+         else:
+             vercmp = -1
+
+         return vercmp
+
+     def _compare(self, other):
+         if not isinstance(other, (str, NumpyVersion)):
+             raise ValueError("Invalid object to compare with NumpyVersion.")
+
+         if isinstance(other, str):
+             other = NumpyVersion(other)
+
+         vercmp = self._compare_version(other)
+         if vercmp == 0:
+             # Same x.y.z version, check for alpha/beta/rc
+             vercmp = self._compare_pre_release(other)
+             if vercmp == 0:
+                 # Same version and same pre-release, check if dev version
+                 if self.is_devversion is other.is_devversion:
+                     vercmp = 0
+                 elif self.is_devversion:
+                     vercmp = -1
+                 else:
+                     vercmp = 1
+
+         return vercmp
+
+     def __lt__(self, other):
+         return self._compare(other) < 0
+
+     def __le__(self, other):
+         return self._compare(other) <= 0
+
+     def __eq__(self, other):
+         return self._compare(other) == 0
+
+     def __ne__(self, other):
+         return self._compare(other) != 0
+
+     def __gt__(self, other):
+         return self._compare(other) > 0
+
+     def __ge__(self, other):
+         return self._compare(other) >= 0
+
+     def __repr__(self):
+         return "NumpyVersion(%s)" % self.vstring
env-llmeval/lib/python3.10/site-packages/numpy/lib/_version.pyi ADDED
@@ -0,0 +1,17 @@
+ __all__: list[str]
+
+ class NumpyVersion:
+     vstring: str
+     version: str
+     major: int
+     minor: int
+     bugfix: int
+     pre_release: str
+     is_devversion: bool
+     def __init__(self, vstring: str) -> None: ...
+     def __lt__(self, other: str | NumpyVersion) -> bool: ...
+     def __le__(self, other: str | NumpyVersion) -> bool: ...
+     def __eq__(self, other: str | NumpyVersion) -> bool: ...  # type: ignore[override]
+     def __ne__(self, other: str | NumpyVersion) -> bool: ...  # type: ignore[override]
+     def __gt__(self, other: str | NumpyVersion) -> bool: ...
+     def __ge__(self, other: str | NumpyVersion) -> bool: ...
env-llmeval/lib/python3.10/site-packages/numpy/lib/arraypad.py ADDED
@@ -0,0 +1,882 @@
+ """
+ The arraypad module contains a group of functions to pad values onto the edges
+ of an n-dimensional array.
+
+ """
+ import numpy as np
+ from numpy.core.overrides import array_function_dispatch
+ from numpy.lib.index_tricks import ndindex
+
+
+ __all__ = ['pad']
+
+
+ ###############################################################################
+ # Private utility functions.
+
+
+ def _round_if_needed(arr, dtype):
+     """
+     Rounds arr inplace if destination dtype is integer.
+
+     Parameters
+     ----------
+     arr : ndarray
+         Input array.
+     dtype : dtype
+         The dtype of the destination array.
+     """
+     if np.issubdtype(dtype, np.integer):
+         arr.round(out=arr)
+
+
+ def _slice_at_axis(sl, axis):
+     """
+     Construct tuple of slices to slice an array in the given dimension.
+
+     Parameters
+     ----------
+     sl : slice
+         The slice for the given dimension.
+     axis : int
+         The axis to which `sl` is applied. All other dimensions are left
+         "unsliced".
+
+     Returns
+     -------
+     sl : tuple of slices
+         A tuple with slices matching `shape` in length.
+
+     Examples
+     --------
+     >>> _slice_at_axis(slice(None, 3, -1), 1)
+     (slice(None, None, None), slice(None, 3, -1), (...,))
+     """
+     return (slice(None),) * axis + (sl,) + (...,)
+
+
+ def _view_roi(array, original_area_slice, axis):
+     """
+     Get a view of the current region of interest during iterative padding.
+
+     When padding multiple dimensions iteratively corner values are
+     unnecessarily overwritten multiple times. This function reduces the
+     working area for the first dimensions so that corners are excluded.
+
+     Parameters
+     ----------
+     array : ndarray
+         The array with the region of interest.
+     original_area_slice : tuple of slices
+         Denotes the area with original values of the unpadded array.
+     axis : int
+         The currently padded dimension assuming that `axis` is padded before
+         `axis` + 1.
+
+     Returns
+     -------
+     roi : ndarray
+         The region of interest of the original `array`.
+     """
+     axis += 1
+     sl = (slice(None),) * axis + original_area_slice[axis:]
+     return array[sl]
+
+
+ def _pad_simple(array, pad_width, fill_value=None):
+     """
+     Pad array on all sides with either a single value or undefined values.
+
+     Parameters
+     ----------
+     array : ndarray
+         Array to grow.
+     pad_width : sequence of tuple[int, int]
+         Pad width on both sides for each dimension in `arr`.
+     fill_value : scalar, optional
+         If provided the padded area is filled with this value, otherwise
+         the pad area is left undefined.
+
+     Returns
+     -------
+     padded : ndarray
+         The padded array with the same dtype as `array`. Its order will
+         default to C-style if `array` is not F-contiguous.
+     original_area_slice : tuple
+         A tuple of slices pointing to the area of the original array.
+     """
+     # Allocate grown array
+     new_shape = tuple(
+         left + size + right
+         for size, (left, right) in zip(array.shape, pad_width)
+     )
+     order = 'F' if array.flags.fnc else 'C'  # Fortran and not also C-order
+     padded = np.empty(new_shape, dtype=array.dtype, order=order)
+
+     if fill_value is not None:
+         padded.fill(fill_value)
+
+     # Copy old array into correct space
+     original_area_slice = tuple(
+         slice(left, left + size)
+         for size, (left, right) in zip(array.shape, pad_width)
+     )
+     padded[original_area_slice] = array
+
+     return padded, original_area_slice
+
+
+ def _set_pad_area(padded, axis, width_pair, value_pair):
+     """
+     Set empty-padded area in given dimension.
+
+     Parameters
+     ----------
+     padded : ndarray
+         Array with the pad area which is modified inplace.
+     axis : int
+         Dimension with the pad area to set.
+     width_pair : (int, int)
+         Pair of widths that mark the pad area on both sides in the given
+         dimension.
+     value_pair : tuple of scalars or ndarrays
+         Values inserted into the pad area on each side. It must match or be
+         broadcastable to the shape of `arr`.
+     """
+     left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
+     padded[left_slice] = value_pair[0]
+
+     right_slice = _slice_at_axis(
+         slice(padded.shape[axis] - width_pair[1], None), axis)
+     padded[right_slice] = value_pair[1]
+
+
+ def _get_edges(padded, axis, width_pair):
+     """
+     Retrieve edge values from empty-padded array in given dimension.
+
+     Parameters
+     ----------
+     padded : ndarray
+         Empty-padded array.
+     axis : int
+         Dimension in which the edges are considered.
+     width_pair : (int, int)
+         Pair of widths that mark the pad area on both sides in the given
+         dimension.
+
+     Returns
+     -------
+     left_edge, right_edge : ndarray
+         Edge values of the valid area in `padded` in the given dimension. Its
+         shape will always match `padded` except for the dimension given by
+         `axis` which will have a length of 1.
+     """
+     left_index = width_pair[0]
+     left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)
+     left_edge = padded[left_slice]
+
+     right_index = padded.shape[axis] - width_pair[1]
+     right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)
+     right_edge = padded[right_slice]
+
+     return left_edge, right_edge
+
+
+ def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
+     """
+     Construct linear ramps for empty-padded array in given dimension.
+
+     Parameters
+     ----------
+     padded : ndarray
+         Empty-padded array.
+     axis : int
+         Dimension in which the ramps are constructed.
+     width_pair : (int, int)
+         Pair of widths that mark the pad area on both sides in the given
+         dimension.
+     end_value_pair : (scalar, scalar)
+         End values for the linear ramps which form the edge of the fully padded
+         array. These values are included in the linear ramps.
+
+     Returns
+     -------
+     left_ramp, right_ramp : ndarray
+         Linear ramps to set on both sides of `padded`.
+     """
+     edge_pair = _get_edges(padded, axis, width_pair)
+
+     left_ramp, right_ramp = (
+         np.linspace(
+             start=end_value,
+             stop=edge.squeeze(axis),  # Dimension is replaced by linspace
+             num=width,
+             endpoint=False,
+             dtype=padded.dtype,
+             axis=axis
+         )
+         for end_value, edge, width in zip(
+             end_value_pair, edge_pair, width_pair
+         )
+     )
+
+     # Reverse linear space in appropriate dimension
+     right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]
+
+     return left_ramp, right_ramp
+
+
+ def _get_stats(padded, axis, width_pair, length_pair, stat_func):
+     """
+     Calculate statistic for the empty-padded array in given dimension.
+
+     Parameters
+     ----------
+     padded : ndarray
+         Empty-padded array.
+     axis : int
+         Dimension in which the statistic is calculated.
+     width_pair : (int, int)
+         Pair of widths that mark the pad area on both sides in the given
+         dimension.
+     length_pair : 2-element sequence of None or int
+         Gives the number of values in valid area from each side that is
+         taken into account when calculating the statistic. If None the entire
+         valid area in `padded` is considered.
+     stat_func : function
+         Function to compute statistic. The expected signature is
+         ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.
+
+     Returns
+     -------
+     left_stat, right_stat : ndarray
+         Calculated statistic for both sides of `padded`.
+     """
+     # Calculate indices of the edges of the area with original values
+     left_index = width_pair[0]
+     right_index = padded.shape[axis] - width_pair[1]
+     # as well as its length
+     max_length = right_index - left_index
+
+     # Limit stat_lengths to max_length
+     left_length, right_length = length_pair
+     if left_length is None or max_length < left_length:
+         left_length = max_length
+     if right_length is None or max_length < right_length:
+         right_length = max_length
+
+     if (left_length == 0 or right_length == 0) \
+             and stat_func in {np.amax, np.amin}:
+         # amax and amin can't operate on an empty array,
+         # raise a more descriptive warning here instead of the default one
+         raise ValueError("stat_length of 0 yields no value for padding")
+
+     # Calculate statistic for the left side
+     left_slice = _slice_at_axis(
+         slice(left_index, left_index + left_length), axis)
+     left_chunk = padded[left_slice]
+     left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
+     _round_if_needed(left_stat, padded.dtype)
+
+     if left_length == right_length == max_length:
+         # return early as right_stat must be identical to left_stat
+         return left_stat, left_stat
+
+     # Calculate statistic for the right side
+     right_slice = _slice_at_axis(
+         slice(right_index - right_length, right_index), axis)
+     right_chunk = padded[right_slice]
+     right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
+     _round_if_needed(right_stat, padded.dtype)
+
+     return left_stat, right_stat
+
+
+ def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):
+     """
+     Pad `axis` of `arr` with reflection.
+
+     Parameters
+     ----------
+     padded : ndarray
+         Input array of arbitrary shape.
+     axis : int
+         Axis along which to pad `arr`.
+     width_pair : (int, int)
+         Pair of widths that mark the pad area on both sides in the given
+         dimension.
+     method : str
+         Controls method of reflection; options are 'even' or 'odd'.
+     include_edge : bool
+         If true, edge value is included in reflection, otherwise the edge
+         value forms the symmetric axis to the reflection.
+
+     Returns
+     -------
+     pad_amt : tuple of ints, length 2
+         New index positions of padding to do along the `axis`. If these are
+         both 0, padding is done in this dimension.
+     """
+     left_pad, right_pad = width_pair
+     old_length = padded.shape[axis] - right_pad - left_pad
+
+     if include_edge:
+         # Edge is included, we need to offset the pad amount by 1
+         edge_offset = 1
+     else:
+         edge_offset = 0  # Edge is not included, no need to offset pad amount
+         old_length -= 1  # but must be omitted from the chunk
+
+     if left_pad > 0:
+         # Pad with reflected values on left side:
+         # First limit chunk size which can't be larger than pad area
+         chunk_length = min(old_length, left_pad)
+         # Slice right to left, stop on or next to edge, start relative to stop
+         stop = left_pad - edge_offset
+         start = stop + chunk_length
+         left_slice = _slice_at_axis(slice(start, stop, -1), axis)
+         left_chunk = padded[left_slice]
+
+         if method == "odd":
+             # Negate chunk and align with edge
+             edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
+             left_chunk = 2 * padded[edge_slice] - left_chunk
+
+         # Insert chunk into padded area
+         start = left_pad - chunk_length
+         stop = left_pad
+         pad_area = _slice_at_axis(slice(start, stop), axis)
+         padded[pad_area] = left_chunk
+         # Adjust pointer to left edge for next iteration
+         left_pad -= chunk_length
+
+     if right_pad > 0:
+         # Pad with reflected values on right side:
+         # First limit chunk size which can't be larger than pad area
+         chunk_length = min(old_length, right_pad)
+         # Slice right to left, start on or next to edge, stop relative to start
+         start = -right_pad + edge_offset - 2
+         stop = start - chunk_length
+         right_slice = _slice_at_axis(slice(start, stop, -1), axis)
+         right_chunk = padded[right_slice]
+
+         if method == "odd":
+             # Negate chunk and align with edge
+             edge_slice = _slice_at_axis(
+                 slice(-right_pad - 1, -right_pad), axis)
+             right_chunk = 2 * padded[edge_slice] - right_chunk
+
+         # Insert chunk into padded area
+         start = padded.shape[axis] - right_pad
+         stop = start + chunk_length
+         pad_area = _slice_at_axis(slice(start, stop), axis)
+         padded[pad_area] = right_chunk
+         # Adjust pointer to right edge for next iteration
+         right_pad -= chunk_length
+
+     return left_pad, right_pad
+
+
+ def _set_wrap_both(padded, axis, width_pair, original_period):
+     """
+     Pad `axis` of `arr` with wrapped values.
+
+     Parameters
+     ----------
+     padded : ndarray
+         Input array of arbitrary shape.
+     axis : int
+         Axis along which to pad `arr`.
+     width_pair : (int, int)
+         Pair of widths that mark the pad area on both sides in the given
+         dimension.
+     original_period : int
+         Original length of data on `axis` of `arr`.
+
+     Returns
+     -------
+     pad_amt : tuple of ints, length 2
+         New index positions of padding to do along the `axis`. If these are
+         both 0, padding is done in this dimension.
+     """
+     left_pad, right_pad = width_pair
+     period = padded.shape[axis] - right_pad - left_pad
+     # Avoid wrapping with only a subset of the original area by ensuring period
+     # can only be a multiple of the original area's length.
+     period = period // original_period * original_period
+
+     # If the current dimension of `arr` doesn't contain enough valid values
+     # (not part of the undefined pad area) we need to pad multiple times.
+     # Each time the pad area shrinks on both sides which is communicated with
+     # these variables.
+     new_left_pad = 0
+     new_right_pad = 0
+
+     if left_pad > 0:
+         # Pad with wrapped values on left side
+         # First slice chunk from left side of the non-pad area.
+         # Use min(period, left_pad) to ensure that chunk is not larger than
+         # pad area.
+         slice_end = left_pad + period
+         slice_start = slice_end - min(period, left_pad)
+         right_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
+         right_chunk = padded[right_slice]
+
+         if left_pad > period:
+             # Chunk is smaller than pad area
+             pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
+             new_left_pad = left_pad - period
+         else:
+             # Chunk matches pad area
+             pad_area = _slice_at_axis(slice(None, left_pad), axis)
+         padded[pad_area] = right_chunk
+
+     if right_pad > 0:
+         # Pad with wrapped values on right side
+         # First slice chunk from right side of the non-pad area.
+         # Use min(period, right_pad) to ensure that chunk is not larger than
+         # pad area.
+         slice_start = -right_pad - period
+         slice_end = slice_start + min(period, right_pad)
+         left_slice = _slice_at_axis(slice(slice_start, slice_end), axis)
+         left_chunk = padded[left_slice]
+
+         if right_pad > period:
+             # Chunk is smaller than pad area
+             pad_area = _slice_at_axis(
+                 slice(-right_pad, -right_pad + period), axis)
+             new_right_pad = right_pad - period
+         else:
+             # Chunk matches pad area
+             pad_area = _slice_at_axis(slice(-right_pad, None), axis)
+         padded[pad_area] = left_chunk
+
+     return new_left_pad, new_right_pad
+
+
+ def _as_pairs(x, ndim, as_index=False):
+     """
+     Broadcast `x` to an array with the shape (`ndim`, 2).
+
+     A helper function for `pad` that prepares and validates arguments like
+     `pad_width` for iteration in pairs.
+
+     Parameters
+     ----------
+     x : {None, scalar, array-like}
+         The object to broadcast to the shape (`ndim`, 2).
+     ndim : int
+         Number of pairs the broadcasted `x` will have.
+     as_index : bool, optional
+         If `x` is not None, try to round each element of `x` to an integer
+         (dtype `np.intp`) and ensure every element is positive.
+
+     Returns
+     -------
+     pairs : nested iterables, shape (`ndim`, 2)
+         The broadcasted version of `x`.
+
+     Raises
+     ------
+     ValueError
+         If `as_index` is True and `x` contains negative elements.
+         Or if `x` is not broadcastable to the shape (`ndim`, 2).
+     """
+     if x is None:
+         # Pass through None as a special case, otherwise np.round(x) fails
+         # with an AttributeError
+         return ((None, None),) * ndim
+
+     x = np.array(x)
+     if as_index:
+         x = np.round(x).astype(np.intp, copy=False)
+
+     if x.ndim < 3:
+         # Optimization: Possibly use faster paths for cases where `x` has
+         # only 1 or 2 elements. `np.broadcast_to` could handle these as well
+         # but is currently slower
+
+         if x.size == 1:
+             # x was supplied as a single value
+             x = x.ravel()  # Ensure x[0] works for x.ndim == 0, 1, 2
+             if as_index and x < 0:
+                 raise ValueError("index can't contain negative values")
+             return ((x[0], x[0]),) * ndim
+
+         if x.size == 2 and x.shape != (2, 1):
+             # x was supplied with a single value for each side
+             # but except case when each dimension has a single value
+             # which should be broadcasted to a pair,
+             # e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
+             x = x.ravel()  # Ensure x[0], x[1] works
+             if as_index and (x[0] < 0 or x[1] < 0):
+                 raise ValueError("index can't contain negative values")
+             return ((x[0], x[1]),) * ndim
+
+     if as_index and x.min() < 0:
+         raise ValueError("index can't contain negative values")
+
+     # Converting the array with `tolist` seems to improve performance
+     # when iterating and indexing the result (see usage in `pad`)
+     return np.broadcast_to(x, (ndim, 2)).tolist()
+
+
+ def _pad_dispatcher(array, pad_width, mode=None, **kwargs):
+     return (array,)
+
+
+ ###############################################################################
+ # Public functions
+
+
+ @array_function_dispatch(_pad_dispatcher, module='numpy')
+ def pad(array, pad_width, mode='constant', **kwargs):
+     """
+     Pad an array.
+
+     Parameters
+     ----------
+     array : array_like of rank N
+         The array to pad.
+     pad_width : {sequence, array_like, int}
+         Number of values padded to the edges of each axis.
+         ``((before_1, after_1), ... (before_N, after_N))`` unique pad widths
+         for each axis.
+         ``(before, after)`` or ``((before, after),)`` yields same before
+         and after pad for each axis.
+         ``(pad,)`` or ``int`` is a shortcut for before = after = pad width
+         for all axes.
+     mode : str or function, optional
+         One of the following string values or a user supplied function.
+
+         'constant' (default)
+             Pads with a constant value.
+         'edge'
+             Pads with the edge values of array.
+         'linear_ramp'
+             Pads with the linear ramp between end_value and the
+             array edge value.
+         'maximum'
+             Pads with the maximum value of all or part of the
+             vector along each axis.
+         'mean'
+             Pads with the mean value of all or part of the
+             vector along each axis.
+         'median'
+             Pads with the median value of all or part of the
+             vector along each axis.
+         'minimum'
+             Pads with the minimum value of all or part of the
+             vector along each axis.
+         'reflect'
+             Pads with the reflection of the vector mirrored on
+             the first and last values of the vector along each
+             axis.
+         'symmetric'
+             Pads with the reflection of the vector mirrored
+             along the edge of the array.
+         'wrap'
+             Pads with the wrap of the vector along the axis.
+             The first values are used to pad the end and the
+             end values are used to pad the beginning.
+         'empty'
+             Pads with undefined values.
+
+             .. versionadded:: 1.17
+
+         <function>
+             Padding function, see Notes.
+     stat_length : sequence or int, optional
+         Used in 'maximum', 'mean', 'median', and 'minimum'. Number of
+         values at edge of each axis used to calculate the statistic value.
+
+         ``((before_1, after_1), ... (before_N, after_N))`` unique statistic
+         lengths for each axis.
+
+         ``(before, after)`` or ``((before, after),)`` yields same before
+         and after statistic lengths for each axis.
+
+         ``(stat_length,)`` or ``int`` is a shortcut for
+         ``before = after = statistic`` length for all axes.
+
+         Default is ``None``, to use the entire axis.
+     constant_values : sequence or scalar, optional
+         Used in 'constant'. The values to set the padded values for each
+         axis.
+
+         ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants
+         for each axis.
+
+         ``(before, after)`` or ``((before, after),)`` yields same before
+         and after constants for each axis.
+
+         ``(constant,)`` or ``constant`` is a shortcut for
+         ``before = after = constant`` for all axes.
+
+         Default is 0.
+     end_values : sequence or scalar, optional
+         Used in 'linear_ramp'. The values used for the ending value of the
+         linear_ramp and that will form the edge of the padded array.
+
+         ``((before_1, after_1), ... (before_N, after_N))`` unique end values
+         for each axis.
+
+         ``(before, after)`` or ``((before, after),)`` yields same before
+         and after end values for each axis.
+
+         ``(constant,)`` or ``constant`` is a shortcut for
+         ``before = after = constant`` for all axes.
+
+         Default is 0.
+     reflect_type : {'even', 'odd'}, optional
+         Used in 'reflect', and 'symmetric'. The 'even' style is the
+         default with an unaltered reflection around the edge value. For
+         the 'odd' style, the extended part of the array is created by
+         subtracting the reflected values from two times the edge value.
+
+     Returns
+     -------
+     pad : ndarray
+         Padded array of rank equal to `array` with shape increased
+         according to `pad_width`.
+
+     Notes
+     -----
+     .. versionadded:: 1.7.0
+
+     For an array with rank greater than 1, some of the padding of later
+     axes is calculated from padding of previous axes. This is easiest to
+     think about with a rank 2 array where the corners of the padded array
+     are calculated by using padded values from the first axis.
+
+     The padding function, if used, should modify a rank 1 array in-place. It
+     has the following signature::
+
+         padding_func(vector, iaxis_pad_width, iaxis, kwargs)
+
+     where
+
+         vector : ndarray
+             A rank 1 array already padded with zeros. Padded values are
+             vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:].
+         iaxis_pad_width : tuple
+             A 2-tuple of ints, iaxis_pad_width[0] represents the number of
+             values padded at the beginning of vector where
+             iaxis_pad_width[1] represents the number of values padded at
+             the end of vector.
+         iaxis : int
+             The axis currently being calculated.
+         kwargs : dict
+             Any keyword arguments the function requires.
+
+     Examples
+     --------
+     >>> a = [1, 2, 3, 4, 5]
+     >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6))
+     array([4, 4, 1, ..., 6, 6, 6])
+
+     >>> np.pad(a, (2, 3), 'edge')
+     array([1, 1, 1, ..., 5, 5, 5])
+
+     >>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
+     array([ 5,  3,  1,  2,  3,  4,  5,  2, -1, -4])
+
+     >>> np.pad(a, (2,), 'maximum')
+     array([5, 5, 1, 2, 3, 4, 5, 5, 5])
+
+     >>> np.pad(a, (2,), 'mean')
+     array([3, 3, 1, 2, 3, 4, 5, 3, 3])
+
+     >>> np.pad(a, (2,), 'median')
+     array([3, 3, 1, 2, 3, 4, 5, 3, 3])
+
+     >>> a = [[1, 2], [3, 4]]
+     >>> np.pad(a, ((3, 2), (2, 3)), 'minimum')
+     array([[1, 1, 1, 2, 1, 1, 1],
+            [1, 1, 1, 2, 1, 1, 1],
+            [1, 1, 1, 2, 1, 1, 1],
+            [1, 1, 1, 2, 1, 1, 1],
+            [3, 3, 3, 4, 3, 3, 3],
+            [1, 1, 1, 2, 1, 1, 1],
+            [1, 1, 1, 2, 1, 1, 1]])
+
+     >>> a = [1, 2, 3, 4, 5]
+     >>> np.pad(a, (2, 3), 'reflect')
+     array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
+
+     >>> np.pad(a, (2, 3), 'reflect', reflect_type='odd')
+     array([-1,  0,  1,  2,  3,  4,  5,  6,  7,  8])
+
+     >>> np.pad(a, (2, 3), 'symmetric')
+     array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
+
+     >>> np.pad(a, (2, 3), 'symmetric', reflect_type='odd')
+     array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])
+
+     >>> np.pad(a, (2, 3), 'wrap')
+     array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
+
+     >>> def pad_with(vector, pad_width, iaxis, kwargs):
+     ...     pad_value = kwargs.get('padder', 10)
+     ...     vector[:pad_width[0]] = pad_value
+     ...     vector[-pad_width[1]:] = pad_value
+     >>> a = np.arange(6)
+     >>> a = a.reshape((2, 3))
+     >>> np.pad(a, 2, pad_with)
+     array([[10, 10, 10, 10, 10, 10, 10],
+            [10, 10, 10, 10, 10, 10, 10],
+            [10, 10,  0,  1,  2, 10, 10],
+            [10, 10,  3,  4,  5, 10, 10],
+            [10, 10, 10, 10, 10, 10, 10],
+            [10, 10, 10, 10, 10, 10, 10]])
+     >>> np.pad(a, 2, pad_with, padder=100)
+     array([[100, 100, 100, 100, 100, 100, 100],
+            [100, 100, 100, 100, 100, 100, 100],
+            [100, 100,   0,   1,   2, 100, 100],
+            [100, 100,   3,   4,   5, 100, 100],
+            [100, 100, 100, 100, 100, 100, 100],
+            [100, 100, 100, 100, 100, 100, 100]])
+     """
+     array = np.asarray(array)
+     pad_width = np.asarray(pad_width)
+
+     if not pad_width.dtype.kind == 'i':
+         raise TypeError('`pad_width` must be of integral type.')
+
+     # Broadcast to shape (array.ndim, 2)
+     pad_width = _as_pairs(pad_width, array.ndim, as_index=True)
+
+     if callable(mode):
+         # Old behavior: Use user-supplied function with np.apply_along_axis
+         function = mode
+         # Create a new zero padded array
+         padded, _ = _pad_simple(array, pad_width, fill_value=0)
+         # And apply along each axis
+
+         for axis in range(padded.ndim):
+             # Iterate using ndindex as in apply_along_axis, but assuming that
+             # function operates inplace on the padded array.
+
+             # view with the iteration axis at the end
+             view = np.moveaxis(padded, axis, -1)
+
+             # compute indices for the iteration axes, and append a trailing
+             # ellipsis to prevent 0d arrays decaying to scalars (gh-8642)
+             inds = ndindex(view.shape[:-1])
+             inds = (ind + (Ellipsis,) for ind in inds)
+             for ind in inds:
+                 function(view[ind], pad_width[axis], axis, kwargs)
+
+         return padded
+
+     # Make sure that no unsupported keywords were passed for the current mode
+     allowed_kwargs = {
+         'empty': [], 'edge': [], 'wrap': [],
+         'constant': ['constant_values'],
+         'linear_ramp': ['end_values'],
+         'maximum': ['stat_length'],
+         'mean': ['stat_length'],
+         'median': ['stat_length'],
+         'minimum': ['stat_length'],
+         'reflect': ['reflect_type'],
+         'symmetric': ['reflect_type'],
+     }
+     try:
+         unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])
+     except KeyError:
+         raise ValueError("mode '{}' is not supported".format(mode)) from None
+     if unsupported_kwargs:
+         raise ValueError("unsupported keyword arguments for mode '{}': {}"
+                          .format(mode, unsupported_kwargs))
+
+     stat_functions = {"maximum": np.amax, "minimum": np.amin,
+                       "mean": np.mean, "median": np.median}
+
+     # Create array with final shape and original values
+     # (padded area is undefined)
+     padded, original_area_slice = _pad_simple(array, pad_width)
+     # And prepare iteration over all dimensions
+     # (zipping may be more readable than using enumerate)
+     axes = range(padded.ndim)
+
+     if mode == "constant":
+         values = kwargs.get("constant_values", 0)
+         values = _as_pairs(values, padded.ndim)
+         for axis, width_pair, value_pair in zip(axes, pad_width, values):
+             roi = _view_roi(padded, original_area_slice, axis)
+             _set_pad_area(roi, axis, width_pair, value_pair)
+
+     elif mode == "empty":
+         pass  # Do nothing as _pad_simple already returned the correct result
+
+     elif array.size == 0:
+         # Only modes "constant" and "empty" can extend empty axes, all other
+         # modes depend on `array` not being empty
+         # -> ensure every empty axis is only "padded with 0"
+         for axis, width_pair in zip(axes, pad_width):
+             if array.shape[axis] == 0 and any(width_pair):
+                 raise ValueError(
+                     "can't extend empty axis {} using modes other than "
+                     "'constant' or 'empty'".format(axis)
+                 )
+         # passed, don't need to do anything more as _pad_simple already
+         # returned the correct result
+
+     elif mode == "edge":
+         for axis, width_pair in zip(axes, pad_width):
+             roi = _view_roi(padded, original_area_slice, axis)
+             edge_pair = _get_edges(roi, axis, width_pair)
+             _set_pad_area(roi, axis, width_pair, edge_pair)
+
+     elif mode == "linear_ramp":
+         end_values = kwargs.get("end_values", 0)
+         end_values = _as_pairs(end_values, padded.ndim)
+         for axis, width_pair, value_pair in zip(axes, pad_width, end_values):
+             roi = _view_roi(padded, original_area_slice, axis)
+             ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair)
+             _set_pad_area(roi, axis, width_pair, ramp_pair)
+
+     elif mode in stat_functions:
+         func = stat_functions[mode]
+         length = kwargs.get("stat_length", None)
+         length = _as_pairs(length, padded.ndim, as_index=True)
+         for axis, width_pair, length_pair in zip(axes, pad_width, length):
+             roi = _view_roi(padded, original_area_slice, axis)
+             stat_pair = _get_stats(roi, axis, width_pair, length_pair, func)
+             _set_pad_area(roi, axis, width_pair, stat_pair)
+
+     elif mode in {"reflect", "symmetric"}:
+         method = kwargs.get("reflect_type", "even")
+         include_edge = True if mode == "symmetric" else False
+         for axis, (left_index, right_index) in zip(axes, pad_width):
+             if array.shape[axis] == 1 and (left_index > 0 or right_index > 0):
+                 # Extending singleton dimension for 'reflect' is legacy
+                 # behavior; it really should raise an error.
+                 edge_pair = _get_edges(padded, axis, (left_index, right_index))
+                 _set_pad_area(
+                     padded, axis, (left_index, right_index), edge_pair)
+                 continue
+
+             roi = _view_roi(padded, original_area_slice, axis)
+             while left_index > 0 or right_index > 0:
+                 # Iteratively pad until dimension is filled with reflected
+                 # values. This is necessary if the pad area is larger than
+                 # the length of the original values in the current dimension.
+                 left_index, right_index = _set_reflect_both(
+                     roi, axis, (left_index, right_index),
+                     method, include_edge
+                 )
+
+     elif mode == "wrap":
+         for axis, (left_index, right_index) in zip(axes, pad_width):
+             roi = _view_roi(padded, original_area_slice, axis)
+             original_period = padded.shape[axis] - right_index - left_index
+             while left_index > 0 or right_index > 0:
+                 # Iteratively pad until dimension is filled with wrapped
+                 # values. This is necessary if the pad area is larger than
+                 # the length of the original values in the current dimension.
+                 left_index, right_index = _set_wrap_both(
+                     roi, axis, (left_index, right_index), original_period)
+
+     return padded
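
A brief sketch (editorial note, not part of the diff) of how the `_as_pairs` helper above normalizes `pad_width`-style arguments, including the shape ``(2, 1)`` carve-out described in its comments. Return values are numpy scalars; they are shown as plain ints for readability:

    _as_pairs(1, 3)             # ((1, 1), (1, 1), (1, 1))   single value for all sides
    _as_pairs((1, 2), 3)        # ((1, 2), (1, 2), (1, 2))   one (before, after) pair, repeated
    _as_pairs([[1], [2]], 2)    # [[1, 1], [2, 2]]           per-dimension value, broadcast to a pair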
env-llmeval/lib/python3.10/site-packages/numpy/lib/arraypad.pyi ADDED
@@ -0,0 +1,85 @@
+ from typing import (
+     Literal as L,
+     Any,
+     overload,
+     TypeVar,
+     Protocol,
+ )
+
+ from numpy import generic
+
+ from numpy._typing import (
+     ArrayLike,
+     NDArray,
+     _ArrayLikeInt,
+     _ArrayLike,
+ )
+
+ _SCT = TypeVar("_SCT", bound=generic)
+
+ class _ModeFunc(Protocol):
+     def __call__(
+         self,
+         vector: NDArray[Any],
+         iaxis_pad_width: tuple[int, int],
+         iaxis: int,
+         kwargs: dict[str, Any],
+         /,
+     ) -> None: ...
+
+ _ModeKind = L[
+     "constant",
+     "edge",
+     "linear_ramp",
+     "maximum",
+     "mean",
+     "median",
+     "minimum",
+     "reflect",
+     "symmetric",
+     "wrap",
+     "empty",
+ ]
+
+ __all__: list[str]
+
+ # TODO: In practice each keyword argument is exclusive to one or more
+ # specific modes. Consider adding more overloads to express this in the future.
+
+ # Expand `**kwargs` into explicit keyword-only arguments
+ @overload
+ def pad(
+     array: _ArrayLike[_SCT],
+     pad_width: _ArrayLikeInt,
+     mode: _ModeKind = ...,
+     *,
+     stat_length: None | _ArrayLikeInt = ...,
+     constant_values: ArrayLike = ...,
+     end_values: ArrayLike = ...,
+     reflect_type: L["odd", "even"] = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def pad(
+     array: ArrayLike,
+     pad_width: _ArrayLikeInt,
+     mode: _ModeKind = ...,
+     *,
+     stat_length: None | _ArrayLikeInt = ...,
+     constant_values: ArrayLike = ...,
+     end_values: ArrayLike = ...,
+     reflect_type: L["odd", "even"] = ...,
+ ) -> NDArray[Any]: ...
+ @overload
+ def pad(
+     array: _ArrayLike[_SCT],
+     pad_width: _ArrayLikeInt,
+     mode: _ModeFunc,
+     **kwargs: Any,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def pad(
+     array: ArrayLike,
+     pad_width: _ArrayLikeInt,
+     mode: _ModeFunc,
+     **kwargs: Any,
+ ) -> NDArray[Any]: ...
env-llmeval/lib/python3.10/site-packages/numpy/lib/arraysetops.py ADDED
@@ -0,0 +1,981 @@
+ """
+ Set operations for arrays based on sorting.
+
+ Notes
+ -----
+
+ For floating point arrays, inaccurate results may appear due to usual round-off
+ and floating point comparison issues.
+
+ Speed could be gained in some operations by an implementation of
+ `numpy.sort`, that can directly provide the permutation vectors, thus avoiding
+ calls to `numpy.argsort`.
+
+ Original author: Robert Cimrman
+
+ """
+ import functools
+
+ import numpy as np
+ from numpy.core import overrides
+
+
+ array_function_dispatch = functools.partial(
+     overrides.array_function_dispatch, module='numpy')
+
+
+ __all__ = [
+     'ediff1d', 'intersect1d', 'setxor1d', 'union1d', 'setdiff1d', 'unique',
+     'in1d', 'isin'
+     ]
+
+
+ def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
+     return (ary, to_end, to_begin)
+
+
+ @array_function_dispatch(_ediff1d_dispatcher)
+ def ediff1d(ary, to_end=None, to_begin=None):
+     """
+     The differences between consecutive elements of an array.
+
+     Parameters
+     ----------
+     ary : array_like
+         If necessary, will be flattened before the differences are taken.
+     to_end : array_like, optional
+         Number(s) to append at the end of the returned differences.
+     to_begin : array_like, optional
+         Number(s) to prepend at the beginning of the returned differences.
+
+     Returns
+     -------
+     ediff1d : ndarray
+         The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
+
+     See Also
+     --------
+     diff, gradient
+
+     Notes
+     -----
+     When applied to masked arrays, this function drops the mask information
+     if the `to_begin` and/or `to_end` parameters are used.
+
+     Examples
+     --------
+     >>> x = np.array([1, 2, 4, 7, 0])
+     >>> np.ediff1d(x)
+     array([ 1,  2,  3, -7])
+
+     >>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+     array([-99,   1,   2, ...,  -7,  88,  99])
+
+     The returned array is always 1D.
+
+     >>> y = [[1, 2, 4], [1, 6, 24]]
+     >>> np.ediff1d(y)
+     array([ 1,  2, -3,  5, 18])
+
+     """
+     # force a 1d array
+     ary = np.asanyarray(ary).ravel()
+
+     # enforce that the dtype of `ary` is used for the output
+     dtype_req = ary.dtype
+
+     # fast track default case
+     if to_begin is None and to_end is None:
+         return ary[1:] - ary[:-1]
+
+     if to_begin is None:
+         l_begin = 0
+     else:
+         to_begin = np.asanyarray(to_begin)
+         if not np.can_cast(to_begin, dtype_req, casting="same_kind"):
+             raise TypeError("dtype of `to_begin` must be compatible "
+                             "with input `ary` under the `same_kind` rule.")
+
+         to_begin = to_begin.ravel()
+         l_begin = len(to_begin)
+
+     if to_end is None:
+         l_end = 0
+     else:
+         to_end = np.asanyarray(to_end)
+         if not np.can_cast(to_end, dtype_req, casting="same_kind"):
+             raise TypeError("dtype of `to_end` must be compatible "
+                             "with input `ary` under the `same_kind` rule.")
+
+         to_end = to_end.ravel()
+         l_end = len(to_end)
+
+     # do the calculation in place and copy to_begin and to_end
+     l_diff = max(len(ary) - 1, 0)
+     result = np.empty(l_diff + l_begin + l_end, dtype=ary.dtype)
+     result = ary.__array_wrap__(result)
+     if l_begin > 0:
+         result[:l_begin] = to_begin
+     if l_end > 0:
+         result[l_begin + l_diff:] = to_end
+     np.subtract(ary[1:], ary[:-1], result[l_begin:l_begin + l_diff])
+     return result
123
+
124
+
125
+ def _unpack_tuple(x):
126
+ """ Unpacks one-element tuples for use as return values """
127
+ if len(x) == 1:
128
+ return x[0]
129
+ else:
130
+ return x
131
+
132
+
133
+ def _unique_dispatcher(ar, return_index=None, return_inverse=None,
134
+ return_counts=None, axis=None, *, equal_nan=None):
135
+ return (ar,)
136
+
137
+
138
+ @array_function_dispatch(_unique_dispatcher)
139
+ def unique(ar, return_index=False, return_inverse=False,
140
+ return_counts=False, axis=None, *, equal_nan=True):
141
+ """
142
+ Find the unique elements of an array.
143
+
144
+ Returns the sorted unique elements of an array. There are three optional
145
+ outputs in addition to the unique elements:
146
+
147
+ * the indices of the input array that give the unique values
148
+ * the indices of the unique array that reconstruct the input array
149
+ * the number of times each unique value comes up in the input array
150
+
151
+ Parameters
152
+ ----------
153
+ ar : array_like
154
+ Input array. Unless `axis` is specified, this will be flattened if it
155
+ is not already 1-D.
156
+ return_index : bool, optional
157
+ If True, also return the indices of `ar` (along the specified axis,
158
+ if provided, or in the flattened array) that result in the unique array.
159
+ return_inverse : bool, optional
160
+ If True, also return the indices of the unique array (for the specified
161
+ axis, if provided) that can be used to reconstruct `ar`.
162
+ return_counts : bool, optional
163
+ If True, also return the number of times each unique item appears
164
+ in `ar`.
165
+ axis : int or None, optional
166
+ The axis to operate on. If None, `ar` will be flattened. If an integer,
167
+ the subarrays indexed by the given axis will be flattened and treated
168
+ as the elements of a 1-D array with the dimension of the given axis,
169
+ see the notes for more details. Object arrays or structured arrays
170
+ that contain objects are not supported if the `axis` kwarg is used. The
171
+ default is None.
172
+
173
+ .. versionadded:: 1.13.0
174
+
175
+ equal_nan : bool, optional
176
+ If True, collapses multiple NaN values in the return array into one.
177
+
178
+ .. versionadded:: 1.24
179
+
180
+ Returns
181
+ -------
182
+ unique : ndarray
183
+ The sorted unique values.
184
+ unique_indices : ndarray, optional
185
+ The indices of the first occurrences of the unique values in the
186
+ original array. Only provided if `return_index` is True.
187
+ unique_inverse : ndarray, optional
188
+ The indices to reconstruct the original array from the
189
+ unique array. Only provided if `return_inverse` is True.
190
+ unique_counts : ndarray, optional
191
+ The number of times each of the unique values comes up in the
192
+ original array. Only provided if `return_counts` is True.
193
+
194
+ .. versionadded:: 1.9.0
195
+
196
+ See Also
197
+ --------
198
+ numpy.lib.arraysetops : Module with a number of other functions for
199
+ performing set operations on arrays.
200
+ repeat : Repeat elements of an array.
201
+
202
+ Notes
203
+ -----
204
+ When an axis is specified the subarrays indexed by the axis are sorted.
205
+ This is done by making the specified axis the first dimension of the array
206
+ (move the axis to the first dimension to keep the order of the other axes)
207
+ and then flattening the subarrays in C order. The flattened subarrays are
208
+ then viewed as a structured type with each element given a label, with the
209
+ effect that we end up with a 1-D array of structured types that can be
210
+ treated in the same way as any other 1-D array. The result is that the
211
+ flattened subarrays are sorted in lexicographic order starting with the
212
+ first element.
213
+
214
+ .. versionchanged:: 1.21
215
+ If nan values are in the input array, a single nan is put
216
+ to the end of the sorted unique values.
217
+
218
+ Also for complex arrays all NaN values are considered equivalent
219
+ (no matter whether the NaN is in the real or imaginary part).
220
+ The smallest NaN in lexicographical order is chosen as the
221
+ representative in the returned array - see np.sort for how the
222
+ lexicographical order is defined for complex arrays.
223
+
224
+ Examples
225
+ --------
226
+ >>> np.unique([1, 1, 2, 2, 3, 3])
227
+ array([1, 2, 3])
228
+ >>> a = np.array([[1, 1], [2, 3]])
229
+ >>> np.unique(a)
230
+ array([1, 2, 3])
231
+
232
+ Return the unique rows of a 2D array
233
+
234
+ >>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
235
+ >>> np.unique(a, axis=0)
236
+ array([[1, 0, 0], [2, 3, 4]])
237
+
238
+ Return the indices of the original array that give the unique values:
239
+
240
+ >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
241
+ >>> u, indices = np.unique(a, return_index=True)
242
+ >>> u
243
+ array(['a', 'b', 'c'], dtype='<U1')
244
+ >>> indices
245
+ array([0, 1, 3])
246
+ >>> a[indices]
247
+ array(['a', 'b', 'c'], dtype='<U1')
248
+
249
+ Reconstruct the input array from the unique values and inverse:
250
+
251
+ >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
252
+ >>> u, indices = np.unique(a, return_inverse=True)
253
+ >>> u
254
+ array([1, 2, 3, 4, 6])
255
+ >>> indices
256
+ array([0, 1, 4, 3, 1, 2, 1])
257
+ >>> u[indices]
258
+ array([1, 2, 6, 4, 2, 3, 2])
259
+
260
+ Reconstruct the input values from the unique values and counts:
261
+
262
+ >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
263
+ >>> values, counts = np.unique(a, return_counts=True)
264
+ >>> values
265
+ array([1, 2, 3, 4, 6])
266
+ >>> counts
267
+ array([1, 3, 1, 1, 1])
268
+ >>> np.repeat(values, counts)
269
+ array([1, 2, 2, 2, 3, 4, 6]) # original order not preserved
270
+
271
+ """
272
+ ar = np.asanyarray(ar)
273
+ if axis is None:
274
+ ret = _unique1d(ar, return_index, return_inverse, return_counts,
275
+ equal_nan=equal_nan)
276
+ return _unpack_tuple(ret)
277
+
278
+ # axis was specified and not None
279
+ try:
280
+ ar = np.moveaxis(ar, axis, 0)
281
+ except np.AxisError:
282
+ # this removes the "axis1" or "axis2" prefix from the error message
283
+ raise np.AxisError(axis, ar.ndim) from None
284
+
285
+ # Must reshape to a contiguous 2D array for this to work...
286
+ orig_shape, orig_dtype = ar.shape, ar.dtype
287
+ ar = ar.reshape(orig_shape[0], np.prod(orig_shape[1:], dtype=np.intp))
288
+ ar = np.ascontiguousarray(ar)
289
+ dtype = [('f{i}'.format(i=i), ar.dtype) for i in range(ar.shape[1])]
290
+
291
+ # At this point, `ar` has shape `(n, m)`, and `dtype` is a structured
292
+ # data type with `m` fields where each field has the data type of `ar`.
293
+ # In the following, we create the array `consolidated`, which has
294
+ # shape `(n,)` with data type `dtype`.
295
+ try:
296
+ if ar.shape[1] > 0:
297
+ consolidated = ar.view(dtype)
298
+ else:
299
+ # If ar.shape[1] == 0, then dtype will be `np.dtype([])`, which is
300
+ # a data type with itemsize 0, and the call `ar.view(dtype)` will
301
+ # fail. Instead, we'll use `np.empty` to explicitly create the
302
+ # array with shape `(len(ar),)`. Because `dtype` in this case has
303
+ # itemsize 0, the total size of the result is still 0 bytes.
304
+ consolidated = np.empty(len(ar), dtype=dtype)
305
+ except TypeError as e:
306
+ # There's no good way to do this for object arrays, etc...
307
+ msg = 'The axis argument to unique is not supported for dtype {dt}'
308
+ raise TypeError(msg.format(dt=ar.dtype)) from e
309
+
310
+ def reshape_uniq(uniq):
311
+ n = len(uniq)
312
+ uniq = uniq.view(orig_dtype)
313
+ uniq = uniq.reshape(n, *orig_shape[1:])
314
+ uniq = np.moveaxis(uniq, 0, axis)
315
+ return uniq
316
+
317
+ output = _unique1d(consolidated, return_index,
318
+ return_inverse, return_counts, equal_nan=equal_nan)
319
+ output = (reshape_uniq(output[0]),) + output[1:]
320
+ return _unpack_tuple(output)
321
+
322
+
323
+ def _unique1d(ar, return_index=False, return_inverse=False,
324
+ return_counts=False, *, equal_nan=True):
325
+ """
326
+ Find the unique elements of an array, ignoring shape.
327
+ """
328
+ ar = np.asanyarray(ar).flatten()
329
+
330
+ optional_indices = return_index or return_inverse
331
+
332
+ if optional_indices:
333
+ perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
334
+ aux = ar[perm]
335
+ else:
336
+ ar.sort()
337
+ aux = ar
338
+ mask = np.empty(aux.shape, dtype=np.bool_)
339
+ mask[:1] = True
340
+ if (equal_nan and aux.shape[0] > 0 and aux.dtype.kind in "cfmM" and
341
+ np.isnan(aux[-1])):
342
+ if aux.dtype.kind == "c": # for complex all NaNs are considered equivalent
343
+ aux_firstnan = np.searchsorted(np.isnan(aux), True, side='left')
344
+ else:
345
+ aux_firstnan = np.searchsorted(aux, aux[-1], side='left')
346
+ if aux_firstnan > 0:
347
+ mask[1:aux_firstnan] = (
348
+ aux[1:aux_firstnan] != aux[:aux_firstnan - 1])
349
+ mask[aux_firstnan] = True
350
+ mask[aux_firstnan + 1:] = False
351
+ else:
352
+ mask[1:] = aux[1:] != aux[:-1]
353
+
354
+ ret = (aux[mask],)
355
+ if return_index:
356
+ ret += (perm[mask],)
357
+ if return_inverse:
358
+ imask = np.cumsum(mask) - 1
359
+ inv_idx = np.empty(mask.shape, dtype=np.intp)
360
+ inv_idx[perm] = imask
361
+ ret += (inv_idx,)
362
+ if return_counts:
363
+ idx = np.concatenate(np.nonzero(mask) + ([mask.size],))
364
+ ret += (np.diff(idx),)
365
+ return ret
366
+
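The inverse-index construction at the end of `_unique1d` is terse enough to merit a worked trace: `np.cumsum(mask) - 1` labels each sorted element with the number of distinct values preceding it, and scattering those labels through `perm` carries them back to the original positions. A standalone re-run of that logic on a small input:

    import numpy as np

    ar = np.array([3, 1, 3, 2, 1])
    perm = ar.argsort(kind="mergesort")    # stable sort permutation
    aux = ar[perm]                         # [1, 1, 2, 3, 3]

    mask = np.empty(aux.shape, dtype=bool)
    mask[:1] = True
    mask[1:] = aux[1:] != aux[:-1]         # True at each first occurrence

    imask = np.cumsum(mask) - 1            # labels per sorted slot: [0, 0, 1, 2, 2]
    inv_idx = np.empty(mask.shape, dtype=np.intp)
    inv_idx[perm] = imask                  # scatter labels back to input order

    print(aux[mask])           # [1 2 3]      -- the unique values
    print(inv_idx)             # [2 0 2 1 0]
    print(aux[mask][inv_idx])  # [3 1 3 2 1]  -- reconstructs ar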
367
+
368
+ def _intersect1d_dispatcher(
369
+ ar1, ar2, assume_unique=None, return_indices=None):
370
+ return (ar1, ar2)
371
+
372
+
373
+ @array_function_dispatch(_intersect1d_dispatcher)
374
+ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
375
+ """
376
+ Find the intersection of two arrays.
377
+
378
+ Return the sorted, unique values that are in both of the input arrays.
379
+
380
+ Parameters
381
+ ----------
382
+ ar1, ar2 : array_like
383
+ Input arrays. Will be flattened if not already 1D.
384
+ assume_unique : bool
385
+ If True, the input arrays are both assumed to be unique, which
386
+ can speed up the calculation. If True but ``ar1`` or ``ar2`` are not
387
+ unique, incorrect results and out-of-bounds indices could result.
388
+ Default is False.
389
+ return_indices : bool
390
+ If True, the indices which correspond to the intersection of the two
391
+ arrays are returned. The first instance of a value is used if there are
392
+ multiple. Default is False.
393
+
394
+ .. versionadded:: 1.15.0
395
+
396
+ Returns
397
+ -------
398
+ intersect1d : ndarray
399
+ Sorted 1D array of common and unique elements.
400
+ comm1 : ndarray
401
+ The indices of the first occurrences of the common values in `ar1`.
402
+ Only provided if `return_indices` is True.
403
+ comm2 : ndarray
404
+ The indices of the first occurrences of the common values in `ar2`.
405
+ Only provided if `return_indices` is True.
406
+
407
+
408
+ See Also
409
+ --------
410
+ numpy.lib.arraysetops : Module with a number of other functions for
411
+ performing set operations on arrays.
412
+
413
+ Examples
414
+ --------
415
+ >>> np.intersect1d([1, 3, 4, 3], [3, 1, 2, 1])
416
+ array([1, 3])
417
+
418
+ To intersect more than two arrays, use functools.reduce:
419
+
420
+ >>> from functools import reduce
421
+ >>> reduce(np.intersect1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
422
+ array([3])
423
+
424
+ To return the indices of the values common to the input arrays
425
+ along with the intersected values:
426
+
427
+ >>> x = np.array([1, 1, 2, 3, 4])
428
+ >>> y = np.array([2, 1, 4, 6])
429
+ >>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
430
+ >>> x_ind, y_ind
431
+ (array([0, 2, 4]), array([1, 0, 2]))
432
+ >>> xy, x[x_ind], y[y_ind]
433
+ (array([1, 2, 4]), array([1, 2, 4]), array([1, 2, 4]))
434
+
435
+ """
436
+ ar1 = np.asanyarray(ar1)
437
+ ar2 = np.asanyarray(ar2)
438
+
439
+ if not assume_unique:
440
+ if return_indices:
441
+ ar1, ind1 = unique(ar1, return_index=True)
442
+ ar2, ind2 = unique(ar2, return_index=True)
443
+ else:
444
+ ar1 = unique(ar1)
445
+ ar2 = unique(ar2)
446
+ else:
447
+ ar1 = ar1.ravel()
448
+ ar2 = ar2.ravel()
449
+
450
+ aux = np.concatenate((ar1, ar2))
451
+ if return_indices:
452
+ aux_sort_indices = np.argsort(aux, kind='mergesort')
453
+ aux = aux[aux_sort_indices]
454
+ else:
455
+ aux.sort()
456
+
457
+ mask = aux[1:] == aux[:-1]
458
+ int1d = aux[:-1][mask]
459
+
460
+ if return_indices:
461
+ ar1_indices = aux_sort_indices[:-1][mask]
462
+ ar2_indices = aux_sort_indices[1:][mask] - ar1.size
463
+ if not assume_unique:
464
+ ar1_indices = ind1[ar1_indices]
465
+ ar2_indices = ind2[ar2_indices]
466
+
467
+ return int1d, ar1_indices, ar2_indices
468
+ else:
469
+ return int1d
470
+
471
+
472
+ def _setxor1d_dispatcher(ar1, ar2, assume_unique=None):
473
+ return (ar1, ar2)
474
+
475
+
476
+ @array_function_dispatch(_setxor1d_dispatcher)
477
+ def setxor1d(ar1, ar2, assume_unique=False):
478
+ """
479
+ Find the set exclusive-or of two arrays.
480
+
481
+ Return the sorted, unique values that are in only one (not both) of the
482
+ input arrays.
483
+
484
+ Parameters
485
+ ----------
486
+ ar1, ar2 : array_like
487
+ Input arrays.
488
+ assume_unique : bool
489
+ If True, the input arrays are both assumed to be unique, which
490
+ can speed up the calculation. Default is False.
491
+
492
+ Returns
493
+ -------
494
+ setxor1d : ndarray
495
+ Sorted 1D array of unique values that are in only one of the input
496
+ arrays.
497
+
498
+ Examples
499
+ --------
500
+ >>> a = np.array([1, 2, 3, 2, 4])
501
+ >>> b = np.array([2, 3, 5, 7, 5])
502
+ >>> np.setxor1d(a,b)
503
+ array([1, 4, 5, 7])
504
+
505
+ """
506
+ if not assume_unique:
507
+ ar1 = unique(ar1)
508
+ ar2 = unique(ar2)
509
+
510
+ aux = np.concatenate((ar1, ar2))
511
+ if aux.size == 0:
512
+ return aux
513
+
514
+ aux.sort()
515
+ flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
516
+ return aux[flag[1:] & flag[:-1]]
517
+
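The `flag` trick in `setxor1d` marks, for each sorted position, whether the value differs from its neighbour on that side; a value survives only when both bracketing flags are True, i.e. it occurs exactly once in the concatenation of the two deduplicated inputs. Tracing the docstring's own example:

    import numpy as np

    ar1 = np.unique([1, 2, 3, 2, 4])       # [1, 2, 3, 4]
    ar2 = np.unique([2, 3, 5, 7, 5])       # [2, 3, 5, 7]

    aux = np.concatenate((ar1, ar2))
    aux.sort()                             # [1, 2, 2, 3, 3, 4, 5, 7]

    flag = np.concatenate(([True], aux[1:] != aux[:-1], [True]))
    # flag[i] and flag[i+1] bracket aux[i]; both True <=> aux[i] occurs once
    print(aux[flag[1:] & flag[:-1]])       # [1 4 5 7]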
518
+
519
+ def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None, *,
520
+ kind=None):
521
+ return (ar1, ar2)
522
+
523
+
524
+ @array_function_dispatch(_in1d_dispatcher)
525
+ def in1d(ar1, ar2, assume_unique=False, invert=False, *, kind=None):
526
+ """
527
+ Test whether each element of a 1-D array is also present in a second array.
528
+
529
+ Returns a boolean array the same length as `ar1` that is True
530
+ where an element of `ar1` is in `ar2` and False otherwise.
531
+
532
+ We recommend using :func:`isin` instead of `in1d` for new code.
533
+
534
+ Parameters
535
+ ----------
536
+ ar1 : (M,) array_like
537
+ Input array.
538
+ ar2 : array_like
539
+ The values against which to test each value of `ar1`.
540
+ assume_unique : bool, optional
541
+ If True, the input arrays are both assumed to be unique, which
542
+ can speed up the calculation. Default is False.
543
+ invert : bool, optional
544
+ If True, the values in the returned array are inverted (that is,
545
+ False where an element of `ar1` is in `ar2` and True otherwise).
546
+ Default is False. ``np.in1d(a, b, invert=True)`` is equivalent
547
+ to (but is faster than) ``np.invert(in1d(a, b))``.
548
+ kind : {None, 'sort', 'table'}, optional
549
+ The algorithm to use. This will not affect the final result,
550
+ but will affect the speed and memory use. The default, None,
551
+ will select automatically based on memory considerations.
552
+
553
+ * If 'sort', will use a mergesort-based approach. This will have
554
+ a memory usage of roughly 6 times the sum of the sizes of
555
+ `ar1` and `ar2`, not accounting for size of dtypes.
556
+ * If 'table', will use a lookup table approach similar
557
+ to a counting sort. This is only available for boolean and
558
+ integer arrays. This will have a memory usage of the
559
+ size of `ar1` plus the max-min value of `ar2`. `assume_unique`
560
+ has no effect when the 'table' option is used.
561
+ * If None, will automatically choose 'table' if
562
+ the required memory allocation is less than or equal to
563
+ 6 times the sum of the sizes of `ar1` and `ar2`,
564
+ otherwise will use 'sort'. This is done to not use
565
+ a large amount of memory by default, even though
566
+ 'table' may be faster in most cases. If 'table' is chosen,
567
+ `assume_unique` will have no effect.
568
+
569
+ .. versionadded:: 1.8.0
570
+
571
+ Returns
572
+ -------
573
+ in1d : (M,) ndarray, bool
574
+ The values `ar1[in1d]` are in `ar2`.
575
+
576
+ See Also
577
+ --------
578
+ isin : Version of this function that preserves the
579
+ shape of ar1.
580
+ numpy.lib.arraysetops : Module with a number of other functions for
581
+ performing set operations on arrays.
582
+
583
+ Notes
584
+ -----
585
+ `in1d` can be considered as an element-wise function version of the
586
+ python keyword `in`, for 1-D sequences. ``in1d(a, b)`` is roughly
587
+ equivalent to ``np.array([item in b for item in a])``.
588
+ However, this idea fails if `ar2` is a set, or similar (non-sequence)
589
+ container: As ``ar2`` is converted to an array, in those cases
590
+ ``asarray(ar2)`` is an object array rather than the expected array of
591
+ contained values.
592
+
593
+ Using ``kind='table'`` tends to be faster than ``kind='sort'`` if the
594
+ following relationship is true:
595
+ ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
596
+ but may use greater memory. The default value for `kind` will
597
+ be automatically selected based only on memory usage, so one may
598
+ manually set ``kind='table'`` if memory constraints can be relaxed.
599
+
600
+ .. versionadded:: 1.4.0
601
+
602
+ Examples
603
+ --------
604
+ >>> test = np.array([0, 1, 2, 5, 0])
605
+ >>> states = [0, 2]
606
+ >>> mask = np.in1d(test, states)
607
+ >>> mask
608
+ array([ True, False, True, False, True])
609
+ >>> test[mask]
610
+ array([0, 2, 0])
611
+ >>> mask = np.in1d(test, states, invert=True)
612
+ >>> mask
613
+ array([False, True, False, True, False])
614
+ >>> test[mask]
615
+ array([1, 5])
616
+ """
617
+ # Ravel both arrays, behavior for the first array could be different
618
+ ar1 = np.asarray(ar1).ravel()
619
+ ar2 = np.asarray(ar2).ravel()
620
+
621
+ # Ensure that iteration through object arrays yields size-1 arrays
622
+ if ar2.dtype == object:
623
+ ar2 = ar2.reshape(-1, 1)
624
+
625
+ if kind not in {None, 'sort', 'table'}:
626
+ raise ValueError(
627
+ f"Invalid kind: '{kind}'. Please use None, 'sort' or 'table'.")
628
+
629
+ # Can use the table method if all arrays are integers or boolean:
630
+ is_int_arrays = all(ar.dtype.kind in ("u", "i", "b") for ar in (ar1, ar2))
631
+ use_table_method = is_int_arrays and kind in {None, 'table'}
632
+
633
+ if use_table_method:
634
+ if ar2.size == 0:
635
+ if invert:
636
+ return np.ones_like(ar1, dtype=bool)
637
+ else:
638
+ return np.zeros_like(ar1, dtype=bool)
639
+
640
+ # Convert booleans to uint8 so we can use the fast integer algorithm
641
+ if ar1.dtype == bool:
642
+ ar1 = ar1.astype(np.uint8)
643
+ if ar2.dtype == bool:
644
+ ar2 = ar2.astype(np.uint8)
645
+
646
+ ar2_min = np.min(ar2)
647
+ ar2_max = np.max(ar2)
648
+
649
+ ar2_range = int(ar2_max) - int(ar2_min)
650
+
651
+ # Constraints on whether we can actually use the table method:
652
+ # 1. Assert memory usage is not too large
653
+ below_memory_constraint = ar2_range <= 6 * (ar1.size + ar2.size)
654
+ # 2. Check overflows for (ar2 - ar2_min); dtype=ar2.dtype
655
+ range_safe_from_overflow = ar2_range <= np.iinfo(ar2.dtype).max
656
+ # 3. Check overflows for (ar1 - ar2_min); dtype=ar1.dtype
657
+ if ar1.size > 0:
658
+ ar1_min = np.min(ar1)
659
+ ar1_max = np.max(ar1)
660
+
661
+ # After masking, the range of ar1 is guaranteed to be
662
+ # within the range of ar2:
663
+ ar1_upper = min(int(ar1_max), int(ar2_max))
664
+ ar1_lower = max(int(ar1_min), int(ar2_min))
665
+
666
+ range_safe_from_overflow &= all((
667
+ ar1_upper - int(ar2_min) <= np.iinfo(ar1.dtype).max,
668
+ ar1_lower - int(ar2_min) >= np.iinfo(ar1.dtype).min
669
+ ))
670
+
671
+ # Optimal performance is for approximately
672
+ # log10(size) > (log10(range) - 2.27) / 0.927.
673
+ # However, here we set the requirement that by default
674
+ # the intermediate array can only be 6x
675
+ # the combined memory allocation of the original
676
+ # arrays. See discussion on
677
+ # https://github.com/numpy/numpy/pull/12065.
678
+
679
+ if (
680
+ range_safe_from_overflow and
681
+ (below_memory_constraint or kind == 'table')
682
+ ):
683
+
684
+ if invert:
685
+ outgoing_array = np.ones_like(ar1, dtype=bool)
686
+ else:
687
+ outgoing_array = np.zeros_like(ar1, dtype=bool)
688
+
689
+ # Make elements 1 where the integer exists in ar2
690
+ if invert:
691
+ isin_helper_ar = np.ones(ar2_range + 1, dtype=bool)
692
+ isin_helper_ar[ar2 - ar2_min] = 0
693
+ else:
694
+ isin_helper_ar = np.zeros(ar2_range + 1, dtype=bool)
695
+ isin_helper_ar[ar2 - ar2_min] = 1
696
+
697
+ # Mask out elements we know won't work
698
+ basic_mask = (ar1 <= ar2_max) & (ar1 >= ar2_min)
699
+ outgoing_array[basic_mask] = isin_helper_ar[ar1[basic_mask] -
700
+ ar2_min]
701
+
702
+ return outgoing_array
703
+ elif kind == 'table': # not range_safe_from_overflow
704
+ raise RuntimeError(
705
+ "You have specified kind='table', "
706
+ "but the range of values in `ar2` or `ar1` exceed the "
707
+ "maximum integer of the datatype. "
708
+ "Please set `kind` to None or 'sort'."
709
+ )
710
+ elif kind == 'table':
711
+ raise ValueError(
712
+ "The 'table' method is only "
713
+ "supported for boolean or integer arrays. "
714
+ "Please select 'sort' or None for kind."
715
+ )
716
+
717
+
718
+ # Check if one of the arrays may contain arbitrary objects
719
+ contains_object = ar1.dtype.hasobject or ar2.dtype.hasobject
720
+
721
+ # This code is run when
722
+ # a) the first condition is true, making the code significantly faster
723
+ # b) the second condition is true (i.e. `ar1` or `ar2` may contain
724
+ # arbitrary objects), since then sorting is not guaranteed to work
725
+ if len(ar2) < 10 * len(ar1) ** 0.145 or contains_object:
726
+ if invert:
727
+ mask = np.ones(len(ar1), dtype=bool)
728
+ for a in ar2:
729
+ mask &= (ar1 != a)
730
+ else:
731
+ mask = np.zeros(len(ar1), dtype=bool)
732
+ for a in ar2:
733
+ mask |= (ar1 == a)
734
+ return mask
735
+
736
+ # Otherwise use sorting
737
+ if not assume_unique:
738
+ ar1, rev_idx = np.unique(ar1, return_inverse=True)
739
+ ar2 = np.unique(ar2)
740
+
741
+ ar = np.concatenate((ar1, ar2))
742
+ # We need this to be a stable sort, so always use 'mergesort'
743
+ # here. The values from the first array should always come before
744
+ # the values from the second array.
745
+ order = ar.argsort(kind='mergesort')
746
+ sar = ar[order]
747
+ if invert:
748
+ bool_ar = (sar[1:] != sar[:-1])
749
+ else:
750
+ bool_ar = (sar[1:] == sar[:-1])
751
+ flag = np.concatenate((bool_ar, [invert]))
752
+ ret = np.empty(ar.shape, dtype=bool)
753
+ ret[order] = flag
754
+
755
+ if assume_unique:
756
+ return ret[:len(ar1)]
757
+ else:
758
+ return ret[rev_idx]
759
+
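The 'table' branch above amounts to a counting-sort-style lookup: build a boolean table spanning `[ar2_min, ar2_max]`, mark the values present in `ar2`, then index it with the masked, shifted values of `ar1`. A stripped-down sketch of just that idea, without the overflow and memory guards the real code applies (`in1d_table_sketch` is an illustrative name, not a NumPy function):

    import numpy as np

    def in1d_table_sketch(ar1, ar2):
        # Membership test via a lookup table; assumes integer inputs.
        ar1 = np.asarray(ar1).ravel()
        ar2 = np.asarray(ar2).ravel()
        lo, hi = int(ar2.min()), int(ar2.max())

        table = np.zeros(hi - lo + 1, dtype=bool)
        table[ar2 - lo] = True                  # mark every value in ar2

        out = np.zeros(ar1.shape, dtype=bool)
        in_range = (ar1 >= lo) & (ar1 <= hi)    # out-of-range values cannot match
        out[in_range] = table[ar1[in_range] - lo]
        return out

    print(in1d_table_sketch([0, 1, 2, 5, 0], [0, 2]))
    # [ True False  True False  True]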
760
+
761
+ def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None,
762
+ *, kind=None):
763
+ return (element, test_elements)
764
+
765
+
766
+ @array_function_dispatch(_isin_dispatcher)
767
+ def isin(element, test_elements, assume_unique=False, invert=False, *,
768
+ kind=None):
769
+ """
770
+ Calculates ``element in test_elements``, broadcasting over `element` only.
771
+ Returns a boolean array of the same shape as `element` that is True
772
+ where an element of `element` is in `test_elements` and False otherwise.
773
+
774
+ Parameters
775
+ ----------
776
+ element : array_like
777
+ Input array.
778
+ test_elements : array_like
779
+ The values against which to test each value of `element`.
780
+ This argument is flattened if it is an array or array_like.
781
+ See notes for behavior with non-array-like parameters.
782
+ assume_unique : bool, optional
783
+ If True, the input arrays are both assumed to be unique, which
784
+ can speed up the calculation. Default is False.
785
+ invert : bool, optional
786
+ If True, the values in the returned array are inverted, as if
787
+ calculating `element not in test_elements`. Default is False.
788
+ ``np.isin(a, b, invert=True)`` is equivalent to (but faster
789
+ than) ``np.invert(np.isin(a, b))``.
790
+ kind : {None, 'sort', 'table'}, optional
791
+ The algorithm to use. This will not affect the final result,
792
+ but will affect the speed and memory use. The default, None,
793
+ will select automatically based on memory considerations.
794
+
795
+ * If 'sort', will use a mergesort-based approach. This will have
796
+ a memory usage of roughly 6 times the sum of the sizes of
797
+ `ar1` and `ar2`, not accounting for size of dtypes.
798
+ * If 'table', will use a lookup table approach similar
799
+ to a counting sort. This is only available for boolean and
800
+ integer arrays. This will have a memory usage of the
801
+ size of `ar1` plus the max-min value of `ar2`. `assume_unique`
802
+ has no effect when the 'table' option is used.
803
+ * If None, will automatically choose 'table' if
804
+ the required memory allocation is less than or equal to
805
+ 6 times the sum of the sizes of `ar1` and `ar2`,
806
+ otherwise will use 'sort'. This is done to not use
807
+ a large amount of memory by default, even though
808
+ 'table' may be faster in most cases. If 'table' is chosen,
809
+ `assume_unique` will have no effect.
810
+
811
+
812
+ Returns
813
+ -------
814
+ isin : ndarray, bool
815
+ Has the same shape as `element`. The values `element[isin]`
816
+ are in `test_elements`.
817
+
818
+ See Also
819
+ --------
820
+ in1d : Flattened version of this function.
821
+ numpy.lib.arraysetops : Module with a number of other functions for
822
+ performing set operations on arrays.
823
+
824
+ Notes
825
+ -----
826
+
827
+ `isin` is an element-wise function version of the python keyword `in`.
828
+ ``isin(a, b)`` is roughly equivalent to
829
+ ``np.array([item in b for item in a])`` if `a` and `b` are 1-D sequences.
830
+
831
+ `element` and `test_elements` are converted to arrays if they are not
832
+ already. If `test_elements` is a set (or other non-sequence collection)
833
+ it will be converted to an object array with one element, rather than an
834
+ array of the values contained in `test_elements`. This is a consequence
835
+ of the `array` constructor's way of handling non-sequence collections.
836
+ Converting the set to a list usually gives the desired behavior.
837
+
838
+ Using ``kind='table'`` tends to be faster than ``kind='sort'`` if the
839
+ following relationship is true:
840
+ ``log10(len(ar2)) > (log10(max(ar2)-min(ar2)) - 2.27) / 0.927``,
841
+ but may use greater memory. The default value for `kind` will
842
+ be automatically selected based only on memory usage, so one may
843
+ manually set ``kind='table'`` if memory constraints can be relaxed.
844
+
845
+ .. versionadded:: 1.13.0
846
+
847
+ Examples
848
+ --------
849
+ >>> element = 2*np.arange(4).reshape((2, 2))
850
+ >>> element
851
+ array([[0, 2],
852
+ [4, 6]])
853
+ >>> test_elements = [1, 2, 4, 8]
854
+ >>> mask = np.isin(element, test_elements)
855
+ >>> mask
856
+ array([[False, True],
857
+ [ True, False]])
858
+ >>> element[mask]
859
+ array([2, 4])
860
+
861
+ The indices of the matched values can be obtained with `nonzero`:
862
+
863
+ >>> np.nonzero(mask)
864
+ (array([0, 1]), array([1, 0]))
865
+
866
+ The test can also be inverted:
867
+
868
+ >>> mask = np.isin(element, test_elements, invert=True)
869
+ >>> mask
870
+ array([[ True, False],
871
+ [False, True]])
872
+ >>> element[mask]
873
+ array([0, 6])
874
+
875
+ Because of how `array` handles sets, the following does not
876
+ work as expected:
877
+
878
+ >>> test_set = {1, 2, 4, 8}
879
+ >>> np.isin(element, test_set)
880
+ array([[False, False],
881
+ [False, False]])
882
+
883
+ Casting the set to a list gives the expected result:
884
+
885
+ >>> np.isin(element, list(test_set))
886
+ array([[False, True],
887
+ [ True, False]])
888
+ """
889
+ element = np.asarray(element)
890
+ return in1d(element, test_elements, assume_unique=assume_unique,
891
+ invert=invert, kind=kind).reshape(element.shape)
892
+
893
+
894
+ def _union1d_dispatcher(ar1, ar2):
895
+ return (ar1, ar2)
896
+
897
+
898
+ @array_function_dispatch(_union1d_dispatcher)
899
+ def union1d(ar1, ar2):
900
+ """
901
+ Find the union of two arrays.
902
+
903
+ Return the unique, sorted array of values that are in either of the two
904
+ input arrays.
905
+
906
+ Parameters
907
+ ----------
908
+ ar1, ar2 : array_like
909
+ Input arrays. They are flattened if they are not already 1D.
910
+
911
+ Returns
912
+ -------
913
+ union1d : ndarray
914
+ Unique, sorted union of the input arrays.
915
+
916
+ See Also
917
+ --------
918
+ numpy.lib.arraysetops : Module with a number of other functions for
919
+ performing set operations on arrays.
920
+
921
+ Examples
922
+ --------
923
+ >>> np.union1d([-1, 0, 1], [-2, 0, 2])
924
+ array([-2, -1, 0, 1, 2])
925
+
926
+ To find the union of more than two arrays, use functools.reduce:
927
+
928
+ >>> from functools import reduce
929
+ >>> reduce(np.union1d, ([1, 3, 4, 3], [3, 1, 2, 1], [6, 3, 4, 2]))
930
+ array([1, 2, 3, 4, 6])
931
+ """
932
+ return unique(np.concatenate((ar1, ar2), axis=None))
933
+
934
+
935
+ def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None):
936
+ return (ar1, ar2)
937
+
938
+
939
+ @array_function_dispatch(_setdiff1d_dispatcher)
940
+ def setdiff1d(ar1, ar2, assume_unique=False):
941
+ """
942
+ Find the set difference of two arrays.
943
+
944
+ Return the unique values in `ar1` that are not in `ar2`.
945
+
946
+ Parameters
947
+ ----------
948
+ ar1 : array_like
949
+ Input array.
950
+ ar2 : array_like
951
+ Input comparison array.
952
+ assume_unique : bool
953
+ If True, the input arrays are both assumed to be unique, which
954
+ can speed up the calculation. Default is False.
955
+
956
+ Returns
957
+ -------
958
+ setdiff1d : ndarray
959
+ 1D array of values in `ar1` that are not in `ar2`. The result
960
+ is sorted when `assume_unique=False`, but otherwise only sorted
961
+ if the input is sorted.
962
+
963
+ See Also
964
+ --------
965
+ numpy.lib.arraysetops : Module with a number of other functions for
966
+ performing set operations on arrays.
967
+
968
+ Examples
969
+ --------
970
+ >>> a = np.array([1, 2, 3, 2, 4, 1])
971
+ >>> b = np.array([3, 4, 5, 6])
972
+ >>> np.setdiff1d(a, b)
973
+ array([1, 2])
974
+
975
+ """
976
+ if assume_unique:
977
+ ar1 = np.asarray(ar1).ravel()
978
+ else:
979
+ ar1 = unique(ar1)
980
+ ar2 = unique(ar2)
981
+ return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
env-llmeval/lib/python3.10/site-packages/numpy/lib/arraysetops.pyi ADDED
@@ -0,0 +1,362 @@
1
+ from typing import (
2
+ Literal as L,
3
+ Any,
4
+ TypeVar,
5
+ overload,
6
+ SupportsIndex,
7
+ )
8
+
9
+ from numpy import (
10
+ generic,
11
+ number,
12
+ bool_,
13
+ ushort,
14
+ ubyte,
15
+ uintc,
16
+ uint,
17
+ ulonglong,
18
+ short,
19
+ int8,
20
+ byte,
21
+ intc,
22
+ int_,
23
+ intp,
24
+ longlong,
25
+ half,
26
+ single,
27
+ double,
28
+ longdouble,
29
+ csingle,
30
+ cdouble,
31
+ clongdouble,
32
+ timedelta64,
33
+ datetime64,
34
+ object_,
35
+ str_,
36
+ bytes_,
37
+ void,
38
+ )
39
+
40
+ from numpy._typing import (
41
+ ArrayLike,
42
+ NDArray,
43
+ _ArrayLike,
44
+ _ArrayLikeBool_co,
45
+ _ArrayLikeDT64_co,
46
+ _ArrayLikeTD64_co,
47
+ _ArrayLikeObject_co,
48
+ _ArrayLikeNumber_co,
49
+ )
50
+
51
+ _SCT = TypeVar("_SCT", bound=generic)
52
+ _NumberType = TypeVar("_NumberType", bound=number[Any])
53
+
54
+ # Explicitly set all allowed values to prevent accidental castings to
55
+ # abstract dtypes (their common super-type).
56
+ #
57
+ # Only relevant if two or more arguments are parametrized (e.g. `setdiff1d`),
58
+ # which could result in, for example, `int64` and `float64` producing a
59
+ # `number[_64Bit]` array.
60
+ _SCTNoCast = TypeVar(
61
+ "_SCTNoCast",
62
+ bool_,
63
+ ushort,
64
+ ubyte,
65
+ uintc,
66
+ uint,
67
+ ulonglong,
68
+ short,
69
+ byte,
70
+ intc,
71
+ int_,
72
+ longlong,
73
+ half,
74
+ single,
75
+ double,
76
+ longdouble,
77
+ csingle,
78
+ cdouble,
79
+ clongdouble,
80
+ timedelta64,
81
+ datetime64,
82
+ object_,
83
+ str_,
84
+ bytes_,
85
+ void,
86
+ )
87
+
88
+ __all__: list[str]
89
+
90
+ @overload
91
+ def ediff1d(
92
+ ary: _ArrayLikeBool_co,
93
+ to_end: None | ArrayLike = ...,
94
+ to_begin: None | ArrayLike = ...,
95
+ ) -> NDArray[int8]: ...
96
+ @overload
97
+ def ediff1d(
98
+ ary: _ArrayLike[_NumberType],
99
+ to_end: None | ArrayLike = ...,
100
+ to_begin: None | ArrayLike = ...,
101
+ ) -> NDArray[_NumberType]: ...
102
+ @overload
103
+ def ediff1d(
104
+ ary: _ArrayLikeNumber_co,
105
+ to_end: None | ArrayLike = ...,
106
+ to_begin: None | ArrayLike = ...,
107
+ ) -> NDArray[Any]: ...
108
+ @overload
109
+ def ediff1d(
110
+ ary: _ArrayLikeDT64_co | _ArrayLikeTD64_co,
111
+ to_end: None | ArrayLike = ...,
112
+ to_begin: None | ArrayLike = ...,
113
+ ) -> NDArray[timedelta64]: ...
114
+ @overload
115
+ def ediff1d(
116
+ ary: _ArrayLikeObject_co,
117
+ to_end: None | ArrayLike = ...,
118
+ to_begin: None | ArrayLike = ...,
119
+ ) -> NDArray[object_]: ...
120
+
121
+ @overload
122
+ def unique(
123
+ ar: _ArrayLike[_SCT],
124
+ return_index: L[False] = ...,
125
+ return_inverse: L[False] = ...,
126
+ return_counts: L[False] = ...,
127
+ axis: None | SupportsIndex = ...,
128
+ *,
129
+ equal_nan: bool = ...,
130
+ ) -> NDArray[_SCT]: ...
131
+ @overload
132
+ def unique(
133
+ ar: ArrayLike,
134
+ return_index: L[False] = ...,
135
+ return_inverse: L[False] = ...,
136
+ return_counts: L[False] = ...,
137
+ axis: None | SupportsIndex = ...,
138
+ *,
139
+ equal_nan: bool = ...,
140
+ ) -> NDArray[Any]: ...
141
+ @overload
142
+ def unique(
143
+ ar: _ArrayLike[_SCT],
144
+ return_index: L[True] = ...,
145
+ return_inverse: L[False] = ...,
146
+ return_counts: L[False] = ...,
147
+ axis: None | SupportsIndex = ...,
148
+ *,
149
+ equal_nan: bool = ...,
150
+ ) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
151
+ @overload
152
+ def unique(
153
+ ar: ArrayLike,
154
+ return_index: L[True] = ...,
155
+ return_inverse: L[False] = ...,
156
+ return_counts: L[False] = ...,
157
+ axis: None | SupportsIndex = ...,
158
+ *,
159
+ equal_nan: bool = ...,
160
+ ) -> tuple[NDArray[Any], NDArray[intp]]: ...
161
+ @overload
162
+ def unique(
163
+ ar: _ArrayLike[_SCT],
164
+ return_index: L[False] = ...,
165
+ return_inverse: L[True] = ...,
166
+ return_counts: L[False] = ...,
167
+ axis: None | SupportsIndex = ...,
168
+ *,
169
+ equal_nan: bool = ...,
170
+ ) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
171
+ @overload
172
+ def unique(
173
+ ar: ArrayLike,
174
+ return_index: L[False] = ...,
175
+ return_inverse: L[True] = ...,
176
+ return_counts: L[False] = ...,
177
+ axis: None | SupportsIndex = ...,
178
+ *,
179
+ equal_nan: bool = ...,
180
+ ) -> tuple[NDArray[Any], NDArray[intp]]: ...
181
+ @overload
182
+ def unique(
183
+ ar: _ArrayLike[_SCT],
184
+ return_index: L[False] = ...,
185
+ return_inverse: L[False] = ...,
186
+ return_counts: L[True] = ...,
187
+ axis: None | SupportsIndex = ...,
188
+ *,
189
+ equal_nan: bool = ...,
190
+ ) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
191
+ @overload
192
+ def unique(
193
+ ar: ArrayLike,
194
+ return_index: L[False] = ...,
195
+ return_inverse: L[False] = ...,
196
+ return_counts: L[True] = ...,
197
+ axis: None | SupportsIndex = ...,
198
+ *,
199
+ equal_nan: bool = ...,
200
+ ) -> tuple[NDArray[Any], NDArray[intp]]: ...
201
+ @overload
202
+ def unique(
203
+ ar: _ArrayLike[_SCT],
204
+ return_index: L[True] = ...,
205
+ return_inverse: L[True] = ...,
206
+ return_counts: L[False] = ...,
207
+ axis: None | SupportsIndex = ...,
208
+ *,
209
+ equal_nan: bool = ...,
210
+ ) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
211
+ @overload
212
+ def unique(
213
+ ar: ArrayLike,
214
+ return_index: L[True] = ...,
215
+ return_inverse: L[True] = ...,
216
+ return_counts: L[False] = ...,
217
+ axis: None | SupportsIndex = ...,
218
+ *,
219
+ equal_nan: bool = ...,
220
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
221
+ @overload
222
+ def unique(
223
+ ar: _ArrayLike[_SCT],
224
+ return_index: L[True] = ...,
225
+ return_inverse: L[False] = ...,
226
+ return_counts: L[True] = ...,
227
+ axis: None | SupportsIndex = ...,
228
+ *,
229
+ equal_nan: bool = ...,
230
+ ) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
231
+ @overload
232
+ def unique(
233
+ ar: ArrayLike,
234
+ return_index: L[True] = ...,
235
+ return_inverse: L[False] = ...,
236
+ return_counts: L[True] = ...,
237
+ axis: None | SupportsIndex = ...,
238
+ *,
239
+ equal_nan: bool = ...,
240
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
241
+ @overload
242
+ def unique(
243
+ ar: _ArrayLike[_SCT],
244
+ return_index: L[False] = ...,
245
+ return_inverse: L[True] = ...,
246
+ return_counts: L[True] = ...,
247
+ axis: None | SupportsIndex = ...,
248
+ *,
249
+ equal_nan: bool = ...,
250
+ ) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
251
+ @overload
252
+ def unique(
253
+ ar: ArrayLike,
254
+ return_index: L[False] = ...,
255
+ return_inverse: L[True] = ...,
256
+ return_counts: L[True] = ...,
257
+ axis: None | SupportsIndex = ...,
258
+ *,
259
+ equal_nan: bool = ...,
260
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
261
+ @overload
262
+ def unique(
263
+ ar: _ArrayLike[_SCT],
264
+ return_index: L[True] = ...,
265
+ return_inverse: L[True] = ...,
266
+ return_counts: L[True] = ...,
267
+ axis: None | SupportsIndex = ...,
268
+ *,
269
+ equal_nan: bool = ...,
270
+ ) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp], NDArray[intp]]: ...
271
+ @overload
272
+ def unique(
273
+ ar: ArrayLike,
274
+ return_index: L[True] = ...,
275
+ return_inverse: L[True] = ...,
276
+ return_counts: L[True] = ...,
277
+ axis: None | SupportsIndex = ...,
278
+ *,
279
+ equal_nan: bool = ...,
280
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp], NDArray[intp]]: ...
281
+
282
+ @overload
283
+ def intersect1d(
284
+ ar1: _ArrayLike[_SCTNoCast],
285
+ ar2: _ArrayLike[_SCTNoCast],
286
+ assume_unique: bool = ...,
287
+ return_indices: L[False] = ...,
288
+ ) -> NDArray[_SCTNoCast]: ...
289
+ @overload
290
+ def intersect1d(
291
+ ar1: ArrayLike,
292
+ ar2: ArrayLike,
293
+ assume_unique: bool = ...,
294
+ return_indices: L[False] = ...,
295
+ ) -> NDArray[Any]: ...
296
+ @overload
297
+ def intersect1d(
298
+ ar1: _ArrayLike[_SCTNoCast],
299
+ ar2: _ArrayLike[_SCTNoCast],
300
+ assume_unique: bool = ...,
301
+ return_indices: L[True] = ...,
302
+ ) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ...
303
+ @overload
304
+ def intersect1d(
305
+ ar1: ArrayLike,
306
+ ar2: ArrayLike,
307
+ assume_unique: bool = ...,
308
+ return_indices: L[True] = ...,
309
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
310
+
311
+ @overload
312
+ def setxor1d(
313
+ ar1: _ArrayLike[_SCTNoCast],
314
+ ar2: _ArrayLike[_SCTNoCast],
315
+ assume_unique: bool = ...,
316
+ ) -> NDArray[_SCTNoCast]: ...
317
+ @overload
318
+ def setxor1d(
319
+ ar1: ArrayLike,
320
+ ar2: ArrayLike,
321
+ assume_unique: bool = ...,
322
+ ) -> NDArray[Any]: ...
323
+
324
+ def in1d(
325
+ ar1: ArrayLike,
326
+ ar2: ArrayLike,
327
+ assume_unique: bool = ...,
328
+ invert: bool = ...,
+ *,
+ kind: None | str = ...,
329
+ ) -> NDArray[bool_]: ...
330
+
331
+ def isin(
332
+ element: ArrayLike,
333
+ test_elements: ArrayLike,
334
+ assume_unique: bool = ...,
335
+ invert: bool = ...,
336
+ *,
337
+ kind: None | str = ...,
338
+ ) -> NDArray[bool_]: ...
339
+
340
+ @overload
341
+ def union1d(
342
+ ar1: _ArrayLike[_SCTNoCast],
343
+ ar2: _ArrayLike[_SCTNoCast],
344
+ ) -> NDArray[_SCTNoCast]: ...
345
+ @overload
346
+ def union1d(
347
+ ar1: ArrayLike,
348
+ ar2: ArrayLike,
349
+ ) -> NDArray[Any]: ...
350
+
351
+ @overload
352
+ def setdiff1d(
353
+ ar1: _ArrayLike[_SCTNoCast],
354
+ ar2: _ArrayLike[_SCTNoCast],
355
+ assume_unique: bool = ...,
356
+ ) -> NDArray[_SCTNoCast]: ...
357
+ @overload
358
+ def setdiff1d(
359
+ ar1: ArrayLike,
360
+ ar2: ArrayLike,
361
+ assume_unique: bool = ...,
362
+ ) -> NDArray[Any]: ...
env-llmeval/lib/python3.10/site-packages/numpy/lib/arrayterator.py ADDED
@@ -0,0 +1,219 @@
1
+ """
2
+ A buffered iterator for big arrays.
3
+
4
+ This module solves the problem of iterating over a big file-based array
5
+ without having to read it into memory. The `Arrayterator` class wraps
6
+ an array object, and when iterated it will return sub-arrays with at most
7
+ a user-specified number of elements.
8
+
9
+ """
10
+ from operator import mul
11
+ from functools import reduce
12
+
13
+ __all__ = ['Arrayterator']
14
+
15
+
16
+ class Arrayterator:
17
+ """
18
+ Buffered iterator for big arrays.
19
+
20
+ `Arrayterator` creates a buffered iterator for reading big arrays in small
21
+ contiguous blocks. The class is useful for objects stored in the
22
+ file system. It allows iteration over the object *without* reading
23
+ everything in memory; instead, small blocks are read and iterated over.
24
+
25
+ `Arrayterator` can be used with any object that supports multidimensional
26
+ slices. This includes NumPy arrays, but also variables from
27
+ Scientific.IO.NetCDF or pynetcdf for example.
28
+
29
+ Parameters
30
+ ----------
31
+ var : array_like
32
+ The object to iterate over.
33
+ buf_size : int, optional
34
+ The buffer size. If `buf_size` is supplied, the maximum amount of
35
+ data that will be read into memory is `buf_size` elements.
36
+ Default is None, which will read as many elements as possible
37
+ into memory.
38
+
39
+ Attributes
40
+ ----------
41
+ var
42
+ buf_size
43
+ start
44
+ stop
45
+ step
46
+ shape
47
+ flat
48
+
49
+ See Also
50
+ --------
51
+ ndenumerate : Multidimensional array iterator.
52
+ flatiter : Flat array iterator.
53
+ memmap : Create a memory-map to an array stored in a binary file on disk.
54
+
55
+ Notes
56
+ -----
57
+ The algorithm works by first finding a "running dimension", along which
58
+ the blocks will be extracted. Given an array of dimensions
59
+ ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
60
+ first dimension will be used. If, on the other hand,
61
+ ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
62
+ Blocks are extracted along this dimension, and when the last block is
63
+ returned the process continues from the next dimension, until all
64
+ elements have been read.
65
+
66
+ Examples
67
+ --------
68
+ >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
69
+ >>> a_itor = np.lib.Arrayterator(a, 2)
70
+ >>> a_itor.shape
71
+ (3, 4, 5, 6)
72
+
73
+ Now we can iterate over ``a_itor``, and it will return arrays of size
74
+ two. Since `buf_size` was smaller than any dimension, the first
75
+ dimension will be iterated over first:
76
+
77
+ >>> for subarr in a_itor:
78
+ ... if not subarr.all():
79
+ ... print(subarr, subarr.shape) # doctest: +SKIP
80
+ >>> # [[[[0 1]]]] (1, 1, 1, 2)
81
+
82
+ """
83
+
84
+ def __init__(self, var, buf_size=None):
85
+ self.var = var
86
+ self.buf_size = buf_size
87
+
88
+ self.start = [0 for dim in var.shape]
89
+ self.stop = [dim for dim in var.shape]
90
+ self.step = [1 for dim in var.shape]
91
+
92
+ def __getattr__(self, attr):
93
+ return getattr(self.var, attr)
94
+
95
+ def __getitem__(self, index):
96
+ """
97
+ Return a new arrayterator.
98
+
99
+ """
100
+ # Fix index, handling ellipsis and incomplete slices.
101
+ if not isinstance(index, tuple):
102
+ index = (index,)
103
+ fixed = []
104
+ length, dims = len(index), self.ndim
105
+ for slice_ in index:
106
+ if slice_ is Ellipsis:
107
+ fixed.extend([slice(None)] * (dims-length+1))
108
+ length = len(fixed)
109
+ elif isinstance(slice_, int):
110
+ fixed.append(slice(slice_, slice_+1, 1))
111
+ else:
112
+ fixed.append(slice_)
113
+ index = tuple(fixed)
114
+ if len(index) < dims:
115
+ index += (slice(None),) * (dims-len(index))
116
+
117
+ # Return a new arrayterator object.
118
+ out = self.__class__(self.var, self.buf_size)
119
+ for i, (start, stop, step, slice_) in enumerate(
120
+ zip(self.start, self.stop, self.step, index)):
121
+ out.start[i] = start + (slice_.start or 0)
122
+ out.step[i] = step * (slice_.step or 1)
123
+ out.stop[i] = start + (slice_.stop or stop-start)
124
+ out.stop[i] = min(stop, out.stop[i])
125
+ return out
126
+
127
+ def __array__(self):
128
+ """
129
+ Return corresponding data.
130
+
131
+ """
132
+ slice_ = tuple(slice(*t) for t in zip(
133
+ self.start, self.stop, self.step))
134
+ return self.var[slice_]
135
+
136
+ @property
137
+ def flat(self):
138
+ """
139
+ A 1-D flat iterator for Arrayterator objects.
140
+
141
+ This iterator returns elements of the array to be iterated over in
142
+ `Arrayterator` one by one. It is similar to `flatiter`.
143
+
144
+ See Also
145
+ --------
146
+ Arrayterator
147
+ flatiter
148
+
149
+ Examples
150
+ --------
151
+ >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
152
+ >>> a_itor = np.lib.Arrayterator(a, 2)
153
+
154
+ >>> for subarr in a_itor.flat:
155
+ ... if not subarr:
156
+ ... print(subarr, type(subarr))
157
+ ...
158
+ 0 <class 'numpy.int64'>
159
+
160
+ """
161
+ for block in self:
162
+ yield from block.flat
163
+
164
+ @property
165
+ def shape(self):
166
+ """
167
+ The shape of the array to be iterated over.
168
+
169
+ For an example, see `Arrayterator`.
170
+
171
+ """
172
+ return tuple(((stop-start-1)//step+1) for start, stop, step in
173
+ zip(self.start, self.stop, self.step))
174
+
175
+ def __iter__(self):
176
+ # Skip arrays with degenerate dimensions
177
+ if [dim for dim in self.shape if dim <= 0]:
178
+ return
179
+
180
+ start = self.start[:]
181
+ stop = self.stop[:]
182
+ step = self.step[:]
183
+ ndims = self.var.ndim
184
+
185
+ while True:
186
+ count = self.buf_size or reduce(mul, self.shape)
187
+
188
+ # iterate over each dimension, looking for the
189
+ # running dimension (ie, the dimension along which
190
+ # the blocks will be built from)
191
+ rundim = 0
192
+ for i in range(ndims-1, -1, -1):
193
+ # if count is zero we ran out of elements to read
194
+ # along higher dimensions, so we read only a single position
195
+ if count == 0:
196
+ stop[i] = start[i]+1
197
+ elif count <= self.shape[i]:
198
+ # limit along this dimension
199
+ stop[i] = start[i] + count*step[i]
200
+ rundim = i
201
+ else:
202
+ # read everything along this dimension
203
+ stop[i] = self.stop[i]
204
+ stop[i] = min(self.stop[i], stop[i])
205
+ count = count//self.shape[i]
206
+
207
+ # yield a block
208
+ slice_ = tuple(slice(*t) for t in zip(start, stop, step))
209
+ yield self.var[slice_]
210
+
211
+ # Update start position, taking care of overflow to
212
+ # other dimensions
213
+ start[rundim] = stop[rundim] # start where we stopped
214
+ for i in range(ndims-1, 0, -1):
215
+ if start[i] >= self.stop[i]:
216
+ start[i] = self.start[i]
217
+ start[i-1] += self.step[i-1]
218
+ if start[0] >= self.stop[0]:
219
+ return
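To make the block-building algorithm in `__iter__` concrete, the sketch below iterates the docstring's array with a buffer of 60 elements. The two trailing dimensions (5 * 6 = 30 elements) fit entirely within the buffer, so axis 1 becomes the running dimension and is consumed two rows at a time:

    import numpy as np

    a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
    a_itor = np.lib.Arrayterator(a, buf_size=60)

    for block in a_itor:
        print(block.shape)                  # (1, 2, 5, 6), six times

    # The blocks tile the array exactly once:
    total = sum(block.size for block in a_itor)
    assert total == a.size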
env-llmeval/lib/python3.10/site-packages/numpy/lib/format.py ADDED
@@ -0,0 +1,976 @@
1
+ """
2
+ Binary serialization
3
+
4
+ NPY format
5
+ ==========
6
+
7
+ A simple format for saving numpy arrays to disk with the full
8
+ information about them.
9
+
10
+ The ``.npy`` format is the standard binary file format in NumPy for
11
+ persisting a *single* arbitrary NumPy array on disk. The format stores all
12
+ of the shape and dtype information necessary to reconstruct the array
13
+ correctly even on another machine with a different architecture.
14
+ The format is designed to be as simple as possible while achieving
15
+ its limited goals.
16
+
17
+ The ``.npz`` format is the standard format for persisting *multiple* NumPy
18
+ arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
19
+ files, one for each array.
20
+
21
+ Capabilities
22
+ ------------
23
+
24
+ - Can represent all NumPy arrays including nested record arrays and
25
+ object arrays.
26
+
27
+ - Represents the data in its native binary form.
28
+
29
+ - Supports Fortran-contiguous arrays directly.
30
+
31
+ - Stores all of the necessary information to reconstruct the array
32
+ including shape and dtype on a machine of a different
33
+ architecture. Both little-endian and big-endian arrays are
34
+ supported, and a file with little-endian numbers will yield
35
+ a little-endian array on any machine reading the file. The
36
+ types are described in terms of their actual sizes. For example,
37
+ if a machine with a 64-bit C "long int" writes out an array with
38
+ "long ints", a reading machine with 32-bit C "long ints" will yield
39
+ an array with 64-bit integers.
40
+
41
+ - Is straightforward to reverse engineer. Datasets often live longer than
42
+ the programs that created them. A competent developer should be
43
+ able to create a solution in their preferred programming language to
44
+ read most ``.npy`` files that they have been given without much
45
+ documentation.
46
+
47
+ - Allows memory-mapping of the data. See `open_memmap`.
48
+
49
+ - Can be read from a filelike stream object instead of an actual file.
50
+
51
+ - Stores object arrays, i.e. arrays containing elements that are arbitrary
52
+ Python objects. Files with object arrays are not mmapable, but
53
+ can be read and written to disk.
54
+
55
+ Limitations
56
+ -----------
57
+
58
+ - Arbitrary subclasses of numpy.ndarray are not completely preserved.
59
+ Subclasses will be accepted for writing, but only the array data will
60
+ be written out. A regular numpy.ndarray object will be created
61
+ upon reading the file.
62
+
63
+ .. warning::
64
+
65
+ Due to limitations in the interpretation of structured dtypes, dtypes
66
+ with fields with empty names will have the names replaced by 'f0', 'f1',
67
+ etc. Such arrays will not round-trip through the format entirely
68
+ accurately. The data is intact; only the field names will differ. We are
69
+ working on a fix for this. This fix will not require a change in the
70
+ file format. The arrays with such structures can still be saved and
71
+ restored, and the correct dtype may be restored by using the
72
+ ``loadedarray.view(correct_dtype)`` method.
73
+
74
+ File extensions
75
+ ---------------
76
+
77
+ We recommend using the ``.npy`` and ``.npz`` extensions for files saved
78
+ in this format. This is by no means a requirement; applications may wish
79
+ to use these file formats but use an extension specific to the
80
+ application. In the absence of an obvious alternative, however,
81
+ we suggest using ``.npy`` and ``.npz``.
82
+
83
+ Version numbering
84
+ -----------------
85
+
86
+ The version numbering of these formats is independent of NumPy version
87
+ numbering. If the format is upgraded, the code in `numpy.io` will still
88
+ be able to read and write Version 1.0 files.
89
+
90
+ Format Version 1.0
91
+ ------------------
92
+
93
+ The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
94
+
95
+ The next 1 byte is an unsigned byte: the major version number of the file
96
+ format, e.g. ``\\x01``.
97
+
98
+ The next 1 byte is an unsigned byte: the minor version number of the file
99
+ format, e.g. ``\\x00``. Note: the version of the file format is not tied
100
+ to the version of the numpy package.
101
+
102
+ The next 2 bytes form a little-endian unsigned short int: the length of
103
+ the header data HEADER_LEN.
104
+
105
+ The next HEADER_LEN bytes form the header data describing the array's
106
+ format. It is an ASCII string which contains a Python literal expression
107
+ of a dictionary. It is terminated by a newline (``\\n``) and padded with
108
+ spaces (``\\x20``) to make the total of
109
+ ``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
110
+ by 64 for alignment purposes.
111
+
112
+ The dictionary contains three keys:
113
+
114
+ "descr" : dtype.descr
115
+ An object that can be passed as an argument to the `numpy.dtype`
116
+ constructor to create the array's dtype.
117
+ "fortran_order" : bool
118
+ Whether the array data is Fortran-contiguous or not. Since
119
+ Fortran-contiguous arrays are a common form of non-C-contiguity,
120
+ we allow them to be written directly to disk for efficiency.
121
+ "shape" : tuple of int
122
+ The shape of the array.
123
+
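+ A typical header dictionary therefore looks like the following; this is
+ an illustration only, since the exact ``descr`` value depends on the
+ array's dtype and the writing machine's byte order::
+
+     {'descr': '<f8', 'fortran_order': False, 'shape': (3, 4), }
+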
+ For repeatability and readability, the dictionary keys are sorted in
+ alphabetic order. This is for convenience only. A writer SHOULD implement
+ this if possible. A reader MUST NOT depend on this.
+
+ Following the header comes the array data. If the dtype contains Python
+ objects (i.e. ``dtype.hasobject is True``), then the data is a Python
+ pickle of the array. Otherwise the data is the contiguous (either C-
+ or Fortran-, depending on ``fortran_order``) bytes of the array.
+ Consumers can figure out the number of bytes by multiplying the number
+ of elements given by the shape (noting that ``shape=()`` means there is
+ 1 element) by ``dtype.itemsize``.
+
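+ For illustration, the leading bytes written by `numpy.save` can be
+ inspected directly. This is a minimal sketch: only the eight bytes shown
+ below are fixed for a version 1.0 file, while the remaining header bytes
+ vary with dtype, shape and padding.
+
+ >>> import io
+ >>> import numpy as np
+ >>> f = io.BytesIO()
+ >>> np.save(f, np.arange(3))
+ >>> f.getvalue()[:8]    # magic string followed by version (1, 0)
+ b'\\x93NUMPY\\x01\\x00'
+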
+ Format Version 2.0
+ ------------------
+
+ The version 1.0 format only allowed the array header to have a total size of
+ 65535 bytes. This can be exceeded by structured arrays with a large number of
+ columns. The version 2.0 format extends the header size to 4 GiB.
+ `numpy.save` will automatically save in 2.0 format if the data requires it,
+ else it will always use the more compatible 1.0 format.
+
+ The description of the fourth element of the header therefore has become:
+ "The next 4 bytes form a little-endian unsigned int: the length of the header
+ data HEADER_LEN."
+
+ Format Version 3.0
+ ------------------
+
+ This version replaces the ASCII string (which in practice was latin1) with
+ a utf8-encoded string, so supports structured types with any unicode field
+ names.
+
+ Notes
+ -----
+ The ``.npy`` format, including motivation for creating it and a comparison of
+ alternatives, is described in the
+ :doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have
+ evolved with time and this document is more current.
+
+ """
+ import numpy
+ import warnings
+ from numpy.lib.utils import safe_eval, drop_metadata
+ from numpy.compat import (
+     isfileobj, os_fspath, pickle
+     )
+
+
+ __all__ = []
+
+
+ EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}
+ MAGIC_PREFIX = b'\x93NUMPY'
+ MAGIC_LEN = len(MAGIC_PREFIX) + 2
+ ARRAY_ALIGN = 64  # plausible values are powers of 2 between 16 and 4096
+ BUFFER_SIZE = 2**18  # size of buffer for reading npz files in bytes
+ # allow growth within the address space of a 64 bit machine along one axis
+ GROWTH_AXIS_MAX_DIGITS = 21  # = len(str(8*2**64-1)) hypothetical int1 dtype
+
+ # difference between version 1.0 and 2.0 is a 4 byte (I) header length
+ # instead of 2 bytes (H) allowing storage of large structured arrays
+ _header_size_info = {
+     (1, 0): ('<H', 'latin1'),
+     (2, 0): ('<I', 'latin1'),
+     (3, 0): ('<I', 'utf8'),
+ }
+
+ # Python's literal_eval is not actually safe for large inputs, since parsing
+ # may become slow or even cause interpreter crashes.
+ # This is an arbitrary, low limit which should make it safe in practice.
+ _MAX_HEADER_SIZE = 10000
+
+ def _check_version(version):
+     if version not in [(1, 0), (2, 0), (3, 0), None]:
+         msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
+         raise ValueError(msg % (version,))
+
+ def magic(major, minor):
+     """ Return the magic string for the given file format version.
+
+     Parameters
+     ----------
+     major : int in [0, 255]
+     minor : int in [0, 255]
+
+     Returns
+     -------
+     magic : bytes
+
+     Raises
+     ------
+     ValueError if the version cannot be formatted.
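+
+     Examples
+     --------
+     A quick sanity check (illustrative doctest):
+
+     >>> magic(1, 0)
+     b'\\x93NUMPY\\x01\\x00'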
+     """
+     if major < 0 or major > 255:
+         raise ValueError("major version must be 0 <= major < 256")
+     if minor < 0 or minor > 255:
+         raise ValueError("minor version must be 0 <= minor < 256")
+     return MAGIC_PREFIX + bytes([major, minor])
+
+ def read_magic(fp):
+     """ Read the magic string to get the version of the file format.
+
+     Parameters
+     ----------
+     fp : filelike object
+
+     Returns
+     -------
+     major : int
+     minor : int
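+
+     Examples
+     --------
+     Reading back the version written by `magic` (illustrative doctest):
+
+     >>> import io
+     >>> read_magic(io.BytesIO(magic(1, 0)))
+     (1, 0)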
+     """
+     magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
+     if magic_str[:-2] != MAGIC_PREFIX:
+         msg = "the magic string is not correct; expected %r, got %r"
+         raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
+     major, minor = magic_str[-2:]
+     return major, minor
+
+
+ def dtype_to_descr(dtype):
+     """
+     Get a serializable descriptor from the dtype.
+
+     The .descr attribute of a dtype object cannot be round-tripped through
+     the dtype() constructor. Simple types, like dtype('float32'), have
+     a descr which looks like a record array with one field with '' as
+     a name. The dtype() constructor interprets this as a request to give
+     a default name. Instead, we construct a descriptor that can be passed
+     to dtype().
+
+     Parameters
+     ----------
+     dtype : dtype
+         The dtype of the array that will be written to disk.
+
+     Returns
+     -------
+     descr : object
+         An object that can be passed to `numpy.dtype()` in order to
+         replicate the input dtype.
+
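+     Examples
+     --------
+     On a little-endian machine (the byte-order characters shown here are
+     an assumption about the platform):
+
+     >>> dtype_to_descr(numpy.dtype('float64'))
+     '<f8'
+     >>> dtype_to_descr(numpy.dtype([('x', '<i4'), ('y', '<f8')]))
+     [('x', '<i4'), ('y', '<f8')]
+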
+     """
+     # NOTE: drop_metadata may not return the right dtype e.g. for user
+     # dtypes. In that case our code below would fail the same, though.
+     new_dtype = drop_metadata(dtype)
+     if new_dtype is not dtype:
+         warnings.warn("metadata on a dtype is not saved to an npy/npz. "
+                       "Use another format (such as pickle) to store it.",
+                       UserWarning, stacklevel=2)
+     if dtype.names is not None:
+         # This is a record array. The .descr is fine. XXX: parts of the
+         # record array with an empty name, like padding bytes, still get
+         # fiddled with. This needs to be fixed in the C implementation of
+         # dtype().
+         return dtype.descr
+     else:
+         return dtype.str
+
+ def descr_to_dtype(descr):
+     """
+     Returns a dtype based off the given description.
+
+     This is essentially the reverse of `dtype_to_descr()`. It will remove
+     the valueless padding fields created by simple fields like
+     dtype('float32'), and then convert the description to its
+     corresponding dtype.
+
+     Parameters
+     ----------
+     descr : object
+         The object retrieved by dtype.descr. Can be passed to
+         `numpy.dtype()` in order to replicate the input dtype.
+
+     Returns
+     -------
+     dtype : dtype
+         The dtype constructed by the description.
+
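+     Examples
+     --------
+     Round-tripping a simple and a structured descriptor (illustrative):
+
+     >>> descr_to_dtype('<f8') == numpy.dtype('<f8')
+     True
+     >>> descr_to_dtype([('x', '<i4'), ('y', '<f8')]).names
+     ('x', 'y')
+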
+     """
+     if isinstance(descr, str):
+         # No padding removal needed
+         return numpy.dtype(descr)
+     elif isinstance(descr, tuple):
+         # subtype, will always have a shape descr[1]
+         dt = descr_to_dtype(descr[0])
+         return numpy.dtype((dt, descr[1]))
+
+     titles = []
+     names = []
+     formats = []
+     offsets = []
+     offset = 0
+     for field in descr:
+         if len(field) == 2:
+             name, descr_str = field
+             dt = descr_to_dtype(descr_str)
+         else:
+             name, descr_str, shape = field
+             dt = numpy.dtype((descr_to_dtype(descr_str), shape))
+
+         # Ignore padding bytes, which will be void bytes with '' as name.
+         # Once support for blank names is removed, only "if name == ''" is
+         # needed.
+         is_pad = (name == '' and dt.type is numpy.void and dt.names is None)
+         if not is_pad:
+             title, name = name if isinstance(name, tuple) else (None, name)
+             titles.append(title)
+             names.append(name)
+             formats.append(dt)
+             offsets.append(offset)
+         offset += dt.itemsize
+
+     return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
+                         'offsets': offsets, 'itemsize': offset})
+ def header_data_from_array_1_0(array):
+     """ Get the dictionary of header metadata from a numpy.ndarray.
+
+     Parameters
+     ----------
+     array : numpy.ndarray
+
+     Returns
+     -------
+     d : dict
+         This has the appropriate entries for writing its string representation
+         to the header of the file.
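+
+     Examples
+     --------
+     For a small C-contiguous array (the ``'<f8'`` descr assumes a
+     little-endian machine):
+
+     >>> header_data_from_array_1_0(numpy.zeros((2, 3)))
+     {'shape': (2, 3), 'fortran_order': False, 'descr': '<f8'}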
+     """
+     d = {'shape': array.shape}
+     if array.flags.c_contiguous:
+         d['fortran_order'] = False
+     elif array.flags.f_contiguous:
+         d['fortran_order'] = True
+     else:
+         # Totally non-contiguous data. We will have to make it C-contiguous
+         # before writing. Note that we need to test for C_CONTIGUOUS first
+         # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.
+         d['fortran_order'] = False
+
+     d['descr'] = dtype_to_descr(array.dtype)
+     return d
+
+
+ def _wrap_header(header, version):
+     """
+     Takes a stringified header, and attaches the prefix and padding to it
+     """
+     import struct
+     assert version is not None
+     fmt, encoding = _header_size_info[version]
+     header = header.encode(encoding)
+     hlen = len(header) + 1
+     padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
+     try:
+         header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
+     except struct.error:
+         msg = "Header length {} too big for version={}".format(hlen, version)
+         raise ValueError(msg) from None
+
+     # Pad the header with spaces and a final newline such that the magic
+     # string, the header-length short and the header are aligned on a
+     # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
+     # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
+     # offset must be page-aligned (i.e. the beginning of the file).
+     return header_prefix + header + b' '*padlen + b'\n'
+
+
+ def _wrap_header_guess_version(header):
+     """
+     Like `_wrap_header`, but chooses an appropriate version given the contents
+     """
+     try:
+         return _wrap_header(header, (1, 0))
+     except ValueError:
+         pass
+
+     try:
+         ret = _wrap_header(header, (2, 0))
+     except UnicodeEncodeError:
+         pass
+     else:
+         warnings.warn("Stored array in format 2.0. It can only be "
+                       "read by NumPy >= 1.9", UserWarning, stacklevel=2)
+         return ret
+
+     header = _wrap_header(header, (3, 0))
+     warnings.warn("Stored array in format 3.0. It can only be "
+                   "read by NumPy >= 1.17", UserWarning, stacklevel=2)
+     return header
+
+
+ def _write_array_header(fp, d, version=None):
+     """ Write the header for an array and return the version used
+
+     Parameters
+     ----------
+     fp : filelike object
+     d : dict
+         This has the appropriate entries for writing its string representation
+         to the header of the file.
+     version : tuple or None
+         None means use oldest that works. Providing an explicit version will
+         raise a ValueError if the format does not allow saving this data.
+         Default: None
+     """
+     header = ["{"]
+     for key, value in sorted(d.items()):
+         # Need to use repr here, since we eval these when reading
+         header.append("'%s': %s, " % (key, repr(value)))
+     header.append("}")
+     header = "".join(header)
+
+     # Add some spare space so that the array header can be modified in-place
+     # when changing the array size, e.g. when growing it by appending data at
+     # the end.
+     shape = d['shape']
+     header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr(
+         shape[-1 if d['fortran_order'] else 0]
+     ))) if len(shape) > 0 else 0)
+
+     if version is None:
+         header = _wrap_header_guess_version(header)
+     else:
+         header = _wrap_header(header, version)
+     fp.write(header)
+
+ def write_array_header_1_0(fp, d):
+     """ Write the header for an array using the 1.0 format.
+
+     Parameters
+     ----------
+     fp : filelike object
+     d : dict
+         This has the appropriate entries for writing its string
+         representation to the header of the file.
+     """
+     _write_array_header(fp, d, (1, 0))
+
+
+ def write_array_header_2_0(fp, d):
+     """ Write the header for an array using the 2.0 format.
+         The 2.0 format allows storing very large structured arrays.
+
+     .. versionadded:: 1.9.0
+
+     Parameters
+     ----------
+     fp : filelike object
+     d : dict
+         This has the appropriate entries for writing its string
+         representation to the header of the file.
+     """
+     _write_array_header(fp, d, (2, 0))
+
+ def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE):
+     """
+     Read an array header from a filelike object using the 1.0 file format
+     version.
+
+     This will leave the file object located just after the header.
+
+     Parameters
+     ----------
+     fp : filelike object
+         A file object or something with a `.read()` method like a file.
+     max_header_size : int, optional
+         Maximum allowed size of the header. Large headers may not be safe
+         to load securely and thus require explicitly passing a larger value.
+         See :py:func:`ast.literal_eval()` for details.
+
+     Returns
+     -------
+     shape : tuple of int
+         The shape of the array.
+     fortran_order : bool
+         The array data will be written out directly if it is either
+         C-contiguous or Fortran-contiguous. Otherwise, it will be made
+         contiguous before writing it out.
+     dtype : dtype
+         The dtype of the file's data.
+
+     Raises
+     ------
+     ValueError
+         If the data is invalid.
+
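+     Examples
+     --------
+     Reading the header of a freshly written file (illustrative; assumes
+     an array small enough that `numpy.save` picks the 1.0 format):
+
+     >>> import io
+     >>> buf = io.BytesIO()
+     >>> numpy.save(buf, numpy.zeros(3))
+     >>> _ = buf.seek(MAGIC_LEN)    # skip past the magic string
+     >>> read_array_header_1_0(buf)
+     ((3,), False, dtype('float64'))
+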
+     """
+     return _read_array_header(
+             fp, version=(1, 0), max_header_size=max_header_size)
+
+ def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE):
+     """
+     Read an array header from a filelike object using the 2.0 file format
+     version.
+
+     This will leave the file object located just after the header.
+
+     .. versionadded:: 1.9.0
+
+     Parameters
+     ----------
+     fp : filelike object
+         A file object or something with a `.read()` method like a file.
+     max_header_size : int, optional
+         Maximum allowed size of the header. Large headers may not be safe
+         to load securely and thus require explicitly passing a larger value.
+         See :py:func:`ast.literal_eval()` for details.
+
+     Returns
+     -------
+     shape : tuple of int
+         The shape of the array.
+     fortran_order : bool
+         The array data will be written out directly if it is either
+         C-contiguous or Fortran-contiguous. Otherwise, it will be made
+         contiguous before writing it out.
+     dtype : dtype
+         The dtype of the file's data.
+
+     Raises
+     ------
+     ValueError
+         If the data is invalid.
+
+     """
+     return _read_array_header(
+             fp, version=(2, 0), max_header_size=max_header_size)
+
+
+ def _filter_header(s):
+     """Clean up 'L' in npz header ints.
+
+     Cleans up the 'L' in strings representing integers. Needed to allow npz
+     headers produced in Python2 to be read in Python3.
+
+     Parameters
+     ----------
+     s : string
+         Npy file header.
+
+     Returns
+     -------
+     header : str
+         Cleaned up header.
+
+     """
+     import tokenize
+     from io import StringIO
+
+     tokens = []
+     last_token_was_number = False
+     for token in tokenize.generate_tokens(StringIO(s).readline):
+         token_type = token[0]
+         token_string = token[1]
+         if (last_token_was_number and
+                 token_type == tokenize.NAME and
+                 token_string == "L"):
+             continue
+         else:
+             tokens.append(token)
+         last_token_was_number = (token_type == tokenize.NUMBER)
+     return tokenize.untokenize(tokens)
+
+
+ def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE):
+     """
+     see read_array_header_1_0
+     """
+     # Read an unsigned, little-endian short int which has the length of the
+     # header.
+     import struct
+     hinfo = _header_size_info.get(version)
+     if hinfo is None:
+         raise ValueError("Invalid version {!r}".format(version))
+     hlength_type, encoding = hinfo
+
+     hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
+     header_length = struct.unpack(hlength_type, hlength_str)[0]
+     header = _read_bytes(fp, header_length, "array header")
+     header = header.decode(encoding)
+     if len(header) > max_header_size:
+         raise ValueError(
+             f"Header info length ({len(header)}) is large and may not be safe "
+             "to load securely.\n"
+             "To allow loading, adjust `max_header_size` or fully trust "
+             "the `.npy` file using `allow_pickle=True`.\n"
+             "For safety against large resource use or crashes, sandboxing "
+             "may be necessary.")
+
+     # The header is a pretty-printed string representation of a literal
+     # Python dictionary with trailing newlines padded to an ARRAY_ALIGN byte
+     # boundary. The keys are strings.
+     #   "shape" : tuple of int
+     #   "fortran_order" : bool
+     #   "descr" : dtype.descr
+     # Versions (2, 0) and (1, 0) could have been created by a Python 2
+     # implementation before header filtering was implemented.
+     #
+     # For performance reasons, we try without _filter_header first though
+     try:
+         d = safe_eval(header)
+     except SyntaxError as e:
+         if version <= (2, 0):
+             header = _filter_header(header)
+             try:
+                 d = safe_eval(header)
+             except SyntaxError as e2:
+                 msg = "Cannot parse header: {!r}"
+                 raise ValueError(msg.format(header)) from e2
+             else:
+                 warnings.warn(
+                     "Reading `.npy` or `.npz` file required additional "
+                     "header parsing as it was created on Python 2. Save the "
+                     "file again to speed up loading and avoid this warning.",
+                     UserWarning, stacklevel=4)
+         else:
+             msg = "Cannot parse header: {!r}"
+             raise ValueError(msg.format(header)) from e
+     if not isinstance(d, dict):
+         msg = "Header is not a dictionary: {!r}"
+         raise ValueError(msg.format(d))
+
+     if EXPECTED_KEYS != d.keys():
+         keys = sorted(d.keys())
+         msg = "Header does not contain the correct keys: {!r}"
+         raise ValueError(msg.format(keys))
+
+     # Sanity-check the values.
+     if (not isinstance(d['shape'], tuple) or
+             not all(isinstance(x, int) for x in d['shape'])):
+         msg = "shape is not valid: {!r}"
+         raise ValueError(msg.format(d['shape']))
+     if not isinstance(d['fortran_order'], bool):
+         msg = "fortran_order is not a valid bool: {!r}"
+         raise ValueError(msg.format(d['fortran_order']))
+     try:
+         dtype = descr_to_dtype(d['descr'])
+     except TypeError as e:
+         msg = "descr is not a valid dtype descriptor: {!r}"
+         raise ValueError(msg.format(d['descr'])) from e
+
+     return d['shape'], d['fortran_order'], dtype
+
+ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
+     """
+     Write an array to an NPY file, including a header.
+
+     If the array is neither C-contiguous nor Fortran-contiguous AND the
+     file_like object is not a real file object, this function will have to
+     copy data in memory.
+
+     Parameters
+     ----------
+     fp : file_like object
+         An open, writable file object, or similar object with a
+         ``.write()`` method.
+     array : ndarray
+         The array to write to disk.
+     version : (int, int) or None, optional
+         The version number of the format. None means use the oldest
+         supported version that is able to store the data. Default: None
+     allow_pickle : bool, optional
+         Whether to allow writing pickled data. Default: True
+     pickle_kwargs : dict, optional
+         Additional keyword arguments to pass to pickle.dump, excluding
+         'protocol'. These are only useful when pickling objects in object
+         arrays on Python 3 to Python 2 compatible format.
+
+     Raises
+     ------
+     ValueError
+         If the array cannot be persisted. This includes the case of
+         allow_pickle=False and array being an object array.
+     Various other errors
+         If the array contains Python objects as part of its dtype, the
+         process of pickling them may raise various errors if the objects
+         are not picklable.
+
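+     Examples
+     --------
+     A minimal in-memory round trip (illustrative; pairs with `read_array`):
+
+     >>> import io
+     >>> buf = io.BytesIO()
+     >>> write_array(buf, numpy.arange(4))
+     >>> _ = buf.seek(0)
+     >>> read_array(buf)
+     array([0, 1, 2, 3])
+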
+     """
+     _check_version(version)
+     _write_array_header(fp, header_data_from_array_1_0(array), version)
+
+     if array.itemsize == 0:
+         buffersize = 0
+     else:
+         # Set buffer size to 16 MiB to hide the Python loop overhead.
+         buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
+
+     if array.dtype.hasobject:
+         # We contain Python objects so we cannot write out the data
+         # directly. Instead, we will pickle it out
+         if not allow_pickle:
+             raise ValueError("Object arrays cannot be saved when "
+                              "allow_pickle=False")
+         if pickle_kwargs is None:
+             pickle_kwargs = {}
+         pickle.dump(array, fp, protocol=3, **pickle_kwargs)
+     elif array.flags.f_contiguous and not array.flags.c_contiguous:
+         if isfileobj(fp):
+             array.T.tofile(fp)
+         else:
+             for chunk in numpy.nditer(
+                     array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+                     buffersize=buffersize, order='F'):
+                 fp.write(chunk.tobytes('C'))
+     else:
+         if isfileobj(fp):
+             array.tofile(fp)
+         else:
+             for chunk in numpy.nditer(
+                     array, flags=['external_loop', 'buffered', 'zerosize_ok'],
+                     buffersize=buffersize, order='C'):
+                 fp.write(chunk.tobytes('C'))
+
+
+ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *,
+                max_header_size=_MAX_HEADER_SIZE):
+     """
+     Read an array from an NPY file.
+
+     Parameters
+     ----------
+     fp : file_like object
+         If this is not a real file object, then this may take extra memory
+         and time.
+     allow_pickle : bool, optional
+         Whether to allow reading pickled data. Default: False
+
+         .. versionchanged:: 1.16.3
+             Made default False in response to CVE-2019-6446.
+
+     pickle_kwargs : dict
+         Additional keyword arguments to pass to pickle.load. These are only
+         useful when loading object arrays saved on Python 2 when using
+         Python 3.
+     max_header_size : int, optional
+         Maximum allowed size of the header. Large headers may not be safe
+         to load securely and thus require explicitly passing a larger value.
+         See :py:func:`ast.literal_eval()` for details.
+         This option is ignored when `allow_pickle` is passed. In that case
+         the file is by definition trusted and the limit is unnecessary.
+
+     Returns
+     -------
+     array : ndarray
+         The array from the data on disk.
+
+     Raises
+     ------
+     ValueError
+         If the data is invalid, or allow_pickle=False and the file contains
+         an object array.
+
+     """
+     if allow_pickle:
+         # Effectively ignore max_header_size, since `allow_pickle` indicates
+         # that the input is fully trusted.
+         max_header_size = 2**64
+
+     version = read_magic(fp)
+     _check_version(version)
+     shape, fortran_order, dtype = _read_array_header(
+             fp, version, max_header_size=max_header_size)
+     if len(shape) == 0:
+         count = 1
+     else:
+         count = numpy.multiply.reduce(shape, dtype=numpy.int64)
+
+     # Now read the actual data.
+     if dtype.hasobject:
+         # The array contained Python objects. We need to unpickle the data.
+         if not allow_pickle:
+             raise ValueError("Object arrays cannot be loaded when "
+                              "allow_pickle=False")
+         if pickle_kwargs is None:
+             pickle_kwargs = {}
+         try:
+             array = pickle.load(fp, **pickle_kwargs)
+         except UnicodeError as err:
+             # Friendlier error message
+             raise UnicodeError("Unpickling a python object failed: %r\n"
+                                "You may need to pass the encoding= option "
+                                "to numpy.load" % (err,)) from err
+     else:
+         if isfileobj(fp):
+             # We can use the fast fromfile() function.
+             array = numpy.fromfile(fp, dtype=dtype, count=count)
+         else:
+             # This is not a real file. We have to read it the
+             # memory-intensive way.
+             # crc32 module fails on reads greater than 2 ** 32 bytes,
+             # breaking large reads from gzip streams. Chunk reads to
+             # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
+             # of the read. In non-chunked case count < max_read_count, so
+             # only one read is performed.
+
+             # Use np.ndarray instead of np.empty since the latter does
+             # not correctly instantiate zero-width string dtypes; see
+             # https://github.com/numpy/numpy/pull/6430
+             array = numpy.ndarray(count, dtype=dtype)
+
+             if dtype.itemsize > 0:
+                 # If dtype.itemsize == 0 then there's nothing more to read
+                 max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
+
+                 for i in range(0, count, max_read_count):
+                     read_count = min(max_read_count, count - i)
+                     read_size = int(read_count * dtype.itemsize)
+                     data = _read_bytes(fp, read_size, "array data")
+                     array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
+                                                              count=read_count)
+
+         if fortran_order:
+             array.shape = shape[::-1]
+             array = array.transpose()
+         else:
+             array.shape = shape
+
+     return array
+
+
+ def open_memmap(filename, mode='r+', dtype=None, shape=None,
+                 fortran_order=False, version=None, *,
+                 max_header_size=_MAX_HEADER_SIZE):
+     """
+     Open a .npy file as a memory-mapped array.
+
+     This may be used to read an existing file or create a new one.
+
+     Parameters
+     ----------
+     filename : str or path-like
+         The name of the file on disk. This may *not* be a file-like
+         object.
+     mode : str, optional
+         The mode in which to open the file; the default is 'r+'. In
+         addition to the standard file modes, 'c' is also accepted to mean
+         "copy on write." See `memmap` for the available mode strings.
+     dtype : data-type, optional
+         The data type of the array if we are creating a new file in "write"
+         mode, if not, `dtype` is ignored. The default value is None, which
+         results in a data-type of `float64`.
+     shape : tuple of int
+         The shape of the array if we are creating a new file in "write"
+         mode, in which case this parameter is required. Otherwise, this
+         parameter is ignored and is thus optional.
+     fortran_order : bool, optional
+         Whether the array should be Fortran-contiguous (True) or
+         C-contiguous (False, the default) if we are creating a new file in
+         "write" mode.
+     version : tuple of int (major, minor) or None
+         If the mode is a "write" mode, then this is the version of the file
+         format used to create the file. None means use the oldest
+         supported version that is able to store the data. Default: None
+     max_header_size : int, optional
+         Maximum allowed size of the header. Large headers may not be safe
+         to load securely and thus require explicitly passing a larger value.
+         See :py:func:`ast.literal_eval()` for details.
+
+     Returns
+     -------
+     marray : memmap
+         The memory-mapped array.
+
+     Raises
+     ------
+     ValueError
+         If the data or the mode is invalid.
+     OSError
+         If the file is not found or cannot be opened correctly.
+
+     See Also
+     --------
+     numpy.memmap
+
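+     Examples
+     --------
+     Creating a new file and re-opening it read-only (illustrative; writes
+     to a temporary path):
+
+     >>> import os, tempfile
+     >>> path = os.path.join(tempfile.mkdtemp(), 'example.npy')
+     >>> m = open_memmap(path, mode='w+', dtype='float32', shape=(2, 2))
+     >>> m[:] = 1.0
+     >>> m.flush()
+     >>> open_memmap(path, mode='r').shape
+     (2, 2)
+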
+     """
+     if isfileobj(filename):
+         raise ValueError("Filename must be a string or a path-like object."
+                          " Memmap cannot use existing file handles.")
+
+     if 'w' in mode:
+         # We are creating the file, not reading it.
+         # Check if we ought to create the file.
+         _check_version(version)
+         # Ensure that the given dtype is an authentic dtype object rather
+         # than just something that can be interpreted as a dtype object.
+         dtype = numpy.dtype(dtype)
+         if dtype.hasobject:
+             msg = "Array can't be memory-mapped: Python objects in dtype."
+             raise ValueError(msg)
+         d = dict(
+             descr=dtype_to_descr(dtype),
+             fortran_order=fortran_order,
+             shape=shape,
+         )
+         # If we got here, then it should be safe to create the file.
+         with open(os_fspath(filename), mode+'b') as fp:
+             _write_array_header(fp, d, version)
+             offset = fp.tell()
+     else:
+         # Read the header of the file first.
+         with open(os_fspath(filename), 'rb') as fp:
+             version = read_magic(fp)
+             _check_version(version)
+
+             shape, fortran_order, dtype = _read_array_header(
+                     fp, version, max_header_size=max_header_size)
+             if dtype.hasobject:
+                 msg = "Array can't be memory-mapped: Python objects in dtype."
+                 raise ValueError(msg)
+             offset = fp.tell()
+
+     if fortran_order:
+         order = 'F'
+     else:
+         order = 'C'
+
+     # We need to change a write-only mode to a read-write mode since we've
+     # already written data to the file.
+     if mode == 'w+':
+         mode = 'r+'
+
+     marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
+                           mode=mode, offset=offset)
+
+     return marray
+
+
+ def _read_bytes(fp, size, error_template="ran out of data"):
+     """
+     Read from file-like object until size bytes are read.
+     Raises ValueError if EOF is encountered before size bytes are read.
+     Non-blocking objects only supported if they derive from io objects.
+
+     Required as e.g. ZipExtFile in python 2.6 can return less data than
+     requested.
+     """
+     data = bytes()
+     while True:
+         # io files (default in python3) return None or raise on
+         # would-block, python2 file will truncate, probably nothing can be
+         # done about that. note that regular files can't be non-blocking
+         try:
+             r = fp.read(size - len(data))
+             data += r
+             if len(r) == 0 or len(data) == size:
+                 break
+         except BlockingIOError:
+             pass
+     if len(data) != size:
+         msg = "EOF: reading %s, expected %d bytes got %d"
+         raise ValueError(msg % (error_template, size, len(data)))
+     else:
+         return data
env-llmeval/lib/python3.10/site-packages/numpy/lib/format.pyi ADDED
@@ -0,0 +1,22 @@
+ from typing import Any, Literal, Final
+
+ __all__: list[str]
+
+ EXPECTED_KEYS: Final[set[str]]
+ MAGIC_PREFIX: Final[bytes]
+ MAGIC_LEN: Literal[8]
+ ARRAY_ALIGN: Literal[64]
+ BUFFER_SIZE: Literal[262144]  # 2**18
+
+ def magic(major, minor): ...
+ def read_magic(fp): ...
+ def dtype_to_descr(dtype): ...
+ def descr_to_dtype(descr): ...
+ def header_data_from_array_1_0(array): ...
+ def write_array_header_1_0(fp, d): ...
+ def write_array_header_2_0(fp, d): ...
+ def read_array_header_1_0(fp): ...
+ def read_array_header_2_0(fp): ...
+ def write_array(fp, array, version=..., allow_pickle=..., pickle_kwargs=...): ...
+ def read_array(fp, allow_pickle=..., pickle_kwargs=...): ...
+ def open_memmap(filename, mode=..., dtype=..., shape=..., fortran_order=..., version=...): ...
env-llmeval/lib/python3.10/site-packages/numpy/lib/function_base.py ADDED
The diff for this file is too large to render. See raw diff
 
env-llmeval/lib/python3.10/site-packages/numpy/lib/function_base.pyi ADDED
@@ -0,0 +1,697 @@
+ import sys
+ from collections.abc import Sequence, Iterator, Callable, Iterable
+ from typing import (
+     Literal as L,
+     Any,
+     TypeVar,
+     overload,
+     Protocol,
+     SupportsIndex,
+     SupportsInt,
+ )
+
+ if sys.version_info >= (3, 10):
+     from typing import TypeGuard
+ else:
+     from typing_extensions import TypeGuard
+
+ from numpy import (
+     vectorize as vectorize,
+     ufunc,
+     generic,
+     floating,
+     complexfloating,
+     intp,
+     float64,
+     complex128,
+     timedelta64,
+     datetime64,
+     object_,
+     _OrderKACF,
+ )
+
+ from numpy._typing import (
+     NDArray,
+     ArrayLike,
+     DTypeLike,
+     _ShapeLike,
+     _ScalarLike_co,
+     _DTypeLike,
+     _ArrayLike,
+     _ArrayLikeInt_co,
+     _ArrayLikeFloat_co,
+     _ArrayLikeComplex_co,
+     _ArrayLikeTD64_co,
+     _ArrayLikeDT64_co,
+     _ArrayLikeObject_co,
+     _FloatLike_co,
+     _ComplexLike_co,
+ )
+
+ from numpy.core.function_base import (
+     add_newdoc as add_newdoc,
+ )
+
+ from numpy.core.multiarray import (
+     add_docstring as add_docstring,
+     bincount as bincount,
+ )
+
+ from numpy.core.umath import _add_newdoc_ufunc
+
+ _T = TypeVar("_T")
+ _T_co = TypeVar("_T_co", covariant=True)
+ _SCT = TypeVar("_SCT", bound=generic)
+ _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
+
+ _2Tuple = tuple[_T, _T]
+
+ class _TrimZerosSequence(Protocol[_T_co]):
+     def __len__(self) -> int: ...
+     def __getitem__(self, key: slice, /) -> _T_co: ...
+     def __iter__(self) -> Iterator[Any]: ...
+
+ class _SupportsWriteFlush(Protocol):
+     def write(self, s: str, /) -> object: ...
+     def flush(self) -> object: ...
+
+ __all__: list[str]
+
+ # NOTE: This is in reality a re-export of `np.core.umath._add_newdoc_ufunc`
+ def add_newdoc_ufunc(ufunc: ufunc, new_docstring: str, /) -> None: ...
+
+ @overload
+ def rot90(
+     m: _ArrayLike[_SCT],
+     k: int = ...,
+     axes: tuple[int, int] = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def rot90(
+     m: ArrayLike,
+     k: int = ...,
+     axes: tuple[int, int] = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def flip(m: _SCT, axis: None = ...) -> _SCT: ...
+ @overload
+ def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ...
+ @overload
+ def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
+ @overload
+ def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ...
+
+ def iterable(y: object) -> TypeGuard[Iterable[Any]]: ...
+
+ @overload
+ def average(
+     a: _ArrayLikeFloat_co,
+     axis: None = ...,
+     weights: None | _ArrayLikeFloat_co = ...,
+     returned: L[False] = ...,
+     keepdims: L[False] = ...,
+ ) -> floating[Any]: ...
+ @overload
+ def average(
+     a: _ArrayLikeComplex_co,
+     axis: None = ...,
+     weights: None | _ArrayLikeComplex_co = ...,
+     returned: L[False] = ...,
+     keepdims: L[False] = ...,
+ ) -> complexfloating[Any, Any]: ...
+ @overload
+ def average(
+     a: _ArrayLikeObject_co,
+     axis: None = ...,
+     weights: None | Any = ...,
+     returned: L[False] = ...,
+     keepdims: L[False] = ...,
+ ) -> Any: ...
+ @overload
+ def average(
+     a: _ArrayLikeFloat_co,
+     axis: None = ...,
+     weights: None | _ArrayLikeFloat_co = ...,
+     returned: L[True] = ...,
+     keepdims: L[False] = ...,
+ ) -> _2Tuple[floating[Any]]: ...
+ @overload
+ def average(
+     a: _ArrayLikeComplex_co,
+     axis: None = ...,
+     weights: None | _ArrayLikeComplex_co = ...,
+     returned: L[True] = ...,
+     keepdims: L[False] = ...,
+ ) -> _2Tuple[complexfloating[Any, Any]]: ...
+ @overload
+ def average(
+     a: _ArrayLikeObject_co,
+     axis: None = ...,
+     weights: None | Any = ...,
+     returned: L[True] = ...,
+     keepdims: L[False] = ...,
+ ) -> _2Tuple[Any]: ...
+ @overload
+ def average(
+     a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+     axis: None | _ShapeLike = ...,
+     weights: None | Any = ...,
+     returned: L[False] = ...,
+     keepdims: bool = ...,
+ ) -> Any: ...
+ @overload
+ def average(
+     a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
+     axis: None | _ShapeLike = ...,
+     weights: None | Any = ...,
+     returned: L[True] = ...,
+     keepdims: bool = ...,
+ ) -> _2Tuple[Any]: ...
+
+ @overload
+ def asarray_chkfinite(
+     a: _ArrayLike[_SCT],
+     dtype: None = ...,
+     order: _OrderKACF = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def asarray_chkfinite(
+     a: object,
+     dtype: None = ...,
+     order: _OrderKACF = ...,
+ ) -> NDArray[Any]: ...
+ @overload
+ def asarray_chkfinite(
+     a: Any,
+     dtype: _DTypeLike[_SCT],
+     order: _OrderKACF = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def asarray_chkfinite(
+     a: Any,
+     dtype: DTypeLike,
+     order: _OrderKACF = ...,
+ ) -> NDArray[Any]: ...
+
+ # TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate`
+ # xref python/mypy#8645
+ @overload
+ def piecewise(
+     x: _ArrayLike[_SCT],
+     condlist: ArrayLike,
+     funclist: Sequence[Any | Callable[..., Any]],
+     *args: Any,
+     **kw: Any,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def piecewise(
+     x: ArrayLike,
+     condlist: ArrayLike,
+     funclist: Sequence[Any | Callable[..., Any]],
+     *args: Any,
+     **kw: Any,
+ ) -> NDArray[Any]: ...
+
+ def select(
+     condlist: Sequence[ArrayLike],
+     choicelist: Sequence[ArrayLike],
+     default: ArrayLike = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def copy(
+     a: _ArrayType,
+     order: _OrderKACF,
+     subok: L[True],
+ ) -> _ArrayType: ...
+ @overload
+ def copy(
+     a: _ArrayType,
+     order: _OrderKACF = ...,
+     *,
+     subok: L[True],
+ ) -> _ArrayType: ...
+ @overload
+ def copy(
+     a: _ArrayLike[_SCT],
+     order: _OrderKACF = ...,
+     subok: L[False] = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def copy(
+     a: ArrayLike,
+     order: _OrderKACF = ...,
+     subok: L[False] = ...,
+ ) -> NDArray[Any]: ...
+
+ def gradient(
+     f: ArrayLike,
+     *varargs: ArrayLike,
+     axis: None | _ShapeLike = ...,
+     edge_order: L[1, 2] = ...,
+ ) -> Any: ...
+
+ @overload
+ def diff(
+     a: _T,
+     n: L[0],
+     axis: SupportsIndex = ...,
+     prepend: ArrayLike = ...,
+     append: ArrayLike = ...,
+ ) -> _T: ...
+ @overload
+ def diff(
+     a: ArrayLike,
+     n: int = ...,
+     axis: SupportsIndex = ...,
+     prepend: ArrayLike = ...,
+     append: ArrayLike = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def interp(
+     x: _ArrayLikeFloat_co,
+     xp: _ArrayLikeFloat_co,
+     fp: _ArrayLikeFloat_co,
+     left: None | _FloatLike_co = ...,
+     right: None | _FloatLike_co = ...,
+     period: None | _FloatLike_co = ...,
+ ) -> NDArray[float64]: ...
+ @overload
+ def interp(
+     x: _ArrayLikeFloat_co,
+     xp: _ArrayLikeFloat_co,
+     fp: _ArrayLikeComplex_co,
+     left: None | _ComplexLike_co = ...,
+     right: None | _ComplexLike_co = ...,
+     period: None | _FloatLike_co = ...,
+ ) -> NDArray[complex128]: ...
+
+ @overload
+ def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]: ...
+ @overload
+ def angle(z: object_, deg: bool = ...) -> Any: ...
+ @overload
+ def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating[Any]]: ...
+ @overload
+ def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ...
+
+ @overload
+ def unwrap(
+     p: _ArrayLikeFloat_co,
+     discont: None | float = ...,
+     axis: int = ...,
+     *,
+     period: float = ...,
+ ) -> NDArray[floating[Any]]: ...
+ @overload
+ def unwrap(
+     p: _ArrayLikeObject_co,
+     discont: None | float = ...,
+     axis: int = ...,
+     *,
+     period: float = ...,
+ ) -> NDArray[object_]: ...
+
+ def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]: ...
+
+ def trim_zeros(
+     filt: _TrimZerosSequence[_T],
+     trim: L["f", "b", "fb", "bf"] = ...,
+ ) -> _T: ...
+
+ @overload
+ def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
+ @overload
+ def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ...
+
+ def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ...
+
+ def disp(
+     mesg: object,
+     device: None | _SupportsWriteFlush = ...,
+     linefeed: bool = ...,
+ ) -> None: ...
+
+ @overload
+ def cov(
+     m: _ArrayLikeFloat_co,
+     y: None | _ArrayLikeFloat_co = ...,
+     rowvar: bool = ...,
+     bias: bool = ...,
+     ddof: None | SupportsIndex | SupportsInt = ...,
+     fweights: None | ArrayLike = ...,
+     aweights: None | ArrayLike = ...,
+     *,
+     dtype: None = ...,
+ ) -> NDArray[floating[Any]]: ...
+ @overload
+ def cov(
+     m: _ArrayLikeComplex_co,
+     y: None | _ArrayLikeComplex_co = ...,
+     rowvar: bool = ...,
+     bias: bool = ...,
+     ddof: None | SupportsIndex | SupportsInt = ...,
+     fweights: None | ArrayLike = ...,
+     aweights: None | ArrayLike = ...,
+     *,
+     dtype: None = ...,
+ ) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def cov(
+     m: _ArrayLikeComplex_co,
+     y: None | _ArrayLikeComplex_co = ...,
+     rowvar: bool = ...,
+     bias: bool = ...,
+     ddof: None | SupportsIndex | SupportsInt = ...,
+     fweights: None | ArrayLike = ...,
+     aweights: None | ArrayLike = ...,
+     *,
+     dtype: _DTypeLike[_SCT],
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def cov(
+     m: _ArrayLikeComplex_co,
+     y: None | _ArrayLikeComplex_co = ...,
+     rowvar: bool = ...,
+     bias: bool = ...,
+     ddof: None | SupportsIndex | SupportsInt = ...,
+     fweights: None | ArrayLike = ...,
+     aweights: None | ArrayLike = ...,
+     *,
+     dtype: DTypeLike,
+ ) -> NDArray[Any]: ...
+
+ # NOTE: `bias` and `ddof` have been deprecated
+ @overload
+ def corrcoef(
+     m: _ArrayLikeFloat_co,
+     y: None | _ArrayLikeFloat_co = ...,
+     rowvar: bool = ...,
+     *,
+     dtype: None = ...,
+ ) -> NDArray[floating[Any]]: ...
+ @overload
+ def corrcoef(
+     m: _ArrayLikeComplex_co,
+     y: None | _ArrayLikeComplex_co = ...,
+     rowvar: bool = ...,
+     *,
+     dtype: None = ...,
+ ) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def corrcoef(
+     m: _ArrayLikeComplex_co,
+     y: None | _ArrayLikeComplex_co = ...,
+     rowvar: bool = ...,
+     *,
+     dtype: _DTypeLike[_SCT],
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def corrcoef(
+     m: _ArrayLikeComplex_co,
+     y: None | _ArrayLikeComplex_co = ...,
+     rowvar: bool = ...,
+     *,
+     dtype: DTypeLike,
+ ) -> NDArray[Any]: ...
+
+ def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+ def bartlett(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+ def hanning(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+ def hamming(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
+
+ def i0(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
+
+ def kaiser(
+     M: _FloatLike_co,
+     beta: _FloatLike_co,
+ ) -> NDArray[floating[Any]]: ...
+
+ @overload
+ def sinc(x: _FloatLike_co) -> floating[Any]: ...
+ @overload
+ def sinc(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+ @overload
+ def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
+ @overload
+ def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+ # NOTE: Deprecated
+ # def msort(a: ArrayLike) -> NDArray[Any]: ...
+
+ @overload
+ def median(
+     a: _ArrayLikeFloat_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     keepdims: L[False] = ...,
+ ) -> floating[Any]: ...
+ @overload
+ def median(
+     a: _ArrayLikeComplex_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     keepdims: L[False] = ...,
+ ) -> complexfloating[Any, Any]: ...
+ @overload
+ def median(
+     a: _ArrayLikeTD64_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     keepdims: L[False] = ...,
+ ) -> timedelta64: ...
+ @overload
+ def median(
+     a: _ArrayLikeObject_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     keepdims: L[False] = ...,
+ ) -> Any: ...
+ @overload
+ def median(
+     a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+     axis: None | _ShapeLike = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     keepdims: bool = ...,
+ ) -> Any: ...
+ @overload
+ def median(
+     a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+     axis: None | _ShapeLike = ...,
+     out: _ArrayType = ...,
+     overwrite_input: bool = ...,
+     keepdims: bool = ...,
+ ) -> _ArrayType: ...
+
+ _MethodKind = L[
+     "inverted_cdf",
+     "averaged_inverted_cdf",
+     "closest_observation",
+     "interpolated_inverted_cdf",
+     "hazen",
+     "weibull",
+     "linear",
+     "median_unbiased",
+     "normal_unbiased",
+     "lower",
+     "higher",
+     "midpoint",
+     "nearest",
+ ]
+
+ @overload
+ def percentile(
+     a: _ArrayLikeFloat_co,
+     q: _FloatLike_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> floating[Any]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeComplex_co,
+     q: _FloatLike_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> complexfloating[Any, Any]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeTD64_co,
+     q: _FloatLike_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> timedelta64: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeDT64_co,
+     q: _FloatLike_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> datetime64: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeObject_co,
+     q: _FloatLike_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> Any: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeFloat_co,
+     q: _ArrayLikeFloat_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> NDArray[floating[Any]]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeComplex_co,
+     q: _ArrayLikeFloat_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeTD64_co,
+     q: _ArrayLikeFloat_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> NDArray[timedelta64]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeDT64_co,
+     q: _ArrayLikeFloat_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> NDArray[datetime64]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeObject_co,
+     q: _ArrayLikeFloat_co,
+     axis: None = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: L[False] = ...,
+ ) -> NDArray[object_]: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
+     q: _ArrayLikeFloat_co,
+     axis: None | _ShapeLike = ...,
+     out: None = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: bool = ...,
+ ) -> Any: ...
+ @overload
+ def percentile(
+     a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
+     q: _ArrayLikeFloat_co,
+     axis: None | _ShapeLike = ...,
+     out: _ArrayType = ...,
+     overwrite_input: bool = ...,
+     method: _MethodKind = ...,
+     keepdims: bool = ...,
+ ) -> _ArrayType: ...
+
+ # NOTE: Not an alias, but they do have identical signatures
+ # (that we can reuse)
+ quantile = percentile
+
+ # TODO: Returns a scalar for <= 1D array-likes; returns an ndarray otherwise
+ def trapz(
+     y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
+     x: None | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co = ...,
+     dx: float = ...,
+     axis: SupportsIndex = ...,
+ ) -> Any: ...
+
+ def meshgrid(
+     *xi: ArrayLike,
+     copy: bool = ...,
+     sparse: bool = ...,
+     indexing: L["xy", "ij"] = ...,
+ ) -> list[NDArray[Any]]: ...
+
+ @overload
+ def delete(
+     arr: _ArrayLike[_SCT],
+     obj: slice | _ArrayLikeInt_co,
+     axis: None | SupportsIndex = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def delete(
+     arr: ArrayLike,
+     obj: slice | _ArrayLikeInt_co,
+     axis: None | SupportsIndex = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def insert(
+     arr: _ArrayLike[_SCT],
+     obj: slice | _ArrayLikeInt_co,
+     values: ArrayLike,
+     axis: None | SupportsIndex = ...,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def insert(
+     arr: ArrayLike,
+     obj: slice | _ArrayLikeInt_co,
+     values: ArrayLike,
+     axis: None | SupportsIndex = ...,
+ ) -> NDArray[Any]: ...
+
+ def append(
+     arr: ArrayLike,
+     values: ArrayLike,
+     axis: None | SupportsIndex = ...,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def digitize(
+     x: _FloatLike_co,
+     bins: _ArrayLikeFloat_co,
+     right: bool = ...,
+ ) -> intp: ...
+ @overload
+ def digitize(
+     x: _ArrayLikeFloat_co,
+     bins: _ArrayLikeFloat_co,
+     right: bool = ...,
+ ) -> NDArray[intp]: ...
env-llmeval/lib/python3.10/site-packages/numpy/lib/histograms.py ADDED
@@ -0,0 +1,1072 @@
1
+ """
2
+ Histogram-related functions
3
+ """
4
+ import contextlib
5
+ import functools
6
+ import operator
7
+ import warnings
8
+
9
+ import numpy as np
10
+ from numpy.core import overrides
11
+
12
+ __all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
13
+
14
+ array_function_dispatch = functools.partial(
15
+ overrides.array_function_dispatch, module='numpy')
16
+
17
+ # range is a keyword argument to many functions, so save the builtin so they can
18
+ # use it.
19
+ _range = range
20
+
21
+
22
+ def _ptp(x):
23
+ """Peak-to-peak value of x.
24
+
25
+ This implementation avoids the problem of signed integer arrays having a
26
+ peak-to-peak value that cannot be represented with the array's data type.
27
+ This function returns an unsigned value for signed integer arrays.
28
+ """
29
+ return _unsigned_subtract(x.max(), x.min())
30
+
31
+
32
+ def _hist_bin_sqrt(x, range):
33
+ """
34
+ Square root histogram bin estimator.
35
+
36
+ Bin width is inversely proportional to the square root of the data size.
37
+ Used by many programs for its simplicity.
38
+
39
+ Parameters
40
+ ----------
41
+ x : array_like
42
+ Input data that is to be histogrammed, trimmed to range. May not
43
+ be empty.
44
+
45
+ Returns
46
+ -------
47
+ h : An estimate of the optimal bin width for the given data.
48
+ """
49
+ del range # unused
50
+ return _ptp(x) / np.sqrt(x.size)
51
+
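+ # Worked sketch (illustration only): 100 evenly spaced samples with a span
+ # of 9.9 give a width of ptp / sqrt(n) = 9.9 / 10, i.e. roughly 0.99.
+ # >>> x = np.arange(100) / 10.0
+ # >>> _hist_bin_sqrt(x, None) # ~0.99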
52
+
53
+ def _hist_bin_sturges(x, range):
54
+ """
55
+ Sturges histogram bin estimator.
56
+
57
+ A very simplistic estimator based on the assumption of normality of
58
+ the data. This estimator has poor performance for non-normal data,
59
+ which becomes especially obvious for large data sets. The estimate
60
+ depends only on size of the data.
61
+
62
+ Parameters
63
+ ----------
64
+ x : array_like
65
+ Input data that is to be histogrammed, trimmed to range. May not
66
+ be empty.
67
+
68
+ Returns
69
+ -------
70
+ h : An estimate of the optimal bin width for the given data.
71
+ """
72
+ del range # unused
73
+ return _ptp(x) / (np.log2(x.size) + 1.0)
74
+
75
+
76
+ def _hist_bin_rice(x, range):
77
+ """
78
+ Rice histogram bin estimator.
79
+
80
+ Another simple estimator with no normality assumption. It has better
81
+ performance for large data than Sturges, but tends to overestimate
82
+ the number of bins. The number of bins is proportional to the cube
83
+ root of data size (asymptotically optimal). The estimate depends
84
+ only on size of the data.
85
+
86
+ Parameters
87
+ ----------
88
+ x : array_like
89
+ Input data that is to be histogrammed, trimmed to range. May not
90
+ be empty.
91
+
92
+ Returns
93
+ -------
94
+ h : An estimate of the optimal bin width for the given data.
95
+ """
96
+ del range # unused
97
+ return _ptp(x) / (2.0 * x.size ** (1.0 / 3))
98
+
99
+
100
+ def _hist_bin_scott(x, range):
101
+ """
102
+ Scott histogram bin estimator.
103
+
104
+ The binwidth is proportional to the standard deviation of the data
105
+ and inversely proportional to the cube root of data size
106
+ (asymptotically optimal).
107
+
108
+ Parameters
109
+ ----------
110
+ x : array_like
111
+ Input data that is to be histogrammed, trimmed to range. May not
112
+ be empty.
113
+
114
+ Returns
115
+ -------
116
+ h : An estimate of the optimal bin width for the given data.
117
+ """
118
+ del range # unused
119
+ return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
120
+
121
+
122
+ def _hist_bin_stone(x, range):
123
+ """
124
+ Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
125
+
126
+ The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
127
+ The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
128
+ https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
129
+
130
+ This paper by Stone appears to be the origin of this rule.
131
+ http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
132
+
133
+ Parameters
134
+ ----------
135
+ x : array_like
136
+ Input data that is to be histogrammed, trimmed to range. May not
137
+ be empty.
138
+ range : (float, float)
139
+ The lower and upper range of the bins.
140
+
141
+ Returns
142
+ -------
143
+ h : An estimate of the optimal bin width for the given data.
144
+ """
145
+
146
+ n = x.size
147
+ ptp_x = _ptp(x)
148
+ if n <= 1 or ptp_x == 0:
149
+ return 0
150
+
151
+ def jhat(nbins):
152
+ hh = ptp_x / nbins
153
+ p_k = np.histogram(x, bins=nbins, range=range)[0] / n
154
+ return (2 - (n + 1) * p_k.dot(p_k)) / hh
155
+
156
+ nbins_upper_bound = max(100, int(np.sqrt(n)))
157
+ nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
158
+ if nbins == nbins_upper_bound:
159
+ warnings.warn("The number of bins estimated may be suboptimal.",
160
+ RuntimeWarning, stacklevel=3)
161
+ return ptp_x / nbins
162
+
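+ # Hedged sketch of the objective above (illustration only): for data that
+ # is approximately uniform on its range, p_k is about 1/nbins in every bin,
+ # so jhat(nbins) is roughly proportional to 2*nbins - (n + 1) and should be
+ # minimized at nbins = 1 -- a single bin is the best piecewise-constant fit
+ # to a flat density, and the returned width is then the full data span.
+ # >>> x = np.arange(1000) / 1000.0
+ # >>> _hist_bin_stone(x, (x.min(), x.max())) # == _ptp(x), since nbins == 1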
163
+
164
+ def _hist_bin_doane(x, range):
165
+ """
166
+ Doane's histogram bin estimator.
167
+
168
+ Improved version of Sturges' formula which works better for
169
+ non-normal data. See
170
+ stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning
171
+
172
+ Parameters
173
+ ----------
174
+ x : array_like
175
+ Input data that is to be histogrammed, trimmed to range. May not
176
+ be empty.
177
+
178
+ Returns
179
+ -------
180
+ h : An estimate of the optimal bin width for the given data.
181
+ """
182
+ del range # unused
183
+ if x.size > 2:
184
+ sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
185
+ sigma = np.std(x)
186
+ if sigma > 0.0:
187
+ # These three operations add up to
188
+ # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
189
+ # but use only one temp array instead of three
190
+ temp = x - np.mean(x)
191
+ np.true_divide(temp, sigma, temp)
192
+ np.power(temp, 3, temp)
193
+ g1 = np.mean(temp)
194
+ return _ptp(x) / (1.0 + np.log2(x.size) +
195
+ np.log2(1.0 + np.absolute(g1) / sg1))
196
+ return 0.0
197
+
198
+
199
+ def _hist_bin_fd(x, range):
200
+ """
201
+ The Freedman-Diaconis histogram bin estimator.
202
+
203
+ The Freedman-Diaconis rule uses interquartile range (IQR) to
204
+ estimate binwidth. It is considered a variation of the Scott rule
205
+ with more robustness as the IQR is less affected by outliers than
206
+ the standard deviation. However, the IQR depends on fewer points
207
+ than the standard deviation, so it is less accurate, especially for
208
+ long tailed distributions.
209
+
210
+ If the IQR is 0, this function returns 0 for the bin width.
211
+ Binwidth is inversely proportional to the cube root of data size
212
+ (asymptotically optimal).
213
+
214
+ Parameters
215
+ ----------
216
+ x : array_like
217
+ Input data that is to be histogrammed, trimmed to range. May not
218
+ be empty.
219
+
220
+ Returns
221
+ -------
222
+ h : An estimate of the optimal bin width for the given data.
223
+ """
224
+ del range # unused
225
+ iqr = np.subtract(*np.percentile(x, [75, 25]))
226
+ return 2.0 * iqr * x.size ** (-1.0 / 3.0)
227
+
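+ # Worked sketch (illustration only): for x = 0..99 the IQR is 49.5 and
+ # n ** (-1/3) is about 0.215, so the FD width is about 2 * 49.5 * 0.215,
+ # i.e. roughly 21.3.
+ # >>> _hist_bin_fd(np.arange(100.0), None)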
228
+
229
+ def _hist_bin_auto(x, range):
230
+ """
231
+ Histogram bin estimator that uses the minimum width of the
232
+ Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero.
233
+ If the bin width from the FD estimator is 0, the Sturges estimator is used.
234
+
235
+ The FD estimator is usually the most robust method, but its width
236
+ estimate tends to be too large for small `x` and bad for data with limited
237
+ variance. The Sturges estimator is quite good for small (<1000) datasets
238
+ and is the default in the R language. This method gives good off-the-shelf
239
+ behaviour.
240
+
241
+ .. versionchanged:: 1.15.0
242
+ If there is limited variance the IQR can be 0, which results in the
243
+ FD bin width being 0 too. This is not a valid bin width, so
244
+ ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
245
+ If the IQR is 0, it's unlikely any variance-based estimators will be of
246
+ use, so we revert to the Sturges estimator, which only uses the size of the
247
+ dataset in its calculation.
248
+
249
+ Parameters
250
+ ----------
251
+ x : array_like
252
+ Input data that is to be histogrammed, trimmed to range. May not
253
+ be empty.
254
+
255
+ Returns
256
+ -------
257
+ h : An estimate of the optimal bin width for the given data.
258
+
259
+ See Also
260
+ --------
261
+ _hist_bin_fd, _hist_bin_sturges
262
+ """
263
+ fd_bw = _hist_bin_fd(x, range)
264
+ sturges_bw = _hist_bin_sturges(x, range)
265
+ del range # unused
266
+ if fd_bw:
267
+ return min(fd_bw, sturges_bw)
268
+ else:
269
+ # limited variance, so we return a length-dependent bin width estimator
270
+ return sturges_bw
271
+
272
+ # Private dict initialized at module load time
273
+ _hist_bin_selectors = {'stone': _hist_bin_stone,
274
+ 'auto': _hist_bin_auto,
275
+ 'doane': _hist_bin_doane,
276
+ 'fd': _hist_bin_fd,
277
+ 'rice': _hist_bin_rice,
278
+ 'scott': _hist_bin_scott,
279
+ 'sqrt': _hist_bin_sqrt,
280
+ 'sturges': _hist_bin_sturges}
281
+
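+ # Hedged usage sketch (illustration only): these selectors back the string
+ # forms of `bins`; the public entry point is `np.histogram_bin_edges`, and
+ # the rules generally disagree on the number of edges for the same data.
+ # >>> rng = np.random.RandomState(0)
+ # >>> data = rng.normal(size=1000)
+ # >>> {rule: len(np.histogram_bin_edges(data, bins=rule))
+ # ... for rule in ('sturges', 'scott', 'fd', 'auto')}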
282
+
283
+ def _ravel_and_check_weights(a, weights):
284
+ """ Check a and weights have matching shapes, and ravel both """
285
+ a = np.asarray(a)
286
+
287
+ # Ensure that the array is a "subtractable" dtype
288
+ if a.dtype == np.bool_:
289
+ warnings.warn("Converting input from {} to {} for compatibility."
290
+ .format(a.dtype, np.uint8),
291
+ RuntimeWarning, stacklevel=3)
292
+ a = a.astype(np.uint8)
293
+
294
+ if weights is not None:
295
+ weights = np.asarray(weights)
296
+ if weights.shape != a.shape:
297
+ raise ValueError(
298
+ 'weights should have the same shape as a.')
299
+ weights = weights.ravel()
300
+ a = a.ravel()
301
+ return a, weights
302
+
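+ # Sketch of the boolean up-cast above (illustration only): bool arrays are
+ # not "subtractable", so they are histogrammed as uint8, with a
+ # RuntimeWarning.
+ # >>> np.histogram(np.array([True, False, True]), bins=2)[0]
+ # array([1, 2])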
303
+
304
+ def _get_outer_edges(a, range):
305
+ """
306
+ Determine the outer bin edges to use, from either the data or the range
307
+ argument
308
+ """
309
+ if range is not None:
310
+ first_edge, last_edge = range
311
+ if first_edge > last_edge:
312
+ raise ValueError(
313
+ 'max must be larger than min in range parameter.')
314
+ if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
315
+ raise ValueError(
316
+ "supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
317
+ elif a.size == 0:
318
+ # handle empty arrays. Can't determine range, so use 0-1.
319
+ first_edge, last_edge = 0, 1
320
+ else:
321
+ first_edge, last_edge = a.min(), a.max()
322
+ if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
323
+ raise ValueError(
324
+ "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))
325
+
326
+ # expand empty range to avoid divide by zero
327
+ if first_edge == last_edge:
328
+ first_edge = first_edge - 0.5
329
+ last_edge = last_edge + 0.5
330
+
331
+ return first_edge, last_edge
332
+
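+ # Sketch of the constant-data branch above (illustration only): an
+ # all-equal input gets its range widened by 0.5 on each side, so downstream
+ # division by the bin width is safe.
+ # >>> np.histogram(np.array([3.0, 3.0]), bins=4)[1]
+ # array([2.5 , 2.75, 3.  , 3.25, 3.5 ])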
333
+
334
+ def _unsigned_subtract(a, b):
335
+ """
336
+ Subtract two values where a >= b, and produce an unsigned result
337
+
338
+ This is needed when finding the difference between the upper and lower
339
+ bound of an int16 histogram
340
+ """
341
+ # coerce to a single type
342
+ signed_to_unsigned = {
343
+ np.byte: np.ubyte,
344
+ np.short: np.ushort,
345
+ np.intc: np.uintc,
346
+ np.int_: np.uint,
347
+ np.longlong: np.ulonglong
348
+ }
349
+ dt = np.result_type(a, b)
350
+ try:
351
+ dt = signed_to_unsigned[dt.type]
352
+ except KeyError:
353
+ return np.subtract(a, b, dtype=dt)
354
+ else:
355
+ # we know the inputs are integers, and we are deliberately casting
356
+ # signed to unsigned
357
+ return np.subtract(a, b, casting='unsafe', dtype=dt)
358
+
359
+
360
+ def _get_bin_edges(a, bins, range, weights):
361
+ """
362
+ Computes the bins used internally by `histogram`.
363
+
364
+ Parameters
365
+ ==========
366
+ a : ndarray
367
+ Ravelled data array
368
+ bins, range
369
+ Forwarded arguments from `histogram`.
370
+ weights : ndarray, optional
371
+ Ravelled weights array, or None
372
+
373
+ Returns
374
+ =======
375
+ bin_edges : ndarray
376
+ Array of bin edges
377
+ uniform_bins : (Number, Number, int):
378
+ The lower bound, upper bound, and number of bins, used in the optimized
379
+ implementation of `histogram` that works on uniform bins.
380
+ """
381
+ # parse the overloaded bins argument
382
+ n_equal_bins = None
383
+ bin_edges = None
384
+
385
+ if isinstance(bins, str):
386
+ bin_name = bins
387
+ # if `bins` is a string for an automatic method,
388
+ # this will replace it with the number of bins calculated
389
+ if bin_name not in _hist_bin_selectors:
390
+ raise ValueError(
391
+ "{!r} is not a valid estimator for `bins`".format(bin_name))
392
+ if weights is not None:
393
+ raise TypeError("Automated estimation of the number of "
394
+ "bins is not supported for weighted data")
395
+
396
+ first_edge, last_edge = _get_outer_edges(a, range)
397
+
398
+ # truncate the range if needed
399
+ if range is not None:
400
+ keep = (a >= first_edge)
401
+ keep &= (a <= last_edge)
402
+ if not np.logical_and.reduce(keep):
403
+ a = a[keep]
404
+
405
+ if a.size == 0:
406
+ n_equal_bins = 1
407
+ else:
408
+ # Do not call selectors on empty arrays
409
+ width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
410
+ if width:
411
+ n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
412
+ else:
413
+ # Width can be zero for some estimators, e.g. FD when
414
+ # the IQR of the data is zero.
415
+ n_equal_bins = 1
416
+
417
+ elif np.ndim(bins) == 0:
418
+ try:
419
+ n_equal_bins = operator.index(bins)
420
+ except TypeError as e:
421
+ raise TypeError(
422
+ '`bins` must be an integer, a string, or an array') from e
423
+ if n_equal_bins < 1:
424
+ raise ValueError('`bins` must be positive, when an integer')
425
+
426
+ first_edge, last_edge = _get_outer_edges(a, range)
427
+
428
+ elif np.ndim(bins) == 1:
429
+ bin_edges = np.asarray(bins)
430
+ if np.any(bin_edges[:-1] > bin_edges[1:]):
431
+ raise ValueError(
432
+ '`bins` must increase monotonically, when an array')
433
+
434
+ else:
435
+ raise ValueError('`bins` must be 1d, when an array')
436
+
437
+ if n_equal_bins is not None:
438
+ # gh-10322 means that type resolution rules are dependent on array
439
+ # shapes. To avoid this causing problems, we pick a type now and stick
440
+ # with it throughout.
441
+ bin_type = np.result_type(first_edge, last_edge, a)
442
+ if np.issubdtype(bin_type, np.integer):
443
+ bin_type = np.result_type(bin_type, float)
444
+
445
+ # bin edges must be computed
446
+ bin_edges = np.linspace(
447
+ first_edge, last_edge, n_equal_bins + 1,
448
+ endpoint=True, dtype=bin_type)
449
+ return bin_edges, (first_edge, last_edge, n_equal_bins)
450
+ else:
451
+ return bin_edges, None
452
+
453
+
454
+ def _search_sorted_inclusive(a, v):
455
+ """
456
+ Like `searchsorted`, but where the last item in `v` is placed on the right.
457
+
458
+ In the context of a histogram, this makes the last bin edge inclusive
459
+ """
460
+ return np.concatenate((
461
+ a.searchsorted(v[:-1], 'left'),
462
+ a.searchsorted(v[-1:], 'right')
463
+ ))
464
+
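+ # Minimal sketch (illustration only): the final edge is treated as
+ # inclusive, so a value equal to the last bin edge counts in the last bin
+ # rather than falling off the end.
+ # >>> sa = np.array([0.0, 1.0, 2.0]) # sorted data
+ # >>> _search_sorted_inclusive(sa, np.array([0.0, 1.0, 2.0]))
+ # array([0, 1, 3])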
465
+
466
+ def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
467
+ return (a, bins, weights)
468
+
469
+
470
+ @array_function_dispatch(_histogram_bin_edges_dispatcher)
471
+ def histogram_bin_edges(a, bins=10, range=None, weights=None):
472
+ r"""
473
+ Function to calculate only the edges of the bins used by the `histogram`
474
+ function.
475
+
476
+ Parameters
477
+ ----------
478
+ a : array_like
479
+ Input data. The histogram is computed over the flattened array.
480
+ bins : int or sequence of scalars or str, optional
481
+ If `bins` is an int, it defines the number of equal-width
482
+ bins in the given range (10, by default). If `bins` is a
483
+ sequence, it defines the bin edges, including the rightmost
484
+ edge, allowing for non-uniform bin widths.
485
+
486
+ If `bins` is a string from the list below, `histogram_bin_edges` will use
487
+ the method chosen to calculate the optimal bin width and
488
+ consequently the number of bins (see `Notes` for more detail on
489
+ the estimators) from the data that falls within the requested
490
+ range. While the bin width will be optimal for the actual data
491
+ in the range, the number of bins will be computed to fill the
492
+ entire range, including the empty portions. For visualisation,
493
+ using the 'auto' option is suggested. Weighted data is not
494
+ supported for automated bin size selection.
495
+
496
+ 'auto'
497
+ Maximum of the 'sturges' and 'fd' estimators. Provides good
498
+ all around performance.
499
+
500
+ 'fd' (Freedman Diaconis Estimator)
501
+ Robust (resilient to outliers) estimator that takes into
502
+ account data variability and data size.
503
+
504
+ 'doane'
505
+ An improved version of Sturges' estimator that works better
506
+ with non-normal datasets.
507
+
508
+ 'scott'
509
+ Less robust estimator that takes into account data variability
510
+ and data size.
511
+
512
+ 'stone'
513
+ Estimator based on leave-one-out cross-validation estimate of
514
+ the integrated squared error. Can be regarded as a generalization
515
+ of Scott's rule.
516
+
517
+ 'rice'
518
+ Estimator does not take variability into account, only data
519
+ size. Commonly overestimates number of bins required.
520
+
521
+ 'sturges'
522
+ R's default method, only accounts for data size. Only
523
+ optimal for gaussian data and underestimates number of bins
524
+ for large non-gaussian datasets.
525
+
526
+ 'sqrt'
527
+ Square root (of data size) estimator, used by Excel and
528
+ other programs for its speed and simplicity.
529
+
530
+ range : (float, float), optional
531
+ The lower and upper range of the bins. If not provided, range
532
+ is simply ``(a.min(), a.max())``. Values outside the range are
533
+ ignored. The first element of the range must be less than or
534
+ equal to the second. `range` affects the automatic bin
535
+ computation as well. While bin width is computed to be optimal
536
+ based on the actual data within `range`, the bin count will fill
537
+ the entire range including portions containing no data.
538
+
539
+ weights : array_like, optional
540
+ An array of weights, of the same shape as `a`. Each value in
541
+ `a` only contributes its associated weight towards the bin count
542
+ (instead of 1). This is currently not used by any of the bin estimators,
543
+ but may be in the future.
544
+
545
+ Returns
546
+ -------
547
+ bin_edges : array of dtype float
548
+ The edges to pass into `histogram`
549
+
550
+ See Also
551
+ --------
552
+ histogram
553
+
554
+ Notes
555
+ -----
556
+ The methods to estimate the optimal number of bins are well founded
557
+ in literature, and are inspired by the choices R provides for
558
+ histogram visualisation. Note that having the number of bins
559
+ proportional to :math:`n^{1/3}` is asymptotically optimal, which is
560
+ why it appears in most estimators. These are simply plug-in methods
561
+ that give good starting points for number of bins. In the equations
562
+ below, :math:`h` is the binwidth and :math:`n_h` is the number of
563
+ bins. All estimators that compute bin counts are recast to bin width
564
+ using the `ptp` of the data. The final bin count is obtained from
565
+ ``np.round(np.ceil(range / h))``. The final bin width is often less
566
+ than what is returned by the estimators below.
567
+
568
+ 'auto' (maximum of the 'sturges' and 'fd' estimators)
569
+ A compromise to get a good value. For small datasets the Sturges
570
+ value will usually be chosen, while larger datasets will usually
571
+ default to FD. Avoids the overly conservative behaviour of FD
572
+ and Sturges for small and large datasets respectively.
573
+ Switchover point is usually :math:`a.size \approx 1000`.
574
+
575
+ 'fd' (Freedman Diaconis Estimator)
576
+ .. math:: h = 2 \frac{IQR}{n^{1/3}}
577
+
578
+ The binwidth is proportional to the interquartile range (IQR)
579
+ and inversely proportional to cube root of a.size. Can be too
580
+ conservative for small datasets, but is quite good for large
581
+ datasets. The IQR is very robust to outliers.
582
+
583
+ 'scott'
584
+ .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}}
585
+
586
+ The binwidth is proportional to the standard deviation of the
587
+ data and inversely proportional to cube root of ``x.size``. Can
588
+ be too conservative for small datasets, but is quite good for
589
+ large datasets. The standard deviation is not very robust to
590
+ outliers. Values are very similar to the Freedman-Diaconis
591
+ estimator in the absence of outliers.
592
+
593
+ 'rice'
594
+ .. math:: n_h = 2n^{1/3}
595
+
596
+ The number of bins is only proportional to cube root of
597
+ ``a.size``. It tends to overestimate the number of bins and it
598
+ does not take into account data variability.
599
+
600
+ 'sturges'
601
+ .. math:: n_h = \log_{2}(n) + 1
602
+
603
+ The number of bins is the base 2 log of ``a.size``. This
604
+ estimator assumes normality of data and is too conservative for
605
+ larger, non-normal datasets. This is the default method in R's
606
+ ``hist`` method.
607
+
608
+ 'doane'
609
+ .. math:: n_h = 1 + \log_{2}(n) +
610
+ \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right)
611
+
612
+ g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right]
613
+
614
+ \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}
615
+
616
+ An improved version of Sturges' formula that produces better
617
+ estimates for non-normal datasets. This estimator attempts to
618
+ account for the skew of the data.
619
+
620
+ 'sqrt'
621
+ .. math:: n_h = \sqrt n
622
+
623
+ The simplest and fastest estimator. Only takes into account the
624
+ data size.
625
+
626
+ Examples
627
+ --------
628
+ >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
629
+ >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
630
+ array([0. , 0.25, 0.5 , 0.75, 1. ])
631
+ >>> np.histogram_bin_edges(arr, bins=2)
632
+ array([0. , 2.5, 5. ])
633
+
634
+ For consistency with histogram, an array of pre-computed bins is
635
+ passed through unmodified:
636
+
637
+ >>> np.histogram_bin_edges(arr, [1, 2])
638
+ array([1, 2])
639
+
640
+ This function allows one set of bins to be computed, and reused across
641
+ multiple histograms:
642
+
643
+ >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
644
+ >>> shared_bins
645
+ array([0., 1., 2., 3., 4., 5.])
646
+
647
+ >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
648
+ >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
649
+ >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)
650
+
651
+ >>> hist_0; hist_1
652
+ array([1, 1, 0, 1, 0])
653
+ array([2, 0, 1, 1, 2])
654
+
655
+ Which gives more easily comparable results than using separate bins for
656
+ each histogram:
657
+
658
+ >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
659
+ >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
660
+ >>> hist_0; hist_1
661
+ array([1, 1, 1])
662
+ array([2, 1, 1, 2])
663
+ >>> bins_0; bins_1
664
+ array([0., 1., 2., 3.])
665
+ array([0. , 1.25, 2.5 , 3.75, 5. ])
666
+
667
+ """
668
+ a, weights = _ravel_and_check_weights(a, weights)
669
+ bin_edges, _ = _get_bin_edges(a, bins, range, weights)
670
+ return bin_edges
671
+
672
+
673
+ def _histogram_dispatcher(
674
+ a, bins=None, range=None, density=None, weights=None):
675
+ return (a, bins, weights)
676
+
677
+
678
+ @array_function_dispatch(_histogram_dispatcher)
679
+ def histogram(a, bins=10, range=None, density=None, weights=None):
680
+ r"""
681
+ Compute the histogram of a dataset.
682
+
683
+ Parameters
684
+ ----------
685
+ a : array_like
686
+ Input data. The histogram is computed over the flattened array.
687
+ bins : int or sequence of scalars or str, optional
688
+ If `bins` is an int, it defines the number of equal-width
689
+ bins in the given range (10, by default). If `bins` is a
690
+ sequence, it defines a monotonically increasing array of bin edges,
691
+ including the rightmost edge, allowing for non-uniform bin widths.
692
+
693
+ .. versionadded:: 1.11.0
694
+
695
+ If `bins` is a string, it defines the method used to calculate the
696
+ optimal bin width, as defined by `histogram_bin_edges`.
697
+
698
+ range : (float, float), optional
699
+ The lower and upper range of the bins. If not provided, range
700
+ is simply ``(a.min(), a.max())``. Values outside the range are
701
+ ignored. The first element of the range must be less than or
702
+ equal to the second. `range` affects the automatic bin
703
+ computation as well. While bin width is computed to be optimal
704
+ based on the actual data within `range`, the bin count will fill
705
+ the entire range including portions containing no data.
706
+ weights : array_like, optional
707
+ An array of weights, of the same shape as `a`. Each value in
708
+ `a` only contributes its associated weight towards the bin count
709
+ (instead of 1). If `density` is True, the weights are
710
+ normalized, so that the integral of the density over the range
711
+ remains 1.
712
+ density : bool, optional
713
+ If ``False``, the result will contain the number of samples in
714
+ each bin. If ``True``, the result is the value of the
715
+ probability *density* function at the bin, normalized such that
716
+ the *integral* over the range is 1. Note that the sum of the
717
+ histogram values will not be equal to 1 unless bins of unity
718
+ width are chosen; it is not a probability *mass* function.
719
+
720
+ Returns
721
+ -------
722
+ hist : array
723
+ The values of the histogram. See `density` and `weights` for a
724
+ description of the possible semantics.
725
+ bin_edges : array of dtype float
726
+ Return the bin edges ``(length(hist)+1)``.
727
+
728
+
729
+ See Also
730
+ --------
731
+ histogramdd, bincount, searchsorted, digitize, histogram_bin_edges
732
+
733
+ Notes
734
+ -----
735
+ All but the last (righthand-most) bin is half-open. In other words,
736
+ if `bins` is::
737
+
738
+ [1, 2, 3, 4]
739
+
740
+ then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
741
+ the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
742
+ *includes* 4.
743
+
744
+
745
+ Examples
746
+ --------
747
+ >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
748
+ (array([0, 2, 1]), array([0, 1, 2, 3]))
749
+ >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
750
+ (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
751
+ >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
752
+ (array([1, 4, 1]), array([0, 1, 2, 3]))
753
+
754
+ >>> a = np.arange(5)
755
+ >>> hist, bin_edges = np.histogram(a, density=True)
756
+ >>> hist
757
+ array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
758
+ >>> hist.sum()
759
+ 2.4999999999999996
760
+ >>> np.sum(hist * np.diff(bin_edges))
761
+ 1.0
762
+
763
+ .. versionadded:: 1.11.0
764
+
765
+ Automated Bin Selection Methods example, using 2 peak random data
766
+ with 2000 points:
767
+
768
+ >>> import matplotlib.pyplot as plt
769
+ >>> rng = np.random.RandomState(10) # deterministic random data
770
+ >>> a = np.hstack((rng.normal(size=1000),
771
+ ... rng.normal(loc=5, scale=2, size=1000)))
772
+ >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram
773
+ >>> plt.title("Histogram with 'auto' bins")
774
+ Text(0.5, 1.0, "Histogram with 'auto' bins")
775
+ >>> plt.show()
776
+
777
+ """
778
+ a, weights = _ravel_and_check_weights(a, weights)
779
+
780
+ bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)
781
+
782
+ # Histogram is an integer or a float array depending on the weights.
783
+ if weights is None:
784
+ ntype = np.dtype(np.intp)
785
+ else:
786
+ ntype = weights.dtype
787
+
788
+ # We set a block size, as this allows us to iterate over chunks when
789
+ # computing histograms, to minimize memory usage.
790
+ BLOCK = 65536
791
+
792
+ # The fast path uses bincount, but that only works for certain types
793
+ # of weight
794
+ simple_weights = (
795
+ weights is None or
796
+ np.can_cast(weights.dtype, np.double) or
797
+ np.can_cast(weights.dtype, complex)
798
+ )
799
+
800
+ if uniform_bins is not None and simple_weights:
801
+ # Fast algorithm for equal bins
802
+ # We now convert values of a to bin indices, under the assumption of
803
+ # equal bin widths (which is valid here).
804
+ first_edge, last_edge, n_equal_bins = uniform_bins
805
+
806
+ # Initialize empty histogram
807
+ n = np.zeros(n_equal_bins, ntype)
808
+
809
+ # Pre-compute histogram scaling factor
810
+ norm_numerator = n_equal_bins
811
+ norm_denom = _unsigned_subtract(last_edge, first_edge)
812
+
813
+ # We iterate over blocks here for two reasons: the first is that for
814
+ # large arrays, it is actually faster (for example for a 10^8 array it
815
+ # is 2x as fast) and it results in a memory footprint 3x lower in the
816
+ # limit of large arrays.
817
+ for i in _range(0, len(a), BLOCK):
818
+ tmp_a = a[i:i+BLOCK]
819
+ if weights is None:
820
+ tmp_w = None
821
+ else:
822
+ tmp_w = weights[i:i + BLOCK]
823
+
824
+ # Only include values in the right range
825
+ keep = (tmp_a >= first_edge)
826
+ keep &= (tmp_a <= last_edge)
827
+ if not np.logical_and.reduce(keep):
828
+ tmp_a = tmp_a[keep]
829
+ if tmp_w is not None:
830
+ tmp_w = tmp_w[keep]
831
+
832
+ # This cast ensures no type promotions occur below, which gh-10322
833
+ # makes unpredictable. Getting it wrong leads to precision errors
834
+ # like gh-8123.
835
+ tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)
836
+
837
+ # Compute the bin indices, and for values that lie exactly on
838
+ # last_edge we need to subtract one
839
+ f_indices = ((_unsigned_subtract(tmp_a, first_edge) / norm_denom)
840
+ * norm_numerator)
841
+ indices = f_indices.astype(np.intp)
842
+ indices[indices == n_equal_bins] -= 1
843
+
844
+ # The index computation is not guaranteed to give exactly
845
+ # consistent results within ~1 ULP of the bin edges.
846
+ decrement = tmp_a < bin_edges[indices]
847
+ indices[decrement] -= 1
848
+ # The last bin includes the right edge. The other bins do not.
849
+ increment = ((tmp_a >= bin_edges[indices + 1])
850
+ & (indices != n_equal_bins - 1))
851
+ indices[increment] += 1
852
+
853
+ # We now compute the histogram using bincount
854
+ if ntype.kind == 'c':
855
+ n.real += np.bincount(indices, weights=tmp_w.real,
856
+ minlength=n_equal_bins)
857
+ n.imag += np.bincount(indices, weights=tmp_w.imag,
858
+ minlength=n_equal_bins)
859
+ else:
860
+ n += np.bincount(indices, weights=tmp_w,
861
+ minlength=n_equal_bins).astype(ntype)
862
+ else:
863
+ # Compute via cumulative histogram
864
+ cum_n = np.zeros(bin_edges.shape, ntype)
865
+ if weights is None:
866
+ for i in _range(0, len(a), BLOCK):
867
+ sa = np.sort(a[i:i+BLOCK])
868
+ cum_n += _search_sorted_inclusive(sa, bin_edges)
869
+ else:
870
+ zero = np.zeros(1, dtype=ntype)
871
+ for i in _range(0, len(a), BLOCK):
872
+ tmp_a = a[i:i+BLOCK]
873
+ tmp_w = weights[i:i+BLOCK]
874
+ sorting_index = np.argsort(tmp_a)
875
+ sa = tmp_a[sorting_index]
876
+ sw = tmp_w[sorting_index]
877
+ cw = np.concatenate((zero, sw.cumsum()))
878
+ bin_index = _search_sorted_inclusive(sa, bin_edges)
879
+ cum_n += cw[bin_index]
880
+
881
+ n = np.diff(cum_n)
882
+
883
+ if density:
884
+ db = np.array(np.diff(bin_edges), float)
885
+ return n/db/n.sum(), bin_edges
886
+
887
+ return n, bin_edges
888
+
889
+
890
+ def _histogramdd_dispatcher(sample, bins=None, range=None, density=None,
891
+ weights=None):
892
+ if hasattr(sample, 'shape'): # same condition as used in histogramdd
893
+ yield sample
894
+ else:
895
+ yield from sample
896
+ with contextlib.suppress(TypeError):
897
+ yield from bins
898
+ yield weights
899
+
900
+
901
+ @array_function_dispatch(_histogramdd_dispatcher)
902
+ def histogramdd(sample, bins=10, range=None, density=None, weights=None):
903
+ """
904
+ Compute the multidimensional histogram of some data.
905
+
906
+ Parameters
907
+ ----------
908
+ sample : (N, D) array, or (N, D) array_like
909
+ The data to be histogrammed.
910
+
911
+ Note the unusual interpretation of sample when an array_like:
912
+
913
+ * When an array, each row is a coordinate in a D-dimensional space -
914
+ such as ``histogramdd(np.array([p1, p2, p3]))``.
915
+ * When an array_like, each element is the list of values for a single
916
+ coordinate - such as ``histogramdd((X, Y, Z))``.
917
+
918
+ The first form should be preferred.
919
+
920
+ bins : sequence or int, optional
921
+ The bin specification:
922
+
923
+ * A sequence of arrays describing the monotonically increasing bin
924
+ edges along each dimension.
925
+ * The number of bins for each dimension (nx, ny, ... =bins)
926
+ * The number of bins for all dimensions (nx=ny=...=bins).
927
+
928
+ range : sequence, optional
929
+ A sequence of length D, each an optional (lower, upper) tuple giving
930
+ the outer bin edges to be used if the edges are not given explicitly in
931
+ `bins`.
932
+ An entry of None in the sequence results in the minimum and maximum
933
+ values being used for the corresponding dimension.
934
+ The default, None, is equivalent to passing a tuple of D None values.
935
+ density : bool, optional
936
+ If False, the default, returns the number of samples in each bin.
937
+ If True, returns the probability *density* function at the bin,
938
+ ``bin_count / sample_count / bin_volume``.
939
+ weights : (N,) array_like, optional
940
+ An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
941
+ Weights are normalized to 1 if density is True. If density is False,
942
+ the values of the returned histogram are equal to the sum of the
943
+ weights belonging to the samples falling into each bin.
944
+
945
+ Returns
946
+ -------
947
+ H : ndarray
948
+ The multidimensional histogram of sample x. See density and weights
949
+ for the different possible semantics.
950
+ edges : list
951
+ A list of D arrays describing the bin edges for each dimension.
952
+
953
+ See Also
954
+ --------
955
+ histogram: 1-D histogram
956
+ histogram2d: 2-D histogram
957
+
958
+ Examples
959
+ --------
960
+ >>> r = np.random.randn(100,3)
961
+ >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
962
+ >>> H.shape, edges[0].size, edges[1].size, edges[2].size
963
+ ((5, 8, 4), 6, 9, 5)
964
+
965
+ """
966
+
967
+ try:
968
+ # Sample is an ND-array.
969
+ N, D = sample.shape
970
+ except (AttributeError, ValueError):
971
+ # Sample is a sequence of 1D arrays.
972
+ sample = np.atleast_2d(sample).T
973
+ N, D = sample.shape
974
+
975
+ nbin = np.empty(D, np.intp)
976
+ edges = D*[None]
977
+ dedges = D*[None]
978
+ if weights is not None:
979
+ weights = np.asarray(weights)
980
+
981
+ try:
982
+ M = len(bins)
983
+ if M != D:
984
+ raise ValueError(
985
+ 'The dimension of bins must be equal to the dimension of the '
986
+ 'sample x.')
987
+ except TypeError:
988
+ # bins is an integer
989
+ bins = D*[bins]
990
+
991
+ # normalize the range argument
992
+ if range is None:
993
+ range = (None,) * D
994
+ elif len(range) != D:
995
+ raise ValueError('range argument must have one entry per dimension')
996
+
997
+ # Create edge arrays
998
+ for i in _range(D):
999
+ if np.ndim(bins[i]) == 0:
1000
+ if bins[i] < 1:
1001
+ raise ValueError(
1002
+ '`bins[{}]` must be positive, when an integer'.format(i))
1003
+ smin, smax = _get_outer_edges(sample[:,i], range[i])
1004
+ try:
1005
+ n = operator.index(bins[i])
1006
+
1007
+ except TypeError as e:
1008
+ raise TypeError(
1009
+ "`bins[{}]` must be an integer, when a scalar".format(i)
1010
+ ) from e
1011
+
1012
+ edges[i] = np.linspace(smin, smax, n + 1)
1013
+ elif np.ndim(bins[i]) == 1:
1014
+ edges[i] = np.asarray(bins[i])
1015
+ if np.any(edges[i][:-1] > edges[i][1:]):
1016
+ raise ValueError(
1017
+ '`bins[{}]` must be monotonically increasing, when an array'
1018
+ .format(i))
1019
+ else:
1020
+ raise ValueError(
1021
+ '`bins[{}]` must be a scalar or 1d array'.format(i))
1022
+
1023
+ nbin[i] = len(edges[i]) + 1 # includes an outlier on each end
1024
+ dedges[i] = np.diff(edges[i])
1025
+
1026
+ # Compute the bin number each sample falls into.
1027
+ Ncount = tuple(
1028
+ # avoid np.digitize to work around gh-11022
1029
+ np.searchsorted(edges[i], sample[:, i], side='right')
1030
+ for i in _range(D)
1031
+ )
1032
+
1033
+ # Using digitize, values that fall on an edge are put in the right bin.
1034
+ # For the rightmost bin, we want values equal to the right edge to be
1035
+ # counted in the last bin, and not as an outlier.
1036
+ for i in _range(D):
1037
+ # Find which points are on the rightmost edge.
1038
+ on_edge = (sample[:, i] == edges[i][-1])
1039
+ # Shift these points one bin to the left.
1040
+ Ncount[i][on_edge] -= 1
1041
+
1042
+ # Compute the sample indices in the flattened histogram matrix.
1043
+ # This raises an error if the array is too large.
1044
+ xy = np.ravel_multi_index(Ncount, nbin)
1045
+
1046
+ # Compute the number of repetitions in xy and assign it to the
1047
+ # flattened histmat.
1048
+ hist = np.bincount(xy, weights, minlength=nbin.prod())
1049
+
1050
+ # Shape into a proper matrix
1051
+ hist = hist.reshape(nbin)
1052
+
1053
+ # This preserves the (bad) behavior observed in gh-7845, for now.
1054
+ hist = hist.astype(float, casting='safe')
1055
+
1056
+ # Remove outliers (indices 0 and -1 for each dimension).
1057
+ core = D*(slice(1, -1),)
1058
+ hist = hist[core]
1059
+
1060
+ if density:
1061
+ # calculate the probability density function
1062
+ s = hist.sum()
1063
+ for i in _range(D):
1064
+ shape = np.ones(D, int)
1065
+ shape[i] = nbin[i] - 2
1066
+ hist = hist / dedges[i].reshape(shape)
1067
+ hist /= s
1068
+
1069
+ if (hist.shape != nbin - 2).any():
1070
+ raise RuntimeError(
1071
+ "Internal Shape Error")
1072
+ return hist, edges
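+ # Hedged sketch of the two `sample` forms noted in the docstring
+ # (illustration only): an (N, D) array of points and a D-tuple of
+ # coordinate vectors produce the same histogram.
+ # >>> pts = np.array([[0.1, 0.2], [0.4, 0.9], [0.8, 0.5]])
+ # >>> H1, _ = np.histogramdd(pts, bins=(2, 2), range=[(0, 1), (0, 1)])
+ # >>> H2, _ = np.histogramdd((pts[:, 0], pts[:, 1]), bins=(2, 2),
+ # ... range=[(0, 1), (0, 1)])
+ # >>> bool(np.array_equal(H1, H2))
+ # True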
env-llmeval/lib/python3.10/site-packages/numpy/lib/index_tricks.py ADDED
@@ -0,0 +1,1046 @@
1
+ import functools
2
+ import sys
3
+ import math
4
+ import warnings
5
+
6
+ import numpy as np
7
+ from .._utils import set_module
8
+ import numpy.core.numeric as _nx
9
+ from numpy.core.numeric import ScalarType, array
10
+ from numpy.core.numerictypes import issubdtype
11
+
12
+ import numpy.matrixlib as matrixlib
13
+ from .function_base import diff
14
+ from numpy.core.multiarray import ravel_multi_index, unravel_index
15
+ from numpy.core import overrides, linspace
16
+ from numpy.lib.stride_tricks import as_strided
17
+
18
+
19
+ array_function_dispatch = functools.partial(
20
+ overrides.array_function_dispatch, module='numpy')
21
+
22
+
23
+ __all__ = [
24
+ 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
25
+ 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
26
+ 'diag_indices', 'diag_indices_from'
27
+ ]
28
+
29
+
30
+ def _ix__dispatcher(*args):
31
+ return args
32
+
33
+
34
+ @array_function_dispatch(_ix__dispatcher)
35
+ def ix_(*args):
36
+ """
37
+ Construct an open mesh from multiple sequences.
38
+
39
+ This function takes N 1-D sequences and returns N outputs with N
40
+ dimensions each, such that the shape is 1 in all but one dimension
41
+ and the dimension with the non-unit shape value cycles through all
42
+ N dimensions.
43
+
44
+ Using `ix_` one can quickly construct index arrays that will index
45
+ the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
46
+ ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
47
+
48
+ Parameters
49
+ ----------
50
+ args : 1-D sequences
51
+ Each sequence should be of integer or boolean type.
52
+ Boolean sequences will be interpreted as boolean masks for the
53
+ corresponding dimension (equivalent to passing in
54
+ ``np.nonzero(boolean_sequence)``).
55
+
56
+ Returns
57
+ -------
58
+ out : tuple of ndarrays
59
+ N arrays with N dimensions each, with N the number of input
60
+ sequences. Together these arrays form an open mesh.
61
+
62
+ See Also
63
+ --------
64
+ ogrid, mgrid, meshgrid
65
+
66
+ Examples
67
+ --------
68
+ >>> a = np.arange(10).reshape(2, 5)
69
+ >>> a
70
+ array([[0, 1, 2, 3, 4],
71
+ [5, 6, 7, 8, 9]])
72
+ >>> ixgrid = np.ix_([0, 1], [2, 4])
73
+ >>> ixgrid
74
+ (array([[0],
75
+ [1]]), array([[2, 4]]))
76
+ >>> ixgrid[0].shape, ixgrid[1].shape
77
+ ((2, 1), (1, 2))
78
+ >>> a[ixgrid]
79
+ array([[2, 4],
80
+ [7, 9]])
81
+
82
+ >>> ixgrid = np.ix_([True, True], [2, 4])
83
+ >>> a[ixgrid]
84
+ array([[2, 4],
85
+ [7, 9]])
86
+ >>> ixgrid = np.ix_([True, True], [False, False, True, False, True])
87
+ >>> a[ixgrid]
88
+ array([[2, 4],
89
+ [7, 9]])
90
+
91
+ """
92
+ out = []
93
+ nd = len(args)
94
+ for k, new in enumerate(args):
95
+ if not isinstance(new, _nx.ndarray):
96
+ new = np.asarray(new)
97
+ if new.size == 0:
98
+ # Explicitly type empty arrays to avoid float default
99
+ new = new.astype(_nx.intp)
100
+ if new.ndim != 1:
101
+ raise ValueError("Cross index must be 1 dimensional")
102
+ if issubdtype(new.dtype, _nx.bool_):
103
+ new, = new.nonzero()
104
+ new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1))
105
+ out.append(new)
106
+ return tuple(out)
107
+
108
+
109
+ class nd_grid:
110
+ """
111
+ Construct a multi-dimensional "meshgrid".
112
+
113
+ ``grid = nd_grid()`` creates an instance which will return a mesh-grid
114
+ when indexed. The dimension and number of the output arrays are equal
115
+ to the number of indexing dimensions. If the step length is not a
116
+ complex number, then the stop is not inclusive.
117
+
118
+ However, if the step length is a **complex number** (e.g. 5j), then the
119
+ integer part of its magnitude is interpreted as specifying the
120
+ number of points to create between the start and stop values, where
121
+ the stop value **is inclusive**.
122
+
123
+ If instantiated with an argument of ``sparse=True``, the mesh-grid is
124
+ open (or not fleshed out) so that only one-dimension of each returned
125
+ argument is greater than 1.
126
+
127
+ Parameters
128
+ ----------
129
+ sparse : bool, optional
130
+ Whether the grid is sparse or not. Default is False.
131
+
132
+ Notes
133
+ -----
134
+ Two instances of `nd_grid` are made available in the NumPy namespace,
135
+ `mgrid` and `ogrid`, approximately defined as::
136
+
137
+ mgrid = nd_grid(sparse=False)
138
+ ogrid = nd_grid(sparse=True)
139
+
140
+ Users should use these pre-defined instances instead of using `nd_grid`
141
+ directly.
142
+ """
143
+
144
+ def __init__(self, sparse=False):
145
+ self.sparse = sparse
146
+
147
+ def __getitem__(self, key):
148
+ try:
149
+ size = []
150
+ # Mimic the behavior of `np.arange` and use a data type
151
+ # which is at least as large as `np.int_`
152
+ num_list = [0]
153
+ for k in range(len(key)):
154
+ step = key[k].step
155
+ start = key[k].start
156
+ stop = key[k].stop
157
+ if start is None:
158
+ start = 0
159
+ if step is None:
160
+ step = 1
161
+ if isinstance(step, (_nx.complexfloating, complex)):
162
+ step = abs(step)
163
+ size.append(int(step))
164
+ else:
165
+ size.append(
166
+ int(math.ceil((stop - start) / (step*1.0))))
167
+ num_list += [start, stop, step]
168
+ typ = _nx.result_type(*num_list)
169
+ if self.sparse:
170
+ nn = [_nx.arange(_x, dtype=_t)
171
+ for _x, _t in zip(size, (typ,)*len(size))]
172
+ else:
173
+ nn = _nx.indices(size, typ)
174
+ for k, kk in enumerate(key):
175
+ step = kk.step
176
+ start = kk.start
177
+ if start is None:
178
+ start = 0
179
+ if step is None:
180
+ step = 1
181
+ if isinstance(step, (_nx.complexfloating, complex)):
182
+ step = int(abs(step))
183
+ if step != 1:
184
+ step = (kk.stop - start) / float(step - 1)
185
+ nn[k] = (nn[k]*step+start)
186
+ if self.sparse:
187
+ slobj = [_nx.newaxis]*len(size)
188
+ for k in range(len(size)):
189
+ slobj[k] = slice(None, None)
190
+ nn[k] = nn[k][tuple(slobj)]
191
+ slobj[k] = _nx.newaxis
192
+ return nn
193
+ except (IndexError, TypeError):
194
+ step = key.step
195
+ stop = key.stop
196
+ start = key.start
197
+ if start is None:
198
+ start = 0
199
+ if isinstance(step, (_nx.complexfloating, complex)):
200
+ # Prevent the (potential) creation of integer arrays
201
+ step_float = abs(step)
202
+ step = length = int(step_float)
203
+ if step != 1:
204
+ step = (key.stop-start)/float(step-1)
205
+ typ = _nx.result_type(start, stop, step_float)
206
+ return _nx.arange(0, length, 1, dtype=typ)*step + start
207
+ else:
208
+ return _nx.arange(start, stop, step)
209
+
210
+
211
+ class MGridClass(nd_grid):
212
+ """
213
+ An instance which returns a dense multi-dimensional "meshgrid".
214
+
215
+ An instance which returns a dense (or fleshed out) mesh-grid
216
+ when indexed, so that each returned argument has the same shape.
217
+ The dimensions and number of the output arrays are equal to the
218
+ number of indexing dimensions. If the step length is not a complex
219
+ number, then the stop is not inclusive.
220
+
221
+ However, if the step length is a **complex number** (e.g. 5j), then
222
+ the integer part of its magnitude is interpreted as specifying the
223
+ number of points to create between the start and stop values, where
224
+ the stop value **is inclusive**.
225
+
226
+ Returns
227
+ -------
228
+ mesh-grid `ndarrays` all of the same dimensions
229
+
230
+ See Also
231
+ --------
232
+ ogrid : like `mgrid` but returns open (not fleshed out) mesh grids
233
+ meshgrid: return coordinate matrices from coordinate vectors
234
+ r_ : array concatenator
235
+ :ref:`how-to-partition`
236
+
237
+ Examples
238
+ --------
239
+ >>> np.mgrid[0:5, 0:5]
240
+ array([[[0, 0, 0, 0, 0],
241
+ [1, 1, 1, 1, 1],
242
+ [2, 2, 2, 2, 2],
243
+ [3, 3, 3, 3, 3],
244
+ [4, 4, 4, 4, 4]],
245
+ [[0, 1, 2, 3, 4],
246
+ [0, 1, 2, 3, 4],
247
+ [0, 1, 2, 3, 4],
248
+ [0, 1, 2, 3, 4],
249
+ [0, 1, 2, 3, 4]]])
250
+ >>> np.mgrid[-1:1:5j]
251
+ array([-1. , -0.5, 0. , 0.5, 1. ])
252
+
253
+ """
254
+
255
+ def __init__(self):
256
+ super().__init__(sparse=False)
257
+
258
+
259
+ mgrid = MGridClass()
260
+
261
+
262
+ class OGridClass(nd_grid):
263
+ """
264
+ An instance which returns an open multi-dimensional "meshgrid".
265
+
266
+ An instance which returns an open (i.e. not fleshed out) mesh-grid
267
+ when indexed, so that only one dimension of each returned array is
268
+ greater than 1. The dimension and number of the output arrays are
269
+ equal to the number of indexing dimensions. If the step length is
270
+ not a complex number, then the stop is not inclusive.
271
+
272
+ However, if the step length is a **complex number** (e.g. 5j), then
273
+ the integer part of its magnitude is interpreted as specifying the
274
+ number of points to create between the start and stop values, where
275
+ the stop value **is inclusive**.
276
+
277
+ Returns
278
+ -------
279
+ mesh-grid
280
+ `ndarrays` with only one dimension not equal to 1
281
+
282
+ See Also
283
+ --------
284
+ mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
285
+ meshgrid: return coordinate matrices from coordinate vectors
286
+ r_ : array concatenator
287
+ :ref:`how-to-partition`
288
+
289
+ Examples
290
+ --------
291
+ >>> from numpy import ogrid
292
+ >>> ogrid[-1:1:5j]
293
+ array([-1. , -0.5, 0. , 0.5, 1. ])
294
+ >>> ogrid[0:5,0:5]
295
+ [array([[0],
296
+ [1],
297
+ [2],
298
+ [3],
299
+ [4]]), array([[0, 1, 2, 3, 4]])]
300
+
301
+ """
302
+
303
+ def __init__(self):
304
+ super().__init__(sparse=True)
305
+
306
+
307
+ ogrid = OGridClass()
308
+
309
+
310
+ class AxisConcatenator:
311
+ """
312
+ Translates slice objects to concatenation along an axis.
313
+
314
+ For detailed documentation on usage, see `r_`.
315
+ """
316
+ # allow ma.mr_ to override this
317
+ concatenate = staticmethod(_nx.concatenate)
318
+ makemat = staticmethod(matrixlib.matrix)
319
+
320
+ def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
321
+ self.axis = axis
322
+ self.matrix = matrix
323
+ self.trans1d = trans1d
324
+ self.ndmin = ndmin
325
+
326
+ def __getitem__(self, key):
327
+ # handle matrix builder syntax
328
+ if isinstance(key, str):
329
+ frame = sys._getframe().f_back
330
+ mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals)
331
+ return mymat
332
+
333
+ if not isinstance(key, tuple):
334
+ key = (key,)
335
+
336
+ # copy attributes, since they can be overridden in the first argument
337
+ trans1d = self.trans1d
338
+ ndmin = self.ndmin
339
+ matrix = self.matrix
340
+ axis = self.axis
341
+
342
+ objs = []
343
+ # dtypes or scalars for weak scalar handling in result_type
344
+ result_type_objs = []
345
+
346
+ for k, item in enumerate(key):
347
+ scalar = False
348
+ if isinstance(item, slice):
349
+ step = item.step
350
+ start = item.start
351
+ stop = item.stop
352
+ if start is None:
353
+ start = 0
354
+ if step is None:
355
+ step = 1
356
+ if isinstance(step, (_nx.complexfloating, complex)):
357
+ size = int(abs(step))
358
+ newobj = linspace(start, stop, num=size)
359
+ else:
360
+ newobj = _nx.arange(start, stop, step)
361
+ if ndmin > 1:
362
+ newobj = array(newobj, copy=False, ndmin=ndmin)
363
+ if trans1d != -1:
364
+ newobj = newobj.swapaxes(-1, trans1d)
365
+ elif isinstance(item, str):
366
+ if k != 0:
367
+ raise ValueError("special directives must be the "
368
+ "first entry.")
369
+ if item in ('r', 'c'):
370
+ matrix = True
371
+ col = (item == 'c')
372
+ continue
373
+ if ',' in item:
374
+ vec = item.split(',')
375
+ try:
376
+ axis, ndmin = [int(x) for x in vec[:2]]
377
+ if len(vec) == 3:
378
+ trans1d = int(vec[2])
379
+ continue
380
+ except Exception as e:
381
+ raise ValueError(
382
+ "unknown special directive {!r}".format(item)
383
+ ) from e
384
+ try:
385
+ axis = int(item)
386
+ continue
387
+ except (ValueError, TypeError) as e:
388
+ raise ValueError("unknown special directive") from e
389
+ elif type(item) in ScalarType:
390
+ scalar = True
391
+ newobj = item
392
+ else:
393
+ item_ndim = np.ndim(item)
394
+ newobj = array(item, copy=False, subok=True, ndmin=ndmin)
395
+ if trans1d != -1 and item_ndim < ndmin:
396
+ k2 = ndmin - item_ndim
397
+ k1 = trans1d
398
+ if k1 < 0:
399
+ k1 += k2 + 1
400
+ defaxes = list(range(ndmin))
401
+ axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
402
+ newobj = newobj.transpose(axes)
403
+
404
+ objs.append(newobj)
405
+ if scalar:
406
+ result_type_objs.append(item)
407
+ else:
408
+ result_type_objs.append(newobj.dtype)
409
+
410
+ # Ensure that scalars won't up-cast unless warranted, for 0, drops
411
+ # through to error in concatenate.
412
+ if len(result_type_objs) != 0:
413
+ final_dtype = _nx.result_type(*result_type_objs)
414
+ # concatenate could do cast, but that can be overriden:
415
+ objs = [array(obj, copy=False, subok=True,
416
+ ndmin=ndmin, dtype=final_dtype) for obj in objs]
417
+
418
+ res = self.concatenate(tuple(objs), axis=axis)
419
+
420
+ if matrix:
421
+ oldndim = res.ndim
422
+ res = self.makemat(res)
423
+ if oldndim == 1 and col:
424
+ res = res.T
425
+ return res
426
+
427
+ def __len__(self):
428
+ return 0
429
+
430
+ # separate classes are used here instead of just making r_ = concatentor(0),
431
+ # etc. because otherwise we couldn't get the doc string to come out right
432
+ # in help(r_)
433
+
434
+
435
+ class RClass(AxisConcatenator):
436
+ """
437
+ Translates slice objects to concatenation along the first axis.
438
+
439
+ This is a simple way to build up arrays quickly. There are two use cases.
440
+
441
+ 1. If the index expression contains comma separated arrays, then stack
442
+ them along their first axis.
443
+ 2. If the index expression contains slice notation or scalars then create
444
+ a 1-D array with a range indicated by the slice notation.
445
+
446
+ If slice notation is used, the syntax ``start:stop:step`` is equivalent
447
+ to ``np.arange(start, stop, step)`` inside of the brackets. However, if
448
+ ``step`` is an imaginary number (i.e. 100j) then its integer portion is
449
+ interpreted as a number-of-points desired and the start and stop are
450
+ inclusive. In other words ``start:stop:stepj`` is interpreted as
451
+ ``np.linspace(start, stop, step, endpoint=1)`` inside of the brackets.
452
+ After expansion of slice notation, all comma separated sequences are
453
+ concatenated together.
454
+
455
+ Optional character strings placed as the first element of the index
456
+ expression can be used to change the output. The strings 'r' or 'c' result
457
+ in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)
458
+ matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1
459
+ (column) matrix is produced. If the result is 2-D then both provide the
460
+ same matrix result.
461
+
462
+ A string integer specifies which axis to stack multiple comma separated
463
+ arrays along. A string of two comma-separated integers allows indication
464
+ of the minimum number of dimensions to force each entry into as the
465
+ second integer (the axis to concatenate along is still the first integer).
466
+
467
+ A string with three comma-separated integers allows specification of the
468
+ axis to concatenate along, the minimum number of dimensions to force the
469
+ entries to, and which axis should contain the start of the arrays which
470
+ are less than the specified number of dimensions. In other words the third
471
+ integer allows you to specify where the 1's should be placed in the shape
472
+ of the arrays that have their shapes upgraded. By default, they are placed
473
+ in the front of the shape tuple. The third argument allows you to specify
474
+ where the start of the array should be instead. Thus, a third argument of
475
+ '0' would place the 1's at the end of the array shape. Negative integers
476
+ specify where in the new shape tuple the last dimension of upgraded arrays
477
+ should be placed, so the default is '-1'.
478
+
479
+ Parameters
480
+ ----------
481
+ Not a function, so takes no parameters
482
+
483
+
484
+ Returns
485
+ -------
486
+ A concatenated ndarray or matrix.
487
+
488
+ See Also
489
+ --------
490
+ concatenate : Join a sequence of arrays along an existing axis.
491
+ c_ : Translates slice objects to concatenation along the second axis.
492
+
493
+ Examples
494
+ --------
495
+ >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
496
+ array([1, 2, 3, ..., 4, 5, 6])
497
+ >>> np.r_[-1:1:6j, [0]*3, 5, 6]
498
+ array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])
499
+
500
+ String integers specify the axis to concatenate along or the minimum
501
+ number of dimensions to force entries into.
502
+
503
+ >>> a = np.array([[0, 1, 2], [3, 4, 5]])
504
+ >>> np.r_['-1', a, a] # concatenate along last axis
505
+ array([[0, 1, 2, 0, 1, 2],
506
+ [3, 4, 5, 3, 4, 5]])
507
+ >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
508
+ array([[1, 2, 3],
509
+ [4, 5, 6]])
510
+
511
+ >>> np.r_['0,2,0', [1,2,3], [4,5,6]]
512
+ array([[1],
513
+ [2],
514
+ [3],
515
+ [4],
516
+ [5],
517
+ [6]])
518
+ >>> np.r_['1,2,0', [1,2,3], [4,5,6]]
519
+ array([[1, 4],
520
+ [2, 5],
521
+ [3, 6]])
522
+
523
+ Using 'r' or 'c' as a first string argument creates a matrix.
524
+
525
+ >>> np.r_['r',[1,2,3], [4,5,6]]
526
+ matrix([[1, 2, 3, 4, 5, 6]])
527
+
528
+ """
+
+    def __init__(self):
+        AxisConcatenator.__init__(self, 0)
+
+
+r_ = RClass()
+
+
+class CClass(AxisConcatenator):
+    """
+    Translates slice objects to concatenation along the second axis.
+
+    This is short-hand for ``np.r_['-1,2,0', index expression]``, which is
+    useful because of its common occurrence. In particular, arrays will be
+    stacked along their last axis after being upgraded to at least 2-D with
+    1's post-pended to the shape (column vectors made out of 1-D arrays).
+
+    See Also
+    --------
+    column_stack : Stack 1-D arrays as columns into a 2-D array.
+    r_ : For more detailed documentation.
+
+    Examples
+    --------
+    >>> np.c_[np.array([1,2,3]), np.array([4,5,6])]
+    array([[1, 4],
+           [2, 5],
+           [3, 6]])
+    >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
+    array([[1, 2, 3, ..., 4, 5, 6]])
+
+    """
+
+    def __init__(self):
+        AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
+
+
+c_ = CClass()
+
+
+@set_module('numpy')
+class ndenumerate:
+    """
+    Multidimensional index iterator.
+
+    Return an iterator yielding pairs of array coordinates and values.
+
+    Parameters
+    ----------
+    arr : ndarray
+        Input array.
+
+    See Also
+    --------
+    ndindex, flatiter
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, 4]])
+    >>> for index, x in np.ndenumerate(a):
+    ...     print(index, x)
+    (0, 0) 1
+    (0, 1) 2
+    (1, 0) 3
+    (1, 1) 4
+
+    """
+
+    def __init__(self, arr):
+        self.iter = np.asarray(arr).flat
+
+    def __next__(self):
+        """
+        Standard iterator method, returns the index tuple and array value.
+
+        Returns
+        -------
+        coords : tuple of ints
+            The indices of the current iteration.
+        val : scalar
+            The array element of the current iteration.
+
+        """
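+        # ``coords`` is read before ``next`` advances the flat iterator, so
+        # it is the index of the element that ``next`` is about to return.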
+        return self.iter.coords, next(self.iter)
+
+    def __iter__(self):
+        return self
+
+
+@set_module('numpy')
+class ndindex:
+    """
+    An N-dimensional iterator object to index arrays.
+
+    Given the shape of an array, an `ndindex` instance iterates over
+    the N-dimensional index of the array. At each iteration a tuple
+    of indices is returned; the last dimension is iterated over first.
+
+    Parameters
+    ----------
+    shape : ints, or a single tuple of ints
+        The size of each dimension of the array can be passed as
+        individual parameters or as the elements of a tuple.
+
+    See Also
+    --------
+    ndenumerate, flatiter
+
+    Examples
+    --------
+    Dimensions as individual arguments
+
+    >>> for index in np.ndindex(3, 2, 1):
+    ...     print(index)
+    (0, 0, 0)
+    (0, 1, 0)
+    (1, 0, 0)
+    (1, 1, 0)
+    (2, 0, 0)
+    (2, 1, 0)
+
+    Same dimensions - but in a tuple ``(3, 2, 1)``
+
+    >>> for index in np.ndindex((3, 2, 1)):
+    ...     print(index)
+    (0, 0, 0)
+    (0, 1, 0)
+    (1, 0, 0)
+    (1, 1, 0)
+    (2, 0, 0)
+    (2, 1, 0)
+
+    """
+
+    def __init__(self, *shape):
+        if len(shape) == 1 and isinstance(shape[0], tuple):
+            shape = shape[0]
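+        # Broadcasting trick: a zero-strided view over a single element
+        # aliases every position of ``shape`` to the same memory, so the
+        # nditer below can walk the full multi-index without ever
+        # allocating an array of that shape.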
+        x = as_strided(_nx.zeros(1), shape=shape,
+                       strides=_nx.zeros_like(shape))
+        self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],
+                              order='C')
+
+    def __iter__(self):
+        return self
+
+    def ndincr(self):
+        """
+        Increment the multi-dimensional index by one.
+
+        This method is for backward compatibility only: do not use.
+
+        .. deprecated:: 1.20.0
+            This method has been advised against since numpy 1.8.0, but only
+            started emitting DeprecationWarning as of this version.
+        """
+        # NumPy 1.20.0, 2020-09-08
+        warnings.warn(
+            "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead",
+            DeprecationWarning, stacklevel=2)
+        next(self)
+
+    def __next__(self):
+        """
+        Standard iterator method, updates the index and returns the index
+        tuple.
+
+        Returns
+        -------
+        val : tuple of ints
+            Returns a tuple containing the indices of the current
+            iteration.
+
+        """
+        next(self._it)
+        return self._it.multi_index
+
+
+# You can do all this with slice() plus a few special objects,
+# but there's a lot to remember. This version is simpler because
+# it uses the standard array indexing syntax.
+#
+# Written by Konrad Hinsen <[email protected]>
+# last revision: 1999-7-23
+#
+# Cosmetic changes by T. Oliphant 2001
+#
+#
+
+class IndexExpression:
+    """
+    A nicer way to build up index tuples for arrays.
+
+    .. note::
+        Use one of the two predefined instances `index_exp` or `s_`
+        rather than directly using `IndexExpression`.
+
+    For any index combination, including slicing and axis insertion,
+    ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
+    array `a`. However, ``np.index_exp[indices]`` can be used anywhere
+    in Python code and returns a tuple of slice objects that can be
+    used in the construction of complex index expressions.
+
+    Parameters
+    ----------
+    maketuple : bool
+        If True, always returns a tuple.
+
+    See Also
+    --------
+    index_exp : Predefined instance that always returns a tuple:
+                `index_exp = IndexExpression(maketuple=True)`.
+    s_ : Predefined instance without tuple conversion:
+         `s_ = IndexExpression(maketuple=False)`.
+
+    Notes
+    -----
+    You can do all this with `slice()` plus a few special objects,
+    but there's a lot to remember and this version is simpler because
+    it uses the standard array indexing syntax.
+
+    Examples
+    --------
+    >>> np.s_[2::2]
+    slice(2, None, 2)
+    >>> np.index_exp[2::2]
+    (slice(2, None, 2),)
+
+    >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
+    array([2, 4])
+
+    """
+
+    def __init__(self, maketuple):
+        self.maketuple = maketuple
+
+    def __getitem__(self, item):
+        if self.maketuple and not isinstance(item, tuple):
+            return (item,)
+        else:
+            return item
+
+
+index_exp = IndexExpression(maketuple=True)
+s_ = IndexExpression(maketuple=False)
+
+# End contribution from Konrad.
+
+
+# The following functions complement those in twodim_base, but are
+# applicable to N-dimensions.
+
+
+def _fill_diagonal_dispatcher(a, val, wrap=None):
+    return (a,)
+
+
+@array_function_dispatch(_fill_diagonal_dispatcher)
+def fill_diagonal(a, val, wrap=False):
+    """Fill the main diagonal of the given array of any dimensionality.
+
+    For an array `a` with ``a.ndim >= 2``, the diagonal is the list of
+    locations with indices ``a[i, ..., i]`` all identical. This function
+    modifies the input array in-place; it does not return a value.
+
+    Parameters
+    ----------
+    a : array, at least 2-D.
+        Array whose diagonal is to be filled; it gets modified in-place.
+
+    val : scalar or array_like
+        Value(s) to write on the diagonal. If `val` is scalar, the value is
+        written along the diagonal. If array-like, the flattened `val` is
+        written along the diagonal, repeating if necessary to fill all
+        diagonal entries.
+
+    wrap : bool
+        For tall matrices in NumPy versions up to 1.6.2, the
+        diagonal "wrapped" after N columns. You can have this behavior
+        with this option. This affects only tall matrices.
+
+    See also
+    --------
+    diag_indices, diag_indices_from
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    This functionality can be obtained via `diag_indices`, but internally
+    this version uses a much faster implementation that never constructs the
+    indices and uses simple slicing.
+
+    Examples
+    --------
+    >>> a = np.zeros((3, 3), int)
+    >>> np.fill_diagonal(a, 5)
+    >>> a
+    array([[5, 0, 0],
+           [0, 5, 0],
+           [0, 0, 5]])
+
+    The same function can operate on a 4-D array:
+
+    >>> a = np.zeros((3, 3, 3, 3), int)
+    >>> np.fill_diagonal(a, 4)
+
+    We only show a few blocks for clarity:
+
+    >>> a[0, 0]
+    array([[4, 0, 0],
+           [0, 0, 0],
+           [0, 0, 0]])
+    >>> a[1, 1]
+    array([[0, 0, 0],
+           [0, 4, 0],
+           [0, 0, 0]])
+    >>> a[2, 2]
+    array([[0, 0, 0],
+           [0, 0, 0],
+           [0, 0, 4]])
+
+    The wrap option affects only tall matrices:
+
+    >>> # tall matrices no wrap
+    >>> a = np.zeros((5, 3), int)
+    >>> np.fill_diagonal(a, 4)
+    >>> a
+    array([[4, 0, 0],
+           [0, 4, 0],
+           [0, 0, 4],
+           [0, 0, 0],
+           [0, 0, 0]])
+
+    >>> # tall matrices wrap
+    >>> a = np.zeros((5, 3), int)
+    >>> np.fill_diagonal(a, 4, wrap=True)
+    >>> a
+    array([[4, 0, 0],
+           [0, 4, 0],
+           [0, 0, 4],
+           [0, 0, 0],
+           [4, 0, 0]])
+
+    >>> # wide matrices
+    >>> a = np.zeros((3, 5), int)
+    >>> np.fill_diagonal(a, 4, wrap=True)
+    >>> a
+    array([[4, 0, 0, 0, 0],
+           [0, 4, 0, 0, 0],
+           [0, 0, 4, 0, 0]])
+
+    The anti-diagonal can be filled by reversing the order of elements
+    using either `numpy.flipud` or `numpy.fliplr`.
+
+    >>> a = np.zeros((3, 3), int)
+    >>> np.fill_diagonal(np.fliplr(a), [1,2,3])  # Horizontal flip
+    >>> a
+    array([[0, 0, 1],
+           [0, 2, 0],
+           [3, 0, 0]])
+    >>> np.fill_diagonal(np.flipud(a), [1,2,3])  # Vertical flip
+    >>> a
+    array([[0, 0, 3],
+           [0, 2, 0],
+           [1, 0, 0]])
+
+    Note that the order in which the diagonal is filled varies depending
+    on the flip function.
+    """
+    if a.ndim < 2:
+        raise ValueError("array must be at least 2-d")
+    end = None
+    if a.ndim == 2:
+        # Explicit, fast formula for the common case.  For 2-d arrays, we
+        # accept rectangular ones.
+        step = a.shape[1] + 1
+        # This is needed so that tall matrices don't have the diagonal wrap.
+        if not wrap:
+            end = a.shape[1] * a.shape[1]
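+            # The n diagonal entries of an (m, n) array with m >= n sit at
+            # flat positions 0, n+1, ..., (n-1)*(n+1), all smaller than n*n,
+            # so this ``end`` stops the write before it can wrap around.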
+    else:
+        # For more than d=2, the strided formula is only valid for arrays with
+        # all dimensions equal, so we check first.
+        if not np.all(diff(a.shape) == 0):
+            raise ValueError("All dimensions of input must be of equal length")
+        step = 1 + (np.cumprod(a.shape[:-1])).sum()
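+        # For shape (n, n, ..., n) this is 1 + n + n**2 + ... + n**(ndim-1),
+        # the flat-index distance between a[i, ..., i] and a[i+1, ..., i+1];
+        # e.g. for shape (3, 3, 3): step = 1 + 3 + 9 = 13.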
+
+    # Write the value out into the diagonal.
+    a.flat[:end:step] = val
+
+
+@set_module('numpy')
+def diag_indices(n, ndim=2):
+    """
+    Return the indices to access the main diagonal of an array.
+
+    This returns a tuple of indices that can be used to access the main
+    diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
+    (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
+    ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
+    for ``i = [0..n-1]``.
+
+    Parameters
+    ----------
+    n : int
+        The size, along each dimension, of the arrays for which the returned
+        indices can be used.
+
+    ndim : int, optional
+        The number of dimensions.
+
+    See Also
+    --------
+    diag_indices_from
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    Examples
+    --------
+    Create a set of indices to access the diagonal of a (4, 4) array:
+
+    >>> di = np.diag_indices(4)
+    >>> di
+    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+    >>> a[di] = 100
+    >>> a
+    array([[100,   1,   2,   3],
+           [  4, 100,   6,   7],
+           [  8,   9, 100,  11],
+           [ 12,  13,  14, 100]])
+
+    Now, we create indices to manipulate a 3-D array:
+
+    >>> d3 = np.diag_indices(2, 3)
+    >>> d3
+    (array([0, 1]), array([0, 1]), array([0, 1]))
+
+    And use it to set the diagonal of an array of zeros to 1:
+
+    >>> a = np.zeros((2, 2, 2), dtype=int)
+    >>> a[d3] = 1
+    >>> a
+    array([[[1, 0],
+            [0, 0]],
+           [[0, 0],
+            [0, 1]]])
+
+    """
+    idx = np.arange(n)
+    return (idx,) * ndim
+
+
+def _diag_indices_from(arr):
+    return (arr,)
+
+
+@array_function_dispatch(_diag_indices_from)
+def diag_indices_from(arr):
+    """
+    Return the indices to access the main diagonal of an n-dimensional array.
+
+    See `diag_indices` for full details.
+
+    Parameters
+    ----------
+    arr : array, at least 2-D
+
+    See Also
+    --------
+    diag_indices
+
+    Notes
+    -----
+    .. versionadded:: 1.4.0
+
+    Examples
+    --------
+
+    Create a 4 by 4 array.
+
+    >>> a = np.arange(16).reshape(4, 4)
+    >>> a
+    array([[ 0,  1,  2,  3],
+           [ 4,  5,  6,  7],
+           [ 8,  9, 10, 11],
+           [12, 13, 14, 15]])
+
+    Get the indices of the diagonal elements.
+
+    >>> di = np.diag_indices_from(a)
+    >>> di
+    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+
+    >>> a[di]
+    array([ 0,  5, 10, 15])
+
+    This is simply syntactic sugar for diag_indices.
+
+    >>> np.diag_indices(a.shape[0])
+    (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
+
+    """
+
+    if not arr.ndim >= 2:
+        raise ValueError("input array must be at least 2-d")
+    # For more than d=2, the strided formula is only valid for arrays with
+    # all dimensions equal, so we check first.
+    if not np.all(diff(arr.shape) == 0):
+        raise ValueError("All dimensions of input must be of equal length")
+
+    return diag_indices(arr.shape[0], arr.ndim)
env-llmeval/lib/python3.10/site-packages/numpy/lib/index_tricks.pyi ADDED
@@ -0,0 +1,162 @@
+from collections.abc import Sequence
+from typing import (
+    Any,
+    TypeVar,
+    Generic,
+    overload,
+    Literal,
+    SupportsIndex,
+)
+
+from numpy import (
+    # Circumvent a naming conflict with `AxisConcatenator.matrix`
+    matrix as _Matrix,
+    ndenumerate as ndenumerate,
+    ndindex as ndindex,
+    ndarray,
+    dtype,
+    integer,
+    str_,
+    bytes_,
+    bool_,
+    int_,
+    float_,
+    complex_,
+    intp,
+    _OrderCF,
+    _ModeKind,
+)
+from numpy._typing import (
+    # Arrays
+    ArrayLike,
+    _NestedSequence,
+    _FiniteNestedSequence,
+    NDArray,
+    _ArrayLikeInt,
+
+    # DTypes
+    DTypeLike,
+    _SupportsDType,
+
+    # Shapes
+    _ShapeLike,
+)
+
+from numpy.core.multiarray import (
+    unravel_index as unravel_index,
+    ravel_multi_index as ravel_multi_index,
+)
+
+_T = TypeVar("_T")
+_DType = TypeVar("_DType", bound=dtype[Any])
+_BoolType = TypeVar("_BoolType", Literal[True], Literal[False])
+_TupType = TypeVar("_TupType", bound=tuple[Any, ...])
+_ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
+
+__all__: list[str]
+
+@overload
+def ix_(*args: _FiniteNestedSequence[_SupportsDType[_DType]]) -> tuple[ndarray[Any, _DType], ...]: ...
+@overload
+def ix_(*args: str | _NestedSequence[str]) -> tuple[NDArray[str_], ...]: ...
+@overload
+def ix_(*args: bytes | _NestedSequence[bytes]) -> tuple[NDArray[bytes_], ...]: ...
+@overload
+def ix_(*args: bool | _NestedSequence[bool]) -> tuple[NDArray[bool_], ...]: ...
+@overload
+def ix_(*args: int | _NestedSequence[int]) -> tuple[NDArray[int_], ...]: ...
+@overload
+def ix_(*args: float | _NestedSequence[float]) -> tuple[NDArray[float_], ...]: ...
+@overload
+def ix_(*args: complex | _NestedSequence[complex]) -> tuple[NDArray[complex_], ...]: ...
+
+class nd_grid(Generic[_BoolType]):
+    sparse: _BoolType
+    def __init__(self, sparse: _BoolType = ...) -> None: ...
+    @overload
+    def __getitem__(
+        self: nd_grid[Literal[False]],
+        key: slice | Sequence[slice],
+    ) -> NDArray[Any]: ...
+    @overload
+    def __getitem__(
+        self: nd_grid[Literal[True]],
+        key: slice | Sequence[slice],
+    ) -> list[NDArray[Any]]: ...
+
+class MGridClass(nd_grid[Literal[False]]):
+    def __init__(self) -> None: ...
+
+mgrid: MGridClass
+
+class OGridClass(nd_grid[Literal[True]]):
+    def __init__(self) -> None: ...
+
+ogrid: OGridClass
+
+class AxisConcatenator:
+    axis: int
+    matrix: bool
+    ndmin: int
+    trans1d: int
+    def __init__(
+        self,
+        axis: int = ...,
+        matrix: bool = ...,
+        ndmin: int = ...,
+        trans1d: int = ...,
+    ) -> None: ...
+    @staticmethod
+    @overload
+    def concatenate(  # type: ignore[misc]
+        *a: ArrayLike, axis: SupportsIndex = ..., out: None = ...
+    ) -> NDArray[Any]: ...
+    @staticmethod
+    @overload
+    def concatenate(
+        *a: ArrayLike, axis: SupportsIndex = ..., out: _ArrayType = ...
+    ) -> _ArrayType: ...
+    @staticmethod
+    def makemat(
+        data: ArrayLike, dtype: DTypeLike = ..., copy: bool = ...
+    ) -> _Matrix[Any, Any]: ...
+
+    # TODO: Sort out this `__getitem__` method
+    def __getitem__(self, key: Any) -> Any: ...
+
+class RClass(AxisConcatenator):
+    axis: Literal[0]
+    matrix: Literal[False]
+    ndmin: Literal[1]
+    trans1d: Literal[-1]
+    def __init__(self) -> None: ...
+
+r_: RClass
+
+class CClass(AxisConcatenator):
+    axis: Literal[-1]
+    matrix: Literal[False]
+    ndmin: Literal[2]
+    trans1d: Literal[0]
+    def __init__(self) -> None: ...
+
+c_: CClass
+
+class IndexExpression(Generic[_BoolType]):
+    maketuple: _BoolType
+    def __init__(self, maketuple: _BoolType) -> None: ...
+    @overload
+    def __getitem__(self, item: _TupType) -> _TupType: ...  # type: ignore[misc]
+    @overload
+    def __getitem__(self: IndexExpression[Literal[True]], item: _T) -> tuple[_T]: ...
+    @overload
+    def __getitem__(self: IndexExpression[Literal[False]], item: _T) -> _T: ...
+
+index_exp: IndexExpression[Literal[True]]
+s_: IndexExpression[Literal[False]]
+
+def fill_diagonal(a: ndarray[Any, Any], val: Any, wrap: bool = ...) -> None: ...
+def diag_indices(n: int, ndim: int = ...) -> tuple[NDArray[int_], ...]: ...
+def diag_indices_from(arr: ArrayLike) -> tuple[NDArray[int_], ...]: ...
+
+# NOTE: see `numpy/__init__.pyi` for `ndenumerate` and `ndindex`
env-llmeval/lib/python3.10/site-packages/numpy/lib/mixins.py ADDED
@@ -0,0 +1,177 @@
+"""Mixin classes for custom array types that don't inherit from ndarray."""
+from numpy.core import umath as um
+
+
+__all__ = ['NDArrayOperatorsMixin']
+
+
+def _disables_array_ufunc(obj):
+    """True when __array_ufunc__ is set to None."""
+    try:
+        return obj.__array_ufunc__ is None
+    except AttributeError:
+        return False
+
+
+def _binary_method(ufunc, name):
+    """Implement a forward binary method with a ufunc, e.g., __add__."""
+    def func(self, other):
+        if _disables_array_ufunc(other):
+            return NotImplemented
+        return ufunc(self, other)
+    func.__name__ = '__{}__'.format(name)
+    return func
+
+
+def _reflected_binary_method(ufunc, name):
+    """Implement a reflected binary method with a ufunc, e.g., __radd__."""
+    def func(self, other):
+        if _disables_array_ufunc(other):
+            return NotImplemented
+        return ufunc(other, self)
+    func.__name__ = '__r{}__'.format(name)
+    return func
+
+
+def _inplace_binary_method(ufunc, name):
+    """Implement an in-place binary method with a ufunc, e.g., __iadd__."""
+    def func(self, other):
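+        # ``out=(self,)`` asks the ufunc to write the result back into
+        # ``self``; the subclass's ``__array_ufunc__`` decides how.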
+        return ufunc(self, other, out=(self,))
+    func.__name__ = '__i{}__'.format(name)
+    return func
+
+
+def _numeric_methods(ufunc, name):
+    """Implement forward, reflected and inplace binary methods with a ufunc."""
+    return (_binary_method(ufunc, name),
+            _reflected_binary_method(ufunc, name),
+            _inplace_binary_method(ufunc, name))
+
+
+def _unary_method(ufunc, name):
+    """Implement a unary special method with a ufunc."""
+    def func(self):
+        return ufunc(self)
+    func.__name__ = '__{}__'.format(name)
+    return func
+
+
+class NDArrayOperatorsMixin:
+    """Mixin defining all operator special methods using __array_ufunc__.
+
+    This class implements the special methods for almost all of Python's
+    builtin operators defined in the `operator` module, including comparisons
+    (``==``, ``>``, etc.) and arithmetic (``+``, ``*``, ``-``, etc.), by
+    deferring to the ``__array_ufunc__`` method, which subclasses must
+    implement.
+
+    It is useful for writing classes that do not inherit from `numpy.ndarray`,
+    but that should support arithmetic and numpy universal functions like
+    arrays as described in `A Mechanism for Overriding Ufuncs
+    <https://numpy.org/neps/nep-0013-ufunc-overrides.html>`_.
+
+    As a trivial example, consider this implementation of an ``ArrayLike``
+    class that simply wraps a NumPy array and ensures that the result of any
+    arithmetic operation is also an ``ArrayLike`` object::
+
+        class ArrayLike(np.lib.mixins.NDArrayOperatorsMixin):
+            def __init__(self, value):
+                self.value = np.asarray(value)
+
+            # One might also consider adding the built-in list type to this
+            # list, to support operations like np.add(array_like, list)
+            _HANDLED_TYPES = (np.ndarray, numbers.Number)
+
+            def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+                out = kwargs.get('out', ())
+                for x in inputs + out:
+                    # Only support operations with instances of _HANDLED_TYPES.
+                    # Use ArrayLike instead of type(self) for isinstance to
+                    # allow subclasses that don't override __array_ufunc__ to
+                    # handle ArrayLike objects.
+                    if not isinstance(x, self._HANDLED_TYPES + (ArrayLike,)):
+                        return NotImplemented
+
+                # Defer to the implementation of the ufunc on unwrapped values.
+                inputs = tuple(x.value if isinstance(x, ArrayLike) else x
+                               for x in inputs)
+                if out:
+                    kwargs['out'] = tuple(
+                        x.value if isinstance(x, ArrayLike) else x
+                        for x in out)
+                result = getattr(ufunc, method)(*inputs, **kwargs)
+
+                if type(result) is tuple:
+                    # multiple return values
+                    return tuple(type(self)(x) for x in result)
+                elif method == 'at':
+                    # no return value
+                    return None
+                else:
+                    # one return value
+                    return type(self)(result)
+
+            def __repr__(self):
+                return '%s(%r)' % (type(self).__name__, self.value)
+
+    In interactions between ``ArrayLike`` objects and numbers or numpy arrays,
+    the result is always another ``ArrayLike``:
+
+    >>> x = ArrayLike([1, 2, 3])
+    >>> x - 1
+    ArrayLike(array([0, 1, 2]))
+    >>> 1 - x
+    ArrayLike(array([ 0, -1, -2]))
+    >>> np.arange(3) - x
+    ArrayLike(array([-1, -1, -1]))
+    >>> x - np.arange(3)
+    ArrayLike(array([1, 1, 1]))
+
+    Note that unlike ``numpy.ndarray``, ``ArrayLike`` does not allow operations
+    with arbitrary, unrecognized types. This ensures that interactions with
+    ArrayLike preserve a well-defined casting hierarchy.
+
+    .. versionadded:: 1.13
+    """
+    __slots__ = ()
+    # Like np.ndarray, this mixin class implements "Option 1" from the ufunc
+    # overrides NEP.
+
+    # comparisons don't have reflected and in-place versions
+    __lt__ = _binary_method(um.less, 'lt')
+    __le__ = _binary_method(um.less_equal, 'le')
+    __eq__ = _binary_method(um.equal, 'eq')
+    __ne__ = _binary_method(um.not_equal, 'ne')
+    __gt__ = _binary_method(um.greater, 'gt')
+    __ge__ = _binary_method(um.greater_equal, 'ge')
+
+    # numeric methods
+    __add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')
+    __sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')
+    __mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
+    __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
+        um.matmul, 'matmul')
+    # Python 3 does not use __div__, __rdiv__, or __idiv__
+    __truediv__, __rtruediv__, __itruediv__ = _numeric_methods(
+        um.true_divide, 'truediv')
+    __floordiv__, __rfloordiv__, __ifloordiv__ = _numeric_methods(
+        um.floor_divide, 'floordiv')
+    __mod__, __rmod__, __imod__ = _numeric_methods(um.remainder, 'mod')
+    __divmod__ = _binary_method(um.divmod, 'divmod')
+    __rdivmod__ = _reflected_binary_method(um.divmod, 'divmod')
+    # __idivmod__ does not exist
+    # TODO: handle the optional third argument for __pow__?
+    __pow__, __rpow__, __ipow__ = _numeric_methods(um.power, 'pow')
+    __lshift__, __rlshift__, __ilshift__ = _numeric_methods(
+        um.left_shift, 'lshift')
+    __rshift__, __rrshift__, __irshift__ = _numeric_methods(
+        um.right_shift, 'rshift')
+    __and__, __rand__, __iand__ = _numeric_methods(um.bitwise_and, 'and')
+    __xor__, __rxor__, __ixor__ = _numeric_methods(um.bitwise_xor, 'xor')
+    __or__, __ror__, __ior__ = _numeric_methods(um.bitwise_or, 'or')
+
+    # unary methods
+    __neg__ = _unary_method(um.negative, 'neg')
+    __pos__ = _unary_method(um.positive, 'pos')
+    __abs__ = _unary_method(um.absolute, 'abs')
+    __invert__ = _unary_method(um.invert, 'invert')
env-llmeval/lib/python3.10/site-packages/numpy/lib/mixins.pyi ADDED
@@ -0,0 +1,74 @@
+from abc import ABCMeta, abstractmethod
+from typing import Literal as L, Any
+
+from numpy import ufunc
+
+__all__: list[str]
+
+# NOTE: `NDArrayOperatorsMixin` is not formally an abstract base class,
+# even though it's reliant on subclasses implementing `__array_ufunc__`
+
+# NOTE: The accepted input- and output-types of the various dunders are
+# completely dependent on how `__array_ufunc__` is implemented.
+# As such, little type safety can be provided here.
+
+class NDArrayOperatorsMixin(metaclass=ABCMeta):
+    @abstractmethod
+    def __array_ufunc__(
+        self,
+        ufunc: ufunc,
+        method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
+        *inputs: Any,
+        **kwargs: Any,
+    ) -> Any: ...
+    def __lt__(self, other: Any) -> Any: ...
+    def __le__(self, other: Any) -> Any: ...
+    def __eq__(self, other: Any) -> Any: ...
+    def __ne__(self, other: Any) -> Any: ...
+    def __gt__(self, other: Any) -> Any: ...
+    def __ge__(self, other: Any) -> Any: ...
+    def __add__(self, other: Any) -> Any: ...
+    def __radd__(self, other: Any) -> Any: ...
+    def __iadd__(self, other: Any) -> Any: ...
+    def __sub__(self, other: Any) -> Any: ...
+    def __rsub__(self, other: Any) -> Any: ...
+    def __isub__(self, other: Any) -> Any: ...
+    def __mul__(self, other: Any) -> Any: ...
+    def __rmul__(self, other: Any) -> Any: ...
+    def __imul__(self, other: Any) -> Any: ...
+    def __matmul__(self, other: Any) -> Any: ...
+    def __rmatmul__(self, other: Any) -> Any: ...
+    def __imatmul__(self, other: Any) -> Any: ...
+    def __truediv__(self, other: Any) -> Any: ...
+    def __rtruediv__(self, other: Any) -> Any: ...
+    def __itruediv__(self, other: Any) -> Any: ...
+    def __floordiv__(self, other: Any) -> Any: ...
+    def __rfloordiv__(self, other: Any) -> Any: ...
+    def __ifloordiv__(self, other: Any) -> Any: ...
+    def __mod__(self, other: Any) -> Any: ...
+    def __rmod__(self, other: Any) -> Any: ...
+    def __imod__(self, other: Any) -> Any: ...
+    def __divmod__(self, other: Any) -> Any: ...
+    def __rdivmod__(self, other: Any) -> Any: ...
+    def __pow__(self, other: Any) -> Any: ...
+    def __rpow__(self, other: Any) -> Any: ...
+    def __ipow__(self, other: Any) -> Any: ...
+    def __lshift__(self, other: Any) -> Any: ...
+    def __rlshift__(self, other: Any) -> Any: ...
+    def __ilshift__(self, other: Any) -> Any: ...
+    def __rshift__(self, other: Any) -> Any: ...
+    def __rrshift__(self, other: Any) -> Any: ...
+    def __irshift__(self, other: Any) -> Any: ...
+    def __and__(self, other: Any) -> Any: ...
+    def __rand__(self, other: Any) -> Any: ...
+    def __iand__(self, other: Any) -> Any: ...
+    def __xor__(self, other: Any) -> Any: ...
+    def __rxor__(self, other: Any) -> Any: ...
+    def __ixor__(self, other: Any) -> Any: ...
+    def __or__(self, other: Any) -> Any: ...
+    def __ror__(self, other: Any) -> Any: ...
+    def __ior__(self, other: Any) -> Any: ...
+    def __neg__(self) -> Any: ...
+    def __pos__(self) -> Any: ...
+    def __abs__(self) -> Any: ...
+    def __invert__(self) -> Any: ...
env-llmeval/lib/python3.10/site-packages/numpy/lib/nanfunctions.py ADDED
@@ -0,0 +1,1887 @@
+"""
+Functions that ignore NaN.
+
+Functions
+---------
+
+- `nanmin` -- minimum non-NaN value
+- `nanmax` -- maximum non-NaN value
+- `nanargmin` -- index of minimum non-NaN value
+- `nanargmax` -- index of maximum non-NaN value
+- `nansum` -- sum of non-NaN values
+- `nanprod` -- product of non-NaN values
+- `nancumsum` -- cumulative sum of non-NaN values
+- `nancumprod` -- cumulative product of non-NaN values
+- `nanmean` -- mean of non-NaN values
+- `nanvar` -- variance of non-NaN values
+- `nanstd` -- standard deviation of non-NaN values
+- `nanmedian` -- median of non-NaN values
+- `nanquantile` -- qth quantile of non-NaN values
+- `nanpercentile` -- qth percentile of non-NaN values
+
+"""
+import functools
+import warnings
+import numpy as np
+from numpy.lib import function_base
+from numpy.core import overrides
+
+
+array_function_dispatch = functools.partial(
+    overrides.array_function_dispatch, module='numpy')
+
+
+__all__ = [
+    'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
+    'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
+    'nancumsum', 'nancumprod', 'nanquantile'
+    ]
+
+
+def _nan_mask(a, out=None):
+    """
+    Parameters
+    ----------
+    a : array-like
+        Input array with at least 1 dimension.
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output and will prevent the allocation of a new array.
+
+    Returns
+    -------
+    y : bool ndarray or True
+        A bool array where ``np.nan`` positions are marked with ``False``
+        and other positions are marked with ``True``. If the type of ``a``
+        is such that it can't possibly contain ``np.nan``, returns ``True``.
+    """
+    # we assume that a is an array for this private function
+
+    if a.dtype.kind not in 'fc':
+        return True
+
+    y = np.isnan(a, out=out)
+    y = np.invert(y, out=y)
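+    # ``isnan`` marks NaNs with True; inverting in place (``out=y``) turns
+    # that into the "valid values" mask without a second allocation.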
+    return y
+
+def _replace_nan(a, val):
+    """
+    If `a` is of inexact type, make a copy of `a`, replace NaNs with
+    the `val` value, and return the copy together with a boolean mask
+    marking the locations where NaNs were present. If `a` is not of
+    inexact type, do nothing and return `a` together with a mask of None.
+
+    Note that scalars will end up as array scalars, which is important
+    for using the result as the value of the out argument in some
+    operations.
+
+    Parameters
+    ----------
+    a : array-like
+        Input array.
+    val : float
+        NaN values are set to val before doing the operation.
+
+    Returns
+    -------
+    y : ndarray
+        If `a` is of inexact type, return a copy of `a` with the NaNs
+        replaced by the fill value, otherwise return `a`.
+    mask: {bool, None}
+        If `a` is of inexact type, return a boolean mask marking locations of
+        NaNs, otherwise return None.
+
+    """
+    a = np.asanyarray(a)
+
+    if a.dtype == np.object_:
+        # object arrays do not support `isnan` (gh-9009), so make a guess
+        mask = np.not_equal(a, a, dtype=bool)
+    elif issubclass(a.dtype.type, np.inexact):
+        mask = np.isnan(a)
+    else:
+        mask = None
+
+    if mask is not None:
+        a = np.array(a, subok=True, copy=True)
+        np.copyto(a, val, where=mask)
+
+    return a, mask
+
+
+def _copyto(a, val, mask):
+    """
+    Replace values in `a` with NaN where `mask` is True. This differs from
+    copyto in that it will deal with the case where `a` is a numpy scalar.
+
+    Parameters
+    ----------
+    a : ndarray or numpy scalar
+        Array or numpy scalar some of whose values are to be replaced
+        by val.
+    val : numpy scalar
+        Value used as replacement.
+    mask : ndarray, scalar
+        Boolean array. Where True the corresponding element of `a` is
+        replaced by `val`. Broadcasts.
+
+    Returns
+    -------
+    res : ndarray, scalar
+        Array with elements replaced or scalar `val`.
+
+    """
+    if isinstance(a, np.ndarray):
+        np.copyto(a, val, where=mask, casting='unsafe')
+    else:
+        a = a.dtype.type(val)
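+        # numpy scalars are immutable, so rather than writing into ``a`` we
+        # replace it wholesale with ``val`` cast to the scalar's own dtype.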
+    return a
+
+
+def _remove_nan_1d(arr1d, overwrite_input=False):
+    """
+    Equivalent to ``arr1d[~np.isnan(arr1d)]``, but in a different order
+
+    Presumably faster as it incurs fewer copies
+
+    Parameters
+    ----------
+    arr1d : ndarray
+        Array to remove nans from
+    overwrite_input : bool
+        True if `arr1d` can be modified in place
+
+    Returns
+    -------
+    res : ndarray
+        Array with nan elements removed
+    overwrite_input : bool
+        True if `res` can be modified in place, given the constraint on the
+        input
+    """
+    if arr1d.dtype == object:
+        # object arrays do not support `isnan` (gh-9009), so make a guess
+        c = np.not_equal(arr1d, arr1d, dtype=bool)
+    else:
+        c = np.isnan(arr1d)
+
+    s = np.nonzero(c)[0]
+    if s.size == arr1d.size:
+        warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                      stacklevel=6)
+        return arr1d[:0], True
+    elif s.size == 0:
+        return arr1d, overwrite_input
+    else:
+        if not overwrite_input:
+            arr1d = arr1d.copy()
+        # select non-nans at end of array
+        enonan = arr1d[-s.size:][~c[-s.size:]]
+        # fill nans in beginning of array with non-nans of end
+        arr1d[s[:enonan.size]] = enonan
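+        # e.g. [1., nan, 3., nan, 5.]: the tail of length s.size == 2 is
+        # [nan, 5.]; its non-nan part [5.] fills the first nan slot, giving
+        # [1., 5., 3., nan, 5.], so the [:-2] slice below is [1., 5., 3.].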
+
+        return arr1d[:-s.size], True
+
+
+def _divide_by_count(a, b, out=None):
+    """
+    Compute a/b ignoring invalid results. If `a` is an array the division
+    is done in place. If `a` is a scalar, then its type is preserved in the
+    output. If out is None, then a is used instead so that the division
+    is in place. Note that this is only called with `a` an inexact type.
+
+    Parameters
+    ----------
+    a : {ndarray, numpy scalar}
+        Numerator. Expected to be of inexact type but not checked.
+    b : {ndarray, numpy scalar}
+        Denominator.
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary.
+
+    Returns
+    -------
+    ret : {ndarray, numpy scalar}
+        The return value is a/b. If `a` was an ndarray the division is done
+        in place. If `a` is a numpy scalar, the division preserves its type.
+
+    """
+    with np.errstate(invalid='ignore', divide='ignore'):
+        if isinstance(a, np.ndarray):
+            if out is None:
+                return np.divide(a, b, out=a, casting='unsafe')
+            else:
+                return np.divide(a, b, out=out, casting='unsafe')
+        else:
+            if out is None:
+                # Precaution against reduced object arrays
+                try:
+                    return a.dtype.type(a / b)
+                except AttributeError:
+                    return a / b
+            else:
+                # This is questionable, but currently a numpy scalar can
+                # be output to a zero dimensional array.
+                return np.divide(a, b, out=out, casting='unsafe')
+
+
+def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None,
+                       initial=None, where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_nanmin_dispatcher)
+def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+           where=np._NoValue):
+    """
+    Return minimum of an array or minimum along an axis, ignoring any NaNs.
+    When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
+    NaN is returned for that slice.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose minimum is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the minimum is computed. The default is to
+        compute the minimum of the flattened array.
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary. See
+        :ref:`ufuncs-output-type` for more details.
+
+        .. versionadded:: 1.8.0
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+
+        If the value is anything but the default, then
+        `keepdims` will be passed through to the `min` method
+        of sub-classes of `ndarray`. If the sub-class' method
+        does not implement `keepdims` any exceptions will be raised.
+
+        .. versionadded:: 1.8.0
+    initial : scalar, optional
+        The maximum value of an output element. Must be present to allow
+        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+    where : array_like of bool, optional
+        Elements to compare for the minimum. See `~numpy.ufunc.reduce`
+        for details.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    nanmin : ndarray
+        An array with the same shape as `a`, with the specified axis
+        removed. If `a` is a 0-d array, or if axis is None, an ndarray
+        scalar is returned. The same dtype as `a` is returned.
+
+    See Also
+    --------
+    nanmax :
+        The maximum value of an array along a given axis, ignoring any NaNs.
+    amin :
+        The minimum value of an array along a given axis, propagating any NaNs.
+    fmin :
+        Element-wise minimum of two arrays, ignoring any NaNs.
+    minimum :
+        Element-wise minimum of two arrays, propagating any NaNs.
+    isnan :
+        Shows which elements are Not a Number (NaN).
+    isfinite:
+        Shows which elements are neither NaN nor infinity.
+
+    amax, fmax, maximum
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+    Positive infinity is treated as a very large number and negative
+    infinity is treated as a very small (i.e. negative) number.
+
+    If the input has an integer type the function is equivalent to np.min.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, np.nan]])
+    >>> np.nanmin(a)
+    1.0
+    >>> np.nanmin(a, axis=0)
+    array([1., 2.])
+    >>> np.nanmin(a, axis=1)
+    array([1., 3.])
+
+    When positive infinity and negative infinity are present:
+
+    >>> np.nanmin([1, 2, np.nan, np.inf])
+    1.0
+    >>> np.nanmin([1, 2, np.nan, np.NINF])
+    -inf
+
+    """
+    kwargs = {}
+    if keepdims is not np._NoValue:
+        kwargs['keepdims'] = keepdims
+    if initial is not np._NoValue:
+        kwargs['initial'] = initial
+    if where is not np._NoValue:
+        kwargs['where'] = where
+
+    if type(a) is np.ndarray and a.dtype != np.object_:
+        # Fast, but not safe for subclasses of ndarray, or object arrays,
+        # which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
+        res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
+        if np.isnan(res).any():
+            warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                          stacklevel=2)
+    else:
+        # Slow, but safe for subclasses of ndarray
+        a, mask = _replace_nan(a, +np.inf)
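+        # +inf is the identity for the minimum, so the replaced NaNs can
+        # only be selected when an entire slice was NaN (handled below).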
+        res = np.amin(a, axis=axis, out=out, **kwargs)
+        if mask is None:
+            return res
+
+        # Check for all-NaN axis
+        kwargs.pop("initial", None)
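+        # ``np.all`` does not accept ``initial``, so drop it before reusing
+        # the reduction kwargs for the all-NaN check.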
+        mask = np.all(mask, axis=axis, **kwargs)
+        if np.any(mask):
+            res = _copyto(res, np.nan, mask)
+            warnings.warn("All-NaN axis encountered", RuntimeWarning,
+                          stacklevel=2)
+    return res
+
+
+def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None,
+                       initial=None, where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_nanmax_dispatcher)
+def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+           where=np._NoValue):
+    """
+    Return the maximum of an array or maximum along an axis, ignoring any
+    NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
+    raised and NaN is returned for that slice.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose maximum is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the maximum is computed. The default is to
+        compute the maximum of the flattened array.
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``; if provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary. See
+        :ref:`ufuncs-output-type` for more details.
+
+        .. versionadded:: 1.8.0
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+
+        If the value is anything but the default, then
+        `keepdims` will be passed through to the `max` method
+        of sub-classes of `ndarray`. If the sub-class' method
+        does not implement `keepdims` any exceptions will be raised.
+
+        .. versionadded:: 1.8.0
+    initial : scalar, optional
+        The minimum value of an output element. Must be present to allow
+        computation on an empty slice. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+    where : array_like of bool, optional
+        Elements to compare for the maximum. See `~numpy.ufunc.reduce`
+        for details.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    nanmax : ndarray
+        An array with the same shape as `a`, with the specified axis removed.
+        If `a` is a 0-d array, or if axis is None, an ndarray scalar is
+        returned. The same dtype as `a` is returned.
+
+    See Also
+    --------
+    nanmin :
+        The minimum value of an array along a given axis, ignoring any NaNs.
+    amax :
+        The maximum value of an array along a given axis, propagating any NaNs.
+    fmax :
+        Element-wise maximum of two arrays, ignoring any NaNs.
+    maximum :
+        Element-wise maximum of two arrays, propagating any NaNs.
+    isnan :
+        Shows which elements are Not a Number (NaN).
+    isfinite:
+        Shows which elements are neither NaN nor infinity.
+
+    amin, fmin, minimum
+
+    Notes
+    -----
+    NumPy uses the IEEE Standard for Binary Floating-Point Arithmetic
+    (IEEE 754). This means that Not a Number is not equivalent to infinity.
+    Positive infinity is treated as a very large number and negative
+    infinity is treated as a very small (i.e. negative) number.
+
+    If the input has an integer type the function is equivalent to np.max.
+
+    Examples
+    --------
+    >>> a = np.array([[1, 2], [3, np.nan]])
+    >>> np.nanmax(a)
+    3.0
+    >>> np.nanmax(a, axis=0)
+    array([3., 2.])
+    >>> np.nanmax(a, axis=1)
+    array([2., 3.])
+
+    When positive infinity and negative infinity are present:
+
+    >>> np.nanmax([1, 2, np.nan, np.NINF])
+    2.0
+    >>> np.nanmax([1, 2, np.nan, np.inf])
+    inf
+
+    """
+    kwargs = {}
+    if keepdims is not np._NoValue:
+        kwargs['keepdims'] = keepdims
+    if initial is not np._NoValue:
+        kwargs['initial'] = initial
+    if where is not np._NoValue:
+        kwargs['where'] = where
+
+    if type(a) is np.ndarray and a.dtype != np.object_:
+        # Fast, but not safe for subclasses of ndarray, or object arrays,
+        # which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
+        res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
+        if np.isnan(res).any():
+            warnings.warn("All-NaN slice encountered", RuntimeWarning,
+                          stacklevel=2)
+    else:
+        # Slow, but safe for subclasses of ndarray
+        a, mask = _replace_nan(a, -np.inf)
+        res = np.amax(a, axis=axis, out=out, **kwargs)
+        if mask is None:
+            return res
+
+        # Check for all-NaN axis
+        kwargs.pop("initial", None)
+        mask = np.all(mask, axis=axis, **kwargs)
+        if np.any(mask):
+            res = _copyto(res, np.nan, mask)
+            warnings.warn("All-NaN axis encountered", RuntimeWarning,
+                          stacklevel=2)
+    return res
+
+
+def _nanargmin_dispatcher(a, axis=None, out=None, *, keepdims=None):
+    return (a,)
+
+
+@array_function_dispatch(_nanargmin_dispatcher)
+def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue):
+    """
+    Return the indices of the minimum values in the specified axis ignoring
+    NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
+    cannot be trusted if a slice contains only NaNs and Infs.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : int, optional
+        Axis along which to operate. By default flattened input is used.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+
+        .. versionadded:: 1.22.0
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the array.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    index_array : ndarray
+        An array of indices or a single index value.
+
+    See Also
+    --------
+    argmin, nanargmax
+
+    Examples
+    --------
+    >>> a = np.array([[np.nan, 4], [2, 3]])
+    >>> np.argmin(a)
+    0
+    >>> np.nanargmin(a)
+    2
+    >>> np.nanargmin(a, axis=0)
+    array([1, 1])
+    >>> np.nanargmin(a, axis=1)
+    array([1, 0])
+
+    """
+    a, mask = _replace_nan(a, np.inf)
+    if mask is not None:
+        mask = np.all(mask, axis=axis)
+        if np.any(mask):
+            raise ValueError("All-NaN slice encountered")
+    res = np.argmin(a, axis=axis, out=out, keepdims=keepdims)
+    return res
+
+
+def _nanargmax_dispatcher(a, axis=None, out=None, *, keepdims=None):
+    return (a,)
+
+
+@array_function_dispatch(_nanargmax_dispatcher)
+def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue):
+    """
+    Return the indices of the maximum values in the specified axis ignoring
+    NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the
+    results cannot be trusted if a slice contains only NaNs and -Infs.
+
+    Parameters
+    ----------
+    a : array_like
+        Input data.
+    axis : int, optional
+        Axis along which to operate. By default flattened input is used.
+    out : array, optional
+        If provided, the result will be inserted into this array. It should
+        be of the appropriate shape and dtype.
+
+        .. versionadded:: 1.22.0
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the array.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    index_array : ndarray
+        An array of indices or a single index value.
+
+    See Also
+    --------
+    argmax, nanargmin
+
+    Examples
+    --------
+    >>> a = np.array([[np.nan, 4], [2, 3]])
+    >>> np.argmax(a)
+    0
+    >>> np.nanargmax(a)
+    1
+    >>> np.nanargmax(a, axis=0)
+    array([1, 0])
+    >>> np.nanargmax(a, axis=1)
+    array([1, 1])
+
+    """
+    a, mask = _replace_nan(a, -np.inf)
+    if mask is not None:
+        mask = np.all(mask, axis=axis)
+        if np.any(mask):
+            raise ValueError("All-NaN slice encountered")
+    res = np.argmax(a, axis=axis, out=out, keepdims=keepdims)
+    return res
+
+
+def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+                       initial=None, where=None):
+    return (a, out)
+
+
+@array_function_dispatch(_nansum_dispatcher)
+def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+           initial=np._NoValue, where=np._NoValue):
+    """
+    Return the sum of array elements over a given axis treating Not a
+    Numbers (NaNs) as zero.
+
+    In NumPy versions <= 1.9.0 NaN is returned for slices that are all-NaN or
+    empty. In later versions zero is returned.
+
+    Parameters
+    ----------
+    a : array_like
+        Array containing numbers whose sum is desired. If `a` is not an
+        array, a conversion is attempted.
+    axis : {int, tuple of int, None}, optional
+        Axis or axes along which the sum is computed. The default is to
+        compute the sum of the flattened array.
+    dtype : data-type, optional
+        The type of the returned array and of the accumulator in which the
+        elements are summed. By default, the dtype of `a` is used. An
+        exception is when `a` has an integer type with less precision than
+        the platform (u)intp. In that case, the default will be either
+        (u)int32 or (u)int64 depending on whether the platform is 32 or 64
+        bits. For inexact inputs, dtype must be inexact.
+
+        .. versionadded:: 1.8.0
+    out : ndarray, optional
+        Alternate output array in which to place the result. The default
+        is ``None``. If provided, it must have the same shape as the
+        expected output, but the type will be cast if necessary. See
+        :ref:`ufuncs-output-type` for more details. The casting of NaN to
+        integer can yield unexpected results.
+
+        .. versionadded:: 1.8.0
+    keepdims : bool, optional
+        If this is set to True, the axes which are reduced are left
+        in the result as dimensions with size one. With this option,
+        the result will broadcast correctly against the original `a`.
+
+        If the value is anything but the default, then
+        `keepdims` will be passed through to the `mean` or `sum` methods
+        of sub-classes of `ndarray`. If the sub-class' method
+        does not implement `keepdims` any exceptions will be raised.
+
+        .. versionadded:: 1.8.0
+    initial : scalar, optional
+        Starting value for the sum. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+    where : array_like of bool, optional
+        Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
+
+        .. versionadded:: 1.22.0
+
+    Returns
+    -------
+    nansum : ndarray
+        A new array holding the result is returned unless `out` is
+        specified, in which case it is returned. The result has the same
+        size as `a`, and the same shape as `a` if `axis` is not None
+        or `a` is a 1-d array.
+
+    See Also
+    --------
+    numpy.sum : Sum across array propagating NaNs.
+    isnan : Show which elements are NaN.
+    isfinite : Show which elements are not NaN or +/-inf.
+
+    Notes
+    -----
+    If both positive and negative infinity are present, the sum will be Not
+    A Number (NaN).
+
+    Examples
+    --------
+    >>> np.nansum(1)
+    1
+    >>> np.nansum([1])
+    1
+    >>> np.nansum([1, np.nan])
+    1.0
+    >>> a = np.array([[1, 1], [1, np.nan]])
+    >>> np.nansum(a)
+    3.0
+    >>> np.nansum(a, axis=0)
+    array([2., 1.])
+    >>> np.nansum([1, np.nan, np.inf])
+    inf
+    >>> np.nansum([1, np.nan, np.NINF])
+    -inf
+    >>> from numpy.testing import suppress_warnings
+    >>> with suppress_warnings() as sup:
+    ...     sup.filter(RuntimeWarning)
+    ...     np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
+    nan
+
+    """
+    a, mask = _replace_nan(a, 0)
723
+ return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
724
+ initial=initial, where=where)
725
+
726
+
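An illustrative sketch of the behaviours documented above (not part of the committed file): only NaNs are masked to zero, so infinities still propagate, and `initial` is forwarded to ufunc.reduce.

import numpy as np

print(np.nansum([1.0, np.nan, np.inf]))       # inf -> inf is not masked
print(np.nansum([1.0, np.nan], initial=10))   # 11.0 -> NaN becomes 0, plus initial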
727
+ def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
728
+ initial=None, where=None):
729
+ return (a, out)
730
+
731
+
732
+ @array_function_dispatch(_nanprod_dispatcher)
733
+ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
734
+ initial=np._NoValue, where=np._NoValue):
735
+ """
736
+ Return the product of array elements over a given axis, treating Not a
+ Number (NaN) values as ones.
738
+
739
+ One is returned for slices that are all-NaN or empty.
740
+
741
+ .. versionadded:: 1.10.0
742
+
743
+ Parameters
744
+ ----------
745
+ a : array_like
746
+ Array containing numbers whose product is desired. If `a` is not an
747
+ array, a conversion is attempted.
748
+ axis : {int, tuple of int, None}, optional
749
+ Axis or axes along which the product is computed. The default is to compute
750
+ the product of the flattened array.
751
+ dtype : data-type, optional
752
+ The type of the returned array and of the accumulator in which the
753
+ elements are summed. By default, the dtype of `a` is used. An
754
+ exception is when `a` has an integer type with less precision than
755
+ the platform (u)intp. In that case, the default will be either
756
+ (u)int32 or (u)int64 depending on whether the platform is 32 or 64
757
+ bits. For inexact inputs, dtype must be inexact.
758
+ out : ndarray, optional
759
+ Alternate output array in which to place the result. The default
760
+ is ``None``. If provided, it must have the same shape as the
761
+ expected output, but the type will be cast if necessary. See
762
+ :ref:`ufuncs-output-type` for more details. The casting of NaN to integer
763
+ can yield unexpected results.
764
+ keepdims : bool, optional
+ If True, the axes which are reduced are left in the result as
+ dimensions with size one. With this option, the result will
+ broadcast correctly against the original `a`.
768
+ initial : scalar, optional
769
+ The starting value for this product. See `~numpy.ufunc.reduce`
770
+ for details.
771
+
772
+ .. versionadded:: 1.22.0
773
+ where : array_like of bool, optional
774
+ Elements to include in the product. See `~numpy.ufunc.reduce`
775
+ for details.
776
+
777
+ .. versionadded:: 1.22.0
778
+
779
+ Returns
780
+ -------
781
+ nanprod : ndarray
782
+ A new array holding the result is returned unless `out` is
783
+ specified, in which case it is returned.
784
+
785
+ See Also
786
+ --------
787
+ numpy.prod : Product across array propagating NaNs.
788
+ isnan : Show which elements are NaN.
789
+
790
+ Examples
791
+ --------
792
+ >>> np.nanprod(1)
793
+ 1
794
+ >>> np.nanprod([1])
795
+ 1
796
+ >>> np.nanprod([1, np.nan])
797
+ 1.0
798
+ >>> a = np.array([[1, 2], [3, np.nan]])
799
+ >>> np.nanprod(a)
800
+ 6.0
801
+ >>> np.nanprod(a, axis=0)
802
+ array([3., 2.])
803
+
804
+ """
805
+ a, mask = _replace_nan(a, 1)
806
+ return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
807
+ initial=initial, where=where)
808
+
809
+
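A small sketch (not part of the committed file) of the identity behaviour: NaNs act as ones, and an all-NaN slice returns the multiplicative identity rather than raising.

import numpy as np

a = np.array([[1.0, 2.0], [3.0, np.nan]])
print(np.nanprod(a, axis=1))   # [2. 3.] -> the NaN is treated as 1
print(np.nanprod([np.nan]))    # 1.0     -> all-NaN slice yields the identity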
810
+ def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None):
811
+ return (a, out)
812
+
813
+
814
+ @array_function_dispatch(_nancumsum_dispatcher)
815
+ def nancumsum(a, axis=None, dtype=None, out=None):
816
+ """
817
+ Return the cumulative sum of array elements over a given axis, treating
+ Not a Number (NaN) values as zero. The cumulative sum does not change
+ when NaNs are encountered and leading NaNs are replaced by zeros.
820
+
821
+ Zeros are returned for slices that are all-NaN or empty.
822
+
823
+ .. versionadded:: 1.12.0
824
+
825
+ Parameters
826
+ ----------
827
+ a : array_like
828
+ Input array.
829
+ axis : int, optional
830
+ Axis along which the cumulative sum is computed. The default
831
+ (None) is to compute the cumsum over the flattened array.
832
+ dtype : dtype, optional
833
+ Type of the returned array and of the accumulator in which the
834
+ elements are summed. If `dtype` is not specified, it defaults
835
+ to the dtype of `a`, unless `a` has an integer dtype with a
836
+ precision less than that of the default platform integer. In
837
+ that case, the default platform integer is used.
838
+ out : ndarray, optional
839
+ Alternative output array in which to place the result. It must
840
+ have the same shape and buffer length as the expected output
841
+ but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
842
+ more details.
843
+
844
+ Returns
845
+ -------
846
+ nancumsum : ndarray
+ A new array holding the result is returned unless `out` is
+ specified, in which case it is returned. The result has the same
+ size as `a`, and the same shape as `a` if `axis` is not None
+ or `a` is a 1-d array.
851
+
852
+ See Also
853
+ --------
854
+ numpy.cumsum : Cumulative sum across array propagating NaNs.
855
+ isnan : Show which elements are NaN.
856
+
857
+ Examples
858
+ --------
859
+ >>> np.nancumsum(1)
860
+ array([1])
861
+ >>> np.nancumsum([1])
862
+ array([1])
863
+ >>> np.nancumsum([1, np.nan])
864
+ array([1., 1.])
865
+ >>> a = np.array([[1, 2], [3, np.nan]])
866
+ >>> np.nancumsum(a)
867
+ array([1., 3., 6., 6.])
868
+ >>> np.nancumsum(a, axis=0)
869
+ array([[1., 2.],
870
+ [4., 2.]])
871
+ >>> np.nancumsum(a, axis=1)
872
+ array([[1., 3.],
873
+ [3., 3.]])
874
+
875
+ """
876
+ a, mask = _replace_nan(a, 0)
877
+ return np.cumsum(a, axis=axis, dtype=dtype, out=out)
878
+
879
+
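Because the implementation simply zero-fills NaNs and defers to np.cumsum, running totals hold steady across NaNs; a brief sketch (not part of the committed file):

import numpy as np

x = np.array([np.nan, 1.0, np.nan, 2.0])
print(np.nancumsum(x))   # [0. 1. 1. 3.] -> leading NaN becomes 0, totals hold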
880
+ def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None):
881
+ return (a, out)
882
+
883
+
884
+ @array_function_dispatch(_nancumprod_dispatcher)
885
+ def nancumprod(a, axis=None, dtype=None, out=None):
886
+ """
887
+ Return the cumulative product of array elements over a given axis,
+ treating Not a Number (NaN) values as one. The cumulative product does
+ not change when NaNs are encountered and leading NaNs are replaced by ones.
890
+
891
+ Ones are returned for slices that are all-NaN or empty.
892
+
893
+ .. versionadded:: 1.12.0
894
+
895
+ Parameters
896
+ ----------
897
+ a : array_like
898
+ Input array.
899
+ axis : int, optional
900
+ Axis along which the cumulative product is computed. By default
901
+ the input is flattened.
902
+ dtype : dtype, optional
903
+ Type of the returned array, as well as of the accumulator in which
904
+ the elements are multiplied. If *dtype* is not specified, it
905
+ defaults to the dtype of `a`, unless `a` has an integer dtype with
906
+ a precision less than that of the default platform integer. In
907
+ that case, the default platform integer is used instead.
908
+ out : ndarray, optional
909
+ Alternative output array in which to place the result. It must
910
+ have the same shape and buffer length as the expected output
911
+ but the type of the resulting values will be cast if necessary.
912
+
913
+ Returns
914
+ -------
915
+ nancumprod : ndarray
916
+ A new array holding the result is returned unless `out` is
917
+ specified, in which case it is returned.
918
+
919
+ See Also
920
+ --------
921
+ numpy.cumprod : Cumulative product across array propagating NaNs.
922
+ isnan : Show which elements are NaN.
923
+
924
+ Examples
925
+ --------
926
+ >>> np.nancumprod(1)
927
+ array([1])
928
+ >>> np.nancumprod([1])
929
+ array([1])
930
+ >>> np.nancumprod([1, np.nan])
931
+ array([1., 1.])
932
+ >>> a = np.array([[1, 2], [3, np.nan]])
933
+ >>> np.nancumprod(a)
934
+ array([1., 2., 6., 6.])
935
+ >>> np.nancumprod(a, axis=0)
936
+ array([[1., 2.],
937
+ [3., 2.]])
938
+ >>> np.nancumprod(a, axis=1)
939
+ array([[1., 2.],
940
+ [3., 3.]])
941
+
942
+ """
943
+ a, mask = _replace_nan(a, 1)
944
+ return np.cumprod(a, axis=axis, dtype=dtype, out=out)
945
+
946
+
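The cumulative product analogue, one-filling instead of zero-filling; a brief sketch (not part of the committed file):

import numpy as np

x = np.array([2.0, np.nan, 3.0])
print(np.nancumprod(x))   # [2. 2. 6.] -> the NaN acts as a multiplicative 1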
947
+ def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
948
+ *, where=None):
949
+ return (a, out)
950
+
951
+
952
+ @array_function_dispatch(_nanmean_dispatcher)
953
+ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
954
+ *, where=np._NoValue):
955
+ """
956
+ Compute the arithmetic mean along the specified axis, ignoring NaNs.
957
+
958
+ Returns the average of the array elements. The average is taken over
959
+ the flattened array by default, otherwise over the specified axis.
960
+ `float64` intermediate and return values are used for integer inputs.
961
+
962
+ For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised.
963
+
964
+ .. versionadded:: 1.8.0
965
+
966
+ Parameters
967
+ ----------
968
+ a : array_like
969
+ Array containing numbers whose mean is desired. If `a` is not an
970
+ array, a conversion is attempted.
971
+ axis : {int, tuple of int, None}, optional
972
+ Axis or axes along which the means are computed. The default is to compute
973
+ the mean of the flattened array.
974
+ dtype : data-type, optional
975
+ Type to use in computing the mean. For integer inputs, the default
976
+ is `float64`; for inexact inputs, it is the same as the input
977
+ dtype.
978
+ out : ndarray, optional
979
+ Alternate output array in which to place the result. The default
980
+ is ``None``; if provided, it must have the same shape as the
981
+ expected output, but the type will be cast if necessary. See
982
+ :ref:`ufuncs-output-type` for more details.
983
+ keepdims : bool, optional
984
+ If this is set to True, the axes which are reduced are left
985
+ in the result as dimensions with size one. With this option,
986
+ the result will broadcast correctly against the original `a`.
987
+
988
+ If the value is anything but the default, then `keepdims` will
+ be passed through to the `mean` or `sum` methods of sub-classes
+ of `ndarray`. If the sub-class's method does not implement
+ `keepdims`, an exception will be raised.
992
+ where : array_like of bool, optional
993
+ Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
994
+
995
+ .. versionadded:: 1.22.0
996
+
997
+ Returns
998
+ -------
999
+ m : ndarray, see dtype parameter above
+ If `out=None`, returns a new array containing the mean values,
+ otherwise a reference to the output array is returned. NaN is
+ returned for slices that contain only NaNs.
1003
+
1004
+ See Also
1005
+ --------
1006
+ average : Weighted average
1007
+ mean : Arithmetic mean taken while not ignoring NaNs
1008
+ var, nanvar
1009
+
1010
+ Notes
1011
+ -----
1012
+ The arithmetic mean is the sum of the non-NaN elements along the axis
1013
+ divided by the number of non-NaN elements.
1014
+
1015
+ Note that for floating-point input, the mean is computed using the same
1016
+ precision the input has. Depending on the input data, this can cause
1017
+ the results to be inaccurate, especially for `float32`. Specifying a
1018
+ higher-precision accumulator using the `dtype` keyword can alleviate
1019
+ this issue.
1020
+
1021
+ Examples
1022
+ --------
1023
+ >>> a = np.array([[1, np.nan], [3, 4]])
1024
+ >>> np.nanmean(a)
1025
+ 2.6666666666666665
1026
+ >>> np.nanmean(a, axis=0)
1027
+ array([2., 4.])
1028
+ >>> np.nanmean(a, axis=1)
1029
+ array([1., 3.5]) # may vary
1030
+
1031
+ """
1032
+ arr, mask = _replace_nan(a, 0)
1033
+ if mask is None:
1034
+ return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
1035
+ where=where)
1036
+
1037
+ if dtype is not None:
1038
+ dtype = np.dtype(dtype)
1039
+ if dtype is not None and not issubclass(dtype.type, np.inexact):
1040
+ raise TypeError("If a is inexact, then dtype must be inexact")
1041
+ if out is not None and not issubclass(out.dtype.type, np.inexact):
1042
+ raise TypeError("If a is inexact, then out must be inexact")
1043
+
1044
+ cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims,
1045
+ where=where)
1046
+ tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
1047
+ where=where)
1048
+ avg = _divide_by_count(tot, cnt, out=out)
1049
+
1050
+ isbad = (cnt == 0)
1051
+ if isbad.any():
1052
+ warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
1053
+ # NaN is the only possible bad value, so no further
1054
+ # action is needed to handle bad results.
1055
+ return avg
1056
+
1057
+
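The body above is essentially a masked sum divided by a masked count; this sketch (not part of the committed file) reproduces it with public functions:

import numpy as np

a = np.array([[1.0, np.nan], [3.0, 4.0]])
cnt = np.sum(~np.isnan(a), axis=0)   # per-column count of non-NaN elements
print(np.nansum(a, axis=0) / cnt)    # [2. 4.]
print(np.nanmean(a, axis=0))         # [2. 4.] -> same result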
1058
+ def _nanmedian1d(arr1d, overwrite_input=False):
1059
+ """
1060
+ Private function for rank 1 arrays. Compute the median ignoring NaNs.
1061
+ See nanmedian for parameter usage
1062
+ """
1063
+ arr1d_parsed, overwrite_input = _remove_nan_1d(
1064
+ arr1d, overwrite_input=overwrite_input,
1065
+ )
1066
+
1067
+ if arr1d_parsed.size == 0:
1068
+ # Ensure that a nan-esque scalar of the appropriate type (and unit)
1069
+ # is returned for `timedelta64` and `complexfloating`
1070
+ return arr1d[-1]
1071
+
1072
+ return np.median(arr1d_parsed, overwrite_input=overwrite_input)
1073
+
1074
+
1075
+ def _nanmedian(a, axis=None, out=None, overwrite_input=False):
1076
+ """
1077
+ Private function that doesn't support extended axis or keepdims.
1078
+ These methods are extended to this function using _ureduce
1079
+ See nanmedian for parameter usage
1080
+
1081
+ """
1082
+ if axis is None or a.ndim == 1:
1083
+ part = a.ravel()
1084
+ if out is None:
1085
+ return _nanmedian1d(part, overwrite_input)
1086
+ else:
1087
+ out[...] = _nanmedian1d(part, overwrite_input)
1088
+ return out
1089
+ else:
1090
+ # for small medians use sort + indexing which is still faster than
1091
+ # apply_along_axis
1092
+ # benchmarked with shuffled (50, 50, x) containing a few NaN
1093
+ if a.shape[axis] < 600:
1094
+ return _nanmedian_small(a, axis, out, overwrite_input)
1095
+ result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
1096
+ if out is not None:
1097
+ out[...] = result
1098
+ return result
1099
+
1100
+
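The 600-element cutoff above routes short reduction axes to the masked-array path defined next; this sketch (not part of the committed file) shows that path agrees with the public function:

import numpy as np

a = np.array([[1.0, np.nan, 3.0], [np.nan, 2.0, 4.0]])
masked = np.ma.masked_array(a, np.isnan(a))          # what _nanmedian_small builds
print(np.ma.median(masked, axis=0).filled(np.nan))   # [1.  2.  3.5]
print(np.nanmedian(a, axis=0))                       # [1.  2.  3.5] -> identical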
1101
+ def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
1102
+ """
1103
+ sort + indexing median, faster for small medians along multiple
1104
+ dimensions due to the high overhead of apply_along_axis
1105
+
1106
+ see nanmedian for parameter usage
1107
+ """
1108
+ a = np.ma.masked_array(a, np.isnan(a))
1109
+ m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
1110
+ for i in range(np.count_nonzero(m.mask.ravel())):
1111
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
1112
+ stacklevel=5)
1113
+
1114
+ fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan
1115
+ if out is not None:
1116
+ out[...] = m.filled(fill_value)
1117
+ return out
1118
+ return m.filled(fill_value)
1119
+
1120
+
1121
+ def _nanmedian_dispatcher(
1122
+ a, axis=None, out=None, overwrite_input=None, keepdims=None):
1123
+ return (a, out)
1124
+
1125
+
1126
+ @array_function_dispatch(_nanmedian_dispatcher)
1127
+ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
1128
+ """
1129
+ Compute the median along the specified axis, while ignoring NaNs.
1130
+
1131
+ Returns the median of the array elements.
1132
+
1133
+ .. versionadded:: 1.9.0
1134
+
1135
+ Parameters
1136
+ ----------
1137
+ a : array_like
1138
+ Input array or object that can be converted to an array.
1139
+ axis : {int, sequence of int, None}, optional
1140
+ Axis or axes along which the medians are computed. The default
1141
+ is to compute the median along a flattened version of the array.
1142
+ A sequence of axes is supported since version 1.9.0.
1143
+ out : ndarray, optional
1144
+ Alternative output array in which to place the result. It must
1145
+ have the same shape and buffer length as the expected output,
1146
+ but the type (of the output) will be cast if necessary.
1147
+ overwrite_input : bool, optional
1148
+ If True, then allow use of memory of input array `a` for
1149
+ calculations. The input array will be modified by the call to
1150
+ `median`. This will save memory when you do not need to preserve
1151
+ the contents of the input array. Treat the input as undefined,
1152
+ but it will probably be fully or partially sorted. Default is
1153
+ False. If `overwrite_input` is ``True`` and `a` is not already an
1154
+ `ndarray`, an error will be raised.
1155
+ keepdims : bool, optional
1156
+ If this is set to True, the axes which are reduced are left
1157
+ in the result as dimensions with size one. With this option,
1158
+ the result will broadcast correctly against the original `a`.
1159
+
1160
+ If this is anything but the default value it will be passed
1161
+ through (in the special case of an empty array) to the
1162
+ `mean` function of the underlying array. If the array is
1163
+ a sub-class and `mean` does not have the kwarg `keepdims` this
1164
+ will raise a RuntimeError.
1165
+
1166
+ Returns
1167
+ -------
1168
+ median : ndarray
1169
+ A new array holding the result. If the input contains integers
1170
+ or floats smaller than ``float64``, then the output data-type is
1171
+ ``np.float64``. Otherwise, the data-type of the output is the
1172
+ same as that of the input. If `out` is specified, that array is
1173
+ returned instead.
1174
+
1175
+ See Also
1176
+ --------
1177
+ mean, median, percentile
1178
+
1179
+ Notes
1180
+ -----
1181
+ Given a vector ``V`` of length ``N``, the median of ``V`` is the
1182
+ middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
1183
+ ``V_sorted[(N-1)/2]``, when ``N`` is odd and the average of the two
1184
+ middle values of ``V_sorted`` when ``N`` is even.
1185
+
1186
+ Examples
1187
+ --------
1188
+ >>> a = np.array([[10.0, 7, 4], [3, 2, 1]])
1189
+ >>> a[0, 1] = np.nan
1190
+ >>> a
1191
+ array([[10., nan, 4.],
1192
+ [ 3., 2., 1.]])
1193
+ >>> np.median(a)
1194
+ nan
1195
+ >>> np.nanmedian(a)
1196
+ 3.0
1197
+ >>> np.nanmedian(a, axis=0)
1198
+ array([6.5, 2. , 2.5])
1199
+ >>> np.median(a, axis=1)
1200
+ array([nan, 2.])
1201
+ >>> b = a.copy()
1202
+ >>> np.nanmedian(b, axis=1, overwrite_input=True)
1203
+ array([7., 2.])
1204
+ >>> assert not np.all(a==b)
1205
+ >>> b = a.copy()
1206
+ >>> np.nanmedian(b, axis=None, overwrite_input=True)
1207
+ 3.0
1208
+ >>> assert not np.all(a==b)
1209
+
1210
+ """
1211
+ a = np.asanyarray(a)
1212
+ # apply_along_axis in _nanmedian doesn't handle empty arrays well,
+ # so deal with them upfront
1214
+ if a.size == 0:
1215
+ return np.nanmean(a, axis, out=out, keepdims=keepdims)
1216
+
1217
+ return function_base._ureduce(a, func=_nanmedian, keepdims=keepdims,
1218
+ axis=axis, out=out,
1219
+ overwrite_input=overwrite_input)
1220
+
1221
+
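One edge case worth noting from the body above: empty inputs are delegated to nanmean, so they come back as NaN (with a RuntimeWarning) instead of erroring. A sketch, not part of the committed file:

import numpy as np
import warnings

with warnings.catch_warnings():
    warnings.simplefilter("ignore", RuntimeWarning)   # "Mean of empty slice"
    print(np.nanmedian(np.empty((0, 3)), axis=0))     # [nan nan nan]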
1222
+ def _nanpercentile_dispatcher(
1223
+ a, q, axis=None, out=None, overwrite_input=None,
1224
+ method=None, keepdims=None, *, interpolation=None):
1225
+ return (a, q, out)
1226
+
1227
+
1228
+ @array_function_dispatch(_nanpercentile_dispatcher)
1229
+ def nanpercentile(
1230
+ a,
1231
+ q,
1232
+ axis=None,
1233
+ out=None,
1234
+ overwrite_input=False,
1235
+ method="linear",
1236
+ keepdims=np._NoValue,
1237
+ *,
1238
+ interpolation=None,
1239
+ ):
1240
+ """
1241
+ Compute the qth percentile of the data along the specified axis,
1242
+ while ignoring nan values.
1243
+
1244
+ Returns the qth percentile(s) of the array elements.
1245
+
1246
+ .. versionadded:: 1.9.0
1247
+
1248
+ Parameters
1249
+ ----------
1250
+ a : array_like
1251
+ Input array or object that can be converted to an array, containing
1252
+ nan values to be ignored.
1253
+ q : array_like of float
1254
+ Percentile or sequence of percentiles to compute, which must be
1255
+ between 0 and 100 inclusive.
1256
+ axis : {int, tuple of int, None}, optional
1257
+ Axis or axes along which the percentiles are computed. The default
1258
+ is to compute the percentile(s) along a flattened version of the
1259
+ array.
1260
+ out : ndarray, optional
1261
+ Alternative output array in which to place the result. It must have
1262
+ the same shape and buffer length as the expected output, but the
1263
+ type (of the output) will be cast if necessary.
1264
+ overwrite_input : bool, optional
1265
+ If True, then allow the input array `a` to be modified by
1266
+ intermediate calculations, to save memory. In this case, the
1267
+ contents of the input `a` after this function completes is
1268
+ undefined.
1269
+ method : str, optional
1270
+ This parameter specifies the method to use for estimating the
1271
+ percentile. There are many different methods, some unique to NumPy.
1272
+ See the notes for explanation. The options sorted by their R type
1273
+ as summarized in the H&F paper [1]_ are:
1274
+
1275
+ 1. 'inverted_cdf'
1276
+ 2. 'averaged_inverted_cdf'
1277
+ 3. 'closest_observation'
1278
+ 4. 'interpolated_inverted_cdf'
1279
+ 5. 'hazen'
1280
+ 6. 'weibull'
1281
+ 7. 'linear' (default)
1282
+ 8. 'median_unbiased'
1283
+ 9. 'normal_unbiased'
1284
+
1285
+ The first three methods are discontinuous. NumPy further defines the
1286
+ following discontinuous variations of the default 'linear' (7.) option:
1287
+
1288
+ * 'lower'
+ * 'higher'
+ * 'midpoint'
+ * 'nearest'
1292
+
1293
+ .. versionchanged:: 1.22.0
1294
+ This argument was previously called "interpolation" and only
1295
+ offered the "linear" default and last four options.
1296
+
1297
+ keepdims : bool, optional
1298
+ If this is set to True, the axes which are reduced are left in
1299
+ the result as dimensions with size one. With this option, the
1300
+ result will broadcast correctly against the original array `a`.
1301
+
1302
+ If this is anything but the default value it will be passed
1303
+ through (in the special case of an empty array) to the
1304
+ `mean` function of the underlying array. If the array is
1305
+ a sub-class and `mean` does not have the kwarg `keepdims` this
1306
+ will raise a RuntimeError.
1307
+
1308
+ interpolation : str, optional
1309
+ Deprecated name for the method keyword argument.
1310
+
1311
+ .. deprecated:: 1.22.0
1312
+
1313
+ Returns
1314
+ -------
1315
+ percentile : scalar or ndarray
1316
+ If `q` is a single percentile and `axis=None`, then the result
1317
+ is a scalar. If multiple percentiles are given, first axis of
1318
+ the result corresponds to the percentiles. The other axes are
1319
+ the axes that remain after the reduction of `a`. If the input
1320
+ contains integers or floats smaller than ``float64``, the output
1321
+ data-type is ``float64``. Otherwise, the output data-type is the
1322
+ same as that of the input. If `out` is specified, that array is
1323
+ returned instead.
1324
+
1325
+ See Also
1326
+ --------
1327
+ nanmean
1328
+ nanmedian : equivalent to ``nanpercentile(..., 50)``
1329
+ percentile, median, mean
1330
+ nanquantile : equivalent to nanpercentile, except q in range [0, 1].
1331
+
1332
+ Notes
1333
+ -----
1334
+ For more information, please see `numpy.percentile`.
1335
+
1336
+ Examples
1337
+ --------
1338
+ >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
1339
+ >>> a[0][1] = np.nan
1340
+ >>> a
1341
+ array([[10., nan, 4.],
1342
+ [ 3., 2., 1.]])
1343
+ >>> np.percentile(a, 50)
1344
+ nan
1345
+ >>> np.nanpercentile(a, 50)
1346
+ 3.0
1347
+ >>> np.nanpercentile(a, 50, axis=0)
1348
+ array([6.5, 2. , 2.5])
1349
+ >>> np.nanpercentile(a, 50, axis=1, keepdims=True)
1350
+ array([[7.],
1351
+ [2.]])
1352
+ >>> m = np.nanpercentile(a, 50, axis=0)
1353
+ >>> out = np.zeros_like(m)
1354
+ >>> np.nanpercentile(a, 50, axis=0, out=out)
1355
+ array([6.5, 2. , 2.5])
1356
+ >>> m
1357
+ array([6.5, 2. , 2.5])
1358
+
1359
+ >>> b = a.copy()
1360
+ >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True)
1361
+ array([7., 2.])
1362
+ >>> assert not np.all(a==b)
1363
+
1364
+ References
1365
+ ----------
1366
+ .. [1] R. J. Hyndman and Y. Fan,
1367
+ "Sample quantiles in statistical packages,"
1368
+ The American Statistician, 50(4), pp. 361-365, 1996
1369
+
1370
+ """
1371
+ if interpolation is not None:
1372
+ method = function_base._check_interpolation_as_method(
1373
+ method, interpolation, "nanpercentile")
1374
+
1375
+ a = np.asanyarray(a)
1376
+ if a.dtype.kind == "c":
1377
+ raise TypeError("a must be an array of real numbers")
1378
+
1379
+ q = np.true_divide(q, 100.0)
1380
+ # undo any decay that the ufunc performed (see gh-13105)
1381
+ q = np.asanyarray(q)
1382
+ if not function_base._quantile_is_valid(q):
1383
+ raise ValueError("Percentiles must be in the range [0, 100]")
1384
+ return _nanquantile_unchecked(
1385
+ a, q, axis, out, overwrite_input, method, keepdims)
1386
+
1387
+
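A short sketch (not part of the committed file) contrasting the default estimator with one of the discontinuous variants listed above:

import numpy as np

a = np.array([1.0, np.nan, 2.0, 3.0, 4.0])
print(np.nanpercentile(a, 50))                    # 2.5 -> 'linear' interpolates
print(np.nanpercentile(a, 50, method="lower"))    # 2.0 -> snaps to a data point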
1388
+ def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
1389
+ method=None, keepdims=None, *, interpolation=None):
1390
+ return (a, q, out)
1391
+
1392
+
1393
+ @array_function_dispatch(_nanquantile_dispatcher)
1394
+ def nanquantile(
1395
+ a,
1396
+ q,
1397
+ axis=None,
1398
+ out=None,
1399
+ overwrite_input=False,
1400
+ method="linear",
1401
+ keepdims=np._NoValue,
1402
+ *,
1403
+ interpolation=None,
1404
+ ):
1405
+ """
1406
+ Compute the qth quantile of the data along the specified axis,
1407
+ while ignoring nan values.
+
+ Returns the qth quantile(s) of the array elements.
1409
+
1410
+ .. versionadded:: 1.15.0
1411
+
1412
+ Parameters
1413
+ ----------
1414
+ a : array_like
1415
+ Input array or object that can be converted to an array, containing
1416
+ nan values to be ignored.
1417
+ q : array_like of float
1418
+ Probability or sequence of probabilities for the quantiles to compute.
1419
+ Values must be between 0 and 1 inclusive.
1420
+ axis : {int, tuple of int, None}, optional
1421
+ Axis or axes along which the quantiles are computed. The
1422
+ default is to compute the quantile(s) along a flattened
1423
+ version of the array.
1424
+ out : ndarray, optional
1425
+ Alternative output array in which to place the result. It must
1426
+ have the same shape and buffer length as the expected output,
1427
+ but the type (of the output) will be cast if necessary.
1428
+ overwrite_input : bool, optional
1429
+ If True, then allow the input array `a` to be modified by intermediate
1430
+ calculations, to save memory. In this case, the contents of the input
1431
+ `a` after this function completes is undefined.
1432
+ method : str, optional
1433
+ This parameter specifies the method to use for estimating the
1434
+ quantile. There are many different methods, some unique to NumPy.
1435
+ See the notes for explanation. The options sorted by their R type
1436
+ as summarized in the H&F paper [1]_ are:
1437
+
1438
+ 1. 'inverted_cdf'
1439
+ 2. 'averaged_inverted_cdf'
1440
+ 3. 'closest_observation'
1441
+ 4. 'interpolated_inverted_cdf'
1442
+ 5. 'hazen'
1443
+ 6. 'weibull'
1444
+ 7. 'linear' (default)
1445
+ 8. 'median_unbiased'
1446
+ 9. 'normal_unbiased'
1447
+
1448
+ The first three methods are discontinuous. NumPy further defines the
1449
+ following discontinuous variations of the default 'linear' (7.) option:
1450
+
1451
+ * 'lower'
+ * 'higher'
+ * 'midpoint'
+ * 'nearest'
1455
+
1456
+ .. versionchanged:: 1.22.0
1457
+ This argument was previously called "interpolation" and only
1458
+ offered the "linear" default and last four options.
1459
+
1460
+ keepdims : bool, optional
1461
+ If this is set to True, the axes which are reduced are left in
1462
+ the result as dimensions with size one. With this option, the
1463
+ result will broadcast correctly against the original array `a`.
1464
+
1465
+ If this is anything but the default value it will be passed
1466
+ through (in the special case of an empty array) to the
1467
+ `mean` function of the underlying array. If the array is
1468
+ a sub-class and `mean` does not have the kwarg `keepdims` this
1469
+ will raise a RuntimeError.
1470
+
1471
+ interpolation : str, optional
1472
+ Deprecated name for the method keyword argument.
1473
+
1474
+ .. deprecated:: 1.22.0
1475
+
1476
+ Returns
1477
+ -------
1478
+ quantile : scalar or ndarray
1479
+ If `q` is a single probability and `axis=None`, then the result
1480
+ is a scalar. If multiple probability levels are given, first axis of
1481
+ the result corresponds to the quantiles. The other axes are
1482
+ the axes that remain after the reduction of `a`. If the input
1483
+ contains integers or floats smaller than ``float64``, the output
1484
+ data-type is ``float64``. Otherwise, the output data-type is the
1485
+ same as that of the input. If `out` is specified, that array is
1486
+ returned instead.
1487
+
1488
+ See Also
1489
+ --------
1490
+ quantile
1491
+ nanmean, nanmedian
1492
+ nanmedian : equivalent to ``nanquantile(..., 0.5)``
1493
+ nanpercentile : same as nanquantile, but with q in the range [0, 100].
1494
+
1495
+ Notes
1496
+ -----
1497
+ For more information, please see `numpy.quantile`.
1498
+
1499
+ Examples
1500
+ --------
1501
+ >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
1502
+ >>> a[0][1] = np.nan
1503
+ >>> a
1504
+ array([[10., nan, 4.],
1505
+ [ 3., 2., 1.]])
1506
+ >>> np.quantile(a, 0.5)
1507
+ nan
1508
+ >>> np.nanquantile(a, 0.5)
1509
+ 3.0
1510
+ >>> np.nanquantile(a, 0.5, axis=0)
1511
+ array([6.5, 2. , 2.5])
1512
+ >>> np.nanquantile(a, 0.5, axis=1, keepdims=True)
1513
+ array([[7.],
1514
+ [2.]])
1515
+ >>> m = np.nanquantile(a, 0.5, axis=0)
1516
+ >>> out = np.zeros_like(m)
1517
+ >>> np.nanquantile(a, 0.5, axis=0, out=out)
1518
+ array([6.5, 2. , 2.5])
1519
+ >>> m
1520
+ array([6.5, 2. , 2.5])
1521
+ >>> b = a.copy()
1522
+ >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True)
1523
+ array([7., 2.])
1524
+ >>> assert not np.all(a==b)
1525
+
1526
+ References
1527
+ ----------
1528
+ .. [1] R. J. Hyndman and Y. Fan,
1529
+ "Sample quantiles in statistical packages,"
1530
+ The American Statistician, 50(4), pp. 361-365, 1996
1531
+
1532
+ """
1533
+
1534
+ if interpolation is not None:
1535
+ method = function_base._check_interpolation_as_method(
1536
+ method, interpolation, "nanquantile")
1537
+
1538
+ a = np.asanyarray(a)
1539
+ if a.dtype.kind == "c":
1540
+ raise TypeError("a must be an array of real numbers")
1541
+
1542
+ q = np.asanyarray(q)
1543
+ if not function_base._quantile_is_valid(q):
1544
+ raise ValueError("Quantiles must be in the range [0, 1]")
1545
+ return _nanquantile_unchecked(
1546
+ a, q, axis, out, overwrite_input, method, keepdims)
1547
+
1548
+
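nanquantile is the same machinery with q already in [0, 1]; a sketch (not part of the committed file):

import numpy as np

a = np.array([[10.0, np.nan, 4.0], [3.0, 2.0, 1.0]])
print(np.nanquantile(a, 0.5))    # 3.0
print(np.nanpercentile(a, 50))   # 3.0 -> identical, q merely scaled by 100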
1549
+ def _nanquantile_unchecked(
1550
+ a,
1551
+ q,
1552
+ axis=None,
1553
+ out=None,
1554
+ overwrite_input=False,
1555
+ method="linear",
1556
+ keepdims=np._NoValue,
1557
+ ):
1558
+ """Assumes that q is in [0, 1], and is an ndarray"""
1559
+ # apply_along_axis in _nanpercentile doesn't handle empty arrays well,
+ # so deal with them upfront
1561
+ if a.size == 0:
1562
+ return np.nanmean(a, axis, out=out, keepdims=keepdims)
1563
+ return function_base._ureduce(a,
1564
+ func=_nanquantile_ureduce_func,
1565
+ q=q,
1566
+ keepdims=keepdims,
1567
+ axis=axis,
1568
+ out=out,
1569
+ overwrite_input=overwrite_input,
1570
+ method=method)
1571
+
1572
+
1573
+ def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
1574
+ method="linear"):
1575
+ """
1576
+ Private function that doesn't support extended axis or keepdims.
1577
+ These methods are extended to this function using _ureduce
1578
+ See nanpercentile for parameter usage
1579
+ """
1580
+ if axis is None or a.ndim == 1:
1581
+ part = a.ravel()
1582
+ result = _nanquantile_1d(part, q, overwrite_input, method)
1583
+ else:
1584
+ result = np.apply_along_axis(_nanquantile_1d, axis, a, q,
1585
+ overwrite_input, method)
1586
+ # apply_along_axis fills in collapsed axis with results.
1587
+ # Move that axis to the beginning to match percentile's
1588
+ # convention.
1589
+ if q.ndim != 0:
1590
+ result = np.moveaxis(result, axis, 0)
1591
+
1592
+ if out is not None:
1593
+ out[...] = result
1594
+ return result
1595
+
1596
+
1597
+ def _nanquantile_1d(arr1d, q, overwrite_input=False, method="linear"):
1598
+ """
1599
+ Private function for rank 1 arrays. Compute quantile ignoring NaNs.
1600
+ See nanpercentile for parameter usage
1601
+ """
1602
+ arr1d, overwrite_input = _remove_nan_1d(arr1d,
1603
+ overwrite_input=overwrite_input)
1604
+ if arr1d.size == 0:
1605
+ # convert to scalar
1606
+ return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()]
1607
+
1608
+ return function_base._quantile_unchecked(
1609
+ arr1d, q, overwrite_input=overwrite_input, method=method)
1610
+
1611
+
1612
+ def _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
1613
+ keepdims=None, *, where=None):
1614
+ return (a, out)
1615
+
1616
+
1617
+ @array_function_dispatch(_nanvar_dispatcher)
1618
+ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
1619
+ *, where=np._NoValue):
1620
+ """
1621
+ Compute the variance along the specified axis, while ignoring NaNs.
1622
+
1623
+ Returns the variance of the array elements, a measure of the spread of
1624
+ a distribution. The variance is computed for the flattened array by
1625
+ default, otherwise over the specified axis.
1626
+
1627
+ For all-NaN slices or slices with zero degrees of freedom, NaN is
1628
+ returned and a `RuntimeWarning` is raised.
1629
+
1630
+ .. versionadded:: 1.8.0
1631
+
1632
+ Parameters
1633
+ ----------
1634
+ a : array_like
1635
+ Array containing numbers whose variance is desired. If `a` is not an
1636
+ array, a conversion is attempted.
1637
+ axis : {int, tuple of int, None}, optional
1638
+ Axis or axes along which the variance is computed. The default is to compute
1639
+ the variance of the flattened array.
1640
+ dtype : data-type, optional
1641
+ Type to use in computing the variance. For arrays of integer type
1642
+ the default is `float64`; for arrays of float types it is the same as
1643
+ the array type.
1644
+ out : ndarray, optional
1645
+ Alternate output array in which to place the result. It must have
1646
+ the same shape as the expected output, but the type is cast if
1647
+ necessary.
1648
+ ddof : int, optional
1649
+ "Delta Degrees of Freedom": the divisor used in the calculation is
1650
+ ``N - ddof``, where ``N`` represents the number of non-NaN
1651
+ elements. By default `ddof` is zero.
1652
+ keepdims : bool, optional
1653
+ If this is set to True, the axes which are reduced are left
1654
+ in the result as dimensions with size one. With this option,
1655
+ the result will broadcast correctly against the original `a`.
1656
+ where : array_like of bool, optional
1657
+ Elements to include in the variance. See `~numpy.ufunc.reduce` for
1658
+ details.
1659
+
1660
+ .. versionadded:: 1.22.0
1661
+
1662
+ Returns
1663
+ -------
1664
+ variance : ndarray, see dtype parameter above
1665
+ If `out` is None, return a new array containing the variance,
1666
+ otherwise return a reference to the output array. If ddof is >= the
1667
+ number of non-NaN elements in a slice or the slice contains only
1668
+ NaNs, then the result for that slice is NaN.
1669
+
1670
+ See Also
1671
+ --------
1672
+ std : Standard deviation
1673
+ mean : Average
1674
+ var : Variance while not ignoring NaNs
1675
+ nanstd, nanmean
1676
+ :ref:`ufuncs-output-type`
1677
+
1678
+ Notes
1679
+ -----
1680
+ The variance is the average of the squared deviations from the mean,
1681
+ i.e., ``var = mean(abs(x - x.mean())**2)``.
1682
+
1683
+ The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
1684
+ If, however, `ddof` is specified, the divisor ``N - ddof`` is used
1685
+ instead. In standard statistical practice, ``ddof=1`` provides an
1686
+ unbiased estimator of the variance of a hypothetical infinite
1687
+ population. ``ddof=0`` provides a maximum likelihood estimate of the
1688
+ variance for normally distributed variables.
1689
+
1690
+ Note that for complex numbers, the absolute value is taken before
1691
+ squaring, so that the result is always real and nonnegative.
1692
+
1693
+ For floating-point input, the variance is computed using the same
1694
+ precision the input has. Depending on the input data, this can cause
1695
+ the results to be inaccurate, especially for `float32` (see example
1696
+ below). Specifying a higher-accuracy accumulator using the ``dtype``
1697
+ keyword can alleviate this issue.
1698
+
1699
+ For this function to work on sub-classes of ndarray, they must define
+ `sum` with the kwarg `keepdims`.
1701
+
1702
+ Examples
1703
+ --------
1704
+ >>> a = np.array([[1, np.nan], [3, 4]])
1705
+ >>> np.nanvar(a)
1706
+ 1.5555555555555554
1707
+ >>> np.nanvar(a, axis=0)
1708
+ array([1., 0.])
1709
+ >>> np.nanvar(a, axis=1)
1710
+ array([0., 0.25]) # may vary
1711
+
1712
+ """
1713
+ arr, mask = _replace_nan(a, 0)
1714
+ if mask is None:
1715
+ return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,
1716
+ keepdims=keepdims, where=where)
1717
+
1718
+ if dtype is not None:
1719
+ dtype = np.dtype(dtype)
1720
+ if dtype is not None and not issubclass(dtype.type, np.inexact):
1721
+ raise TypeError("If a is inexact, then dtype must be inexact")
1722
+ if out is not None and not issubclass(out.dtype.type, np.inexact):
1723
+ raise TypeError("If a is inexact, then out must be inexact")
1724
+
1725
+ # Compute mean
1726
+ if type(arr) is np.matrix:
1727
+ _keepdims = np._NoValue
1728
+ else:
1729
+ _keepdims = True
1730
+ # we need to special case matrix for reverse compatibility
1731
+ # in order for this to work, these sums need to be called with
1732
+ # keepdims=True, however matrix now raises an error in this case, but
1733
+ # the reason that it drops the keepdims kwarg is to force keepdims=True
1734
+ # so this used to work by serendipity.
1735
+ cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims,
1736
+ where=where)
1737
+ avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims, where=where)
1738
+ avg = _divide_by_count(avg, cnt)
1739
+
1740
+ # Compute squared deviation from mean.
1741
+ np.subtract(arr, avg, out=arr, casting='unsafe', where=where)
1742
+ arr = _copyto(arr, 0, mask)
1743
+ if issubclass(arr.dtype.type, np.complexfloating):
1744
+ sqr = np.multiply(arr, arr.conj(), out=arr, where=where).real
1745
+ else:
1746
+ sqr = np.multiply(arr, arr, out=arr, where=where)
1747
+
1748
+ # Compute variance.
1749
+ var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
1750
+ where=where)
1751
+
1752
+ # Precaution against reduced object arrays
1753
+ try:
1754
+ var_ndim = var.ndim
1755
+ except AttributeError:
1756
+ var_ndim = np.ndim(var)
1757
+ if var_ndim < cnt.ndim:
1758
+ # Subclasses of ndarray may ignore keepdims, so check here.
1759
+ cnt = cnt.squeeze(axis)
1760
+ dof = cnt - ddof
1761
+ var = _divide_by_count(var, dof)
1762
+
1763
+ isbad = (dof <= 0)
1764
+ if np.any(isbad):
1765
+ warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning,
1766
+ stacklevel=2)
1767
+ # NaN, inf, or negative numbers are all possible bad
1768
+ # values, so explicitly replace them with NaN.
1769
+ var = _copyto(var, np.nan, isbad)
1770
+ return var
1771
+
1772
+
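The dof bookkeeping above is what produces the warning-plus-NaN outcome for under-determined slices; a sketch (not part of the committed file):

import numpy as np

a = np.array([[1.0, np.nan], [3.0, 4.0]])
print(np.nanvar(a, axis=0))            # [1. 0.]
print(np.nanvar(a, axis=0, ddof=1))    # [2. nan] plus "Degrees of freedom <= 0"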
1773
+ def _nanstd_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
1774
+ keepdims=None, *, where=None):
1775
+ return (a, out)
1776
+
1777
+
1778
+ @array_function_dispatch(_nanstd_dispatcher)
1779
+ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
1780
+ *, where=np._NoValue):
1781
+ """
1782
+ Compute the standard deviation along the specified axis, while
1783
+ ignoring NaNs.
1784
+
1785
+ Returns the standard deviation, a measure of the spread of a
1786
+ distribution, of the non-NaN array elements. The standard deviation is
1787
+ computed for the flattened array by default, otherwise over the
1788
+ specified axis.
1789
+
1790
+ For all-NaN slices or slices with zero degrees of freedom, NaN is
1791
+ returned and a `RuntimeWarning` is raised.
1792
+
1793
+ .. versionadded:: 1.8.0
1794
+
1795
+ Parameters
1796
+ ----------
1797
+ a : array_like
1798
+ Calculate the standard deviation of the non-NaN values.
1799
+ axis : {int, tuple of int, None}, optional
1800
+ Axis or axes along which the standard deviation is computed. The default is
1801
+ to compute the standard deviation of the flattened array.
1802
+ dtype : dtype, optional
1803
+ Type to use in computing the standard deviation. For arrays of
1804
+ integer type the default is float64, for arrays of float types it
1805
+ is the same as the array type.
1806
+ out : ndarray, optional
1807
+ Alternative output array in which to place the result. It must have
1808
+ the same shape as the expected output but the type (of the
1809
+ calculated values) will be cast if necessary.
1810
+ ddof : int, optional
1811
+ Means Delta Degrees of Freedom. The divisor used in calculations
1812
+ is ``N - ddof``, where ``N`` represents the number of non-NaN
1813
+ elements. By default `ddof` is zero.
1814
+
1815
+ keepdims : bool, optional
1816
+ If this is set to True, the axes which are reduced are left
1817
+ in the result as dimensions with size one. With this option,
1818
+ the result will broadcast correctly against the original `a`.
1819
+
1820
+ If this value is anything but the default it is passed through
1821
+ as-is to the relevant functions of the sub-classes. If these
1822
+ functions do not have a `keepdims` kwarg, a RuntimeError will
1823
+ be raised.
1824
+ where : array_like of bool, optional
1825
+ Elements to include in the standard deviation.
1826
+ See `~numpy.ufunc.reduce` for details.
1827
+
1828
+ .. versionadded:: 1.22.0
1829
+
1830
+ Returns
1831
+ -------
1832
+ standard_deviation : ndarray, see dtype parameter above.
1833
+ If `out` is None, return a new array containing the standard
1834
+ deviation, otherwise return a reference to the output array. If
1835
+ ddof is >= the number of non-NaN elements in a slice or the slice
1836
+ contains only NaNs, then the result for that slice is NaN.
1837
+
1838
+ See Also
1839
+ --------
1840
+ var, mean, std
1841
+ nanvar, nanmean
1842
+ :ref:`ufuncs-output-type`
1843
+
1844
+ Notes
1845
+ -----
1846
+ The standard deviation is the square root of the average of the squared
1847
+ deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``.
1848
+
1849
+ The average squared deviation is normally calculated as
1850
+ ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is
1851
+ specified, the divisor ``N - ddof`` is used instead. In standard
1852
+ statistical practice, ``ddof=1`` provides an unbiased estimator of the
1853
+ variance of the infinite population. ``ddof=0`` provides a maximum
1854
+ likelihood estimate of the variance for normally distributed variables.
1855
+ The standard deviation computed in this function is the square root of
1856
+ the estimated variance, so even with ``ddof=1``, it will not be an
1857
+ unbiased estimate of the standard deviation per se.
1858
+
1859
+ Note that, for complex numbers, `std` takes the absolute value before
1860
+ squaring, so that the result is always real and nonnegative.
1861
+
1862
+ For floating-point input, the *std* is computed using the same
1863
+ precision the input has. Depending on the input data, this can cause
1864
+ the results to be inaccurate, especially for float32 (see example
1865
+ below). Specifying a higher-accuracy accumulator using the `dtype`
1866
+ keyword can alleviate this issue.
1867
+
1868
+ Examples
1869
+ --------
1870
+ >>> a = np.array([[1, np.nan], [3, 4]])
1871
+ >>> np.nanstd(a)
1872
+ 1.247219128924647
1873
+ >>> np.nanstd(a, axis=0)
1874
+ array([1., 0.])
1875
+ >>> np.nanstd(a, axis=1)
1876
+ array([0., 0.5]) # may vary
1877
+
1878
+ """
1879
+ var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
1880
+ keepdims=keepdims, where=where)
1881
+ if isinstance(var, np.ndarray):
1882
+ std = np.sqrt(var, out=var)
1883
+ elif hasattr(var, 'dtype'):
1884
+ std = var.dtype.type(np.sqrt(var))
1885
+ else:
1886
+ std = np.sqrt(var)
1887
+ return std
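As the body shows, nanstd is just the square root of nanvar (taken in place when possible); a sketch, not part of the committed file:

import numpy as np

a = np.array([[1.0, np.nan], [3.0, 4.0]])
print(np.nanstd(a))             # 1.247219128924647
print(np.sqrt(np.nanvar(a)))    # identical by construction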
env-llmeval/lib/python3.10/site-packages/numpy/lib/nanfunctions.pyi ADDED
@@ -0,0 +1,38 @@
+ from numpy.core.fromnumeric import (
+ amin,
+ amax,
+ argmin,
+ argmax,
+ sum,
+ prod,
+ cumsum,
+ cumprod,
+ mean,
+ var,
+ std
+ )
+
+ from numpy.lib.function_base import (
+ median,
+ percentile,
+ quantile,
+ )
+
+ __all__: list[str]
+
+ # NOTE: In reality these functions are not aliases but distinct functions
+ # with identical signatures.
+ nanmin = amin
+ nanmax = amax
+ nanargmin = argmin
+ nanargmax = argmax
+ nansum = sum
+ nanprod = prod
+ nancumsum = cumsum
+ nancumprod = cumprod
+ nanmean = mean
+ nanvar = var
+ nanstd = std
+ nanmedian = median
+ nanpercentile = percentile
+ nanquantile = quantile
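A small illustration of what the stub aliasing buys (a sketch, not part of the committed file): a type checker validates nan-function calls against the plain reductions' signatures, even though the runtime objects differ.

import numpy as np

# Accepted by a checker because nansum is annotated like sum:
print(np.nansum(np.arange(4.0), axis=0, keepdims=True))   # [6.]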
env-llmeval/lib/python3.10/site-packages/numpy/lib/npyio.py ADDED
@@ -0,0 +1,2547 @@
1
+ import os
2
+ import re
3
+ import functools
4
+ import itertools
5
+ import warnings
6
+ import weakref
7
+ import contextlib
8
+ import operator
9
+ from operator import itemgetter, index as opindex, methodcaller
10
+ from collections.abc import Mapping
11
+
12
+ import numpy as np
13
+ from . import format
14
+ from ._datasource import DataSource
15
+ from numpy.core import overrides
16
+ from numpy.core.multiarray import packbits, unpackbits
17
+ from numpy.core._multiarray_umath import _load_from_filelike
18
+ from numpy.core.overrides import set_array_function_like_doc, set_module
19
+ from ._iotools import (
20
+ LineSplitter, NameValidator, StringConverter, ConverterError,
21
+ ConverterLockError, ConversionWarning, _is_string_like,
22
+ has_nested_fields, flatten_dtype, easy_dtype, _decode_line
23
+ )
24
+
25
+ from numpy.compat import (
26
+ asbytes, asstr, asunicode, os_fspath, os_PathLike,
27
+ pickle
28
+ )
29
+
30
+
31
+ __all__ = [
32
+ 'savetxt', 'loadtxt', 'genfromtxt',
33
+ 'recfromtxt', 'recfromcsv', 'load', 'save', 'savez',
34
+ 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
35
+ ]
36
+
37
+
38
+ array_function_dispatch = functools.partial(
39
+ overrides.array_function_dispatch, module='numpy')
40
+
41
+
42
+ class BagObj:
43
+ """
44
+ BagObj(obj)
45
+
46
+ Convert attribute look-ups to getitems on the object passed in.
47
+
48
+ Parameters
49
+ ----------
50
+ obj : class instance
51
+ Object on which attribute look-up is performed.
52
+
53
+ Examples
54
+ --------
55
+ >>> from numpy.lib.npyio import BagObj as BO
56
+ >>> class BagDemo:
57
+ ... def __getitem__(self, key): # An instance of BagObj(BagDemo)
58
+ ... # will call this method when any
59
+ ... # attribute look-up is required
60
+ ... result = "Doesn't matter what you want, "
61
+ ... return result + "you're gonna get this"
62
+ ...
63
+ >>> demo_obj = BagDemo()
64
+ >>> bagobj = BO(demo_obj)
65
+ >>> bagobj.hello_there
66
+ "Doesn't matter what you want, you're gonna get this"
67
+ >>> bagobj.I_can_be_anything
68
+ "Doesn't matter what you want, you're gonna get this"
69
+
70
+ """
71
+
72
+ def __init__(self, obj):
73
+ # Use weakref to make NpzFile objects collectable by refcount
74
+ self._obj = weakref.proxy(obj)
75
+
76
+ def __getattribute__(self, key):
77
+ try:
78
+ return object.__getattribute__(self, '_obj')[key]
79
+ except KeyError:
80
+ raise AttributeError(key) from None
81
+
82
+ def __dir__(self):
83
+ """
84
+ Enables dir(bagobj) to list the files in an NpzFile.
85
+
86
+ This also enables tab-completion in an interpreter or IPython.
87
+ """
88
+ return list(object.__getattribute__(self, '_obj').keys())
89
+
90
+
91
+ def zipfile_factory(file, *args, **kwargs):
92
+ """
93
+ Create a ZipFile.
94
+
95
+ Allows for Zip64, and the `file` argument can accept file, str, or
96
+ pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
97
+ constructor.
98
+ """
99
+ if not hasattr(file, 'read'):
100
+ file = os_fspath(file)
101
+ import zipfile
102
+ kwargs['allowZip64'] = True
103
+ return zipfile.ZipFile(file, *args, **kwargs)
104
+
105
+
106
+ class NpzFile(Mapping):
107
+ """
108
+ NpzFile(fid)
109
+
110
+ A dictionary-like object with lazy-loading of files in the zipped
111
+ archive provided on construction.
112
+
113
+ `NpzFile` is used to load files in the NumPy ``.npz`` data archive
114
+ format. It assumes that files in the archive have a ``.npy`` extension,
115
+ other files are ignored.
116
+
117
+ The arrays and file strings are lazily loaded on either
118
+ getitem access using ``obj['key']`` or attribute lookup using
119
+ ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
120
+ be obtained with ``obj.files`` and the ZipFile object itself using
121
+ ``obj.zip``.
122
+
123
+ Attributes
124
+ ----------
125
+ files : list of str
126
+ List of all files in the archive with a ``.npy`` extension.
127
+ zip : ZipFile instance
128
+ The ZipFile object initialized with the zipped archive.
129
+ f : BagObj instance
130
+ An object on which attribute can be performed as an alternative
131
+ to getitem access on the `NpzFile` instance itself.
132
+ allow_pickle : bool, optional
133
+ Allow loading pickled data. Default: False
134
+
135
+ .. versionchanged:: 1.16.3
136
+ Made default False in response to CVE-2019-6446.
137
+
138
+ pickle_kwargs : dict, optional
139
+ Additional keyword arguments to pass on to pickle.load.
140
+ These are only useful when loading object arrays saved on
141
+ Python 2 when using Python 3.
142
+ max_header_size : int, optional
143
+ Maximum allowed size of the header. Large headers may not be safe
144
+ to load securely and thus require explicitly passing a larger value.
145
+ See :py:func:`ast.literal_eval()` for details.
146
+ This option is ignored when `allow_pickle` is passed. In that case
147
+ the file is by definition trusted and the limit is unnecessary.
148
+
149
+ Parameters
150
+ ----------
151
+ fid : file or str
152
+ The zipped archive to open. This is either a file-like object
153
+ or a string containing the path to the archive.
154
+ own_fid : bool, optional
155
+ Whether NpzFile should close the file handle.
156
+ Requires that `fid` is a file-like object.
157
+
158
+ Examples
159
+ --------
160
+ >>> from tempfile import TemporaryFile
161
+ >>> outfile = TemporaryFile()
162
+ >>> x = np.arange(10)
163
+ >>> y = np.sin(x)
164
+ >>> np.savez(outfile, x=x, y=y)
165
+ >>> _ = outfile.seek(0)
166
+
167
+ >>> npz = np.load(outfile)
168
+ >>> isinstance(npz, np.lib.npyio.NpzFile)
169
+ True
170
+ >>> npz
171
+ NpzFile 'object' with keys x, y
172
+ >>> sorted(npz.files)
173
+ ['x', 'y']
174
+ >>> npz['x'] # getitem access
175
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
176
+ >>> npz.f.x # attribute lookup
177
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
178
+
179
+ """
180
+     # Make __exit__ safe if zipfile_factory raises an exception
+     zip = None
+     fid = None
+     _MAX_REPR_ARRAY_COUNT = 5
+
+     def __init__(self, fid, own_fid=False, allow_pickle=False,
+                  pickle_kwargs=None, *,
+                  max_header_size=format._MAX_HEADER_SIZE):
+         # Import is postponed to here since zipfile depends on gzip, an
+         # optional component of the so-called standard library.
+         _zip = zipfile_factory(fid)
+         self._files = _zip.namelist()
+         self.files = []
+         self.allow_pickle = allow_pickle
+         self.max_header_size = max_header_size
+         self.pickle_kwargs = pickle_kwargs
+         for x in self._files:
+             if x.endswith('.npy'):
+                 self.files.append(x[:-4])
+             else:
+                 self.files.append(x)
+         self.zip = _zip
+         self.f = BagObj(self)
+         if own_fid:
+             self.fid = fid
+
+     def __enter__(self):
+         return self
+
+     def __exit__(self, exc_type, exc_value, traceback):
+         self.close()
+
+     def close(self):
+         """
+         Close the file.
+
+         """
+         if self.zip is not None:
+             self.zip.close()
+             self.zip = None
+         if self.fid is not None:
+             self.fid.close()
+             self.fid = None
+         self.f = None  # break reference cycle
+
+     def __del__(self):
+         self.close()
+
+     # Implement the Mapping ABC
+     def __iter__(self):
+         return iter(self.files)
+
+     def __len__(self):
+         return len(self.files)
+
+     def __getitem__(self, key):
+         # FIXME: This seems like it will copy strings around
+         #   more than is strictly necessary. The zipfile
+         #   will read the string and then
+         #   the format.read_array will copy the string
+         #   to another place in memory.
+         #   It would be better if the zipfile could read
+         #   (or at least uncompress) the data
+         #   directly into the array memory.
+         member = False
+         if key in self._files:
+             member = True
+         elif key in self.files:
+             member = True
+             key += '.npy'
+         if member:
+             bytes = self.zip.open(key)
+             magic = bytes.read(len(format.MAGIC_PREFIX))
+             bytes.close()
+             if magic == format.MAGIC_PREFIX:
+                 bytes = self.zip.open(key)
+                 return format.read_array(bytes,
+                                          allow_pickle=self.allow_pickle,
+                                          pickle_kwargs=self.pickle_kwargs,
+                                          max_header_size=self.max_header_size)
+             else:
+                 return self.zip.read(key)
+         else:
+             raise KeyError(f"{key} is not a file in the archive")
+
+     def __contains__(self, key):
+         return (key in self._files or key in self.files)
+
+     def __repr__(self):
+         # Get filename or default to `object`
+         if isinstance(self.fid, str):
+             filename = self.fid
+         else:
+             filename = getattr(self.fid, "name", "object")
+
+         # Get the name of arrays
+         array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT])
+         if len(self.files) > self._MAX_REPR_ARRAY_COUNT:
+             array_names += "..."
+         return f"NpzFile {filename!r} with keys: {array_names}"
+
+
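+ # Illustrative sketch of the lazy loading implemented above: constructing an
+ # NpzFile only reads the zip directory; each member is parsed on first access
+ # (the archive and key names below are placeholders):
+ #
+ #     npz = np.load('arrays.npz')   # cheap: no array data read yet
+ #     x = npz['x']                  # parses member 'x.npy' now
+ #     x2 = npz.f.x                  # same array via attribute lookup
+ #     npz.close()                   # releases the underlying ZipFile
+
+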
+ @set_module('numpy')
+ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
+          encoding='ASCII', *, max_header_size=format._MAX_HEADER_SIZE):
+     """
+     Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
+
+     .. warning:: Loading files that contain object arrays uses the ``pickle``
+                  module, which is not secure against erroneous or maliciously
+                  constructed data. Consider passing ``allow_pickle=False`` to
+                  load data that is known not to contain object arrays for the
+                  safer handling of untrusted sources.
+
+     Parameters
+     ----------
+     file : file-like object, string, or pathlib.Path
+         The file to read. File-like objects must support the
+         ``seek()`` and ``read()`` methods and must always
+         be opened in binary mode. Pickled files require that the
+         file-like object support the ``readline()`` method as well.
+     mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
+         If not None, then memory-map the file, using the given mode (see
+         `numpy.memmap` for a detailed description of the modes). A
+         memory-mapped array is kept on disk. However, it can be accessed
+         and sliced like any ndarray. Memory mapping is especially useful
+         for accessing small fragments of large files without reading the
+         entire file into memory.
+     allow_pickle : bool, optional
+         Allow loading pickled object arrays stored in npy files. Reasons for
+         disallowing pickles include security, as loading pickled data can
+         execute arbitrary code. If pickles are disallowed, loading object
+         arrays will fail. Default: False
+
+         .. versionchanged:: 1.16.3
+             Made default False in response to CVE-2019-6446.
+
+     fix_imports : bool, optional
+         Only useful when loading Python 2 generated pickled files on Python 3,
+         which includes npy/npz files containing object arrays. If `fix_imports`
+         is True, pickle will try to map the old Python 2 names to the new names
+         used in Python 3.
+     encoding : str, optional
+         What encoding to use when reading Python 2 strings. Only useful when
+         loading Python 2 generated pickled files in Python 3, which includes
+         npy/npz files containing object arrays. Values other than 'latin1',
+         'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
+         data. Default: 'ASCII'
+     max_header_size : int, optional
+         Maximum allowed size of the header. Large headers may not be safe
+         to load securely and thus require explicitly passing a larger value.
+         See :py:func:`ast.literal_eval()` for details.
+         This option is ignored when `allow_pickle` is passed. In that case
+         the file is by definition trusted and the limit is unnecessary.
+
+     Returns
+     -------
+     result : array, tuple, dict, etc.
+         Data stored in the file. For ``.npz`` files, the returned instance
+         of NpzFile class must be closed to avoid leaking file descriptors.
+
+     Raises
+     ------
+     OSError
+         If the input file does not exist or cannot be read.
+     UnpicklingError
+         If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
+     ValueError
+         The file contains an object array, but ``allow_pickle=False`` given.
+     EOFError
+         When calling ``np.load`` multiple times on the same file handle,
+         if all data has already been read.
+
+     See Also
+     --------
+     save, savez, savez_compressed, loadtxt
+     memmap : Create a memory-map to an array stored in a file on disk.
+     lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
+
+     Notes
+     -----
+     - If the file contains pickle data, then whatever object is stored
+       in the pickle is returned.
+     - If the file is a ``.npy`` file, then a single array is returned.
+     - If the file is a ``.npz`` file, then a dictionary-like object is
+       returned, containing ``{filename: array}`` key-value pairs, one for
+       each file in the archive.
+     - If the file is a ``.npz`` file, the returned value supports the
+       context manager protocol in a similar fashion to the open function::
+
+           with load('foo.npz') as data:
+               a = data['a']
+
+       The underlying file descriptor is closed when exiting the 'with'
+       block.
+
+     Examples
+     --------
+     Store data to disk, and load it again:
+
+     >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
+     >>> np.load('/tmp/123.npy')
+     array([[1, 2, 3],
+            [4, 5, 6]])
+
+     Store compressed data to disk, and load it again:
+
+     >>> a = np.array([[1, 2, 3], [4, 5, 6]])
+     >>> b = np.array([1, 2])
+     >>> np.savez('/tmp/123.npz', a=a, b=b)
+     >>> data = np.load('/tmp/123.npz')
+     >>> data['a']
+     array([[1, 2, 3],
+            [4, 5, 6]])
+     >>> data['b']
+     array([1, 2])
+     >>> data.close()
+
+     Mem-map the stored array, and then access the second row
+     directly from disk:
+
+     >>> X = np.load('/tmp/123.npy', mmap_mode='r')
+     >>> X[1, :]
+     memmap([4, 5, 6])
+
+     """
+     if encoding not in ('ASCII', 'latin1', 'bytes'):
+         # The 'encoding' value for pickle also affects what encoding
+         # the serialized binary data of NumPy arrays is loaded
+         # in. Pickle does not pass on the encoding information to
+         # NumPy. The unpickling code in numpy.core.multiarray is
+         # written to assume that unicode data appearing where binary
+         # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
+         #
+         # Other encoding values can corrupt binary data, and we
+         # purposefully disallow them. For the same reason, the errors=
+         # argument is not exposed, as values other than 'strict' can
+         # similarly silently corrupt numerical data.
+         raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
+
+     pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
+
+     with contextlib.ExitStack() as stack:
+         if hasattr(file, 'read'):
+             fid = file
+             own_fid = False
+         else:
+             fid = stack.enter_context(open(os_fspath(file), "rb"))
+             own_fid = True
+
+         # Code to distinguish NumPy binary files from pickles.
+         _ZIP_PREFIX = b'PK\x03\x04'
+         _ZIP_SUFFIX = b'PK\x05\x06'  # empty zip files start with this
+         N = len(format.MAGIC_PREFIX)
+         magic = fid.read(N)
+         if not magic:
+             raise EOFError("No data left in file")
+         # If the file size is less than N, we need to make sure not
+         # to seek past the beginning of the file
+         fid.seek(-min(N, len(magic)), 1)  # back-up
+         if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
+             # zip-file (assume .npz)
+             # Potentially transfer file ownership to NpzFile
+             stack.pop_all()
+             ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
+                           pickle_kwargs=pickle_kwargs,
+                           max_header_size=max_header_size)
+             return ret
+         elif magic == format.MAGIC_PREFIX:
+             # .npy file
+             if mmap_mode:
+                 if allow_pickle:
+                     max_header_size = 2**64
+                 return format.open_memmap(file, mode=mmap_mode,
+                                           max_header_size=max_header_size)
+             else:
+                 return format.read_array(fid, allow_pickle=allow_pickle,
+                                          pickle_kwargs=pickle_kwargs,
+                                          max_header_size=max_header_size)
+         else:
+             # Try a pickle
+             if not allow_pickle:
+                 raise ValueError("Cannot load file containing pickled data "
+                                  "when allow_pickle=False")
+             try:
+                 return pickle.load(fid, **pickle_kwargs)
+             except Exception as e:
+                 raise pickle.UnpicklingError(
+                     f"Failed to interpret file {file!r} as a pickle") from e
+
+
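+ # Illustrative sketch of the dispatch above (magic values are the ones used
+ # in this module; the control flow is paraphrased):
+ #
+ #     head = fid.read(6); fid.seek(-len(head), 1)
+ #     if head.startswith(b'PK\x03\x04'):   # zip container -> NpzFile
+ #         ...
+ #     elif head == b'\x93NUMPY':           # .npy magic -> format.read_array
+ #         ...
+ #     else:                                # otherwise try pickle.load
+ #         ...
+
+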
+ def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
+     return (arr,)
+
+
+ @array_function_dispatch(_save_dispatcher)
+ def save(file, arr, allow_pickle=True, fix_imports=True):
+     """
+     Save an array to a binary file in NumPy ``.npy`` format.
+
+     Parameters
+     ----------
+     file : file, str, or pathlib.Path
+         File or filename to which the data is saved. If file is a file-object,
+         then the filename is unchanged. If file is a string or Path, a ``.npy``
+         extension will be appended to the filename if it does not already
+         have one.
+     arr : array_like
+         Array data to be saved.
+     allow_pickle : bool, optional
+         Allow saving object arrays using Python pickles. Reasons for disallowing
+         pickles include security (loading pickled data can execute arbitrary
+         code) and portability (pickled objects may not be loadable on different
+         Python installations, for example if the stored objects require libraries
+         that are not available, and not all pickled data is compatible between
+         Python 2 and Python 3).
+         Default: True
+     fix_imports : bool, optional
+         Only useful in forcing objects in object arrays on Python 3 to be
+         pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
+         will try to map the new Python 3 names to the old module names used in
+         Python 2, so that the pickle data stream is readable with Python 2.
+
+     See Also
+     --------
+     savez : Save several arrays into a ``.npz`` archive
+     savetxt, load
+
+     Notes
+     -----
+     For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
+
+     Any data saved to the file is appended to the end of the file.
+
+     Examples
+     --------
+     >>> from tempfile import TemporaryFile
+     >>> outfile = TemporaryFile()
+
+     >>> x = np.arange(10)
+     >>> np.save(outfile, x)
+
+     >>> _ = outfile.seek(0)  # Only needed here to simulate closing & reopening file
+     >>> np.load(outfile)
+     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+
+     >>> with open('test.npy', 'wb') as f:
+     ...     np.save(f, np.array([1, 2]))
+     ...     np.save(f, np.array([1, 3]))
+     >>> with open('test.npy', 'rb') as f:
+     ...     a = np.load(f)
+     ...     b = np.load(f)
+     >>> print(a, b)
+     # [1 2] [1 3]
+     """
+     if hasattr(file, 'write'):
+         file_ctx = contextlib.nullcontext(file)
+     else:
+         file = os_fspath(file)
+         if not file.endswith('.npy'):
+             file = file + '.npy'
+         file_ctx = open(file, "wb")
+
+     with file_ctx as fid:
+         arr = np.asanyarray(arr)
+         format.write_array(fid, arr, allow_pickle=allow_pickle,
+                            pickle_kwargs=dict(fix_imports=fix_imports))
+
+
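+ # Illustrative note on the suffix handling above: a string or Path target
+ # gains a '.npy' suffix unless it already ends with one, while an open file
+ # object is written in place (wrapped in nullcontext so the caller keeps
+ # ownership of the handle):
+ #
+ #     np.save('run1', arr)        # writes 'run1.npy'
+ #     np.save('run1.npy', arr)    # same file, no doubled suffix
+
+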
+ def _savez_dispatcher(file, *args, **kwds):
+     yield from args
+     yield from kwds.values()
+
+
+ @array_function_dispatch(_savez_dispatcher)
+ def savez(file, *args, **kwds):
+     """Save several arrays into a single file in uncompressed ``.npz`` format.
+
+     Provide arrays as keyword arguments to store them under the
+     corresponding name in the output file: ``savez(fn, x=x, y=y)``.
+
+     If arrays are specified as positional arguments, i.e., ``savez(fn,
+     x, y)``, their names will be `arr_0`, `arr_1`, etc.
+
+     Parameters
+     ----------
+     file : str or file
+         Either the filename (string) or an open file (file-like object)
+         where the data will be saved. If file is a string or a Path, the
+         ``.npz`` extension will be appended to the filename if it is not
+         already there.
+     args : Arguments, optional
+         Arrays to save to the file. Please use keyword arguments (see
+         `kwds` below) to assign names to arrays. Arrays specified as
+         args will be named "arr_0", "arr_1", and so on.
+     kwds : Keyword arguments, optional
+         Arrays to save to the file. Each array will be saved to the
+         output file with its corresponding keyword name.
+
+     Returns
+     -------
+     None
+
+     See Also
+     --------
+     save : Save a single array to a binary file in NumPy format.
+     savetxt : Save an array to a file as plain text.
+     savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+     Notes
+     -----
+     The ``.npz`` file format is a zipped archive of files named after the
+     variables they contain. The archive is not compressed and each file
+     in the archive contains one variable in ``.npy`` format. For a
+     description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
+
+     When opening the saved ``.npz`` file with `load` a `NpzFile` object is
+     returned. This is a dictionary-like object which can be queried for
+     its list of arrays (with the ``.files`` attribute), and for the arrays
+     themselves.
+
+     Keys passed in `kwds` are used as filenames inside the ZIP archive.
+     Therefore, keys should be valid filenames; e.g., avoid keys that begin with
+     ``/`` or contain ``.``.
+
+     When naming variables with keyword arguments, it is not possible to name a
+     variable ``file``, as this would cause the ``file`` argument to be defined
+     twice in the call to ``savez``.
+
+     Examples
+     --------
+     >>> from tempfile import TemporaryFile
+     >>> outfile = TemporaryFile()
+     >>> x = np.arange(10)
+     >>> y = np.sin(x)
+
+     Using `savez` with \\*args, the arrays are saved with default names.
+
+     >>> np.savez(outfile, x, y)
+     >>> _ = outfile.seek(0)  # Only needed here to simulate closing & reopening file
+     >>> npzfile = np.load(outfile)
+     >>> npzfile.files
+     ['arr_0', 'arr_1']
+     >>> npzfile['arr_0']
+     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+     Using `savez` with \\**kwds, the arrays are saved with the keyword names.
+
+     >>> outfile = TemporaryFile()
+     >>> np.savez(outfile, x=x, y=y)
+     >>> _ = outfile.seek(0)
+     >>> npzfile = np.load(outfile)
+     >>> sorted(npzfile.files)
+     ['x', 'y']
+     >>> npzfile['x']
+     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+     """
+     _savez(file, args, kwds, False)
+
+
+ def _savez_compressed_dispatcher(file, *args, **kwds):
+     yield from args
+     yield from kwds.values()
+
+
+ @array_function_dispatch(_savez_compressed_dispatcher)
+ def savez_compressed(file, *args, **kwds):
+     """
+     Save several arrays into a single file in compressed ``.npz`` format.
+
+     Provide arrays as keyword arguments to store them under the
+     corresponding name in the output file: ``savez_compressed(fn, x=x, y=y)``.
+
+     If arrays are specified as positional arguments, i.e.,
+     ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`, etc.
+
+     Parameters
+     ----------
+     file : str or file
+         Either the filename (string) or an open file (file-like object)
+         where the data will be saved. If file is a string or a Path, the
+         ``.npz`` extension will be appended to the filename if it is not
+         already there.
+     args : Arguments, optional
+         Arrays to save to the file. Please use keyword arguments (see
+         `kwds` below) to assign names to arrays. Arrays specified as
+         args will be named "arr_0", "arr_1", and so on.
+     kwds : Keyword arguments, optional
+         Arrays to save to the file. Each array will be saved to the
+         output file with its corresponding keyword name.
+
+     Returns
+     -------
+     None
+
+     See Also
+     --------
+     numpy.save : Save a single array to a binary file in NumPy format.
+     numpy.savetxt : Save an array to a file as plain text.
+     numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
+     numpy.load : Load the files created by savez_compressed.
+
+     Notes
+     -----
+     The ``.npz`` file format is a zipped archive of files named after the
+     variables they contain. The archive is compressed with
+     ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
+     in ``.npy`` format. For a description of the ``.npy`` format, see
+     :py:mod:`numpy.lib.format`.
+
+     When opening the saved ``.npz`` file with `load` a `NpzFile` object is
+     returned. This is a dictionary-like object which can be queried for
+     its list of arrays (with the ``.files`` attribute), and for the arrays
+     themselves.
+
+     Examples
+     --------
+     >>> test_array = np.random.rand(3, 2)
+     >>> test_vector = np.random.rand(4)
+     >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
+     >>> loaded = np.load('/tmp/123.npz')
+     >>> print(np.array_equal(test_array, loaded['a']))
+     True
+     >>> print(np.array_equal(test_vector, loaded['b']))
+     True
+
+     """
+     _savez(file, args, kwds, True)
+
+
+ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
+     # Import is postponed to here since zipfile depends on gzip, an optional
+     # component of the so-called standard library.
+     import zipfile
+
+     if not hasattr(file, 'write'):
+         file = os_fspath(file)
+         if not file.endswith('.npz'):
+             file = file + '.npz'
+
+     namedict = kwds
+     for i, val in enumerate(args):
+         key = 'arr_%d' % i
+         if key in namedict.keys():
+             raise ValueError(
+                 "Cannot use un-named variables and keyword %s" % key)
+         namedict[key] = val
+
+     if compress:
+         compression = zipfile.ZIP_DEFLATED
+     else:
+         compression = zipfile.ZIP_STORED
+
+     zipf = zipfile_factory(file, mode="w", compression=compression)
+
+     for key, val in namedict.items():
+         fname = key + '.npy'
+         val = np.asanyarray(val)
+         # always force zip64, gh-10776
+         with zipf.open(fname, 'w', force_zip64=True) as fid:
+             format.write_array(fid, val,
+                                allow_pickle=allow_pickle,
+                                pickle_kwargs=pickle_kwargs)
+
+     zipf.close()
+
+
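+ # Illustrative sketch: the loop above is roughly the manual equivalent of
+ # writing one '.npy' member per array into a zip archive (names below are
+ # placeholders):
+ #
+ #     with zipfile_factory('out.npz', mode='w') as zipf:
+ #         for key, val in {'x': x, 'y': y}.items():
+ #             with zipf.open(key + '.npy', 'w', force_zip64=True) as fid:
+ #                 format.write_array(fid, np.asanyarray(val))
+
+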
+ def _ensure_ndmin_ndarray_check_param(ndmin):
+     """Just checks if the param ndmin is supported on
+     _ensure_ndmin_ndarray. It is intended to be used as
+     verification before running anything expensive.
+     e.g. loadtxt, genfromtxt
+     """
+     # Check correctness of the values of `ndmin`
+     if ndmin not in [0, 1, 2]:
+         raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")
+
+
+ def _ensure_ndmin_ndarray(a, *, ndmin: int):
+     """This is a helper function of loadtxt and genfromtxt to ensure
+     proper minimum dimension as requested
+
+     ndmin : int. Supported values 0, 1, 2
+     ^^ whenever this changes, keep in sync with
+        _ensure_ndmin_ndarray_check_param
+     """
+     # Verify that the array has at least dimensions `ndmin`.
+     # Tweak the size and shape of the arrays - remove extraneous dimensions
+     if a.ndim > ndmin:
+         a = np.squeeze(a)
+     # and ensure we have the minimum number of dimensions asked for
+     # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
+     if a.ndim < ndmin:
+         if ndmin == 1:
+             a = np.atleast_1d(a)
+         elif ndmin == 2:
+             a = np.atleast_2d(a).T
+
+     return a
+
+
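+ # Illustrative examples of the squeeze/promote logic above:
+ #
+ #     _ensure_ndmin_ndarray(np.array(5.0), ndmin=1)       # -> shape (1,)
+ #     _ensure_ndmin_ndarray(np.array([1., 2.]), ndmin=2)  # -> shape (2, 1)
+ #
+ # The transpose in the ndmin=2 branch turns a single column of parsed data
+ # into a column vector rather than a row vector.
+
+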
+ # number of lines loadtxt reads in one chunk, can be overridden for testing
+ _loadtxt_chunksize = 50000
+
+
+ def _check_nonneg_int(value, name="argument"):
+     try:
+         operator.index(value)
+     except TypeError:
+         raise TypeError(f"{name} must be an integer") from None
+     if value < 0:
+         raise ValueError(f"{name} must be nonnegative")
+
+
+ def _preprocess_comments(iterable, comments, encoding):
+     """
+     Generator that consumes an iterable of lines and strips out the
+     multiple (or multi-character) comments from each line.
+     This is a pre-processing step to achieve feature parity with loadtxt
+     (we assume that this feature is a niche feature).
+     """
+     for line in iterable:
+         if isinstance(line, bytes):
+             # Need to handle conversion here, or the splitting would fail
+             line = line.decode(encoding)
+
+         for c in comments:
+             line = line.split(c, 1)[0]
+
+         yield line
+
+
+ # The number of rows we read in one go if confronted with a parametric dtype
+ _loadtxt_chunksize = 50000
+
+
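+ # Illustrative example of `_preprocess_comments` above: each comment marker
+ # splits the line once and only the leading part is kept.
+ #
+ #     lines = ['1 2 // trailing', '3 4 # other style']
+ #     list(_preprocess_comments(lines, ('//', '#'), encoding=None))
+ #     # -> ['1 2 ', '3 4 ']
+
+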
+ def _read(fname, *, delimiter=',', comment='#', quote='"',
+           imaginary_unit='j', usecols=None, skiplines=0,
+           max_rows=None, converters=None, ndmin=None, unpack=False,
+           dtype=np.float64, encoding="bytes"):
+     r"""
+     Read a NumPy array from a text file.
+
+     Parameters
+     ----------
+     fname : str or file object
+         The filename or the file to be read.
+     delimiter : str, optional
+         Field delimiter of the fields in line of the file.
+         Default is a comma, ','. If None any sequence of whitespace is
+         considered a delimiter.
+     comment : str or sequence of str or None, optional
+         Character that begins a comment. All text from the comment
+         character to the end of the line is ignored.
+         Multiple comments or multiple-character comment strings are supported,
+         but may be slower and `quote` must be empty if used.
+         Use None to disable all use of comments.
+     quote : str or None, optional
+         Character that is used to quote string fields. Default is '"'
+         (a double quote). Use None to disable quote support.
+     imaginary_unit : str, optional
+         Character that represents the imaginary unit `sqrt(-1)`.
+         Default is 'j'.
+     usecols : array_like, optional
+         A one-dimensional array of integer column numbers. These are the
+         columns from the file to be included in the array. If this value
+         is not given, all the columns are used.
+     skiplines : int, optional
+         Number of lines to skip before interpreting the data in the file.
+     max_rows : int, optional
+         Maximum number of rows of data to read. Default is to read the
+         entire file.
+     converters : dict or callable, optional
+         A function to parse all column strings into the desired value, or
+         a dictionary mapping column number to a parser function.
+         E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
+         Converters can also be used to provide a default value for missing
+         data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
+         convert empty fields to 0.
+         Default: None
+     ndmin : int, optional
+         Minimum dimension of the array returned.
+         Allowed values are 0, 1 or 2. Default is 0.
+     unpack : bool, optional
+         If True, the returned array is transposed, so that arguments may be
+         unpacked using ``x, y, z = read(...)``. When used with a structured
+         data-type, arrays are returned for each field. Default is False.
+     dtype : numpy data type
+         A NumPy dtype instance, can be a structured dtype to map to the
+         columns of the file.
+     encoding : str, optional
+         Encoding used to decode the inputfile. The special value 'bytes'
+         (the default) enables backwards-compatible behavior for `converters`,
+         ensuring that inputs to the converter functions are encoded
+         bytes objects. The special value 'bytes' has no additional effect if
+         ``converters=None``. If encoding is ``'bytes'`` or ``None``, the
+         default system encoding is used.
+
+     Returns
+     -------
+     ndarray
+         NumPy array.
+
+     Examples
+     --------
+     First we create a file for the example.
+
+     >>> s1 = '1.0,2.0,3.0\n4.0,5.0,6.0\n'
+     >>> with open('example1.csv', 'w') as f:
+     ...     f.write(s1)
+     >>> a1 = read_from_filename('example1.csv')
+     >>> a1
+     array([[1., 2., 3.],
+            [4., 5., 6.]])
+
+     The second example has columns with different data types, so a
+     one-dimensional array with a structured data type is returned.
+     The tab character is used as the field delimiter.
+
+     >>> s2 = '1.0\t10\talpha\n2.3\t25\tbeta\n4.5\t16\tgamma\n'
+     >>> with open('example2.tsv', 'w') as f:
+     ...     f.write(s2)
+     >>> a2 = read_from_filename('example2.tsv', delimiter='\t')
+     >>> a2
+     array([(1. , 10, b'alpha'), (2.3, 25, b'beta'), (4.5, 16, b'gamma')],
+           dtype=[('f0', '<f8'), ('f1', 'u1'), ('f2', 'S5')])
+     """
+     # Handle special 'bytes' keyword for encoding
+     byte_converters = False
+     if encoding == 'bytes':
+         encoding = None
+         byte_converters = True
+
+     if dtype is None:
+         raise TypeError("a dtype must be provided.")
+     dtype = np.dtype(dtype)
+
+     read_dtype_via_object_chunks = None
+     if dtype.kind in 'SUM' and (
+             dtype == "S0" or dtype == "U0" or dtype == "M8" or dtype == 'm8'):
+         # This is a legacy "flexible" dtype. We do not truly support
+         # parametric dtypes currently (no dtype discovery step in the core),
+         # but have to support these for backward compatibility.
+         read_dtype_via_object_chunks = dtype
+         dtype = np.dtype(object)
+
+     if usecols is not None:
+         # Allow usecols to be a single int or a sequence of ints, the C-code
+         # handles the rest
+         try:
+             usecols = list(usecols)
+         except TypeError:
+             usecols = [usecols]
+
+     _ensure_ndmin_ndarray_check_param(ndmin)
+
+     if comment is None:
+         comments = None
+     else:
+         # assume comments are a sequence of strings
+         if "" in comment:
+             raise ValueError(
+                 "comments cannot be an empty string. Use comments=None to "
+                 "disable comments."
+             )
+         comments = tuple(comment)
+         comment = None
+         if len(comments) == 0:
+             comments = None  # No comments at all
+         elif len(comments) == 1:
+             # If there is only one comment, and that comment has one character,
+             # the normal parsing can deal with it just fine.
+             if isinstance(comments[0], str) and len(comments[0]) == 1:
+                 comment = comments[0]
+                 comments = None
+         else:
+             # Input validation if there are multiple comment characters
+             if delimiter in comments:
+                 raise TypeError(
+                     f"Comment characters '{comments}' cannot include the "
+                     f"delimiter '{delimiter}'"
+                 )
+
+     # comment is now either a 1 or 0 character string or a tuple:
+     if comments is not None:
+         # Note: An earlier version supported two-character comments (and could
+         # have been extended to multiple characters); we assume this is
+         # rare enough to not optimize for.
+         if quote is not None:
+             raise ValueError(
+                 "when multiple comments or a multi-character comment is "
+                 "given, quotes are not supported. In this case quotechar "
+                 "must be set to None.")
+
+     if len(imaginary_unit) != 1:
+         raise ValueError('len(imaginary_unit) must be 1.')
+
+     _check_nonneg_int(skiplines)
+     if max_rows is not None:
+         _check_nonneg_int(max_rows)
+     else:
+         # Passing -1 to the C code means "read the entire file".
+         max_rows = -1
+
+     fh_closing_ctx = contextlib.nullcontext()
+     filelike = False
+     try:
+         if isinstance(fname, os.PathLike):
+             fname = os.fspath(fname)
+         if isinstance(fname, str):
+             fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+             if encoding is None:
+                 encoding = getattr(fh, 'encoding', 'latin1')
+
+             fh_closing_ctx = contextlib.closing(fh)
+             data = fh
+             filelike = True
+         else:
+             if encoding is None:
+                 encoding = getattr(fname, 'encoding', 'latin1')
+             data = iter(fname)
+     except TypeError as e:
+         raise ValueError(
+             f"fname must be a string, filehandle, list of strings,\n"
+             f"or generator. Got {type(fname)} instead.") from e
+
+     with fh_closing_ctx:
+         if comments is not None:
+             if filelike:
+                 data = iter(data)
+                 filelike = False
+             data = _preprocess_comments(data, comments, encoding)
+
+         if read_dtype_via_object_chunks is None:
+             arr = _load_from_filelike(
+                 data, delimiter=delimiter, comment=comment, quote=quote,
+                 imaginary_unit=imaginary_unit,
+                 usecols=usecols, skiplines=skiplines, max_rows=max_rows,
+                 converters=converters, dtype=dtype,
+                 encoding=encoding, filelike=filelike,
+                 byte_converters=byte_converters)
+
+         else:
+             # This branch reads the file into chunks of object arrays and then
+             # casts them to the desired actual dtype. This ensures correct
+             # string-length and datetime-unit discovery (like `arr.astype()`).
+             # Due to chunking, certain error reports are less clear, currently.
+             if filelike:
+                 data = iter(data)  # cannot chunk when reading from file
+
+             c_byte_converters = False
+             if read_dtype_via_object_chunks == "S":
+                 c_byte_converters = True  # Use latin1 rather than ascii
+
+             chunks = []
+             while max_rows != 0:
+                 if max_rows < 0:
+                     chunk_size = _loadtxt_chunksize
+                 else:
+                     chunk_size = min(_loadtxt_chunksize, max_rows)
+
+                 next_arr = _load_from_filelike(
+                     data, delimiter=delimiter, comment=comment, quote=quote,
+                     imaginary_unit=imaginary_unit,
+                     usecols=usecols, skiplines=skiplines, max_rows=max_rows,
+                     converters=converters, dtype=dtype,
+                     encoding=encoding, filelike=filelike,
+                     byte_converters=byte_converters,
+                     c_byte_converters=c_byte_converters)
+                 # Cast here already. We hope that this is better even for
+                 # large files because the storage is more compact. It could
+                 # be adapted (in principle the concatenate could cast).
+                 chunks.append(next_arr.astype(read_dtype_via_object_chunks))
+
+                 skiplines = 0  # Only have to skip for first chunk
+                 if max_rows >= 0:
+                     max_rows -= chunk_size
+                 if len(next_arr) < chunk_size:
+                     # There was less data than requested, so we are done.
+                     break
+
+             # Need at least one chunk, but if empty, the last one may have
+             # the wrong shape.
+             if len(chunks) > 1 and len(chunks[-1]) == 0:
+                 del chunks[-1]
+             if len(chunks) == 1:
+                 arr = chunks[0]
+             else:
+                 arr = np.concatenate(chunks, axis=0)
+
+     # NOTE: ndmin works as advertised for structured dtypes, but normally
+     #       these would return a 1D result plus the structured dimension,
+     #       so ndmin=2 adds a third dimension even when no squeezing occurs.
+     #       A `squeeze=False` could be a better solution (pandas uses squeeze).
+     arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin)
+
+     if arr.shape:
+         if arr.shape[0] == 0:
+             warnings.warn(
+                 f'loadtxt: input contained no data: "{fname}"',
+                 category=UserWarning,
+                 stacklevel=3
+             )
+
+     if unpack:
+         # Unpack structured dtypes if requested:
+         dt = arr.dtype
+         if dt.names is not None:
+             # For structured arrays, return an array for each field.
+             return [arr[field] for field in dt.names]
+         else:
+             return arr.T
+     else:
+         return arr
+
+
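+ # Illustrative note on the chunked branch above: a parametric dtype such as
+ # ``np.dtype("S")`` (byte strings of undiscovered length) is parsed in
+ # 50000-row object chunks, and each chunk's ``.astype`` cast discovers the
+ # required itemsize, e.g. (the filename is a placeholder):
+ #
+ #     arr = _read('data.csv', dtype=np.dtype("S"))  # -> dtype like 'S5'
+
+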
+ @set_array_function_like_doc
+ @set_module('numpy')
+ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
+             converters=None, skiprows=0, usecols=None, unpack=False,
+             ndmin=0, encoding='bytes', max_rows=None, *, quotechar=None,
+             like=None):
+     r"""
+     Load data from a text file.
+
+     Parameters
+     ----------
+     fname : file, str, pathlib.Path, list of str, generator
+         File, filename, list, or generator to read. If the filename
+         extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
+         that generators must return bytes or strings. The strings
+         in a list or produced by a generator are treated as lines.
+     dtype : data-type, optional
+         Data-type of the resulting array; default: float. If this is a
+         structured data-type, the resulting array will be 1-dimensional, and
+         each row will be interpreted as an element of the array. In this
+         case, the number of columns used must match the number of fields in
+         the data-type.
+     comments : str or sequence of str or None, optional
+         The characters or list of characters used to indicate the start of a
+         comment. None implies no comments. For backwards compatibility, byte
+         strings will be decoded as 'latin1'. The default is '#'.
+     delimiter : str, optional
+         The character used to separate the values. For backwards compatibility,
+         byte strings will be decoded as 'latin1'. The default is whitespace.
+
+         .. versionchanged:: 1.23.0
+             Only single character delimiters are supported. Newline characters
+             cannot be used as the delimiter.
+
+     converters : dict or callable, optional
+         Converter functions to customize value parsing. If `converters` is
+         callable, the function is applied to all columns, else it must be a
+         dict that maps column number to a parser function.
+         See examples for further details.
+         Default: None.
+
+         .. versionchanged:: 1.23.0
+             The ability to pass a single callable to be applied to all columns
+             was added.
+
+     skiprows : int, optional
+         Skip the first `skiprows` lines, including comments; default: 0.
+     usecols : int or sequence, optional
+         Which columns to read, with 0 being the first. For example,
+         ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
+         The default, None, results in all columns being read.
+
+         .. versionchanged:: 1.11.0
+             When a single column has to be read it is possible to use
+             an integer instead of a tuple. E.g. ``usecols = 3`` reads the
+             fourth column the same way as ``usecols = (3,)`` would.
+     unpack : bool, optional
+         If True, the returned array is transposed, so that arguments may be
+         unpacked using ``x, y, z = loadtxt(...)``. When used with a
+         structured data-type, arrays are returned for each field.
+         Default is False.
+     ndmin : int, optional
+         The returned array will have at least `ndmin` dimensions.
+         Otherwise mono-dimensional axes will be squeezed.
+         Legal values: 0 (default), 1 or 2.
+
+         .. versionadded:: 1.6.0
+     encoding : str, optional
+         Encoding used to decode the inputfile. Does not apply to input streams.
+         The special value 'bytes' enables backward compatibility workarounds
+         that ensure you receive byte arrays as results if possible and passes
+         'latin1' encoded strings to converters. Override this value to receive
+         unicode arrays and pass strings as input to converters. If set to None
+         the system default is used. The default value is 'bytes'.
+
+         .. versionadded:: 1.14.0
+     max_rows : int, optional
+         Read `max_rows` rows of content after `skiprows` lines. The default is
+         to read all the rows. Note that empty rows containing no data such as
+         empty lines and comment lines are not counted towards `max_rows`,
+         while such lines are counted in `skiprows`.
+
+         .. versionadded:: 1.16.0
+
+         .. versionchanged:: 1.23.0
+             Lines containing no data, including comment lines (e.g., lines
+             starting with '#' or as specified via `comments`) are not counted
+             towards `max_rows`.
+     quotechar : unicode character or None, optional
+         The character used to denote the start and end of a quoted item.
+         Occurrences of the delimiter or comment characters are ignored within
+         a quoted item. The default value is ``quotechar=None``, which means
+         quoting support is disabled.
+
+         If two consecutive instances of `quotechar` are found within a quoted
+         field, the first is treated as an escape character. See examples.
+
+         .. versionadded:: 1.23.0
+     ${ARRAY_FUNCTION_LIKE}
+
+         .. versionadded:: 1.20.0
+
+     Returns
+     -------
+     out : ndarray
+         Data read from the text file.
+
+     See Also
+     --------
+     load, fromstring, fromregex
+     genfromtxt : Load data with missing values handled as specified.
+     scipy.io.loadmat : reads MATLAB data files
+
+     Notes
+     -----
+     This function aims to be a fast reader for simply formatted files. The
+     `genfromtxt` function provides more sophisticated handling of, e.g.,
+     lines with missing values.
+
+     Each row in the input text file must have the same number of values to be
+     able to read all values. If all rows do not have same number of values, a
+     subset of up to n columns (where n is the least number of values present
+     in all rows) can be read by specifying the columns via `usecols`.
+
+     .. versionadded:: 1.10.0
+
+     The strings produced by the Python float.hex method can be used as
+     input for floats.
+
+     Examples
+     --------
+     >>> from io import StringIO   # StringIO behaves like a file object
+     >>> c = StringIO("0 1\n2 3")
+     >>> np.loadtxt(c)
+     array([[0., 1.],
+            [2., 3.]])
+
+     >>> d = StringIO("M 21 72\nF 35 58")
+     >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
+     ...                      'formats': ('S1', 'i4', 'f4')})
+     array([(b'M', 21, 72.), (b'F', 35, 58.)],
+           dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
+
+     >>> c = StringIO("1,0,2\n3,0,4")
+     >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
+     >>> x
+     array([1., 3.])
+     >>> y
+     array([2., 4.])
+
+     The `converters` argument is used to specify functions to preprocess the
+     text prior to parsing. `converters` can be a dictionary that maps
+     preprocessing functions to each column:
+
+     >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
+     >>> conv = {
+     ...     0: lambda x: np.floor(float(x)),  # conversion fn for column 0
+     ...     1: lambda x: np.ceil(float(x)),   # conversion fn for column 1
+     ... }
+     >>> np.loadtxt(s, delimiter=",", converters=conv)
+     array([[1., 3.],
+            [3., 5.]])
+
+     `converters` can be a callable instead of a dictionary, in which case it
+     is applied to all columns:
+
+     >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
+     >>> import functools
+     >>> conv = functools.partial(int, base=16)
+     >>> np.loadtxt(s, converters=conv)
+     array([[222., 173.],
+            [192., 222.]])
+
+     This example shows how `converters` can be used to convert a field
+     with a trailing minus sign into a negative number.
+
+     >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
+     >>> def conv(fld):
+     ...     return -float(fld[:-1]) if fld.endswith(b'-') else float(fld)
+     ...
+     >>> np.loadtxt(s, converters=conv)
+     array([[ 10.01, -31.25],
+            [ 19.22,  64.31],
+            [-17.57,  63.94]])
+
+     Using a callable as the converter can be particularly useful for handling
+     values with different formatting, e.g. floats with underscores:
+
+     >>> s = StringIO("1 2.7 100_000")
+     >>> np.loadtxt(s, converters=float)
+     array([1.e+00, 2.7e+00, 1.e+05])
+
+     This idea can be extended to automatically handle values specified in
+     many different formats:
+
+     >>> def conv(val):
+     ...     try:
+     ...         return float(val)
+     ...     except ValueError:
+     ...         return float.fromhex(val)
+     >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
+     >>> np.loadtxt(s, delimiter=",", converters=conv, encoding=None)
+     array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])
+
+     Note that with the default ``encoding="bytes"``, the inputs to the
+     converter function are latin-1 encoded byte strings. To deactivate the
+     implicit encoding prior to conversion, use ``encoding=None``
+
+     >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
+     >>> conv = lambda x: -float(x[:-1]) if x.endswith('-') else float(x)
+     >>> np.loadtxt(s, converters=conv, encoding=None)
+     array([[ 10.01, -31.25],
+            [ 19.22,  64.31],
+            [-17.57,  63.94]])
+
+     Support for quoted fields is enabled with the `quotechar` parameter.
+     Comment and delimiter characters are ignored when they appear within a
+     quoted item delineated by `quotechar`:
+
+     >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
+     >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+     >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
+     array([('alpha, #42', 10.), ('beta, #64', 2.)],
+           dtype=[('label', '<U12'), ('value', '<f8')])
+
+     Quoted fields can be separated by multiple whitespace characters:
+
+     >>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n')
+     >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+     >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
+     array([('alpha, #42', 10.), ('beta, #64', 2.)],
+           dtype=[('label', '<U12'), ('value', '<f8')])
+
+     Two consecutive quote characters within a quoted field are treated as a
+     single escaped character:
+
+     >>> s = StringIO('"Hello, my name is ""Monty""!"')
+     >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
+     array('Hello, my name is "Monty"!', dtype='<U26')
+
+     Read subset of columns when all rows do not contain equal number of values:
+
+     >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
+     >>> np.loadtxt(d, usecols=(0, 1))
+     array([[ 1.,  2.],
+            [ 2.,  4.],
+            [ 3.,  9.],
+            [ 4., 16.]])
+
+     """
+
+     if like is not None:
+         return _loadtxt_with_like(
+             like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
+             converters=converters, skiprows=skiprows, usecols=usecols,
+             unpack=unpack, ndmin=ndmin, encoding=encoding,
+             max_rows=max_rows
+         )
+
+     if isinstance(delimiter, bytes):
+         delimiter = delimiter.decode("latin1")
+
+     if dtype is None:
+         dtype = np.float64
+
+     comment = comments
+     # Control character type conversions for Py3 convenience
+     if comment is not None:
+         if isinstance(comment, (str, bytes)):
+             comment = [comment]
+         comment = [
+             x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
+     if isinstance(delimiter, bytes):
+         delimiter = delimiter.decode('latin1')
+
+     arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
+                 converters=converters, skiplines=skiprows, usecols=usecols,
+                 unpack=unpack, ndmin=ndmin, encoding=encoding,
+                 max_rows=max_rows, quote=quotechar)
+
+     return arr
+
+
+ _loadtxt_with_like = array_function_dispatch()(loadtxt)
+
+
+ def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
+                         header=None, footer=None, comments=None,
+                         encoding=None):
+     return (X,)
+
+
+ @array_function_dispatch(_savetxt_dispatcher)
+ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
+             footer='', comments='# ', encoding=None):
+     """
+     Save an array to a text file.
+
+     Parameters
+     ----------
+     fname : filename or file handle
+         If the filename ends in ``.gz``, the file is automatically saved in
+         compressed gzip format. `loadtxt` understands gzipped files
+         transparently.
+     X : 1D or 2D array_like
+         Data to be saved to a text file.
+     fmt : str or sequence of strs, optional
+         A single format (%10.5f), a sequence of formats, or a
+         multi-format string, e.g. 'Iteration %d -- %10.5f', in which
+         case `delimiter` is ignored. For complex `X`, the legal options
+         for `fmt` are:
+
+         * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
+           like `' (%s+%sj)' % (fmt, fmt)`
+         * a full string specifying every real and imaginary part, e.g.
+           `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
+         * a list of specifiers, one per column - in this case, the real
+           and imaginary part must have separate specifiers,
+           e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
+     delimiter : str, optional
+         String or character separating columns.
+     newline : str, optional
+         String or character separating lines.
+
+         .. versionadded:: 1.5.0
+     header : str, optional
+         String that will be written at the beginning of the file.
+
+         .. versionadded:: 1.7.0
+     footer : str, optional
+         String that will be written at the end of the file.
+
+         .. versionadded:: 1.7.0
+     comments : str, optional
+         String that will be prepended to the ``header`` and ``footer`` strings,
+         to mark them as comments. Default: '# ', as expected by e.g.
+         ``numpy.loadtxt``.
+
+         .. versionadded:: 1.7.0
+     encoding : {None, str}, optional
+         Encoding used to encode the outputfile. Does not apply to output
+         streams. If the encoding is something other than 'bytes' or 'latin1'
+         you will not be able to load the file in NumPy versions < 1.14. Default
+         is 'latin1'.
+
+         .. versionadded:: 1.14.0
+
+     See Also
+     --------
+     save : Save an array to a binary file in NumPy ``.npy`` format
+     savez : Save several arrays into an uncompressed ``.npz`` archive
+     savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+     Notes
+     -----
+     Further explanation of the `fmt` parameter
+     (``%[flag]width[.precision]specifier``):
+
+     flags:
+         ``-`` : left justify
+
+         ``+`` : Forces to precede result with + or -.
+
+         ``0`` : Left pad the number with zeros instead of space (see width).
+
+     width:
+         Minimum number of characters to be printed. The value is not truncated
+         if it has more characters.
+
+     precision:
+         - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
+           digits.
+         - For ``e, E`` and ``f`` specifiers, the number of digits to print
+           after the decimal point.
+         - For ``g`` and ``G``, the maximum number of significant digits.
+         - For ``s``, the maximum number of characters.
+
+     specifiers:
+         ``c`` : character
+
+         ``d`` or ``i`` : signed decimal integer
+
+         ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
+
+         ``f`` : decimal floating point
+
+         ``g,G`` : use the shorter of ``e,E`` or ``f``
+
+         ``o`` : signed octal
+
+         ``s`` : string of characters
+
+         ``u`` : unsigned decimal integer
+
+         ``x,X`` : unsigned hexadecimal integer
+
+     This explanation of ``fmt`` is not complete, for an exhaustive
+     specification see [1]_.
+
+     References
+     ----------
+     .. [1] `Format Specification Mini-Language
+            <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
+            Python Documentation.
+
+     Examples
+     --------
+     >>> x = y = z = np.arange(0.0, 5.0, 1.0)
+     >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
+     >>> np.savetxt('test.out', (x, y, z))          # x,y,z equal sized 1D arrays
+     >>> np.savetxt('test.out', x, fmt='%1.4e')     # use exponential notation
+
+     """
+
+
1513
+ # Py3 conversions first
1514
+ if isinstance(fmt, bytes):
1515
+ fmt = asstr(fmt)
1516
+ delimiter = asstr(delimiter)
1517
+
1518
+ class WriteWrap:
1519
+ """Convert to bytes on bytestream inputs.
1520
+
1521
+ """
1522
+ def __init__(self, fh, encoding):
1523
+ self.fh = fh
1524
+ self.encoding = encoding
1525
+ self.do_write = self.first_write
1526
+
1527
+ def close(self):
1528
+ self.fh.close()
1529
+
1530
+ def write(self, v):
1531
+ self.do_write(v)
1532
+
1533
+ def write_bytes(self, v):
1534
+ if isinstance(v, bytes):
1535
+ self.fh.write(v)
1536
+ else:
1537
+ self.fh.write(v.encode(self.encoding))
1538
+
1539
+ def write_normal(self, v):
1540
+ self.fh.write(asunicode(v))
1541
+
1542
+ def first_write(self, v):
1543
+ try:
1544
+ self.write_normal(v)
1545
+ self.write = self.write_normal
1546
+ except TypeError:
1547
+ # input is probably a bytestream
1548
+ self.write_bytes(v)
1549
+ self.write = self.write_bytes
1550
+
1551
+ own_fh = False
1552
+ if isinstance(fname, os_PathLike):
1553
+ fname = os_fspath(fname)
1554
+ if _is_string_like(fname):
1555
+ # datasource doesn't support creating a new file ...
1556
+ open(fname, 'wt').close()
1557
+ fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
1558
+ own_fh = True
1559
+ elif hasattr(fname, 'write'):
1560
+ # wrap to handle byte output streams
1561
+ fh = WriteWrap(fname, encoding or 'latin1')
1562
+ else:
1563
+ raise ValueError('fname must be a string or file handle')
1564
+
1565
+ try:
1566
+ X = np.asarray(X)
1567
+
1568
+ # Handle 1-dimensional arrays
1569
+ if X.ndim == 0 or X.ndim > 2:
1570
+ raise ValueError(
1571
+ "Expected 1D or 2D array, got %dD array instead" % X.ndim)
1572
+ elif X.ndim == 1:
1573
+ # Common case -- 1d array of numbers
1574
+ if X.dtype.names is None:
1575
+ X = np.atleast_2d(X).T
1576
+ ncol = 1
1577
+
1578
+ # Complex dtype -- each field indicates a separate column
1579
+ else:
1580
+ ncol = len(X.dtype.names)
1581
+ else:
1582
+ ncol = X.shape[1]
1583
+
1584
+ iscomplex_X = np.iscomplexobj(X)
1585
+ # `fmt` can be a string with multiple insertion points or a
1586
+ # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '$10d')
1587
+ if type(fmt) in (list, tuple):
1588
+ if len(fmt) != ncol:
1589
+ raise AttributeError('fmt has wrong shape. %s' % str(fmt))
1590
+ format = asstr(delimiter).join(map(asstr, fmt))
1591
+ elif isinstance(fmt, str):
1592
+ n_fmt_chars = fmt.count('%')
1593
+ error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
1594
+ if n_fmt_chars == 1:
1595
+ if iscomplex_X:
1596
+ fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
1597
+ else:
1598
+ fmt = [fmt, ] * ncol
1599
+ format = delimiter.join(fmt)
1600
+ elif iscomplex_X and n_fmt_chars != (2 * ncol):
1601
+ raise error
1602
+ elif ((not iscomplex_X) and n_fmt_chars != ncol):
1603
+ raise error
1604
+ else:
1605
+ format = fmt
1606
+ else:
1607
+ raise ValueError('invalid fmt: %r' % (fmt,))
1608
+
1609
+ if len(header) > 0:
1610
+ header = header.replace('\n', '\n' + comments)
1611
+ fh.write(comments + header + newline)
1612
+ if iscomplex_X:
1613
+ for row in X:
1614
+ row2 = []
1615
+ for number in row:
1616
+ row2.append(number.real)
1617
+ row2.append(number.imag)
1618
+ s = format % tuple(row2) + newline
1619
+ fh.write(s.replace('+-', '-'))
1620
+ else:
1621
+ for row in X:
1622
+ try:
1623
+ v = format % tuple(row) + newline
1624
+ except TypeError as e:
1625
+ raise TypeError("Mismatch between array dtype ('%s') and "
1626
+ "format specifier ('%s')"
1627
+ % (str(X.dtype), format)) from e
1628
+ fh.write(v)
1629
+
1630
+ if len(footer) > 0:
1631
+ footer = footer.replace('\n', '\n' + comments)
1632
+ fh.write(comments + footer + newline)
1633
+ finally:
1634
+ if own_fh:
1635
+ fh.close()
1636
+
1637
+
1638
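+ # Illustrative example of the complex-format expansion above: with a single
+ # '%' specifier and complex data, each column's format becomes
+ # ' (%s+%sj)' % (fmt, fmt), and '+-' in the rendered row is rewritten to '-':
+ #
+ #     np.savetxt('c.out', np.array([[1+2j, 3-4j]]), fmt='%.1e')
+ #     # row written roughly as ' (1.0e+00+2.0e+00j)  (3.0e+00-4.0e+00j)'
+
+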
+ @set_module('numpy')
+ def fromregex(file, regexp, dtype, encoding=None):
+     r"""
+     Construct an array from a text file, using regular expression parsing.
+
+     The returned array is always a structured array, and is constructed from
+     all matches of the regular expression in the file. Groups in the regular
+     expression are converted to fields of the structured array.
+
+     Parameters
+     ----------
+     file : path or file
+         Filename or file object to read.
+
+         .. versionchanged:: 1.22.0
+             Now accepts `os.PathLike` implementations.
+     regexp : str or regexp
+         Regular expression used to parse the file.
+         Groups in the regular expression correspond to fields in the dtype.
+     dtype : dtype or list of dtypes
+         Dtype for the structured array; must be a structured datatype.
+     encoding : str, optional
+         Encoding used to decode the inputfile. Does not apply to input streams.
+
+         .. versionadded:: 1.14.0
+
+     Returns
+     -------
+     output : ndarray
+         The output array, containing the part of the content of `file` that
+         was matched by `regexp`. `output` is always a structured array.
+
+     Raises
+     ------
+     TypeError
+         When `dtype` is not a valid dtype for a structured array.
+
+     See Also
+     --------
+     fromstring, loadtxt
+
+     Notes
+     -----
+     Dtypes for structured arrays can be specified in several forms, but all
+     forms specify at least the data type and field name. For details see
+     `basics.rec`.
+
+     Examples
+     --------
+     >>> from io import StringIO
+     >>> text = StringIO("1312 foo\n1534 bar\n444 qux")
+
+     >>> regexp = r"(\d+)\s+(...)"  # match [digits, whitespace, anything]
+     >>> output = np.fromregex(text, regexp,
+     ...                       [('num', np.int64), ('key', 'S3')])
+     >>> output
+     array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
+           dtype=[('num', '<i8'), ('key', 'S3')])
+     >>> output['num']
+     array([1312, 1534,  444])
+
+     """
+ own_fh = False
1701
+ if not hasattr(file, "read"):
1702
+ file = os.fspath(file)
1703
+ file = np.lib._datasource.open(file, 'rt', encoding=encoding)
1704
+ own_fh = True
1705
+
1706
+ try:
1707
+ if not isinstance(dtype, np.dtype):
1708
+ dtype = np.dtype(dtype)
1709
+ if dtype.names is None:
1710
+ raise TypeError('dtype must be a structured datatype.')
1711
+
1712
+ content = file.read()
1713
+ if isinstance(content, bytes) and isinstance(regexp, str):
1714
+ regexp = asbytes(regexp)
1715
+ elif isinstance(content, str) and isinstance(regexp, bytes):
1716
+ regexp = asstr(regexp)
1717
+
1718
+ if not hasattr(regexp, 'match'):
1719
+ regexp = re.compile(regexp)
1720
+ seq = regexp.findall(content)
1721
+ if seq and not isinstance(seq[0], tuple):
1722
+ # Only one group is in the regexp.
1723
+ # Create the new array as a single data-type and then
1724
+ # re-interpret as a single-field structured array.
1725
+ newdtype = np.dtype(dtype[dtype.names[0]])
1726
+ output = np.array(seq, dtype=newdtype)
1727
+ output.dtype = dtype
1728
+ else:
1729
+ output = np.array(seq, dtype=dtype)
1730
+
1731
+ return output
1732
+ finally:
1733
+ if own_fh:
1734
+ file.close()
1735
+
1736
+
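An aside on the single-group branch above: when the pattern contains exactly one group, `findall` returns plain strings rather than tuples, so the code first builds a plain array and then reinterprets it as a one-field structured array. A minimal sketch of that behaviour (the sample text and the field name `value` are invented for the example, not part of this diff):

    from io import StringIO
    import numpy as np

    text = StringIO("10 20 30")
    # One group only: findall() yields ['10', '20', '30'], not tuples.
    out = np.fromregex(text, r"(\d+)", [('value', np.int64)])
    print(out['value'])   # [10 20 30], held in a one-field structured array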
1737
+ #####--------------------------------------------------------------------------
1738
+ #---- --- ASCII functions ---
1739
+ #####--------------------------------------------------------------------------
1740
+
1741
+
1742
+ @set_array_function_like_doc
1743
+ @set_module('numpy')
1744
+ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
1745
+ skip_header=0, skip_footer=0, converters=None,
1746
+ missing_values=None, filling_values=None, usecols=None,
1747
+ names=None, excludelist=None,
1748
+ deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
1749
+ replace_space='_', autostrip=False, case_sensitive=True,
1750
+ defaultfmt="f%i", unpack=None, usemask=False, loose=True,
1751
+ invalid_raise=True, max_rows=None, encoding='bytes',
1752
+ *, ndmin=0, like=None):
1753
+ """
1754
+ Load data from a text file, with missing values handled as specified.
1755
+
1756
+ Each line past the first `skip_header` lines is split at the `delimiter`
1757
+ character, and characters following the `comments` character are discarded.
1758
+
1759
+ Parameters
1760
+ ----------
1761
+ fname : file, str, pathlib.Path, list of str, generator
1762
+ File, filename, list, or generator to read. If the filename
1763
+ extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
1764
+ that generators must return bytes or strings. The strings
1765
+ in a list or produced by a generator are treated as lines.
1766
+ dtype : dtype, optional
1767
+ Data type of the resulting array.
1768
+ If None, the dtypes will be determined by the contents of each
1769
+ column, individually.
1770
+ comments : str, optional
1771
+ The character used to indicate the start of a comment.
1772
+ All the characters occurring on a line after a comment are discarded.
1773
+ delimiter : str, int, or sequence, optional
1774
+ The string used to separate values. By default, any consecutive
1775
+ whitespaces act as delimiter. An integer or sequence of integers
1776
+ can also be provided as width(s) of each field.
1777
+ skiprows : int, optional
1778
+ `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
1779
+ skip_header : int, optional
1780
+ The number of lines to skip at the beginning of the file.
1781
+ skip_footer : int, optional
1782
+ The number of lines to skip at the end of the file.
1783
+ converters : variable, optional
1784
+ The set of functions that convert the data of a column to a value.
1785
+ The converters can also be used to provide a default value
1786
+ for missing data: ``converters = {3: lambda s: float(s or 0)}``.
1787
+ missing : variable, optional
1788
+ `missing` was removed in numpy 1.10. Please use `missing_values`
1789
+ instead.
1790
+ missing_values : variable, optional
1791
+ The set of strings corresponding to missing data.
1792
+ filling_values : variable, optional
1793
+ The set of values to be used as default when the data are missing.
1794
+ usecols : sequence, optional
1795
+ Which columns to read, with 0 being the first. For example,
1796
+ ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
1797
+ names : {None, True, str, sequence}, optional
1798
+ If `names` is True, the field names are read from the first line after
1799
+ the first `skip_header` lines. This line can optionally be preceded
1800
+ by a comment delimiter. If `names` is a sequence or a single-string of
1801
+ comma-separated names, the names will be used to define the field names
1802
+ in a structured dtype. If `names` is None, the names of the dtype
1803
+ fields will be used, if any.
1804
+ excludelist : sequence, optional
1805
+ A list of names to exclude. This list is appended to the default list
1806
+ ['return','file','print']. Excluded names are appended with an
1807
+ underscore: for example, `file` would become `file_`.
1808
+ deletechars : str, optional
1809
+ A string combining invalid characters that must be deleted from the
1810
+ names.
1811
+ defaultfmt : str, optional
1812
+ A format used to define default field names, such as "f%i" or "f_%02i".
1813
+ autostrip : bool, optional
1814
+ Whether to automatically strip white spaces from the variables.
1815
+ replace_space : char, optional
1816
+ Character(s) used in replacement of white spaces in the variable
1817
+ names. By default, use a '_'.
1818
+ case_sensitive : {True, False, 'upper', 'lower'}, optional
1819
+ If True, field names are case sensitive.
1820
+ If False or 'upper', field names are converted to upper case.
1821
+ If 'lower', field names are converted to lower case.
1822
+ unpack : bool, optional
1823
+ If True, the returned array is transposed, so that arguments may be
1824
+ unpacked using ``x, y, z = genfromtxt(...)``. When used with a
1825
+ structured data-type, arrays are returned for each field.
1826
+ Default is False.
1827
+ usemask : bool, optional
1828
+ If True, return a masked array.
1829
+ If False, return a regular array.
1830
+ loose : bool, optional
1831
+ If True, do not raise errors for invalid values.
1832
+ invalid_raise : bool, optional
1833
+ If True, an exception is raised if an inconsistency is detected in the
1834
+ number of columns.
1835
+ If False, a warning is emitted and the offending lines are skipped.
1836
+ max_rows : int, optional
1837
+ The maximum number of rows to read. Must not be used with skip_footer
1838
+ at the same time. If given, the value must be at least 1. Default is
1839
+ to read the entire file.
1840
+
1841
+ .. versionadded:: 1.10.0
1842
+ encoding : str, optional
1843
+ Encoding used to decode the input file. Does not apply when `fname` is
1844
+ a file object. The special value 'bytes' enables backward compatibility
1845
+ workarounds that ensure that you receive byte arrays when possible
1846
+ and passes latin1 encoded strings to converters. Override this value to
1847
+ receive unicode arrays and pass strings as input to converters. If set
1848
+ to None the system default is used. The default value is 'bytes'.
1849
+
1850
+ .. versionadded:: 1.14.0
1851
+ ndmin : int, optional
1852
+ Same parameter as `loadtxt`
1853
+
1854
+ .. versionadded:: 1.23.0
1855
+ ${ARRAY_FUNCTION_LIKE}
1856
+
1857
+ .. versionadded:: 1.20.0
1858
+
1859
+ Returns
1860
+ -------
1861
+ out : ndarray
1862
+ Data read from the text file. If `usemask` is True, this is a
1863
+ masked array.
1864
+
1865
+ See Also
1866
+ --------
1867
+ numpy.loadtxt : equivalent function when no data is missing.
1868
+
1869
+ Notes
1870
+ -----
1871
+ * When spaces are used as delimiters, or when no delimiter has been given
1872
+ as input, there should not be any missing data between two fields.
1873
+ * When the variables are named (either by a flexible dtype or with `names`),
1874
+ there must not be any header in the file (else a ValueError
1875
+ exception is raised).
1876
+ * Individual values are not stripped of spaces by default.
1877
+ When using a custom converter, make sure the function does remove spaces.
1878
+
1879
+ References
1880
+ ----------
1881
+ .. [1] NumPy User Guide, section `I/O with NumPy
1882
+ <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
1883
+
1884
+ Examples
1885
+ --------
1886
+ >>> from io import StringIO
1887
+ >>> import numpy as np
1888
+
1889
+ Comma delimited file with mixed dtype
1890
+
1891
+ >>> s = StringIO(u"1,1.3,abcde")
1892
+ >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
1893
+ ... ('mystring','S5')], delimiter=",")
1894
+ >>> data
1895
+ array((1, 1.3, b'abcde'),
1896
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
1897
+
1898
+ Using dtype = None
1899
+
1900
+ >>> _ = s.seek(0) # needed for StringIO example only
1901
+ >>> data = np.genfromtxt(s, dtype=None,
1902
+ ... names = ['myint','myfloat','mystring'], delimiter=",")
1903
+ >>> data
1904
+ array((1, 1.3, b'abcde'),
1905
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
1906
+
1907
+ Specifying dtype and names
1908
+
1909
+ >>> _ = s.seek(0)
1910
+ >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
1911
+ ... names=['myint','myfloat','mystring'], delimiter=",")
1912
+ >>> data
1913
+ array((1, 1.3, b'abcde'),
1914
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
1915
+
1916
+ An example with fixed-width columns
1917
+
1918
+ >>> s = StringIO(u"11.3abcde")
1919
+ >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
1920
+ ... delimiter=[1,3,5])
1921
+ >>> data
1922
+ array((1, 1.3, b'abcde'),
1923
+ dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
1924
+
1925
+ An example to show comments
1926
+
1927
+ >>> f = StringIO('''
1928
+ ... text,# of chars
1929
+ ... hello world,11
1930
+ ... numpy,5''')
1931
+ >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
1932
+ array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
1933
+ dtype=[('f0', 'S12'), ('f1', 'S12')])
1934
+
1935
+ """
1936
+
1937
+ if like is not None:
1938
+ return _genfromtxt_with_like(
1939
+ like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
1940
+ skip_header=skip_header, skip_footer=skip_footer,
1941
+ converters=converters, missing_values=missing_values,
1942
+ filling_values=filling_values, usecols=usecols, names=names,
1943
+ excludelist=excludelist, deletechars=deletechars,
1944
+ replace_space=replace_space, autostrip=autostrip,
1945
+ case_sensitive=case_sensitive, defaultfmt=defaultfmt,
1946
+ unpack=unpack, usemask=usemask, loose=loose,
1947
+ invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
1948
+ ndmin=ndmin,
1949
+ )
1950
+
1951
+ _ensure_ndmin_ndarray_check_param(ndmin)
1952
+
1953
+ if max_rows is not None:
1954
+ if skip_footer:
1955
+ raise ValueError(
1956
+ "The keywords 'skip_footer' and 'max_rows' can not be "
1957
+ "specified at the same time.")
1958
+ if max_rows < 1:
1959
+ raise ValueError("'max_rows' must be at least 1.")
1960
+
1961
+ if usemask:
1962
+ from numpy.ma import MaskedArray, make_mask_descr
1963
+ # Check the input dictionary of converters
1964
+ user_converters = converters or {}
1965
+ if not isinstance(user_converters, dict):
1966
+ raise TypeError(
1967
+ "The input argument 'converter' should be a valid dictionary "
1968
+ "(got '%s' instead)" % type(user_converters))
1969
+
1970
+ if encoding == 'bytes':
1971
+ encoding = None
1972
+ byte_converters = True
1973
+ else:
1974
+ byte_converters = False
1975
+
1976
+ # Initialize the filehandle, the LineSplitter and the NameValidator
1977
+ if isinstance(fname, os_PathLike):
1978
+ fname = os_fspath(fname)
1979
+ if isinstance(fname, str):
1980
+ fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
1981
+ fid_ctx = contextlib.closing(fid)
1982
+ else:
1983
+ fid = fname
1984
+ fid_ctx = contextlib.nullcontext(fid)
1985
+ try:
1986
+ fhd = iter(fid)
1987
+ except TypeError as e:
1988
+ raise TypeError(
1989
+ "fname must be a string, a filehandle, a sequence of strings,\n"
1990
+ f"or an iterator of strings. Got {type(fname)} instead."
1991
+ ) from e
1992
+ with fid_ctx:
1993
+ split_line = LineSplitter(delimiter=delimiter, comments=comments,
1994
+ autostrip=autostrip, encoding=encoding)
1995
+ validate_names = NameValidator(excludelist=excludelist,
1996
+ deletechars=deletechars,
1997
+ case_sensitive=case_sensitive,
1998
+ replace_space=replace_space)
1999
+
2000
+ # Skip the first `skip_header` rows
2001
+ try:
2002
+ for i in range(skip_header):
2003
+ next(fhd)
2004
+
2005
+ # Keep on until we find the first valid values
2006
+ first_values = None
2007
+
2008
+ while not first_values:
2009
+ first_line = _decode_line(next(fhd), encoding)
2010
+ if (names is True) and (comments is not None):
2011
+ if comments in first_line:
2012
+ first_line = (
2013
+ ''.join(first_line.split(comments)[1:]))
2014
+ first_values = split_line(first_line)
2015
+ except StopIteration:
2016
+ # return an empty array if the datafile is empty
2017
+ first_line = ''
2018
+ first_values = []
2019
+ warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
2020
+
2021
+ # Should we take the first values as names ?
2022
+ if names is True:
2023
+ fval = first_values[0].strip()
2024
+ if comments is not None:
2025
+ if fval in comments:
2026
+ del first_values[0]
2027
+
2028
+ # Check the columns to use: make sure `usecols` is a list
2029
+ if usecols is not None:
2030
+ try:
2031
+ usecols = [_.strip() for _ in usecols.split(",")]
2032
+ except AttributeError:
2033
+ try:
2034
+ usecols = list(usecols)
2035
+ except TypeError:
2036
+ usecols = [usecols, ]
2037
+ nbcols = len(usecols or first_values)
2038
+
2039
+ # Check the names and overwrite the dtype.names if needed
2040
+ if names is True:
2041
+ names = validate_names([str(_.strip()) for _ in first_values])
2042
+ first_line = ''
2043
+ elif _is_string_like(names):
2044
+ names = validate_names([_.strip() for _ in names.split(',')])
2045
+ elif names:
2046
+ names = validate_names(names)
2047
+ # Get the dtype
2048
+ if dtype is not None:
2049
+ dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
2050
+ excludelist=excludelist,
2051
+ deletechars=deletechars,
2052
+ case_sensitive=case_sensitive,
2053
+ replace_space=replace_space)
2054
+ # Make sure the names is a list (for 2.5)
2055
+ if names is not None:
2056
+ names = list(names)
2057
+
2058
+ if usecols:
2059
+ for (i, current) in enumerate(usecols):
2060
+ # if usecols is a list of names, convert to a list of indices
2061
+ if _is_string_like(current):
2062
+ usecols[i] = names.index(current)
2063
+ elif current < 0:
2064
+ usecols[i] = current + len(first_values)
2065
+ # If the dtype is not None, make sure we update it
2066
+ if (dtype is not None) and (len(dtype) > nbcols):
2067
+ descr = dtype.descr
2068
+ dtype = np.dtype([descr[_] for _ in usecols])
2069
+ names = list(dtype.names)
2070
+ # If `names` is not None, update the names
2071
+ elif (names is not None) and (len(names) > nbcols):
2072
+ names = [names[_] for _ in usecols]
2073
+ elif (names is not None) and (dtype is not None):
2074
+ names = list(dtype.names)
2075
+
2076
+ # Process the missing values ...............................
2077
+ # Rename missing_values for convenience
2078
+ user_missing_values = missing_values or ()
2079
+ if isinstance(user_missing_values, bytes):
2080
+ user_missing_values = user_missing_values.decode('latin1')
2081
+
2082
+ # Define the list of missing_values (one column: one list)
2083
+ missing_values = [list(['']) for _ in range(nbcols)]
2084
+
2085
+ # We have a dictionary: process it field by field
2086
+ if isinstance(user_missing_values, dict):
2087
+ # Loop on the items
2088
+ for (key, val) in user_missing_values.items():
2089
+ # Is the key a string ?
2090
+ if _is_string_like(key):
2091
+ try:
2092
+ # Transform it into an integer
2093
+ key = names.index(key)
2094
+ except ValueError:
2095
+ # We couldn't find it: the name must have been dropped
2096
+ continue
2097
+ # Redefine the key as needed if it's a column number
2098
+ if usecols:
2099
+ try:
2100
+ key = usecols.index(key)
2101
+ except ValueError:
2102
+ pass
2103
+ # Transform the value as a list of string
2104
+ if isinstance(val, (list, tuple)):
2105
+ val = [str(_) for _ in val]
2106
+ else:
2107
+ val = [str(val), ]
2108
+ # Add the value(s) to the current list of missing
2109
+ if key is None:
2110
+ # None acts as default
2111
+ for miss in missing_values:
2112
+ miss.extend(val)
2113
+ else:
2114
+ missing_values[key].extend(val)
2115
+ # We have a sequence : each item matches a column
2116
+ elif isinstance(user_missing_values, (list, tuple)):
2117
+ for (value, entry) in zip(user_missing_values, missing_values):
2118
+ value = str(value)
2119
+ if value not in entry:
2120
+ entry.append(value)
2121
+ # We have a string : apply it to all entries
2122
+ elif isinstance(user_missing_values, str):
2123
+ user_value = user_missing_values.split(",")
2124
+ for entry in missing_values:
2125
+ entry.extend(user_value)
2126
+ # We have something else: apply it to all entries
2127
+ else:
2128
+ for entry in missing_values:
2129
+ entry.extend([str(user_missing_values)])
2130
+
2131
+ # Process the filling_values ...............................
2132
+ # Rename the input for convenience
2133
+ user_filling_values = filling_values
2134
+ if user_filling_values is None:
2135
+ user_filling_values = []
2136
+ # Define the default
2137
+ filling_values = [None] * nbcols
2138
+ # We have a dictionary : update each entry individually
2139
+ if isinstance(user_filling_values, dict):
2140
+ for (key, val) in user_filling_values.items():
2141
+ if _is_string_like(key):
2142
+ try:
2143
+ # Transform it into an integer
2144
+ key = names.index(key)
2145
+ except ValueError:
2146
+ # We couldn't find it: the name must have been dropped,
2147
+ continue
2148
+ # Redefine the key if it's a column number and usecols is defined
2149
+ if usecols:
2150
+ try:
2151
+ key = usecols.index(key)
2152
+ except ValueError:
2153
+ pass
2154
+ # Add the value to the list
2155
+ filling_values[key] = val
2156
+ # We have a sequence : update on a one-to-one basis
2157
+ elif isinstance(user_filling_values, (list, tuple)):
2158
+ n = len(user_filling_values)
2159
+ if (n <= nbcols):
2160
+ filling_values[:n] = user_filling_values
2161
+ else:
2162
+ filling_values = user_filling_values[:nbcols]
2163
+ # We have something else : use it for all entries
2164
+ else:
2165
+ filling_values = [user_filling_values] * nbcols
2166
+
2167
+ # Initialize the converters ................................
2168
+ if dtype is None:
2169
+ # Note: we can't use a [...]*nbcols, as we would have 3 times the same
2170
+ # ... converter, instead of 3 different converters.
2171
+ converters = [StringConverter(None, missing_values=miss, default=fill)
2172
+ for (miss, fill) in zip(missing_values, filling_values)]
2173
+ else:
2174
+ dtype_flat = flatten_dtype(dtype, flatten_base=True)
2175
+ # Initialize the converters
2176
+ if len(dtype_flat) > 1:
2177
+ # Flexible type : get a converter from each dtype
2178
+ zipit = zip(dtype_flat, missing_values, filling_values)
2179
+ converters = [StringConverter(dt, locked=True,
2180
+ missing_values=miss, default=fill)
2181
+ for (dt, miss, fill) in zipit]
2182
+ else:
2183
+ # Set to a default converter (but w/ different missing values)
2184
+ zipit = zip(missing_values, filling_values)
2185
+ converters = [StringConverter(dtype, locked=True,
2186
+ missing_values=miss, default=fill)
2187
+ for (miss, fill) in zipit]
2188
+ # Update the converters to use the user-defined ones
2189
+ uc_update = []
2190
+ for (j, conv) in user_converters.items():
2191
+ # If the converter is specified by column names, use the index instead
2192
+ if _is_string_like(j):
2193
+ try:
2194
+ j = names.index(j)
2195
+ i = j
2196
+ except ValueError:
2197
+ continue
2198
+ elif usecols:
2199
+ try:
2200
+ i = usecols.index(j)
2201
+ except ValueError:
2202
+ # Unused converter specified
2203
+ continue
2204
+ else:
2205
+ i = j
2206
+ # Find the value to test - first_line is not filtered by usecols:
2207
+ if len(first_line):
2208
+ testing_value = first_values[j]
2209
+ else:
2210
+ testing_value = None
2211
+ if conv is bytes:
2212
+ user_conv = asbytes
2213
+ elif byte_converters:
2214
+ # converters may use decode to workaround numpy's old behaviour,
2215
+ # so encode the string again before passing to the user converter
2216
+ def tobytes_first(x, conv):
2217
+ if type(x) is bytes:
2218
+ return conv(x)
2219
+ return conv(x.encode("latin1"))
2220
+ user_conv = functools.partial(tobytes_first, conv=conv)
2221
+ else:
2222
+ user_conv = conv
2223
+ converters[i].update(user_conv, locked=True,
2224
+ testing_value=testing_value,
2225
+ default=filling_values[i],
2226
+ missing_values=missing_values[i],)
2227
+ uc_update.append((i, user_conv))
2228
+ # Make sure we have the corrected keys in user_converters...
2229
+ user_converters.update(uc_update)
2230
+
2231
+ # Fixme: possible error as following variable never used.
2232
+ # miss_chars = [_.missing_values for _ in converters]
2233
+
2234
+ # Initialize the output lists ...
2235
+ # ... rows
2236
+ rows = []
2237
+ append_to_rows = rows.append
2238
+ # ... masks
2239
+ if usemask:
2240
+ masks = []
2241
+ append_to_masks = masks.append
2242
+ # ... invalid
2243
+ invalid = []
2244
+ append_to_invalid = invalid.append
2245
+
2246
+ # Parse each line
2247
+ for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
2248
+ values = split_line(line)
2249
+ nbvalues = len(values)
2250
+ # Skip an empty line
2251
+ if nbvalues == 0:
2252
+ continue
2253
+ if usecols:
2254
+ # Select only the columns we need
2255
+ try:
2256
+ values = [values[_] for _ in usecols]
2257
+ except IndexError:
2258
+ append_to_invalid((i + skip_header + 1, nbvalues))
2259
+ continue
2260
+ elif nbvalues != nbcols:
2261
+ append_to_invalid((i + skip_header + 1, nbvalues))
2262
+ continue
2263
+ # Store the values
2264
+ append_to_rows(tuple(values))
2265
+ if usemask:
2266
+ append_to_masks(tuple([v.strip() in m
2267
+ for (v, m) in zip(values,
2268
+ missing_values)]))
2269
+ if len(rows) == max_rows:
2270
+ break
2271
+
2272
+ # Upgrade the converters (if needed)
2273
+ if dtype is None:
2274
+ for (i, converter) in enumerate(converters):
2275
+ current_column = [itemgetter(i)(_m) for _m in rows]
2276
+ try:
2277
+ converter.iterupgrade(current_column)
2278
+ except ConverterLockError:
2279
+ errmsg = "Converter #%i is locked and cannot be upgraded: " % i
2280
+ current_column = map(itemgetter(i), rows)
2281
+ for (j, value) in enumerate(current_column):
2282
+ try:
2283
+ converter.upgrade(value)
2284
+ except (ConverterError, ValueError):
2285
+ errmsg += "(occurred line #%i for value '%s')"
2286
+ errmsg %= (j + 1 + skip_header, value)
2287
+ raise ConverterError(errmsg)
2288
+
2289
+ # Check that we don't have invalid values
2290
+ nbinvalid = len(invalid)
2291
+ if nbinvalid > 0:
2292
+ nbrows = len(rows) + nbinvalid - skip_footer
2293
+ # Construct the error message
2294
+ template = " Line #%%i (got %%i columns instead of %i)" % nbcols
2295
+ if skip_footer > 0:
2296
+ nbinvalid_skipped = len([_ for _ in invalid
2297
+ if _[0] > nbrows + skip_header])
2298
+ invalid = invalid[:nbinvalid - nbinvalid_skipped]
2299
+ skip_footer -= nbinvalid_skipped
2300
+ #
2301
+ # nbrows -= skip_footer
2302
+ # errmsg = [template % (i, nb)
2303
+ # for (i, nb) in invalid if i < nbrows]
2304
+ # else:
2305
+ errmsg = [template % (i, nb)
2306
+ for (i, nb) in invalid]
2307
+ if len(errmsg):
2308
+ errmsg.insert(0, "Some errors were detected!")
2309
+ errmsg = "\n".join(errmsg)
2310
+ # Raise an exception ?
2311
+ if invalid_raise:
2312
+ raise ValueError(errmsg)
2313
+ # Issue a warning ?
2314
+ else:
2315
+ warnings.warn(errmsg, ConversionWarning, stacklevel=2)
2316
+
2317
+ # Strip the last skip_footer data
2318
+ if skip_footer > 0:
2319
+ rows = rows[:-skip_footer]
2320
+ if usemask:
2321
+ masks = masks[:-skip_footer]
2322
+
2323
+ # Convert each value according to the converter:
2324
+ # We want to modify the list in place to avoid creating a new one...
2325
+ if loose:
2326
+ rows = list(
2327
+ zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
2328
+ for (i, conv) in enumerate(converters)]))
2329
+ else:
2330
+ rows = list(
2331
+ zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
2332
+ for (i, conv) in enumerate(converters)]))
2333
+
2334
+ # Reset the dtype
2335
+ data = rows
2336
+ if dtype is None:
2337
+ # Get the dtypes from the types of the converters
2338
+ column_types = [conv.type for conv in converters]
2339
+ # Find the columns with strings...
2340
+ strcolidx = [i for (i, v) in enumerate(column_types)
2341
+ if v == np.str_]
2342
+
2343
+ if byte_converters and strcolidx:
2344
+ # convert strings back to bytes for backward compatibility
2345
+ warnings.warn(
2346
+ "Reading unicode strings without specifying the encoding "
2347
+ "argument is deprecated. Set the encoding, use None for the "
2348
+ "system default.",
2349
+ np.VisibleDeprecationWarning, stacklevel=2)
2350
+ def encode_unicode_cols(row_tup):
2351
+ row = list(row_tup)
2352
+ for i in strcolidx:
2353
+ row[i] = row[i].encode('latin1')
2354
+ return tuple(row)
2355
+
2356
+ try:
2357
+ data = [encode_unicode_cols(r) for r in data]
2358
+ except UnicodeEncodeError:
2359
+ pass
2360
+ else:
2361
+ for i in strcolidx:
2362
+ column_types[i] = np.bytes_
2363
+
2364
+ # Update string types to be the right length
2365
+ sized_column_types = column_types[:]
2366
+ for i, col_type in enumerate(column_types):
2367
+ if np.issubdtype(col_type, np.character):
2368
+ n_chars = max(len(row[i]) for row in data)
2369
+ sized_column_types[i] = (col_type, n_chars)
2370
+
2371
+ if names is None:
2372
+ # If the dtype is uniform (before sizing strings)
2373
+ base = {
2374
+ c_type
2375
+ for c, c_type in zip(converters, column_types)
2376
+ if c._checked}
2377
+ if len(base) == 1:
2378
+ uniform_type, = base
2379
+ (ddtype, mdtype) = (uniform_type, bool)
2380
+ else:
2381
+ ddtype = [(defaultfmt % i, dt)
2382
+ for (i, dt) in enumerate(sized_column_types)]
2383
+ if usemask:
2384
+ mdtype = [(defaultfmt % i, bool)
2385
+ for (i, dt) in enumerate(sized_column_types)]
2386
+ else:
2387
+ ddtype = list(zip(names, sized_column_types))
2388
+ mdtype = list(zip(names, [bool] * len(sized_column_types)))
2389
+ output = np.array(data, dtype=ddtype)
2390
+ if usemask:
2391
+ outputmask = np.array(masks, dtype=mdtype)
2392
+ else:
2393
+ # Overwrite the initial dtype names if needed
2394
+ if names and dtype.names is not None:
2395
+ dtype.names = names
2396
+ # Case 1. We have a structured type
2397
+ if len(dtype_flat) > 1:
2398
+ # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
2399
+ # First, create the array using a flattened dtype:
2400
+ # [('a', int), ('b1', int), ('b2', float)]
2401
+ # Then, view the array using the specified dtype.
2402
+ if 'O' in (_.char for _ in dtype_flat):
2403
+ if has_nested_fields(dtype):
2404
+ raise NotImplementedError(
2405
+ "Nested fields involving objects are not supported...")
2406
+ else:
2407
+ output = np.array(data, dtype=dtype)
2408
+ else:
2409
+ rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
2410
+ output = rows.view(dtype)
2411
+ # Now, process the rowmasks the same way
2412
+ if usemask:
2413
+ rowmasks = np.array(
2414
+ masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
2415
+ # Construct the new dtype
2416
+ mdtype = make_mask_descr(dtype)
2417
+ outputmask = rowmasks.view(mdtype)
2418
+ # Case #2. We have a basic dtype
2419
+ else:
2420
+ # We used some user-defined converters
2421
+ if user_converters:
2422
+ ishomogeneous = True
2423
+ descr = []
2424
+ for i, ttype in enumerate([conv.type for conv in converters]):
2425
+ # Keep the dtype of the current converter
2426
+ if i in user_converters:
2427
+ ishomogeneous &= (ttype == dtype.type)
2428
+ if np.issubdtype(ttype, np.character):
2429
+ ttype = (ttype, max(len(row[i]) for row in data))
2430
+ descr.append(('', ttype))
2431
+ else:
2432
+ descr.append(('', dtype))
2433
+ # So we changed the dtype ?
2434
+ if not ishomogeneous:
2435
+ # We have more than one field
2436
+ if len(descr) > 1:
2437
+ dtype = np.dtype(descr)
2438
+ # We have only one field: drop the name if not needed.
2439
+ else:
2440
+ dtype = np.dtype(ttype)
2441
+ #
2442
+ output = np.array(data, dtype)
2443
+ if usemask:
2444
+ if dtype.names is not None:
2445
+ mdtype = [(_, bool) for _ in dtype.names]
2446
+ else:
2447
+ mdtype = bool
2448
+ outputmask = np.array(masks, dtype=mdtype)
2449
+ # Try to take care of the missing data we missed
2450
+ names = output.dtype.names
2451
+ if usemask and names:
2452
+ for (name, conv) in zip(names, converters):
2453
+ missing_values = [conv(_) for _ in conv.missing_values
2454
+ if _ != '']
2455
+ for mval in missing_values:
2456
+ outputmask[name] |= (output[name] == mval)
2457
+ # Construct the final array
2458
+ if usemask:
2459
+ output = output.view(MaskedArray)
2460
+ output._mask = outputmask
2461
+
2462
+ output = _ensure_ndmin_ndarray(output, ndmin=ndmin)
2463
+
2464
+ if unpack:
2465
+ if names is None:
2466
+ return output.T
2467
+ elif len(names) == 1:
2468
+ # squeeze single-name dtypes too
2469
+ return output[names[0]]
2470
+ else:
2471
+ # For structured arrays with multiple fields,
2472
+ # return an array for each field.
2473
+ return [output[field] for field in names]
2474
+ return output
2475
+
2476
+
2477
+ _genfromtxt_with_like = array_function_dispatch()(genfromtxt)
2478
+
2479
+
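Before the two convenience wrappers below, a minimal run showing the missing-value machinery above in action; the data and the `missing_values`/`filling_values` choices are made up purely for illustration:

    from io import StringIO
    import numpy as np

    s = StringIO("1,2,3\n4,,6")
    data = np.genfromtxt(s, delimiter=",", usemask=True,
                         missing_values="", filling_values=-1)
    print(data)           # the empty field on the second row is masked
    print(data.filled())  # [[ 1.  2.  3.]  [ 4. -1.  6.]]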
2480
+ def recfromtxt(fname, **kwargs):
2481
+ """
2482
+ Load ASCII data from a file and return it in a record array.
2483
+
2484
+ If ``usemask=False`` a standard `recarray` is returned,
2485
+ if ``usemask=True`` a MaskedRecords array is returned.
2486
+
2487
+ Parameters
2488
+ ----------
2489
+ fname, kwargs : For a description of input parameters, see `genfromtxt`.
2490
+
2491
+ See Also
2492
+ --------
2493
+ numpy.genfromtxt : generic function
2494
+
2495
+ Notes
2496
+ -----
2497
+ By default, `dtype` is None, which means that the data-type of the output
2498
+ array will be determined from the data.
2499
+
2500
+ """
2501
+ kwargs.setdefault("dtype", None)
2502
+ usemask = kwargs.get('usemask', False)
2503
+ output = genfromtxt(fname, **kwargs)
2504
+ if usemask:
2505
+ from numpy.ma.mrecords import MaskedRecords
2506
+ output = output.view(MaskedRecords)
2507
+ else:
2508
+ output = output.view(np.recarray)
2509
+ return output
2510
+
2511
+
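A short usage sketch for `recfromtxt` (the field names are invented for the example); the record-array view makes the fields available as attributes:

    from io import StringIO
    import numpy as np

    s = StringIO("1 2.5 abc\n4 5.0 xyz")
    rec = np.recfromtxt(s, names=['a', 'b', 'c'], encoding=None)
    print(rec.a, rec.b)   # [1 4] [2.5 5. ]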
2512
+ def recfromcsv(fname, **kwargs):
2513
+ """
2514
+ Load ASCII data stored in a comma-separated file.
2515
+
2516
+ The returned array is a record array (if ``usemask=False``, see
2517
+ `recarray`) or a masked record array (if ``usemask=True``,
2518
+ see `ma.mrecords.MaskedRecords`).
2519
+
2520
+ Parameters
2521
+ ----------
2522
+ fname, kwargs : For a description of input parameters, see `genfromtxt`.
2523
+
2524
+ See Also
2525
+ --------
2526
+ numpy.genfromtxt : generic function to load ASCII data.
2527
+
2528
+ Notes
2529
+ -----
2530
+ By default, `dtype` is None, which means that the data-type of the output
2531
+ array will be determined from the data.
2532
+
2533
+ """
2534
+ # Set default kwargs for genfromtxt as relevant to csv import.
2535
+ kwargs.setdefault("case_sensitive", "lower")
2536
+ kwargs.setdefault("names", True)
2537
+ kwargs.setdefault("delimiter", ",")
2538
+ kwargs.setdefault("dtype", None)
2539
+ output = genfromtxt(fname, **kwargs)
2540
+
2541
+ usemask = kwargs.get("usemask", False)
2542
+ if usemask:
2543
+ from numpy.ma.mrecords import MaskedRecords
2544
+ output = output.view(MaskedRecords)
2545
+ else:
2546
+ output = output.view(np.recarray)
2547
+ return output
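And the analogous sketch for `recfromcsv`; note that with the default `case_sensitive="lower"` the header names come back lower-cased (the sample data are invented for the example):

    from io import StringIO
    import numpy as np

    s = StringIO("A,B\n1,2.5\n3,4.0")
    rec = np.recfromcsv(s, encoding=None)
    print(rec.a)   # [1 3] -- the 'A' column, renamed to 'a'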
env-llmeval/lib/python3.10/site-packages/numpy/lib/polynomial.py ADDED
@@ -0,0 +1,1453 @@
1
+ """
2
+ Functions to operate on polynomials.
3
+
4
+ """
5
+ __all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
6
+ 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
7
+ 'polyfit', 'RankWarning']
8
+
9
+ import functools
10
+ import re
11
+ import warnings
12
+
13
+ from .._utils import set_module
14
+ import numpy.core.numeric as NX
15
+
16
+ from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
17
+ ones)
18
+ from numpy.core import overrides
19
+ from numpy.lib.twodim_base import diag, vander
20
+ from numpy.lib.function_base import trim_zeros
21
+ from numpy.lib.type_check import iscomplex, real, imag, mintypecode
22
+ from numpy.linalg import eigvals, lstsq, inv
23
+
24
+
25
+ array_function_dispatch = functools.partial(
26
+ overrides.array_function_dispatch, module='numpy')
27
+
28
+
29
+ @set_module('numpy')
30
+ class RankWarning(UserWarning):
31
+ """
32
+ Issued by `polyfit` when the Vandermonde matrix is rank deficient.
33
+
34
+ For more information, a way to suppress the warning, and an example of
35
+ `RankWarning` being issued, see `polyfit`.
36
+
37
+ """
38
+ pass
39
+
40
+
41
+ def _poly_dispatcher(seq_of_zeros):
42
+ return seq_of_zeros
43
+
44
+
45
+ @array_function_dispatch(_poly_dispatcher)
46
+ def poly(seq_of_zeros):
47
+ """
48
+ Find the coefficients of a polynomial with the given sequence of roots.
49
+
50
+ .. note::
51
+ This forms part of the old polynomial API. Since version 1.4, the
52
+ new polynomial API defined in `numpy.polynomial` is preferred.
53
+ A summary of the differences can be found in the
54
+ :doc:`transition guide </reference/routines.polynomials>`.
55
+
56
+ Returns the coefficients of the polynomial whose leading coefficient
57
+ is one for the given sequence of zeros (multiple roots must be included
58
+ in the sequence as many times as their multiplicity; see Examples).
59
+ A square matrix (or array, which will be treated as a matrix) can also
60
+ be given, in which case the coefficients of the characteristic polynomial
61
+ of the matrix are returned.
62
+
63
+ Parameters
64
+ ----------
65
+ seq_of_zeros : array_like, shape (N,) or (N, N)
66
+ A sequence of polynomial roots, or a square array or matrix object.
67
+
68
+ Returns
69
+ -------
70
+ c : ndarray
71
+ 1D array of polynomial coefficients from highest to lowest degree:
72
+
73
+ ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
74
+ where c[0] always equals 1.
75
+
76
+ Raises
77
+ ------
78
+ ValueError
79
+ If input is the wrong shape (the input must be a 1-D or square
80
+ 2-D array).
81
+
82
+ See Also
83
+ --------
84
+ polyval : Compute polynomial values.
85
+ roots : Return the roots of a polynomial.
86
+ polyfit : Least squares polynomial fit.
87
+ poly1d : A one-dimensional polynomial class.
88
+
89
+ Notes
90
+ -----
91
+ Specifying the roots of a polynomial still leaves one degree of
92
+ freedom, typically represented by an undetermined leading
93
+ coefficient. [1]_ In the case of this function, that coefficient -
94
+ the first one in the returned array - is always taken as one. (If
95
+ for some reason you have one other point, the only automatic way
96
+ presently to leverage that information is to use ``polyfit``.)
97
+
98
+ The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
99
+ matrix **A** is given by
100
+
101
+ :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
102
+
103
+ where **I** is the `n`-by-`n` identity matrix. [2]_
104
+
105
+ References
106
+ ----------
107
+ .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
108
+ Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
109
+
110
+ .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
111
+ Academic Press, pg. 182, 1980.
112
+
113
+ Examples
114
+ --------
115
+ Given a sequence of a polynomial's zeros:
116
+
117
+ >>> np.poly((0, 0, 0)) # Multiple root example
118
+ array([1., 0., 0., 0.])
119
+
120
+ The line above represents z**3 + 0*z**2 + 0*z + 0.
121
+
122
+ >>> np.poly((-1./2, 0, 1./2))
123
+ array([ 1. , 0. , -0.25, 0. ])
124
+
125
+ The line above represents z**3 - z/4
126
+
127
+ >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
128
+ array([ 1. , -0.77086955, 0.08618131, 0. ]) # random
129
+
130
+ Given a square array object:
131
+
132
+ >>> P = np.array([[0, 1./3], [-1./2, 0]])
133
+ >>> np.poly(P)
134
+ array([1. , 0. , 0.16666667])
135
+
136
+ Note how in all cases the leading coefficient is always 1.
137
+
138
+ """
139
+ seq_of_zeros = atleast_1d(seq_of_zeros)
140
+ sh = seq_of_zeros.shape
141
+
142
+ if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
143
+ seq_of_zeros = eigvals(seq_of_zeros)
144
+ elif len(sh) == 1:
145
+ dt = seq_of_zeros.dtype
146
+ # Let object arrays slip through, e.g. for arbitrary precision
147
+ if dt != object:
148
+ seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
149
+ else:
150
+ raise ValueError("input must be 1d or non-empty square 2d array.")
151
+
152
+ if len(seq_of_zeros) == 0:
153
+ return 1.0
154
+ dt = seq_of_zeros.dtype
155
+ a = ones((1,), dtype=dt)
156
+ for zero in seq_of_zeros:
157
+ a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full')
158
+
159
+ if issubclass(a.dtype.type, NX.complexfloating):
160
+ # if complex roots are all complex conjugates, the roots are real.
161
+ roots = NX.asarray(seq_of_zeros, complex)
162
+ if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
163
+ a = a.real.copy()
164
+
165
+ return a
166
+
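A quick round-trip check of the construction above, under the assumption that all roots are real and simple (the root values are invented for the example):

    import numpy as np

    r = np.array([2.0, -1.0, 0.5])
    c = np.poly(r)                # monic coefficients, c[0] == 1
    print(np.sort(np.roots(c)))  # [-1.   0.5  2. ] -- the roots, recovered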
167
+
168
+ def _roots_dispatcher(p):
169
+ return p
170
+
171
+
172
+ @array_function_dispatch(_roots_dispatcher)
173
+ def roots(p):
174
+ """
175
+ Return the roots of a polynomial with coefficients given in p.
176
+
177
+ .. note::
178
+ This forms part of the old polynomial API. Since version 1.4, the
179
+ new polynomial API defined in `numpy.polynomial` is preferred.
180
+ A summary of the differences can be found in the
181
+ :doc:`transition guide </reference/routines.polynomials>`.
182
+
183
+ The values in the rank-1 array `p` are coefficients of a polynomial.
184
+ If the length of `p` is n+1 then the polynomial is described by::
185
+
186
+ p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
187
+
188
+ Parameters
189
+ ----------
190
+ p : array_like
191
+ Rank-1 array of polynomial coefficients.
192
+
193
+ Returns
194
+ -------
195
+ out : ndarray
196
+ An array containing the roots of the polynomial.
197
+
198
+ Raises
199
+ ------
200
+ ValueError
201
+ When `p` cannot be converted to a rank-1 array.
202
+
203
+ See Also
204
+ --------
205
+ poly : Find the coefficients of a polynomial with a given sequence
206
+ of roots.
207
+ polyval : Compute polynomial values.
208
+ polyfit : Least squares polynomial fit.
209
+ poly1d : A one-dimensional polynomial class.
210
+
211
+ Notes
212
+ -----
213
+ The algorithm relies on computing the eigenvalues of the
214
+ companion matrix [1]_.
215
+
216
+ References
217
+ ----------
218
+ .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
219
+ Cambridge University Press, 1999, pp. 146-7.
220
+
221
+ Examples
222
+ --------
223
+ >>> coeff = [3.2, 2, 1]
224
+ >>> np.roots(coeff)
225
+ array([-0.3125+0.46351241j, -0.3125-0.46351241j])
226
+
227
+ """
228
+ # If input is scalar, this makes it an array
229
+ p = atleast_1d(p)
230
+ if p.ndim != 1:
231
+ raise ValueError("Input must be a rank-1 array.")
232
+
233
+ # find non-zero array entries
234
+ non_zero = NX.nonzero(NX.ravel(p))[0]
235
+
236
+ # Return an empty array if polynomial is all zeros
237
+ if len(non_zero) == 0:
238
+ return NX.array([])
239
+
240
+ # find the number of trailing zeros -- this is the number of roots at 0.
241
+ trailing_zeros = len(p) - non_zero[-1] - 1
242
+
243
+ # strip leading and trailing zeros
244
+ p = p[int(non_zero[0]):int(non_zero[-1])+1]
245
+
246
+ # casting: if incoming array isn't floating point, make it floating point.
247
+ if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
248
+ p = p.astype(float)
249
+
250
+ N = len(p)
251
+ if N > 1:
252
+ # build companion matrix and find its eigenvalues (the roots)
253
+ A = diag(NX.ones((N-2,), p.dtype), -1)
254
+ A[0,:] = -p[1:] / p[0]
255
+ roots = eigvals(A)
256
+ else:
257
+ roots = NX.array([])
258
+
259
+ # tack any zeros onto the back of the array
260
+ roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
261
+ return roots
262
+
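To make the companion-matrix step above concrete, here is the same construction done by hand for a fixed quadratic (the polynomial is invented for the example):

    import numpy as np

    p = np.array([1.0, -3.0, 2.0])         # x**2 - 3x + 2
    A = np.diag(np.ones(len(p) - 2), -1)   # sub-diagonal of ones, as above
    A[0, :] = -p[1:] / p[0]
    print(np.linalg.eigvals(A))            # the roots, 2. and 1.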
263
+
264
+ def _polyint_dispatcher(p, m=None, k=None):
265
+ return (p,)
266
+
267
+
268
+ @array_function_dispatch(_polyint_dispatcher)
269
+ def polyint(p, m=1, k=None):
270
+ """
271
+ Return an antiderivative (indefinite integral) of a polynomial.
272
+
273
+ .. note::
274
+ This forms part of the old polynomial API. Since version 1.4, the
275
+ new polynomial API defined in `numpy.polynomial` is preferred.
276
+ A summary of the differences can be found in the
277
+ :doc:`transition guide </reference/routines.polynomials>`.
278
+
279
+ The returned order `m` antiderivative `P` of polynomial `p` satisfies
280
+ :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
281
+ integration constants `k`. The constants determine the low-order
282
+ polynomial part
283
+
284
+ .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
285
+
286
+ of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
287
+
288
+ Parameters
289
+ ----------
290
+ p : array_like or poly1d
291
+ Polynomial to integrate.
292
+ A sequence is interpreted as polynomial coefficients, see `poly1d`.
293
+ m : int, optional
294
+ Order of the antiderivative. (Default: 1)
295
+ k : list of `m` scalars or scalar, optional
296
+ Integration constants. They are given in the order of integration:
297
+ those corresponding to highest-order terms come first.
298
+
299
+ If ``None`` (default), all constants are assumed to be zero.
300
+ If `m = 1`, a single scalar can be given instead of a list.
301
+
302
+ See Also
303
+ --------
304
+ polyder : derivative of a polynomial
305
+ poly1d.integ : equivalent method
306
+
307
+ Examples
308
+ --------
309
+ The defining property of the antiderivative:
310
+
311
+ >>> p = np.poly1d([1,1,1])
312
+ >>> P = np.polyint(p)
313
+ >>> P
314
+ poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary
315
+ >>> np.polyder(P) == p
316
+ True
317
+
318
+ The integration constants default to zero, but can be specified:
319
+
320
+ >>> P = np.polyint(p, 3)
321
+ >>> P(0)
322
+ 0.0
323
+ >>> np.polyder(P)(0)
324
+ 0.0
325
+ >>> np.polyder(P, 2)(0)
326
+ 0.0
327
+ >>> P = np.polyint(p, 3, k=[6,5,3])
328
+ >>> P
329
+ poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary
330
+
331
+ Note that 3 = 6 / 2!, and that the constants are given in the order of
332
+ integrations. Constant of the highest-order polynomial term comes first:
333
+
334
+ >>> np.polyder(P, 2)(0)
335
+ 6.0
336
+ >>> np.polyder(P, 1)(0)
337
+ 5.0
338
+ >>> P(0)
339
+ 3.0
340
+
341
+ """
342
+ m = int(m)
343
+ if m < 0:
344
+ raise ValueError("Order of integral must be positive (see polyder)")
345
+ if k is None:
346
+ k = NX.zeros(m, float)
347
+ k = atleast_1d(k)
348
+ if len(k) == 1 and m > 1:
349
+ k = k[0]*NX.ones(m, float)
350
+ if len(k) < m:
351
+ raise ValueError(
352
+ "k must be a scalar or a rank-1 array of length 1 or >m.")
353
+
354
+ truepoly = isinstance(p, poly1d)
355
+ p = NX.asarray(p)
356
+ if m == 0:
357
+ if truepoly:
358
+ return poly1d(p)
359
+ return p
360
+ else:
361
+ # Note: this must work also with object and integer arrays
362
+ y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
363
+ val = polyint(y, m - 1, k=k[1:])
364
+ if truepoly:
365
+ return poly1d(val)
366
+ return val
367
+
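A small sanity check of the recursion above: integrating and then differentiating returns the original polynomial (the coefficients are invented for the example):

    import numpy as np

    p = np.poly1d([3.0, 0.0, 1.0])   # 3x**2 + 1
    P = np.polyint(p)                # x**3 + x, integration constant 0
    print(np.polyder(P) == p)        # True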
368
+
369
+ def _polyder_dispatcher(p, m=None):
370
+ return (p,)
371
+
372
+
373
+ @array_function_dispatch(_polyder_dispatcher)
374
+ def polyder(p, m=1):
375
+ """
376
+ Return the derivative of the specified order of a polynomial.
377
+
378
+ .. note::
379
+ This forms part of the old polynomial API. Since version 1.4, the
380
+ new polynomial API defined in `numpy.polynomial` is preferred.
381
+ A summary of the differences can be found in the
382
+ :doc:`transition guide </reference/routines.polynomials>`.
383
+
384
+ Parameters
385
+ ----------
386
+ p : poly1d or sequence
387
+ Polynomial to differentiate.
388
+ A sequence is interpreted as polynomial coefficients, see `poly1d`.
389
+ m : int, optional
390
+ Order of differentiation (default: 1)
391
+
392
+ Returns
393
+ -------
394
+ der : poly1d
395
+ A new polynomial representing the derivative.
396
+
397
+ See Also
398
+ --------
399
+ polyint : Anti-derivative of a polynomial.
400
+ poly1d : Class for one-dimensional polynomials.
401
+
402
+ Examples
403
+ --------
404
+ The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
405
+
406
+ >>> p = np.poly1d([1,1,1,1])
407
+ >>> p2 = np.polyder(p)
408
+ >>> p2
409
+ poly1d([3, 2, 1])
410
+
411
+ which evaluates to:
412
+
413
+ >>> p2(2.)
414
+ 17.0
415
+
416
+ We can verify this, approximating the derivative with
417
+ ``(f(x + h) - f(x))/h``:
418
+
419
+ >>> (p(2. + 0.001) - p(2.)) / 0.001
420
+ 17.007000999997857
421
+
422
+ The fourth-order derivative of a 3rd-order polynomial is zero:
423
+
424
+ >>> np.polyder(p, 2)
425
+ poly1d([6, 2])
426
+ >>> np.polyder(p, 3)
427
+ poly1d([6])
428
+ >>> np.polyder(p, 4)
429
+ poly1d([0])
430
+
431
+ """
432
+ m = int(m)
433
+ if m < 0:
434
+ raise ValueError("Order of derivative must be positive (see polyint)")
435
+
436
+ truepoly = isinstance(p, poly1d)
437
+ p = NX.asarray(p)
438
+ n = len(p) - 1
439
+ y = p[:-1] * NX.arange(n, 0, -1)
440
+ if m == 0:
441
+ val = p
442
+ else:
443
+ val = polyder(y, m - 1)
444
+ if truepoly:
445
+ val = poly1d(val)
446
+ return val
447
+
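And the converse check for `polyder`, comparing the exact derivative against a central finite difference (the polynomial and step size are arbitrary):

    import numpy as np

    p = np.poly1d([1.0, 2.0, 3.0])   # x**2 + 2x + 3
    dp = np.polyder(p)               # 2x + 2
    h = 1e-6
    print(dp(1.0), (p(1.0 + h) - p(1.0 - h)) / (2 * h))  # both ~4.0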
448
+
449
+ def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
450
+ return (x, y, w)
451
+
452
+
453
+ @array_function_dispatch(_polyfit_dispatcher)
454
+ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
455
+ """
456
+ Least squares polynomial fit.
457
+
458
+ .. note::
459
+ This forms part of the old polynomial API. Since version 1.4, the
460
+ new polynomial API defined in `numpy.polynomial` is preferred.
461
+ A summary of the differences can be found in the
462
+ :doc:`transition guide </reference/routines.polynomials>`.
463
+
464
+ Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
465
+ to points `(x, y)`. Returns a vector of coefficients `p` that minimises
466
+ the squared error in the order `deg`, `deg-1`, ... `0`.
467
+
468
+ The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
469
+ method is recommended for new code as it is more stable numerically. See
470
+ the documentation of the method for more information.
471
+
472
+ Parameters
473
+ ----------
474
+ x : array_like, shape (M,)
475
+ x-coordinates of the M sample points ``(x[i], y[i])``.
476
+ y : array_like, shape (M,) or (M, K)
477
+ y-coordinates of the sample points. Several data sets of sample
478
+ points sharing the same x-coordinates can be fitted at once by
479
+ passing in a 2D-array that contains one dataset per column.
480
+ deg : int
481
+ Degree of the fitting polynomial
482
+ rcond : float, optional
483
+ Relative condition number of the fit. Singular values smaller than
484
+ this relative to the largest singular value will be ignored. The
485
+ default value is len(x)*eps, where eps is the relative precision of
486
+ the float type, about 2e-16 in most cases.
487
+ full : bool, optional
488
+ Switch determining nature of return value. When it is False (the
489
+ default) just the coefficients are returned, when True diagnostic
490
+ information from the singular value decomposition is also returned.
491
+ w : array_like, shape (M,), optional
492
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
493
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
494
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
495
+ same variance. When using inverse-variance weighting, use
496
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
497
+ cov : bool or str, optional
498
+ If given and not `False`, return not just the estimate but also its
499
+ covariance matrix. By default, the covariance is scaled by
500
+ chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed
501
+ to be unreliable except in a relative sense and everything is scaled
502
+ such that the reduced chi2 is unity. This scaling is omitted if
503
+ ``cov='unscaled'``, as is relevant for the case that the weights are
504
+ w = 1/sigma, with sigma known to be a reliable estimate of the
505
+ uncertainty.
506
+
507
+ Returns
508
+ -------
509
+ p : ndarray, shape (deg + 1,) or (deg + 1, K)
510
+ Polynomial coefficients, highest power first. If `y` was 2-D, the
511
+ coefficients for `k`-th data set are in ``p[:,k]``.
512
+
513
+ residuals, rank, singular_values, rcond
514
+ These values are only returned if ``full == True``
515
+
516
+ - residuals -- sum of squared residuals of the least squares fit
517
+ - rank -- the effective rank of the scaled Vandermonde
518
+ coefficient matrix
519
+ - singular_values -- singular values of the scaled Vandermonde
520
+ coefficient matrix
521
+ - rcond -- value of `rcond`.
522
+
523
+ For more details, see `numpy.linalg.lstsq`.
524
+
525
+ V : ndarray, shape (M,M) or (M,M,K)
526
+ Present only if ``full == False`` and ``cov == True``. The covariance
527
+ matrix of the polynomial coefficient estimates. The diagonal of
528
+ this matrix are the variance estimates for each coefficient. If y
529
+ is a 2-D array, then the covariance matrix for the `k`-th data set
530
+ are in ``V[:,:,k]``
531
+
532
+
533
+ Warns
534
+ -----
535
+ RankWarning
536
+ The rank of the coefficient matrix in the least-squares fit is
537
+ deficient. The warning is only raised if ``full == False``.
538
+
539
+ The warnings can be turned off by
540
+
541
+ >>> import warnings
542
+ >>> warnings.simplefilter('ignore', np.RankWarning)
543
+
544
+ See Also
545
+ --------
546
+ polyval : Compute polynomial values.
547
+ linalg.lstsq : Computes a least-squares fit.
548
+ scipy.interpolate.UnivariateSpline : Computes spline fits.
549
+
550
+ Notes
551
+ -----
552
+ The solution minimizes the squared error
553
+
554
+ .. math::
555
+ E = \\sum_{j=0}^k |p(x_j) - y_j|^2
556
+
557
+ in the equations::
558
+
559
+ x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
560
+ x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
561
+ ...
562
+ x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
563
+
564
+ The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
565
+
566
+ `polyfit` issues a `RankWarning` when the least-squares fit is badly
567
+ conditioned. This implies that the best fit is not well-defined due
568
+ to numerical error. The results may be improved by lowering the polynomial
569
+ degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
570
+ can also be set to a value smaller than its default, but the resulting
571
+ fit may be spurious: including contributions from the small singular
572
+ values can add numerical noise to the result.
573
+
574
+ Note that fitting polynomial coefficients is inherently badly conditioned
575
+ when the degree of the polynomial is large or the interval of sample points
576
+ is badly centered. The quality of the fit should always be checked in these
577
+ cases. When polynomial fits are not satisfactory, splines may be a good
578
+ alternative.
579
+
580
+ References
581
+ ----------
582
+ .. [1] Wikipedia, "Curve fitting",
583
+ https://en.wikipedia.org/wiki/Curve_fitting
584
+ .. [2] Wikipedia, "Polynomial interpolation",
585
+ https://en.wikipedia.org/wiki/Polynomial_interpolation
586
+
587
+ Examples
588
+ --------
589
+ >>> import warnings
590
+ >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
591
+ >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
592
+ >>> z = np.polyfit(x, y, 3)
593
+ >>> z
594
+ array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary
595
+
596
+ It is convenient to use `poly1d` objects for dealing with polynomials:
597
+
598
+ >>> p = np.poly1d(z)
599
+ >>> p(0.5)
600
+ 0.6143849206349179 # may vary
601
+ >>> p(3.5)
602
+ -0.34732142857143039 # may vary
603
+ >>> p(10)
604
+ 22.579365079365115 # may vary
605
+
606
+ High-order polynomials may oscillate wildly:
607
+
608
+ >>> with warnings.catch_warnings():
609
+ ... warnings.simplefilter('ignore', np.RankWarning)
610
+ ... p30 = np.poly1d(np.polyfit(x, y, 30))
611
+ ...
612
+ >>> p30(4)
613
+ -0.80000000000000204 # may vary
614
+ >>> p30(5)
615
+ -0.99999999999999445 # may vary
616
+ >>> p30(4.5)
617
+ -0.10547061179440398 # may vary
618
+
619
+ Illustration:
620
+
621
+ >>> import matplotlib.pyplot as plt
622
+ >>> xp = np.linspace(-2, 6, 100)
623
+ >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
624
+ >>> plt.ylim(-2,2)
625
+ (-2, 2)
626
+ >>> plt.show()
627
+
628
+ """
629
+ order = int(deg) + 1
630
+ x = NX.asarray(x) + 0.0
631
+ y = NX.asarray(y) + 0.0
632
+
633
+ # check arguments.
634
+ if deg < 0:
635
+ raise ValueError("expected deg >= 0")
636
+ if x.ndim != 1:
637
+ raise TypeError("expected 1D vector for x")
638
+ if x.size == 0:
639
+ raise TypeError("expected non-empty vector for x")
640
+ if y.ndim < 1 or y.ndim > 2:
641
+ raise TypeError("expected 1D or 2D array for y")
642
+ if x.shape[0] != y.shape[0]:
643
+ raise TypeError("expected x and y to have same length")
644
+
645
+ # set rcond
646
+ if rcond is None:
647
+ rcond = len(x)*finfo(x.dtype).eps
648
+
649
+ # set up least squares equation for powers of x
650
+ lhs = vander(x, order)
651
+ rhs = y
652
+
653
+ # apply weighting
654
+ if w is not None:
655
+ w = NX.asarray(w) + 0.0
656
+ if w.ndim != 1:
657
+ raise TypeError("expected a 1-d array for weights")
658
+ if w.shape[0] != y.shape[0]:
659
+ raise TypeError("expected w and y to have the same length")
660
+ lhs *= w[:, NX.newaxis]
661
+ if rhs.ndim == 2:
662
+ rhs *= w[:, NX.newaxis]
663
+ else:
664
+ rhs *= w
665
+
666
+ # scale lhs to improve condition number and solve
667
+ scale = NX.sqrt((lhs*lhs).sum(axis=0))
668
+ lhs /= scale
669
+ c, resids, rank, s = lstsq(lhs, rhs, rcond)
670
+ c = (c.T/scale).T # broadcast scale coefficients
671
+
672
+ # warn on rank reduction, which indicates an ill conditioned matrix
673
+ if rank != order and not full:
674
+ msg = "Polyfit may be poorly conditioned"
675
+ warnings.warn(msg, RankWarning, stacklevel=2)
676
+
677
+ if full:
678
+ return c, resids, rank, s, rcond
679
+ elif cov:
680
+ Vbase = inv(dot(lhs.T, lhs))
681
+ Vbase /= NX.outer(scale, scale)
682
+ if cov == "unscaled":
683
+ fac = 1
684
+ else:
685
+ if len(x) <= order:
686
+ raise ValueError("the number of data points must exceed order "
687
+ "to scale the covariance matrix")
688
+ # note, this used to be: fac = resids / (len(x) - order - 2.0)
689
+ # it was decided that the "- 2" (originally justified by "Bayesian
690
+ # uncertainty analysis") is not what the user expects
691
+ # (see gh-11196 and gh-11197)
692
+ fac = resids / (len(x) - order)
693
+ if y.ndim == 1:
694
+ return c, Vbase * fac
695
+ else:
696
+ return c, Vbase[:,:, NX.newaxis] * fac
697
+ else:
698
+ return c
699
+
700
+
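As a usage sketch of the ``cov`` branch implemented above (the generator seed and the resulting numbers are illustrative only; ``default_rng`` assumes NumPy >= 1.17):

>>> rng = np.random.default_rng(0)
>>> x = np.linspace(0.0, 2.0, 20)
>>> y = 3.0*x + 1.0 + rng.normal(0.0, 0.1, x.size)
>>> c, V = np.polyfit(x, y, 1, cov=True)
>>> np.sqrt(np.diag(V))  # one-sigma uncertainty of each fitted coefficient
array([...])  # may vary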
701
+ def _polyval_dispatcher(p, x):
702
+ return (p, x)
703
+
704
+
705
+ @array_function_dispatch(_polyval_dispatcher)
706
+ def polyval(p, x):
707
+ """
708
+ Evaluate a polynomial at specific values.
709
+
710
+ .. note::
711
+ This forms part of the old polynomial API. Since version 1.4, the
712
+ new polynomial API defined in `numpy.polynomial` is preferred.
713
+ A summary of the differences can be found in the
714
+ :doc:`transition guide </reference/routines.polynomials>`.
715
+
716
+ If `p` is of length N, this function returns the value:
717
+
718
+ ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
719
+
720
+ If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``.
721
+ If `x` is another polynomial then the composite polynomial ``p(x(t))``
722
+ is returned.
723
+
724
+ Parameters
725
+ ----------
726
+ p : array_like or poly1d object
727
+ 1D array of polynomial coefficients (including coefficients equal
728
+ to zero) from highest degree to the constant term, or an
729
+ instance of poly1d.
730
+ x : array_like or poly1d object
731
+ A number, an array of numbers, or an instance of poly1d, at
732
+ which to evaluate `p`.
733
+
734
+ Returns
735
+ -------
736
+ values : ndarray or poly1d
737
+ If `x` is a poly1d instance, the result is the composition of the two
738
+ polynomials, i.e., `x` is "substituted" in `p` and the simplified
739
+ result is returned. In addition, the type of `x` - array_like or
740
+ poly1d - governs the type of the output: `x` array_like => `values`
741
+ array_like, `x` a poly1d object => `values` is also a poly1d object.
742
+
743
+ See Also
744
+ --------
745
+ poly1d: A polynomial class.
746
+
747
+ Notes
748
+ -----
749
+ Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
750
+ for polynomials of high degree the values may be inaccurate due to
751
+ rounding errors. Use carefully.
752
+
753
+ If `x` is a subtype of `ndarray` the return value will be of the same type.
754
+
755
+ References
756
+ ----------
757
+ .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
758
+ trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
759
+ Reinhold Co., 1985, pg. 720.
760
+
761
+ Examples
762
+ --------
763
+ >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
764
+ 76
765
+ >>> np.polyval([3,0,1], np.poly1d(5))
766
+ poly1d([76])
767
+ >>> np.polyval(np.poly1d([3,0,1]), 5)
768
+ 76
769
+ >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
770
+ poly1d([76])
771
+
772
+ """
773
+ p = NX.asarray(p)
774
+ if isinstance(x, poly1d):
775
+ y = 0
776
+ else:
777
+ x = NX.asanyarray(x)
778
+ y = NX.zeros_like(x)
779
+ for pv in p:
780
+ y = y * x + pv
781
+ return y
782
+
783
+
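The loop above is exactly Horner's recurrence mentioned in the Notes; a minimal sketch showing the equivalence for a cubic:

>>> p = [2.0, -3.0, 0.0, 5.0]  # 2*x**3 - 3*x**2 + 5
>>> xv = 1.5
>>> horner = 0.0
>>> for pv in p:
...     horner = horner * xv + pv  # same update as ``y = y * x + pv`` above
...
>>> horner == np.polyval(p, xv)
True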
784
+ def _binary_op_dispatcher(a1, a2):
785
+ return (a1, a2)
786
+
787
+
788
+ @array_function_dispatch(_binary_op_dispatcher)
789
+ def polyadd(a1, a2):
790
+ """
791
+ Find the sum of two polynomials.
792
+
793
+ .. note::
794
+ This forms part of the old polynomial API. Since version 1.4, the
795
+ new polynomial API defined in `numpy.polynomial` is preferred.
796
+ A summary of the differences can be found in the
797
+ :doc:`transition guide </reference/routines.polynomials>`.
798
+
799
+ Returns the polynomial resulting from the sum of two input polynomials.
800
+ Each input must be either a poly1d object or a 1D sequence of polynomial
801
+ coefficients, from highest to lowest degree.
802
+
803
+ Parameters
804
+ ----------
805
+ a1, a2 : array_like or poly1d object
806
+ Input polynomials.
807
+
808
+ Returns
809
+ -------
810
+ out : ndarray or poly1d object
811
+ The sum of the inputs. If either input is a poly1d object, then the
812
+ output is also a poly1d object. Otherwise, it is a 1D array of
813
+ polynomial coefficients from highest to lowest degree.
814
+
815
+ See Also
816
+ --------
817
+ poly1d : A one-dimensional polynomial class.
818
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
819
+
820
+ Examples
821
+ --------
822
+ >>> np.polyadd([1, 2], [9, 5, 4])
823
+ array([9, 6, 6])
824
+
825
+ Using poly1d objects:
826
+
827
+ >>> p1 = np.poly1d([1, 2])
828
+ >>> p2 = np.poly1d([9, 5, 4])
829
+ >>> print(p1)
830
+ 1 x + 2
831
+ >>> print(p2)
832
+ 2
833
+ 9 x + 5 x + 4
834
+ >>> print(np.polyadd(p1, p2))
835
+ 2
836
+ 9 x + 6 x + 6
837
+
838
+ """
839
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
840
+ a1 = atleast_1d(a1)
841
+ a2 = atleast_1d(a2)
842
+ diff = len(a2) - len(a1)
843
+ if diff == 0:
844
+ val = a1 + a2
845
+ elif diff > 0:
846
+ zr = NX.zeros(diff, a1.dtype)
847
+ val = NX.concatenate((zr, a1)) + a2
848
+ else:
849
+ zr = NX.zeros(abs(diff), a2.dtype)
850
+ val = a1 + NX.concatenate((zr, a2))
851
+ if truepoly:
852
+ val = poly1d(val)
853
+ return val
854
+
855
+
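The ``diff``-based branching above simply left-pads the shorter coefficient array with zeros before adding; a sketch of that alignment:

>>> a1, a2 = np.array([1, 2]), np.array([9, 5, 4])
>>> padded = np.concatenate((np.zeros(1, a1.dtype), a1))  # [0, 1, 2]
>>> np.array_equal(padded + a2, np.polyadd(a1, a2))
True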
856
+ @array_function_dispatch(_binary_op_dispatcher)
857
+ def polysub(a1, a2):
858
+ """
859
+ Difference (subtraction) of two polynomials.
860
+
861
+ .. note::
862
+ This forms part of the old polynomial API. Since version 1.4, the
863
+ new polynomial API defined in `numpy.polynomial` is preferred.
864
+ A summary of the differences can be found in the
865
+ :doc:`transition guide </reference/routines.polynomials>`.
866
+
867
+ Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
868
+ `a1` and `a2` can be either array_like sequences of the polynomials'
869
+ coefficients (including coefficients equal to zero), or `poly1d` objects.
870
+
871
+ Parameters
872
+ ----------
873
+ a1, a2 : array_like or poly1d
874
+ Minuend and subtrahend polynomials, respectively.
875
+
876
+ Returns
877
+ -------
878
+ out : ndarray or poly1d
879
+ Array or `poly1d` object of the difference polynomial's coefficients.
880
+
881
+ See Also
882
+ --------
883
+ polyval, polydiv, polymul, polyadd
884
+
885
+ Examples
886
+ --------
887
+ .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
888
+
889
+ >>> np.polysub([2, 10, -2], [3, 10, -4])
890
+ array([-1, 0, 2])
891
+
892
+ """
893
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
894
+ a1 = atleast_1d(a1)
895
+ a2 = atleast_1d(a2)
896
+ diff = len(a2) - len(a1)
897
+ if diff == 0:
898
+ val = a1 - a2
899
+ elif diff > 0:
900
+ zr = NX.zeros(diff, a1.dtype)
901
+ val = NX.concatenate((zr, a1)) - a2
902
+ else:
903
+ zr = NX.zeros(abs(diff), a2.dtype)
904
+ val = a1 - NX.concatenate((zr, a2))
905
+ if truepoly:
906
+ val = poly1d(val)
907
+ return val
908
+
909
+
910
+ @array_function_dispatch(_binary_op_dispatcher)
911
+ def polymul(a1, a2):
912
+ """
913
+ Find the product of two polynomials.
914
+
915
+ .. note::
916
+ This forms part of the old polynomial API. Since version 1.4, the
917
+ new polynomial API defined in `numpy.polynomial` is preferred.
918
+ A summary of the differences can be found in the
919
+ :doc:`transition guide </reference/routines.polynomials>`.
920
+
921
+ Finds the polynomial resulting from the multiplication of the two input
922
+ polynomials. Each input must be either a poly1d object or a 1D sequence
923
+ of polynomial coefficients, from highest to lowest degree.
924
+
925
+ Parameters
926
+ ----------
927
+ a1, a2 : array_like or poly1d object
928
+ Input polynomials.
929
+
930
+ Returns
931
+ -------
932
+ out : ndarray or poly1d object
933
+ The polynomial resulting from the multiplication of the inputs. If
934
+ either inputs is a poly1d object, then the output is also a poly1d
935
+ object. Otherwise, it is a 1D array of polynomial coefficients from
936
+ highest to lowest degree.
937
+
938
+ See Also
939
+ --------
940
+ poly1d : A one-dimensional polynomial class.
941
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
942
+ convolve : Array convolution. Same output as polymul, but has parameter
943
+ for overlap mode.
944
+
945
+ Examples
946
+ --------
947
+ >>> np.polymul([1, 2, 3], [9, 5, 1])
948
+ array([ 9, 23, 38, 17, 3])
949
+
950
+ Using poly1d objects:
951
+
952
+ >>> p1 = np.poly1d([1, 2, 3])
953
+ >>> p2 = np.poly1d([9, 5, 1])
954
+ >>> print(p1)
955
+ 2
956
+ 1 x + 2 x + 3
957
+ >>> print(p2)
958
+ 2
959
+ 9 x + 5 x + 1
960
+ >>> print(np.polymul(p1, p2))
961
+ 4 3 2
962
+ 9 x + 23 x + 38 x + 17 x + 3
963
+
964
+ """
965
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
966
+ a1, a2 = poly1d(a1), poly1d(a2)
967
+ val = NX.convolve(a1, a2)
968
+ if truepoly:
969
+ val = poly1d(val)
970
+ return val
971
+
972
+
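Since multiplying polynomials convolves their coefficient sequences, ``polymul`` above reduces to ``np.convolve``; a quick sketch:

>>> np.array_equal(np.polymul([1, 2, 3], [9, 5, 1]), np.convolve([1, 2, 3], [9, 5, 1]))
True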
973
+ def _polydiv_dispatcher(u, v):
974
+ return (u, v)
975
+
976
+
977
+ @array_function_dispatch(_polydiv_dispatcher)
978
+ def polydiv(u, v):
979
+ """
980
+ Returns the quotient and remainder of polynomial division.
981
+
982
+ .. note::
983
+ This forms part of the old polynomial API. Since version 1.4, the
984
+ new polynomial API defined in `numpy.polynomial` is preferred.
985
+ A summary of the differences can be found in the
986
+ :doc:`transition guide </reference/routines.polynomials>`.
987
+
988
+ The input arrays are the coefficients (including any coefficients
989
+ equal to zero) of the "numerator" (dividend) and "denominator"
990
+ (divisor) polynomials, respectively.
991
+
992
+ Parameters
993
+ ----------
994
+ u : array_like or poly1d
995
+ Dividend polynomial's coefficients.
996
+
997
+ v : array_like or poly1d
998
+ Divisor polynomial's coefficients.
999
+
1000
+ Returns
1001
+ -------
1002
+ q : ndarray
1003
+ Coefficients, including those equal to zero, of the quotient.
1004
+ r : ndarray
1005
+ Coefficients, including those equal to zero, of the remainder.
1006
+
1007
+ See Also
1008
+ --------
1009
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
1010
+ polyval
1011
+
1012
+ Notes
1013
+ -----
1014
+ Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
1015
+ not equal `v.ndim`. In other words, all four possible combinations -
1016
+ ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
1017
+ ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
1018
+
1019
+ Examples
1020
+ --------
1021
+ .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
1022
+
1023
+ >>> x = np.array([3.0, 5.0, 2.0])
1024
+ >>> y = np.array([2.0, 1.0])
1025
+ >>> np.polydiv(x, y)
1026
+ (array([1.5 , 1.75]), array([0.25]))
1027
+
1028
+ """
1029
+ truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
1030
+ u = atleast_1d(u) + 0.0
1031
+ v = atleast_1d(v) + 0.0
1032
+ # w has the common type
1033
+ w = u[0] + v[0]
1034
+ m = len(u) - 1
1035
+ n = len(v) - 1
1036
+ scale = 1. / v[0]
1037
+ q = NX.zeros((max(m - n + 1, 1),), w.dtype)
1038
+ r = u.astype(w.dtype)
1039
+ for k in range(0, m-n+1):
1040
+ d = scale * r[k]
1041
+ q[k] = d
1042
+ r[k:k+n+1] -= d*v
1043
+ while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
1044
+ r = r[1:]
1045
+ if truepoly:
1046
+ return poly1d(q), poly1d(r)
1047
+ return q, r
1048
+
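The long-division loop above satisfies the usual identity ``u = q*v + r``, which can be checked with the other helpers in this module:

>>> u = np.array([3.0, 5.0, 2.0])
>>> v = np.array([2.0, 1.0])
>>> q, r = np.polydiv(u, v)
>>> np.allclose(np.polyadd(np.polymul(q, v), r), u)
True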
1049
+ _poly_mat = re.compile(r"\*\*([0-9]*)")
1050
+ def _raise_power(astr, wrap=70):
1051
+ n = 0
1052
+ line1 = ''
1053
+ line2 = ''
1054
+ output = ' '
1055
+ while True:
1056
+ mat = _poly_mat.search(astr, n)
1057
+ if mat is None:
1058
+ break
1059
+ span = mat.span()
1060
+ power = mat.groups()[0]
1061
+ partstr = astr[n:span[0]]
1062
+ n = span[1]
1063
+ toadd2 = partstr + ' '*(len(power)-1)
1064
+ toadd1 = ' '*(len(partstr)-1) + power
1065
+ if ((len(line2) + len(toadd2) > wrap) or
1066
+ (len(line1) + len(toadd1) > wrap)):
1067
+ output += line1 + "\n" + line2 + "\n "
1068
+ line1 = toadd1
1069
+ line2 = toadd2
1070
+ else:
1071
+ line2 += partstr + ' '*(len(power)-1)
1072
+ line1 += ' '*(len(partstr)-1) + power
1073
+ output += line1 + "\n" + line2
1074
+ return output + astr[n:]
1075
+
1076
+
1077
+ @set_module('numpy')
1078
+ class poly1d:
1079
+ """
1080
+ A one-dimensional polynomial class.
1081
+
1082
+ .. note::
1083
+ This forms part of the old polynomial API. Since version 1.4, the
1084
+ new polynomial API defined in `numpy.polynomial` is preferred.
1085
+ A summary of the differences can be found in the
1086
+ :doc:`transition guide </reference/routines.polynomials>`.
1087
+
1088
+ A convenience class, used to encapsulate "natural" operations on
1089
+ polynomials so that said operations may take on their customary
1090
+ form in code (see Examples).
1091
+
1092
+ Parameters
1093
+ ----------
1094
+ c_or_r : array_like
1095
+ The polynomial's coefficients, in decreasing powers, or if
1096
+ the value of the second parameter is True, the polynomial's
1097
+ roots (values where the polynomial evaluates to 0). For example,
1098
+ ``poly1d([1, 2, 3])`` returns an object that represents
1099
+ :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
1100
+ one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
1101
+ r : bool, optional
1102
+ If True, `c_or_r` specifies the polynomial's roots; the default
1103
+ is False.
1104
+ variable : str, optional
1105
+ Changes the variable used when printing `p` from `x` to `variable`
1106
+ (see Examples).
1107
+
1108
+ Examples
1109
+ --------
1110
+ Construct the polynomial :math:`x^2 + 2x + 3`:
1111
+
1112
+ >>> p = np.poly1d([1, 2, 3])
1113
+ >>> print(np.poly1d(p))
1114
+ 2
1115
+ 1 x + 2 x + 3
1116
+
1117
+ Evaluate the polynomial at :math:`x = 0.5`:
1118
+
1119
+ >>> p(0.5)
1120
+ 4.25
1121
+
1122
+ Find the roots:
1123
+
1124
+ >>> p.r
1125
+ array([-1.+1.41421356j, -1.-1.41421356j])
1126
+ >>> p(p.r)
1127
+ array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary
1128
+
1129
+ These numbers in the previous line represent (0, 0) to machine precision.
1130
+
1131
+ Show the coefficients:
1132
+
1133
+ >>> p.c
1134
+ array([1, 2, 3])
1135
+
1136
+ Display the order (the leading zero-coefficients are removed):
1137
+
1138
+ >>> p.order
1139
+ 2
1140
+
1141
+ Show the coefficient of the k-th power in the polynomial
1142
+ (which is equivalent to ``p.c[-(k+1)]``):
1143
+
1144
+ >>> p[1]
1145
+ 2
1146
+
1147
+ Polynomials can be added, subtracted, multiplied, and divided
1148
+ (returns quotient and remainder):
1149
+
1150
+ >>> p * p
1151
+ poly1d([ 1, 4, 10, 12, 9])
1152
+
1153
+ >>> (p**3 + 4) / p
1154
+ (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.]))
1155
+
1156
+ ``asarray(p)`` gives the coefficient array, so polynomials can be
1157
+ used in all functions that accept arrays:
1158
+
1159
+ >>> p**2 # square of polynomial
1160
+ poly1d([ 1, 4, 10, 12, 9])
1161
+
1162
+ >>> np.square(p) # square of individual coefficients
1163
+ array([1, 4, 9])
1164
+
1165
+ The variable used in the string representation of `p` can be modified,
1166
+ using the `variable` parameter:
1167
+
1168
+ >>> p = np.poly1d([1,2,3], variable='z')
1169
+ >>> print(p)
1170
+ 2
1171
+ 1 z + 2 z + 3
1172
+
1173
+ Construct a polynomial from its roots:
1174
+
1175
+ >>> np.poly1d([1, 2], True)
1176
+ poly1d([ 1., -3., 2.])
1177
+
1178
+ This is the same polynomial as obtained by:
1179
+
1180
+ >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
1181
+ poly1d([ 1, -3, 2])
1182
+
1183
+ """
1184
+ __hash__ = None
1185
+
1186
+ @property
1187
+ def coeffs(self):
1188
+ """ The polynomial coefficients """
1189
+ return self._coeffs
1190
+
1191
+ @coeffs.setter
1192
+ def coeffs(self, value):
1193
+ # allowing this makes p.coeffs *= 2 legal
1194
+ if value is not self._coeffs:
1195
+ raise AttributeError("Cannot set attribute")
1196
+
1197
+ @property
1198
+ def variable(self):
1199
+ """ The name of the polynomial variable """
1200
+ return self._variable
1201
+
1202
+ # calculated attributes
1203
+ @property
1204
+ def order(self):
1205
+ """ The order or degree of the polynomial """
1206
+ return len(self._coeffs) - 1
1207
+
1208
+ @property
1209
+ def roots(self):
1210
+ """ The roots of the polynomial, where self(x) == 0 """
1211
+ return roots(self._coeffs)
1212
+
1213
+ # our internal _coeffs property needs to be backed by __dict__['coeffs'] for
1214
+ # scipy to work correctly.
1215
+ @property
1216
+ def _coeffs(self):
1217
+ return self.__dict__['coeffs']
1218
+ @_coeffs.setter
1219
+ def _coeffs(self, coeffs):
1220
+ self.__dict__['coeffs'] = coeffs
1221
+
1222
+ # alias attributes
1223
+ r = roots
1224
+ c = coef = coefficients = coeffs
1225
+ o = order
1226
+
1227
+ def __init__(self, c_or_r, r=False, variable=None):
1228
+ if isinstance(c_or_r, poly1d):
1229
+ self._variable = c_or_r._variable
1230
+ self._coeffs = c_or_r._coeffs
1231
+
1232
+ if set(c_or_r.__dict__) - set(self.__dict__):
1233
+ msg = ("In the future extra properties will not be copied "
1234
+ "across when constructing one poly1d from another")
1235
+ warnings.warn(msg, FutureWarning, stacklevel=2)
1236
+ self.__dict__.update(c_or_r.__dict__)
1237
+
1238
+ if variable is not None:
1239
+ self._variable = variable
1240
+ return
1241
+ if r:
1242
+ c_or_r = poly(c_or_r)
1243
+ c_or_r = atleast_1d(c_or_r)
1244
+ if c_or_r.ndim > 1:
1245
+ raise ValueError("Polynomial must be 1d only.")
1246
+ c_or_r = trim_zeros(c_or_r, trim='f')
1247
+ if len(c_or_r) == 0:
1248
+ c_or_r = NX.array([0], dtype=c_or_r.dtype)
1249
+ self._coeffs = c_or_r
1250
+ if variable is None:
1251
+ variable = 'x'
1252
+ self._variable = variable
1253
+
1254
+ def __array__(self, t=None):
1255
+ if t:
1256
+ return NX.asarray(self.coeffs, t)
1257
+ else:
1258
+ return NX.asarray(self.coeffs)
1259
+
1260
+ def __repr__(self):
1261
+ vals = repr(self.coeffs)
1262
+ vals = vals[6:-1]
1263
+ return "poly1d(%s)" % vals
1264
+
1265
+ def __len__(self):
1266
+ return self.order
1267
+
1268
+ def __str__(self):
1269
+ thestr = "0"
1270
+ var = self.variable
1271
+
1272
+ # Remove leading zeros
1273
+ coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
1274
+ N = len(coeffs)-1
1275
+
1276
+ def fmt_float(q):
1277
+ s = '%.4g' % q
1278
+ if s.endswith('.0000'):
1279
+ s = s[:-5]
1280
+ return s
1281
+
1282
+ for k, coeff in enumerate(coeffs):
1283
+ if not iscomplex(coeff):
1284
+ coefstr = fmt_float(real(coeff))
1285
+ elif real(coeff) == 0:
1286
+ coefstr = '%sj' % fmt_float(imag(coeff))
1287
+ else:
1288
+ coefstr = '(%s + %sj)' % (fmt_float(real(coeff)),
1289
+ fmt_float(imag(coeff)))
1290
+
1291
+ power = (N-k)
1292
+ if power == 0:
1293
+ if coefstr != '0':
1294
+ newstr = '%s' % (coefstr,)
1295
+ else:
1296
+ if k == 0:
1297
+ newstr = '0'
1298
+ else:
1299
+ newstr = ''
1300
+ elif power == 1:
1301
+ if coefstr == '0':
1302
+ newstr = ''
1303
+ elif coefstr == 'b':
1304
+ newstr = var
1305
+ else:
1306
+ newstr = '%s %s' % (coefstr, var)
1307
+ else:
1308
+ if coefstr == '0':
1309
+ newstr = ''
1310
+ elif coefstr == 'b':
1311
+ newstr = '%s**%d' % (var, power,)
1312
+ else:
1313
+ newstr = '%s %s**%d' % (coefstr, var, power)
1314
+
1315
+ if k > 0:
1316
+ if newstr != '':
1317
+ if newstr.startswith('-'):
1318
+ thestr = "%s - %s" % (thestr, newstr[1:])
1319
+ else:
1320
+ thestr = "%s + %s" % (thestr, newstr)
1321
+ else:
1322
+ thestr = newstr
1323
+ return _raise_power(thestr)
1324
+
1325
+ def __call__(self, val):
1326
+ return polyval(self.coeffs, val)
1327
+
1328
+ def __neg__(self):
1329
+ return poly1d(-self.coeffs)
1330
+
1331
+ def __pos__(self):
1332
+ return self
1333
+
1334
+ def __mul__(self, other):
1335
+ if isscalar(other):
1336
+ return poly1d(self.coeffs * other)
1337
+ else:
1338
+ other = poly1d(other)
1339
+ return poly1d(polymul(self.coeffs, other.coeffs))
1340
+
1341
+ def __rmul__(self, other):
1342
+ if isscalar(other):
1343
+ return poly1d(other * self.coeffs)
1344
+ else:
1345
+ other = poly1d(other)
1346
+ return poly1d(polymul(self.coeffs, other.coeffs))
1347
+
1348
+ def __add__(self, other):
1349
+ other = poly1d(other)
1350
+ return poly1d(polyadd(self.coeffs, other.coeffs))
1351
+
1352
+ def __radd__(self, other):
1353
+ other = poly1d(other)
1354
+ return poly1d(polyadd(self.coeffs, other.coeffs))
1355
+
1356
+ def __pow__(self, val):
1357
+ if not isscalar(val) or int(val) != val or val < 0:
1358
+ raise ValueError("Power to non-negative integers only.")
1359
+ res = [1]
1360
+ for _ in range(val):
1361
+ res = polymul(self.coeffs, res)
1362
+ return poly1d(res)
1363
+
1364
+ def __sub__(self, other):
1365
+ other = poly1d(other)
1366
+ return poly1d(polysub(self.coeffs, other.coeffs))
1367
+
1368
+ def __rsub__(self, other):
1369
+ other = poly1d(other)
1370
+ return poly1d(polysub(other.coeffs, self.coeffs))
1371
+
1372
+ def __div__(self, other):
1373
+ if isscalar(other):
1374
+ return poly1d(self.coeffs/other)
1375
+ else:
1376
+ other = poly1d(other)
1377
+ return polydiv(self, other)
1378
+
1379
+ __truediv__ = __div__
1380
+
1381
+ def __rdiv__(self, other):
1382
+ if isscalar(other):
1383
+ return poly1d(other/self.coeffs)
1384
+ else:
1385
+ other = poly1d(other)
1386
+ return polydiv(other, self)
1387
+
1388
+ __rtruediv__ = __rdiv__
1389
+
1390
+ def __eq__(self, other):
1391
+ if not isinstance(other, poly1d):
1392
+ return NotImplemented
1393
+ if self.coeffs.shape != other.coeffs.shape:
1394
+ return False
1395
+ return (self.coeffs == other.coeffs).all()
1396
+
1397
+ def __ne__(self, other):
1398
+ if not isinstance(other, poly1d):
1399
+ return NotImplemented
1400
+ return not self.__eq__(other)
1401
+
1402
+
1403
+ def __getitem__(self, val):
1404
+ ind = self.order - val
1405
+ if val > self.order:
1406
+ return self.coeffs.dtype.type(0)
1407
+ if val < 0:
1408
+ return self.coeffs.dtype.type(0)
1409
+ return self.coeffs[ind]
1410
+
1411
+ def __setitem__(self, key, val):
1412
+ ind = self.order - key
1413
+ if key < 0:
1414
+ raise ValueError("Does not support negative powers.")
1415
+ if key > self.order:
1416
+ zr = NX.zeros(key-self.order, self.coeffs.dtype)
1417
+ self._coeffs = NX.concatenate((zr, self.coeffs))
1418
+ ind = 0
1419
+ self._coeffs[ind] = val
1420
+ return
1421
+
1422
+ def __iter__(self):
1423
+ return iter(self.coeffs)
1424
+
1425
+ def integ(self, m=1, k=0):
1426
+ """
1427
+ Return an antiderivative (indefinite integral) of this polynomial.
1428
+
1429
+ Refer to `polyint` for full documentation.
1430
+
1431
+ See Also
1432
+ --------
1433
+ polyint : equivalent function
1434
+
1435
+ """
1436
+ return poly1d(polyint(self.coeffs, m=m, k=k))
1437
+
1438
+ def deriv(self, m=1):
1439
+ """
1440
+ Return a derivative of this polynomial.
1441
+
1442
+ Refer to `polyder` for full documentation.
1443
+
1444
+ See Also
1445
+ --------
1446
+ polyder : equivalent function
1447
+
1448
+ """
1449
+ return poly1d(polyder(self.coeffs, m=m))
1450
+
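A small sketch of the ``integ``/``deriv`` pair defined above round-tripping (the integration constant ``k`` drops back out under differentiation):

>>> p = np.poly1d([1, 2, 3])
>>> p.integ(k=5).deriv() == p
True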
1451
+ # Stuff to do on module import
1452
+
1453
+ warnings.simplefilter('always', RankWarning)
env-llmeval/lib/python3.10/site-packages/numpy/lib/polynomial.pyi ADDED
@@ -0,0 +1,303 @@
+ from typing import (
2
+ Literal as L,
3
+ overload,
4
+ Any,
5
+ SupportsInt,
6
+ SupportsIndex,
7
+ TypeVar,
8
+ NoReturn,
9
+ )
10
+
11
+ from numpy import (
12
+ RankWarning as RankWarning,
13
+ poly1d as poly1d,
14
+ unsignedinteger,
15
+ signedinteger,
16
+ floating,
17
+ complexfloating,
18
+ bool_,
19
+ int32,
20
+ int64,
21
+ float64,
22
+ complex128,
23
+ object_,
24
+ )
25
+
26
+ from numpy._typing import (
27
+ NDArray,
28
+ ArrayLike,
29
+ _ArrayLikeBool_co,
30
+ _ArrayLikeUInt_co,
31
+ _ArrayLikeInt_co,
32
+ _ArrayLikeFloat_co,
33
+ _ArrayLikeComplex_co,
34
+ _ArrayLikeObject_co,
35
+ )
36
+
37
+ _T = TypeVar("_T")
38
+
39
+ _2Tup = tuple[_T, _T]
40
+ _5Tup = tuple[
41
+ _T,
42
+ NDArray[float64],
43
+ NDArray[int32],
44
+ NDArray[float64],
45
+ NDArray[float64],
46
+ ]
47
+
48
+ __all__: list[str]
49
+
50
+ def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ...
51
+
52
+ # Returns either a float or complex array depending on the input values.
53
+ # See `np.linalg.eigvals`.
54
+ def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]: ...
55
+
56
+ @overload
57
+ def polyint(
58
+ p: poly1d,
59
+ m: SupportsInt | SupportsIndex = ...,
60
+ k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
61
+ ) -> poly1d: ...
62
+ @overload
63
+ def polyint(
64
+ p: _ArrayLikeFloat_co,
65
+ m: SupportsInt | SupportsIndex = ...,
66
+ k: None | _ArrayLikeFloat_co = ...,
67
+ ) -> NDArray[floating[Any]]: ...
68
+ @overload
69
+ def polyint(
70
+ p: _ArrayLikeComplex_co,
71
+ m: SupportsInt | SupportsIndex = ...,
72
+ k: None | _ArrayLikeComplex_co = ...,
73
+ ) -> NDArray[complexfloating[Any, Any]]: ...
74
+ @overload
75
+ def polyint(
76
+ p: _ArrayLikeObject_co,
77
+ m: SupportsInt | SupportsIndex = ...,
78
+ k: None | _ArrayLikeObject_co = ...,
79
+ ) -> NDArray[object_]: ...
80
+
81
+ @overload
82
+ def polyder(
83
+ p: poly1d,
84
+ m: SupportsInt | SupportsIndex = ...,
85
+ ) -> poly1d: ...
86
+ @overload
87
+ def polyder(
88
+ p: _ArrayLikeFloat_co,
89
+ m: SupportsInt | SupportsIndex = ...,
90
+ ) -> NDArray[floating[Any]]: ...
91
+ @overload
92
+ def polyder(
93
+ p: _ArrayLikeComplex_co,
94
+ m: SupportsInt | SupportsIndex = ...,
95
+ ) -> NDArray[complexfloating[Any, Any]]: ...
96
+ @overload
97
+ def polyder(
98
+ p: _ArrayLikeObject_co,
99
+ m: SupportsInt | SupportsIndex = ...,
100
+ ) -> NDArray[object_]: ...
101
+
102
+ @overload
103
+ def polyfit(
104
+ x: _ArrayLikeFloat_co,
105
+ y: _ArrayLikeFloat_co,
106
+ deg: SupportsIndex | SupportsInt,
107
+ rcond: None | float = ...,
108
+ full: L[False] = ...,
109
+ w: None | _ArrayLikeFloat_co = ...,
110
+ cov: L[False] = ...,
111
+ ) -> NDArray[float64]: ...
112
+ @overload
113
+ def polyfit(
114
+ x: _ArrayLikeComplex_co,
115
+ y: _ArrayLikeComplex_co,
116
+ deg: SupportsIndex | SupportsInt,
117
+ rcond: None | float = ...,
118
+ full: L[False] = ...,
119
+ w: None | _ArrayLikeFloat_co = ...,
120
+ cov: L[False] = ...,
121
+ ) -> NDArray[complex128]: ...
122
+ @overload
123
+ def polyfit(
124
+ x: _ArrayLikeFloat_co,
125
+ y: _ArrayLikeFloat_co,
126
+ deg: SupportsIndex | SupportsInt,
127
+ rcond: None | float = ...,
128
+ full: L[False] = ...,
129
+ w: None | _ArrayLikeFloat_co = ...,
130
+ cov: L[True, "unscaled"] = ...,
131
+ ) -> _2Tup[NDArray[float64]]: ...
132
+ @overload
133
+ def polyfit(
134
+ x: _ArrayLikeComplex_co,
135
+ y: _ArrayLikeComplex_co,
136
+ deg: SupportsIndex | SupportsInt,
137
+ rcond: None | float = ...,
138
+ full: L[False] = ...,
139
+ w: None | _ArrayLikeFloat_co = ...,
140
+ cov: L[True, "unscaled"] = ...,
141
+ ) -> _2Tup[NDArray[complex128]]: ...
142
+ @overload
143
+ def polyfit(
144
+ x: _ArrayLikeFloat_co,
145
+ y: _ArrayLikeFloat_co,
146
+ deg: SupportsIndex | SupportsInt,
147
+ rcond: None | float = ...,
148
+ full: L[True] = ...,
149
+ w: None | _ArrayLikeFloat_co = ...,
150
+ cov: bool | L["unscaled"] = ...,
151
+ ) -> _5Tup[NDArray[float64]]: ...
152
+ @overload
153
+ def polyfit(
154
+ x: _ArrayLikeComplex_co,
155
+ y: _ArrayLikeComplex_co,
156
+ deg: SupportsIndex | SupportsInt,
157
+ rcond: None | float = ...,
158
+ full: L[True] = ...,
159
+ w: None | _ArrayLikeFloat_co = ...,
160
+ cov: bool | L["unscaled"] = ...,
161
+ ) -> _5Tup[NDArray[complex128]]: ...
162
+
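A hedged sketch of how the overloads above resolve for a static type checker (``reveal_type`` is a checker-only directive, ``xf``/``yf`` are assumed ``NDArray[float64]`` inputs, and the exact rendering of the inferred type varies by tool):

# reveal_type(np.polyfit(xf, yf, 2))            # ~ NDArray[float64]
# reveal_type(np.polyfit(xf, yf, 2, cov=True))  # ~ _2Tup[NDArray[float64]]
# reveal_type(np.polyfit(xf, yf, 2, full=True)) # ~ _5Tup[NDArray[float64]]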
163
+ @overload
164
+ def polyval(
165
+ p: _ArrayLikeBool_co,
166
+ x: _ArrayLikeBool_co,
167
+ ) -> NDArray[int64]: ...
168
+ @overload
169
+ def polyval(
170
+ p: _ArrayLikeUInt_co,
171
+ x: _ArrayLikeUInt_co,
172
+ ) -> NDArray[unsignedinteger[Any]]: ...
173
+ @overload
174
+ def polyval(
175
+ p: _ArrayLikeInt_co,
176
+ x: _ArrayLikeInt_co,
177
+ ) -> NDArray[signedinteger[Any]]: ...
178
+ @overload
179
+ def polyval(
180
+ p: _ArrayLikeFloat_co,
181
+ x: _ArrayLikeFloat_co,
182
+ ) -> NDArray[floating[Any]]: ...
183
+ @overload
184
+ def polyval(
185
+ p: _ArrayLikeComplex_co,
186
+ x: _ArrayLikeComplex_co,
187
+ ) -> NDArray[complexfloating[Any, Any]]: ...
188
+ @overload
189
+ def polyval(
190
+ p: _ArrayLikeObject_co,
191
+ x: _ArrayLikeObject_co,
192
+ ) -> NDArray[object_]: ...
193
+
194
+ @overload
195
+ def polyadd(
196
+ a1: poly1d,
197
+ a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
198
+ ) -> poly1d: ...
199
+ @overload
200
+ def polyadd(
201
+ a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
202
+ a2: poly1d,
203
+ ) -> poly1d: ...
204
+ @overload
205
+ def polyadd(
206
+ a1: _ArrayLikeBool_co,
207
+ a2: _ArrayLikeBool_co,
208
+ ) -> NDArray[bool_]: ...
209
+ @overload
210
+ def polyadd(
211
+ a1: _ArrayLikeUInt_co,
212
+ a2: _ArrayLikeUInt_co,
213
+ ) -> NDArray[unsignedinteger[Any]]: ...
214
+ @overload
215
+ def polyadd(
216
+ a1: _ArrayLikeInt_co,
217
+ a2: _ArrayLikeInt_co,
218
+ ) -> NDArray[signedinteger[Any]]: ...
219
+ @overload
220
+ def polyadd(
221
+ a1: _ArrayLikeFloat_co,
222
+ a2: _ArrayLikeFloat_co,
223
+ ) -> NDArray[floating[Any]]: ...
224
+ @overload
225
+ def polyadd(
226
+ a1: _ArrayLikeComplex_co,
227
+ a2: _ArrayLikeComplex_co,
228
+ ) -> NDArray[complexfloating[Any, Any]]: ...
229
+ @overload
230
+ def polyadd(
231
+ a1: _ArrayLikeObject_co,
232
+ a2: _ArrayLikeObject_co,
233
+ ) -> NDArray[object_]: ...
234
+
235
+ @overload
236
+ def polysub(
237
+ a1: poly1d,
238
+ a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
239
+ ) -> poly1d: ...
240
+ @overload
241
+ def polysub(
242
+ a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
243
+ a2: poly1d,
244
+ ) -> poly1d: ...
245
+ @overload
246
+ def polysub(
247
+ a1: _ArrayLikeBool_co,
248
+ a2: _ArrayLikeBool_co,
249
+ ) -> NoReturn: ...
250
+ @overload
251
+ def polysub(
252
+ a1: _ArrayLikeUInt_co,
253
+ a2: _ArrayLikeUInt_co,
254
+ ) -> NDArray[unsignedinteger[Any]]: ...
255
+ @overload
256
+ def polysub(
257
+ a1: _ArrayLikeInt_co,
258
+ a2: _ArrayLikeInt_co,
259
+ ) -> NDArray[signedinteger[Any]]: ...
260
+ @overload
261
+ def polysub(
262
+ a1: _ArrayLikeFloat_co,
263
+ a2: _ArrayLikeFloat_co,
264
+ ) -> NDArray[floating[Any]]: ...
265
+ @overload
266
+ def polysub(
267
+ a1: _ArrayLikeComplex_co,
268
+ a2: _ArrayLikeComplex_co,
269
+ ) -> NDArray[complexfloating[Any, Any]]: ...
270
+ @overload
271
+ def polysub(
272
+ a1: _ArrayLikeObject_co,
273
+ a2: _ArrayLikeObject_co,
274
+ ) -> NDArray[object_]: ...
275
+
276
+ # NOTE: Not an alias, but they do have the same signature (that we can reuse)
277
+ polymul = polyadd
278
+
279
+ @overload
280
+ def polydiv(
281
+ u: poly1d,
282
+ v: _ArrayLikeComplex_co | _ArrayLikeObject_co,
283
+ ) -> _2Tup[poly1d]: ...
284
+ @overload
285
+ def polydiv(
286
+ u: _ArrayLikeComplex_co | _ArrayLikeObject_co,
287
+ v: poly1d,
288
+ ) -> _2Tup[poly1d]: ...
289
+ @overload
290
+ def polydiv(
291
+ u: _ArrayLikeFloat_co,
292
+ v: _ArrayLikeFloat_co,
293
+ ) -> _2Tup[NDArray[floating[Any]]]: ...
294
+ @overload
295
+ def polydiv(
296
+ u: _ArrayLikeComplex_co,
297
+ v: _ArrayLikeComplex_co,
298
+ ) -> _2Tup[NDArray[complexfloating[Any, Any]]]: ...
299
+ @overload
300
+ def polydiv(
301
+ u: _ArrayLikeObject_co,
302
+ v: _ArrayLikeObject_co,
303
+ ) -> _2Tup[NDArray[Any]]: ...
env-llmeval/lib/python3.10/site-packages/numpy/lib/recfunctions.py ADDED
@@ -0,0 +1,1673 @@
+ """
2
+ Collection of utilities to manipulate structured arrays.
3
+
4
+ Most of these functions were initially implemented by John Hunter for
5
+ matplotlib. They have been rewritten and extended for convenience.
6
+
7
+ """
8
+ import itertools
9
+ import numpy as np
10
+ import numpy.ma as ma
11
+ from numpy import ndarray, recarray
12
+ from numpy.ma import MaskedArray
13
+ from numpy.ma.mrecords import MaskedRecords
14
+ from numpy.core.overrides import array_function_dispatch
15
+ from numpy.lib._iotools import _is_string_like
16
+
17
+ _check_fill_value = np.ma.core._check_fill_value
18
+
19
+
20
+ __all__ = [
21
+ 'append_fields', 'apply_along_fields', 'assign_fields_by_name',
22
+ 'drop_fields', 'find_duplicates', 'flatten_descr',
23
+ 'get_fieldstructure', 'get_names', 'get_names_flat',
24
+ 'join_by', 'merge_arrays', 'rec_append_fields',
25
+ 'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
26
+ 'rename_fields', 'repack_fields', 'require_fields',
27
+ 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
28
+ ]
29
+
30
+
31
+ def _recursive_fill_fields_dispatcher(input, output):
32
+ return (input, output)
33
+
34
+
35
+ @array_function_dispatch(_recursive_fill_fields_dispatcher)
36
+ def recursive_fill_fields(input, output):
37
+ """
38
+ Fills the fields of `output` with the corresponding fields of `input`,
39
+ with support for nested structures.
40
+
41
+ Parameters
42
+ ----------
43
+ input : ndarray
44
+ Input array.
45
+ output : ndarray
46
+ Output array.
47
+
48
+ Notes
49
+ -----
50
+ * `output` should be at least the same size as `input`
51
+
52
+ Examples
53
+ --------
54
+ >>> from numpy.lib import recfunctions as rfn
55
+ >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
56
+ >>> b = np.zeros((3,), dtype=a.dtype)
57
+ >>> rfn.recursive_fill_fields(a, b)
58
+ array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
59
+
60
+ """
61
+ newdtype = output.dtype
62
+ for field in newdtype.names:
63
+ try:
64
+ current = input[field]
65
+ except ValueError:
66
+ continue
67
+ if current.dtype.names is not None:
68
+ recursive_fill_fields(current, output[field])
69
+ else:
70
+ output[field][:len(current)] = current
71
+ return output
72
+
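Because the function recurses into structured subfields, it also fills nested dtypes; a sketch (the field names are illustrative):

>>> from numpy.lib import recfunctions as rfn
>>> dt = [('n', [('i', np.int64), ('f', np.float64)])]
>>> a = np.array([((1, 1.5),)], dtype=dt)
>>> b = np.zeros((2,), dtype=dt)
>>> rfn.recursive_fill_fields(a, b)['n']['i']
array([1, 0])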
73
+
74
+ def _get_fieldspec(dtype):
75
+ """
76
+ Produce a list of name/dtype pairs corresponding to the dtype fields
77
+
78
+ Similar to dtype.descr, but the second item of each tuple is a dtype, not a
79
+ string. As a result, this handles subarray dtypes.
80
+
81
+ Can be passed to the dtype constructor to reconstruct the dtype, noting that
82
+ this (deliberately) discards field offsets.
83
+
84
+ Examples
85
+ --------
86
+ >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
87
+ >>> dt.descr
88
+ [(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
89
+ >>> _get_fieldspec(dt)
90
+ [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
91
+
92
+ """
93
+ if dtype.names is None:
94
+ # .descr returns a nameless field, so we should too
95
+ return [('', dtype)]
96
+ else:
97
+ fields = ((name, dtype.fields[name]) for name in dtype.names)
98
+ # keep any titles, if present
99
+ return [
100
+ (name if len(f) == 2 else (f[2], name), f[0])
101
+ for name, f in fields
102
+ ]
103
+
104
+
105
+ def get_names(adtype):
106
+ """
107
+ Returns the field names of the input datatype as a tuple. Input datatype
108
+ must have fields, otherwise an error is raised.
109
+
110
+ Parameters
111
+ ----------
112
+ adtype : dtype
113
+ Input datatype
114
+
115
+ Examples
116
+ --------
117
+ >>> from numpy.lib import recfunctions as rfn
118
+ >>> rfn.get_names(np.empty((1,), dtype=[('A', int)]).dtype)
119
+ ('A',)
120
+ >>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]).dtype)
121
+ ('A', 'B')
122
+ >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
123
+ >>> rfn.get_names(adtype)
124
+ ('a', ('b', ('ba', 'bb')))
125
+ """
126
+ listnames = []
127
+ names = adtype.names
128
+ for name in names:
129
+ current = adtype[name]
130
+ if current.names is not None:
131
+ listnames.append((name, tuple(get_names(current))))
132
+ else:
133
+ listnames.append(name)
134
+ return tuple(listnames)
135
+
136
+
137
+ def get_names_flat(adtype):
138
+ """
139
+ Returns the field names of the input datatype as a tuple. Input datatype
140
+ must have fields, otherwise an error is raised.
+ Nested structures are flattened beforehand.
142
+
143
+ Parameters
144
+ ----------
145
+ adtype : dtype
146
+ Input datatype
147
+
148
+ Examples
149
+ --------
150
+ >>> from numpy.lib import recfunctions as rfn
151
+ >>> rfn.get_names_flat(np.empty((1,), dtype=[('A', int)]).dtype) is None
152
+ False
153
+ >>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', str)]).dtype)
154
+ ('A', 'B')
155
+ >>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
156
+ >>> rfn.get_names_flat(adtype)
157
+ ('a', 'b', 'ba', 'bb')
158
+ """
159
+ listnames = []
160
+ names = adtype.names
161
+ for name in names:
162
+ listnames.append(name)
163
+ current = adtype[name]
164
+ if current.names is not None:
165
+ listnames.extend(get_names_flat(current))
166
+ return tuple(listnames)
167
+
168
+
169
+ def flatten_descr(ndtype):
170
+ """
171
+ Flatten a structured data-type description.
172
+
173
+ Examples
174
+ --------
175
+ >>> from numpy.lib import recfunctions as rfn
176
+ >>> ndtype = np.dtype([('a', '<i4'), ('b', [('ba', '<f8'), ('bb', '<i4')])])
177
+ >>> rfn.flatten_descr(ndtype)
178
+ (('a', dtype('int32')), ('ba', dtype('float64')), ('bb', dtype('int32')))
179
+
180
+ """
181
+ names = ndtype.names
182
+ if names is None:
183
+ return (('', ndtype),)
184
+ else:
185
+ descr = []
186
+ for field in names:
187
+ (typ, _) = ndtype.fields[field]
188
+ if typ.names is not None:
189
+ descr.extend(flatten_descr(typ))
190
+ else:
191
+ descr.append((field, typ))
192
+ return tuple(descr)
193
+
194
+
195
+ def _zip_dtype(seqarrays, flatten=False):
196
+ newdtype = []
197
+ if flatten:
198
+ for a in seqarrays:
199
+ newdtype.extend(flatten_descr(a.dtype))
200
+ else:
201
+ for a in seqarrays:
202
+ current = a.dtype
203
+ if current.names is not None and len(current.names) == 1:
204
+ # special case - dtypes of 1 field are flattened
205
+ newdtype.extend(_get_fieldspec(current))
206
+ else:
207
+ newdtype.append(('', current))
208
+ return np.dtype(newdtype)
209
+
210
+
211
+ def _zip_descr(seqarrays, flatten=False):
212
+ """
213
+ Combine the dtype description of a series of arrays.
214
+
215
+ Parameters
216
+ ----------
217
+ seqarrays : sequence of arrays
218
+ Sequence of arrays
219
+ flatten : {boolean}, optional
220
+ Whether to collapse nested descriptions.
221
+ """
222
+ return _zip_dtype(seqarrays, flatten=flatten).descr
223
+
224
+
225
+ def get_fieldstructure(adtype, lastname=None, parents=None,):
226
+ """
227
+ Returns a dictionary with fields indexing lists of their parent fields.
228
+
229
+ This function is used to simplify access to fields nested in other fields.
230
+
231
+ Parameters
232
+ ----------
233
+ adtype : np.dtype
234
+ Input datatype
235
+ lastname : optional
236
+ Last processed field name (used internally during recursion).
237
+ parents : dictionary
238
+ Dictionary of parent fields (used internally during recursion).
239
+
240
+ Examples
241
+ --------
242
+ >>> from numpy.lib import recfunctions as rfn
243
+ >>> ndtype = np.dtype([('A', int),
244
+ ... ('B', [('BA', int),
245
+ ... ('BB', [('BBA', int), ('BBB', int)])])])
246
+ >>> rfn.get_fieldstructure(ndtype)
247
+ ... # XXX: possible regression, order of BBA and BBB is swapped
248
+ {'A': [], 'B': [], 'BA': ['B'], 'BB': ['B'], 'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
249
+
250
+ """
251
+ if parents is None:
252
+ parents = {}
253
+ names = adtype.names
254
+ for name in names:
255
+ current = adtype[name]
256
+ if current.names is not None:
257
+ if lastname:
258
+ parents[name] = [lastname, ]
259
+ else:
260
+ parents[name] = []
261
+ parents.update(get_fieldstructure(current, name, parents))
262
+ else:
263
+ lastparent = [_ for _ in (parents.get(lastname, []) or [])]
264
+ if lastparent:
265
+ lastparent.append(lastname)
266
+ elif lastname:
267
+ lastparent = [lastname, ]
268
+ parents[name] = lastparent or []
269
+ return parents
270
+
271
+
272
+ def _izip_fields_flat(iterable):
273
+ """
274
+ Returns an iterator of concatenated fields from a sequence of arrays,
275
+ collapsing any nested structure.
276
+
277
+ """
278
+ for element in iterable:
279
+ if isinstance(element, np.void):
280
+ yield from _izip_fields_flat(tuple(element))
281
+ else:
282
+ yield element
283
+
284
+
285
+ def _izip_fields(iterable):
286
+ """
287
+ Returns an iterator of concatenated fields from a sequence of arrays.
288
+
289
+ """
290
+ for element in iterable:
291
+ if (hasattr(element, '__iter__') and
292
+ not isinstance(element, str)):
293
+ yield from _izip_fields(element)
294
+ elif isinstance(element, np.void) and len(tuple(element)) == 1:
295
+ # this statement is the same as in the previous branch
296
+ yield from _izip_fields(element)
297
+ else:
298
+ yield element
299
+
300
+
301
+ def _izip_records(seqarrays, fill_value=None, flatten=True):
302
+ """
303
+ Returns an iterator of concatenated items from a sequence of arrays.
304
+
305
+ Parameters
306
+ ----------
307
+ seqarrays : sequence of arrays
308
+ Sequence of arrays.
309
+ fill_value : {None, integer}
310
+ Value used to pad shorter iterables.
311
+ flatten : {True, False}, optional
+ Whether to collapse nested fields into a flat sequence of values.
313
+ """
314
+
315
+ # Should we flatten the items, or just use a nested approach
316
+ if flatten:
317
+ zipfunc = _izip_fields_flat
318
+ else:
319
+ zipfunc = _izip_fields
320
+
321
+ for tup in itertools.zip_longest(*seqarrays, fillvalue=fill_value):
322
+ yield tuple(zipfunc(tup))
323
+
324
+
325
+ def _fix_output(output, usemask=True, asrecarray=False):
326
+ """
327
+ Private function: return a recarray, a ndarray, a MaskedArray
328
+ or a MaskedRecords depending on the input parameters
329
+ """
330
+ if not isinstance(output, MaskedArray):
331
+ usemask = False
332
+ if usemask:
333
+ if asrecarray:
334
+ output = output.view(MaskedRecords)
335
+ else:
336
+ output = ma.filled(output)
337
+ if asrecarray:
338
+ output = output.view(recarray)
339
+ return output
340
+
341
+
342
+ def _fix_defaults(output, defaults=None):
343
+ """
344
+ Update the fill_value and masked data of `output`
345
+ from the default given in a dictionary defaults.
346
+ """
347
+ names = output.dtype.names
348
+ (data, mask, fill_value) = (output.data, output.mask, output.fill_value)
349
+ for (k, v) in (defaults or {}).items():
350
+ if k in names:
351
+ fill_value[k] = v
352
+ data[k][mask[k]] = v
353
+ return output
354
+
355
+
356
+ def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
357
+ usemask=None, asrecarray=None):
358
+ return seqarrays
359
+
360
+
361
+ @array_function_dispatch(_merge_arrays_dispatcher)
362
+ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
363
+ usemask=False, asrecarray=False):
364
+ """
365
+ Merge arrays field by field.
366
+
367
+ Parameters
368
+ ----------
369
+ seqarrays : sequence of ndarrays
370
+ Sequence of arrays
371
+ fill_value : {float}, optional
372
+ Filling value used to pad missing data on the shorter arrays.
373
+ flatten : {False, True}, optional
374
+ Whether to collapse nested fields.
375
+ usemask : {False, True}, optional
376
+ Whether to return a masked array or not.
377
+ asrecarray : {False, True}, optional
378
+ Whether to return a recarray (MaskedRecords) or not.
379
+
380
+ Examples
381
+ --------
382
+ >>> from numpy.lib import recfunctions as rfn
383
+ >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
384
+ array([( 1, 10.), ( 2, 20.), (-1, 30.)],
385
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
386
+
387
+ >>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),
388
+ ... np.array([10., 20., 30.])), usemask=False)
389
+ array([(1, 10.0), (2, 20.0), (-1, 30.0)],
390
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
391
+ >>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),
392
+ ... np.array([10., 20., 30.])),
393
+ ... usemask=False, asrecarray=True)
394
+ rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],
395
+ dtype=[('a', '<i8'), ('f1', '<f8')])
396
+
397
+ Notes
398
+ -----
399
+ * Without a mask, the missing value will be filled with something,
400
+ depending on its corresponding type:
401
+
402
+ * ``-1`` for integers
403
+ * ``-1.0`` for floating point numbers
404
+ * ``'-'`` for characters
405
+ * ``'-1'`` for strings
406
+ * ``True`` for boolean values
407
+ * XXX: I just obtained these values empirically
408
+ """
409
+ # Only one item in the input sequence?
410
+ if (len(seqarrays) == 1):
411
+ seqarrays = np.asanyarray(seqarrays[0])
412
+ # Do we have a single ndarray as input?
413
+ if isinstance(seqarrays, (ndarray, np.void)):
414
+ seqdtype = seqarrays.dtype
415
+ # Make sure we have named fields
416
+ if seqdtype.names is None:
417
+ seqdtype = np.dtype([('', seqdtype)])
418
+ if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
419
+ # Minimal processing needed: just make sure everything's a-ok
420
+ seqarrays = seqarrays.ravel()
421
+ # Find what type of array we must return
422
+ if usemask:
423
+ if asrecarray:
424
+ seqtype = MaskedRecords
425
+ else:
426
+ seqtype = MaskedArray
427
+ elif asrecarray:
428
+ seqtype = recarray
429
+ else:
430
+ seqtype = ndarray
431
+ return seqarrays.view(dtype=seqdtype, type=seqtype)
432
+ else:
433
+ seqarrays = (seqarrays,)
434
+ else:
435
+ # Make sure we have arrays in the input sequence
436
+ seqarrays = [np.asanyarray(_m) for _m in seqarrays]
437
+ # Find the sizes of the inputs and their maximum
438
+ sizes = tuple(a.size for a in seqarrays)
439
+ maxlength = max(sizes)
440
+ # Get the dtype of the output (flattening if needed)
441
+ newdtype = _zip_dtype(seqarrays, flatten=flatten)
442
+ # Initialize the sequences for data and mask
443
+ seqdata = []
444
+ seqmask = []
445
+ # If we expect some kind of MaskedArray, make a special loop.
446
+ if usemask:
447
+ for (a, n) in zip(seqarrays, sizes):
448
+ nbmissing = (maxlength - n)
449
+ # Get the data and mask
450
+ data = a.ravel().__array__()
451
+ mask = ma.getmaskarray(a).ravel()
452
+ # Get the filling value (if needed)
453
+ if nbmissing:
454
+ fval = _check_fill_value(fill_value, a.dtype)
455
+ if isinstance(fval, (ndarray, np.void)):
456
+ if len(fval.dtype) == 1:
457
+ fval = fval.item()[0]
458
+ fmsk = True
459
+ else:
460
+ fval = np.array(fval, dtype=a.dtype, ndmin=1)
461
+ fmsk = np.ones((1,), dtype=mask.dtype)
462
+ else:
463
+ fval = None
464
+ fmsk = True
465
+ # Store an iterator padding the input to the expected length
466
+ seqdata.append(itertools.chain(data, [fval] * nbmissing))
467
+ seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
468
+ # Create an iterator for the data
469
+ data = tuple(_izip_records(seqdata, flatten=flatten))
470
+ output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
471
+ mask=list(_izip_records(seqmask, flatten=flatten)))
472
+ if asrecarray:
473
+ output = output.view(MaskedRecords)
474
+ else:
475
+ # Same as before, without the mask we don't need...
476
+ for (a, n) in zip(seqarrays, sizes):
477
+ nbmissing = (maxlength - n)
478
+ data = a.ravel().__array__()
479
+ if nbmissing:
480
+ fval = _check_fill_value(fill_value, a.dtype)
481
+ if isinstance(fval, (ndarray, np.void)):
482
+ if len(fval.dtype) == 1:
483
+ fval = fval.item()[0]
484
+ else:
485
+ fval = np.array(fval, dtype=a.dtype, ndmin=1)
486
+ else:
487
+ fval = None
488
+ seqdata.append(itertools.chain(data, [fval] * nbmissing))
489
+ output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)),
490
+ dtype=newdtype, count=maxlength)
491
+ if asrecarray:
492
+ output = output.view(recarray)
493
+ # And we're done...
494
+ return output
495
+
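With the default ``usemask=False``, the shorter input is padded with ``fill_value`` rather than masked; a sketch overriding the default ``-1``:

>>> from numpy.lib import recfunctions as rfn
>>> merged = rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
...                           fill_value=0)
>>> merged['f0']
array([1, 2, 0])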
496
+
497
+ def _drop_fields_dispatcher(base, drop_names, usemask=None, asrecarray=None):
498
+ return (base,)
499
+
500
+
501
+ @array_function_dispatch(_drop_fields_dispatcher)
502
+ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
503
+ """
504
+ Return a new array with fields in `drop_names` dropped.
505
+
506
+ Nested fields are supported.
507
+
508
+ .. versionchanged:: 1.18.0
509
+ `drop_fields` returns an array with 0 fields if all fields are dropped,
510
+ rather than returning ``None`` as it did previously.
511
+
512
+ Parameters
513
+ ----------
514
+ base : array
515
+ Input array
516
+ drop_names : string or sequence
517
+ String or sequence of strings corresponding to the names of the
518
+ fields to drop.
519
+ usemask : {False, True}, optional
520
+ Whether to return a masked array or not.
521
+ asrecarray : string or sequence, optional
522
+ Whether to return a recarray or a mrecarray (`asrecarray=True`) or
523
+ a plain ndarray or masked array with flexible dtype. The default
524
+ is False.
525
+
526
+ Examples
527
+ --------
528
+ >>> from numpy.lib import recfunctions as rfn
529
+ >>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
530
+ ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])])
531
+ >>> rfn.drop_fields(a, 'a')
532
+ array([((2., 3),), ((5., 6),)],
533
+ dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])])
534
+ >>> rfn.drop_fields(a, 'ba')
535
+ array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])])
536
+ >>> rfn.drop_fields(a, ['ba', 'bb'])
537
+ array([(1,), (4,)], dtype=[('a', '<i8')])
538
+ """
539
+ if _is_string_like(drop_names):
540
+ drop_names = [drop_names]
541
+ else:
542
+ drop_names = set(drop_names)
543
+
544
+ def _drop_descr(ndtype, drop_names):
545
+ names = ndtype.names
546
+ newdtype = []
547
+ for name in names:
548
+ current = ndtype[name]
549
+ if name in drop_names:
550
+ continue
551
+ if current.names is not None:
552
+ descr = _drop_descr(current, drop_names)
553
+ if descr:
554
+ newdtype.append((name, descr))
555
+ else:
556
+ newdtype.append((name, current))
557
+ return newdtype
558
+
559
+ newdtype = _drop_descr(base.dtype, drop_names)
560
+
561
+ output = np.empty(base.shape, dtype=newdtype)
562
+ output = recursive_fill_fields(base, output)
563
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
564
+
565
+
566
+ def _keep_fields(base, keep_names, usemask=True, asrecarray=False):
567
+ """
568
+ Return a new array keeping only the fields in `keep_names`,
569
+ and preserving the order of those fields.
570
+
571
+ Parameters
572
+ ----------
573
+ base : array
574
+ Input array
575
+ keep_names : string or sequence
576
+ String or sequence of strings corresponding to the names of the
577
+ fields to keep. Order of the names will be preserved.
578
+ usemask : {False, True}, optional
579
+ Whether to return a masked array or not.
580
+ asrecarray : string or sequence, optional
581
+ Whether to return a recarray or a mrecarray (`asrecarray=True`) or
582
+ a plain ndarray or masked array with flexible dtype. The default
583
+ is False.
584
+ """
585
+ newdtype = [(n, base.dtype[n]) for n in keep_names]
586
+ output = np.empty(base.shape, dtype=newdtype)
587
+ output = recursive_fill_fields(base, output)
588
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
589
+
590
+
591
+ def _rec_drop_fields_dispatcher(base, drop_names):
592
+ return (base,)
593
+
594
+
595
+ @array_function_dispatch(_rec_drop_fields_dispatcher)
596
+ def rec_drop_fields(base, drop_names):
597
+ """
598
+ Returns a new numpy.recarray with fields in `drop_names` dropped.
599
+ """
600
+ return drop_fields(base, drop_names, usemask=False, asrecarray=True)
601
+
602
+
603
+ def _rename_fields_dispatcher(base, namemapper):
604
+ return (base,)
605
+
606
+
607
+ @array_function_dispatch(_rename_fields_dispatcher)
608
+ def rename_fields(base, namemapper):
609
+ """
610
+ Rename the fields from a flexible-datatype ndarray or recarray.
611
+
612
+ Nested fields are supported.
613
+
614
+ Parameters
615
+ ----------
616
+ base : ndarray
617
+ Input array whose fields must be modified.
618
+ namemapper : dictionary
619
+ Dictionary mapping old field names to their new version.
620
+
621
+ Examples
622
+ --------
623
+ >>> from numpy.lib import recfunctions as rfn
624
+ >>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
625
+ ... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
626
+ >>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
627
+ array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
628
+ dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])
629
+
630
+ """
631
+ def _recursive_rename_fields(ndtype, namemapper):
632
+ newdtype = []
633
+ for name in ndtype.names:
634
+ newname = namemapper.get(name, name)
635
+ current = ndtype[name]
636
+ if current.names is not None:
637
+ newdtype.append(
638
+ (newname, _recursive_rename_fields(current, namemapper))
639
+ )
640
+ else:
641
+ newdtype.append((newname, current))
642
+ return newdtype
643
+ newdtype = _recursive_rename_fields(base.dtype, namemapper)
644
+ return base.view(newdtype)
645
+
646
+
647
+ def _append_fields_dispatcher(base, names, data, dtypes=None,
648
+ fill_value=None, usemask=None, asrecarray=None):
649
+ yield base
650
+ yield from data
651
+
652
+
653
+ @array_function_dispatch(_append_fields_dispatcher)
654
+ def append_fields(base, names, data, dtypes=None,
655
+ fill_value=-1, usemask=True, asrecarray=False):
656
+ """
657
+ Add new fields to an existing array.
658
+
659
+ The names of the fields are given with the `names` arguments,
660
+ the corresponding values with the `data` arguments.
661
+ If a single field is appended, `names`, `data` and `dtypes` do not have
662
+ to be lists but just values.
663
+
664
+ Parameters
665
+ ----------
666
+ base : array
667
+ Input array to extend.
668
+ names : string, sequence
669
+ String or sequence of strings corresponding to the names
670
+ of the new fields.
671
+ data : array or sequence of arrays
672
+ Array or sequence of arrays storing the fields to add to the base.
673
+ dtypes : sequence of datatypes, optional
674
+ Datatype or sequence of datatypes.
675
+ If None, the datatypes are estimated from the `data`.
676
+ fill_value : {float}, optional
677
+ Filling value used to pad missing data on the shorter arrays.
678
+ usemask : {False, True}, optional
679
+ Whether to return a masked array or not.
680
+ asrecarray : {False, True}, optional
681
+ Whether to return a recarray (MaskedRecords) or not.
682
+
683
+ """
684
+ # Check the names
685
+ if isinstance(names, (tuple, list)):
686
+ if len(names) != len(data):
687
+ msg = "The number of arrays does not match the number of names"
688
+ raise ValueError(msg)
689
+ elif isinstance(names, str):
690
+ names = [names, ]
691
+ data = [data, ]
692
+ #
693
+ if dtypes is None:
694
+ data = [np.array(a, copy=False, subok=True) for a in data]
695
+ data = [a.view([(name, a.dtype)]) for (name, a) in zip(names, data)]
696
+ else:
697
+ if not isinstance(dtypes, (tuple, list)):
698
+ dtypes = [dtypes, ]
699
+ if len(data) != len(dtypes):
700
+ if len(dtypes) == 1:
701
+ dtypes = dtypes * len(data)
702
+ else:
703
+ msg = "The dtypes argument must be None, a dtype, or a list."
704
+ raise ValueError(msg)
705
+ data = [np.array(a, copy=False, subok=True, dtype=d).view([(n, d)])
706
+ for (a, n, d) in zip(data, names, dtypes)]
707
+ #
708
+ base = merge_arrays(base, usemask=usemask, fill_value=fill_value)
709
+ if len(data) > 1:
710
+ data = merge_arrays(data, flatten=True, usemask=usemask,
711
+ fill_value=fill_value)
712
+ else:
713
+ data = data.pop()
714
+ #
715
+ output = ma.masked_all(
716
+ max(len(base), len(data)),
717
+ dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))
718
+ output = recursive_fill_fields(base, output)
719
+ output = recursive_fill_fields(data, output)
720
+ #
721
+ return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
722
+
723
+
724
+ def _rec_append_fields_dispatcher(base, names, data, dtypes=None):
725
+ yield base
726
+ yield from data
727
+
728
+
729
+ @array_function_dispatch(_rec_append_fields_dispatcher)
730
+ def rec_append_fields(base, names, data, dtypes=None):
731
+ """
732
+ Add new fields to an existing array.
733
+
734
+ The names of the fields are given with the `names` arguments,
735
+ the corresponding values with the `data` arguments.
736
+ If a single field is appended, `names`, `data` and `dtypes` do not have
737
+ to be lists but just values.
738
+
739
+ Parameters
740
+ ----------
741
+ base : array
742
+ Input array to extend.
743
+ names : string, sequence
744
+ String or sequence of strings corresponding to the names
745
+ of the new fields.
746
+ data : array or sequence of arrays
747
+ Array or sequence of arrays storing the fields to add to the base.
748
+ dtypes : sequence of datatypes, optional
749
+ Datatype or sequence of datatypes.
750
+ If None, the datatypes are estimated from the `data`.
751
+
752
+ See Also
753
+ --------
754
+ append_fields
755
+
756
+ Returns
757
+ -------
758
+ appended_array : np.recarray
759
+ """
760
+ return append_fields(base, names, data=data, dtypes=dtypes,
761
+ asrecarray=True, usemask=False)
762
+
763
+
764
+ def _repack_fields_dispatcher(a, align=None, recurse=None):
765
+ return (a,)
766
+
767
+
768
+ @array_function_dispatch(_repack_fields_dispatcher)
769
+ def repack_fields(a, align=False, recurse=False):
770
+ """
771
+ Re-pack the fields of a structured array or dtype in memory.
772
+
773
+ The memory layout of structured datatypes allows fields at arbitrary
774
+ byte offsets. This means the fields can be separated by padding bytes,
775
+ their offsets can be non-monotonically increasing, and they can overlap.
776
+
777
+ This method removes any overlaps and reorders the fields in memory so they
778
+ have increasing byte offsets, and adds or removes padding bytes depending
779
+ on the `align` option, which behaves like the `align` option to
780
+ `numpy.dtype`.
781
+
782
+ If `align=False`, this method produces a "packed" memory layout in which
783
+ each field starts at the byte the previous field ended, and any padding
784
+ bytes are removed.
785
+
786
+ If `align=True`, this methods produces an "aligned" memory layout in which
787
+ each field's offset is a multiple of its alignment, and the total itemsize
788
+ is a multiple of the largest alignment, by adding padding bytes as needed.
789
+
790
+ Parameters
791
+ ----------
792
+ a : ndarray or dtype
793
+ array or dtype for which to repack the fields.
794
+ align : boolean
795
+ If true, use an "aligned" memory layout, otherwise use a "packed" layout.
796
+ recurse : boolean
797
+ If True, also repack nested structures.
798
+
799
+ Returns
800
+ -------
801
+ repacked : ndarray or dtype
802
+ Copy of `a` with fields repacked, or `a` itself if no repacking was
803
+ needed.
804
+
805
+ Examples
806
+ --------
807
+
808
+ >>> from numpy.lib import recfunctions as rfn
809
+ >>> def print_offsets(d):
810
+ ... print("offsets:", [d.fields[name][1] for name in d.names])
811
+ ... print("itemsize:", d.itemsize)
812
+ ...
813
+ >>> dt = np.dtype('u1, <i8, <f8', align=True)
814
+ >>> dt
815
+ dtype({'names': ['f0', 'f1', 'f2'], 'formats': ['u1', '<i8', '<f8'], \
816
+ 'offsets': [0, 8, 16], 'itemsize': 24}, align=True)
817
+ >>> print_offsets(dt)
818
+ offsets: [0, 8, 16]
819
+ itemsize: 24
820
+ >>> packed_dt = rfn.repack_fields(dt)
821
+ >>> packed_dt
822
+ dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
823
+ >>> print_offsets(packed_dt)
824
+ offsets: [0, 1, 9]
825
+ itemsize: 17
826
+
827
+ """
828
+ if not isinstance(a, np.dtype):
829
+ dt = repack_fields(a.dtype, align=align, recurse=recurse)
830
+ return a.astype(dt, copy=False)
831
+
832
+ if a.names is None:
833
+ return a
834
+
835
+ fieldinfo = []
836
+ for name in a.names:
837
+ tup = a.fields[name]
838
+ if recurse:
839
+ fmt = repack_fields(tup[0], align=align, recurse=True)
840
+ else:
841
+ fmt = tup[0]
842
+
843
+ if len(tup) == 3:
844
+ name = (tup[2], name)
845
+
846
+ fieldinfo.append((name, fmt))
847
+
848
+ dt = np.dtype(fieldinfo, align=align)
849
+ return np.dtype((a.type, dt))
850
+
851
+ def _get_fields_and_offsets(dt, offset=0):
852
+ """
853
+ Returns a flat list of (dtype, count, offset) tuples of all the
854
+ scalar fields in the dtype "dt", including nested fields, in left
855
+ to right order.
856
+ """
857
+
858
+ # counts up elements in subarrays, including nested subarrays, and returns
859
+ # base dtype and count
860
+ def count_elem(dt):
861
+ count = 1
862
+ while dt.shape != ():
863
+ for size in dt.shape:
864
+ count *= size
865
+ dt = dt.base
866
+ return dt, count
867
+
868
+ fields = []
869
+ for name in dt.names:
870
+ field = dt.fields[name]
871
+ f_dt, f_offset = field[0], field[1]
872
+ f_dt, n = count_elem(f_dt)
873
+
874
+ if f_dt.names is None:
875
+ fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
876
+ else:
877
+ subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
878
+ size = f_dt.itemsize
879
+
880
+ for i in range(n):
881
+ if i == 0:
882
+ # optimization: avoid list comprehension if no subarray
883
+ fields.extend(subfields)
884
+ else:
885
+ fields.extend([(d, c, o + i*size) for d, c, o in subfields])
886
+ return fields
887
+
888
+ def _common_stride(offsets, counts, itemsize):
889
+ """
890
+ Returns the stride between the fields, or None if the stride is not
891
+ constant. The values in "counts" designate the lengths of
892
+ subarrays. Subarrays are treated as many contiguous fields, with
893
+ always positive stride.
894
+ """
895
+ if len(offsets) <= 1:
896
+ return itemsize
897
+
898
+ negative = offsets[1] < offsets[0] # negative stride
899
+ if negative:
900
+ # reverse, so offsets will be ascending
901
+ it = zip(reversed(offsets), reversed(counts))
902
+ else:
903
+ it = zip(offsets, counts)
904
+
905
+ prev_offset = None
906
+ stride = None
907
+ for offset, count in it:
908
+ if count != 1: # subarray: always c-contiguous
909
+ if negative:
910
+ return None # subarrays can never have a negative stride
911
+ if stride is None:
912
+ stride = itemsize
913
+ if stride != itemsize:
914
+ return None
915
+ end_offset = offset + (count - 1) * itemsize
916
+ else:
917
+ end_offset = offset
918
+
919
+ if prev_offset is not None:
920
+ new_stride = offset - prev_offset
921
+ if stride is None:
922
+ stride = new_stride
923
+ if stride != new_stride:
924
+ return None
925
+
926
+ prev_offset = end_offset
927
+
928
+ if negative:
929
+ return -stride
930
+ return stride
931
+
932
+
933
+ def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
934
+ casting=None):
935
+ return (arr,)
936
+
937
+ @array_function_dispatch(_structured_to_unstructured_dispatcher)
938
+ def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
939
+ """
940
+ Converts an n-D structured array into an (n+1)-D unstructured array.
941
+
942
+ The new array will have a new last dimension equal in size to the
943
+ number of field-elements of the input array. If not supplied, the output
944
+ datatype is determined from the numpy type promotion rules applied to all
945
+ the field datatypes.
946
+
947
+ Nested fields, as well as each element of any subarray fields, all count
948
+ as a single field-elements.
949
+
950
+ Parameters
951
+ ----------
952
+ arr : ndarray
953
+ Structured array or dtype to convert. Cannot contain object datatype.
954
+ dtype : dtype, optional
955
+ The dtype of the output unstructured array.
956
+ copy : bool, optional
957
+ If true, always return a copy. If false, a view is returned if
958
+ possible, such as when the `dtype` and strides of the fields are
959
+ suitable and the array subtype is one of `np.ndarray`, `np.recarray`
960
+ or `np.memmap`.
961
+
962
+ .. versionchanged:: 1.25.0
963
+ A view can now be returned if the fields are separated by a
964
+ uniform stride.
965
+
966
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
967
+ See casting argument of `numpy.ndarray.astype`. Controls what kind of
968
+ data casting may occur.
969
+
970
+ Returns
971
+ -------
972
+ unstructured : ndarray
973
+ Unstructured array with one more dimension.
974
+
975
+ Examples
976
+ --------
977
+
978
+ >>> from numpy.lib import recfunctions as rfn
979
+ >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
980
+ >>> a
981
+ array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
982
+ (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
983
+ dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
984
+ >>> rfn.structured_to_unstructured(a)
985
+ array([[0., 0., 0., 0., 0.],
986
+ [0., 0., 0., 0., 0.],
987
+ [0., 0., 0., 0., 0.],
988
+ [0., 0., 0., 0., 0.]])
989
+
990
+ >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
991
+ ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
992
+ >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
993
+ array([ 3. , 5.5, 9. , 11. ])
994
+
995
+ """
996
+ if arr.dtype.names is None:
997
+ raise ValueError('arr must be a structured array')
998
+
999
+ fields = _get_fields_and_offsets(arr.dtype)
1000
+ n_fields = len(fields)
1001
+ if n_fields == 0 and dtype is None:
1002
+ raise ValueError("arr has no fields. Unable to guess dtype")
1003
+ elif n_fields == 0:
1004
+ # too many bugs elsewhere for this to work now
1005
+ raise NotImplementedError("arr with no fields is not supported")
1006
+
1007
+ dts, counts, offsets = zip(*fields)
1008
+ names = ['f{}'.format(n) for n in range(n_fields)]
1009
+
1010
+ if dtype is None:
1011
+ out_dtype = np.result_type(*[dt.base for dt in dts])
1012
+ else:
1013
+ out_dtype = np.dtype(dtype)
1014
+
1015
+ # Use a series of views and casts to convert to an unstructured array:
1016
+
1017
+ # first view using flattened fields (doesn't work for object arrays)
1018
+ # Note: dts may include a shape for subarrays
1019
+ flattened_fields = np.dtype({'names': names,
1020
+ 'formats': dts,
1021
+ 'offsets': offsets,
1022
+ 'itemsize': arr.dtype.itemsize})
1023
+ arr = arr.view(flattened_fields)
1024
+
1025
+ # we only allow a few types to be unstructured by manipulating the
1026
+ # strides, because we know it won't work with, for example, np.matrix nor
1027
+ # np.ma.MaskedArray.
1028
+ can_view = type(arr) in (np.ndarray, np.recarray, np.memmap)
1029
+ if (not copy) and can_view and all(dt.base == out_dtype for dt in dts):
1030
+ # all elements have the right dtype already; if they have a common
1031
+ # stride, we can just return a view
1032
+ common_stride = _common_stride(offsets, counts, out_dtype.itemsize)
1033
+ if common_stride is not None:
1034
+ wrap = arr.__array_wrap__
1035
+
1036
+ new_shape = arr.shape + (sum(counts), out_dtype.itemsize)
1037
+ new_strides = arr.strides + (abs(common_stride), 1)
1038
+
1039
+ arr = arr[..., np.newaxis].view(np.uint8) # view as bytes
1040
+ arr = arr[..., min(offsets):] # remove the leading unused data
1041
+ arr = np.lib.stride_tricks.as_strided(arr,
1042
+ new_shape,
1043
+ new_strides,
1044
+ subok=True)
1045
+
1046
+ # cast and drop the last dimension again
1047
+ arr = arr.view(out_dtype)[..., 0]
1048
+
1049
+ if common_stride < 0:
1050
+ arr = arr[..., ::-1] # reverse, if the stride was negative
1051
+ if type(arr) is not type(wrap.__self__):
1052
+ # Some types (e.g. recarray) turn into an ndarray along the
1053
+ # way, so we have to wrap it again in order to match the
1054
+ # behavior with copy=True.
1055
+ arr = wrap(arr)
1056
+ return arr
1057
+
1058
+ # next cast to a packed format with all fields converted to new dtype
1059
+ packed_fields = np.dtype({'names': names,
1060
+ 'formats': [(out_dtype, dt.shape) for dt in dts]})
1061
+ arr = arr.astype(packed_fields, copy=copy, casting=casting)
1062
+
1063
+ # finally is it safe to view the packed fields as the unstructured type
1064
+ return arr.view((out_dtype, (sum(counts),)))
1065
+
1066
+
1067
+ def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
1068
+ align=None, copy=None, casting=None):
1069
+ return (arr,)
1070
+
1071
+ @array_function_dispatch(_unstructured_to_structured_dispatcher)
1072
+ def unstructured_to_structured(arr, dtype=None, names=None, align=False,
1073
+ copy=False, casting='unsafe'):
1074
+ """
1075
+ Converts an n-D unstructured array into an (n-1)-D structured array.
1076
+
1077
+ The last dimension of the input array is converted into a structure, with
1078
+ number of field-elements equal to the size of the last dimension of the
1079
+ input array. By default all output fields have the input array's dtype, but
1080
+ an output structured dtype with an equal number of fields-elements can be
1081
+ supplied instead.
1082
+
1083
+ Nested fields, as well as each element of any subarray fields, all count
1084
+ towards the number of field-elements.
1085
+
1086
+ Parameters
1087
+ ----------
1088
+ arr : ndarray
1089
+ Unstructured array or dtype to convert.
1090
+ dtype : dtype, optional
1091
+ The structured dtype of the output array
1092
+ names : list of strings, optional
1093
+ If dtype is not supplied, this specifies the field names for the output
1094
+ dtype, in order. The field dtypes will be the same as the input array.
1095
+ align : boolean, optional
1096
+ Whether to create an aligned memory layout.
1097
+ copy : bool, optional
1098
+ See copy argument to `numpy.ndarray.astype`. If true, always return a
1099
+ copy. If false, and `dtype` requirements are satisfied, a view is
1100
+ returned.
1101
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
1102
+ See casting argument of `numpy.ndarray.astype`. Controls what kind of
1103
+ data casting may occur.
1104
+
1105
+ Returns
1106
+ -------
1107
+ structured : ndarray
1108
+ Structured array with fewer dimensions.
1109
+
1110
+ Examples
1111
+ --------
1112
+
1113
+ >>> from numpy.lib import recfunctions as rfn
1114
+ >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
1115
+ >>> a = np.arange(20).reshape((4,5))
1116
+ >>> a
1117
+ array([[ 0, 1, 2, 3, 4],
1118
+ [ 5, 6, 7, 8, 9],
1119
+ [10, 11, 12, 13, 14],
1120
+ [15, 16, 17, 18, 19]])
1121
+ >>> rfn.unstructured_to_structured(a, dt)
1122
+ array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]),
1123
+ (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
1124
+ dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
1125
+
1126
+ """
1127
+ if arr.shape == ():
1128
+ raise ValueError('arr must have at least one dimension')
1129
+ n_elem = arr.shape[-1]
1130
+ if n_elem == 0:
1131
+ # too many bugs elsewhere for this to work now
1132
+ raise NotImplementedError("last axis with size 0 is not supported")
1133
+
1134
+ if dtype is None:
1135
+ if names is None:
1136
+ names = ['f{}'.format(n) for n in range(n_elem)]
1137
+ out_dtype = np.dtype([(n, arr.dtype) for n in names], align=align)
1138
+ fields = _get_fields_and_offsets(out_dtype)
1139
+ dts, counts, offsets = zip(*fields)
1140
+ else:
1141
+ if names is not None:
1142
+ raise ValueError("don't supply both dtype and names")
1143
+ # if dtype is the args of np.dtype, construct it
1144
+ dtype = np.dtype(dtype)
1145
+ # sanity check of the input dtype
1146
+ fields = _get_fields_and_offsets(dtype)
1147
+ if len(fields) == 0:
1148
+ dts, counts, offsets = [], [], []
1149
+ else:
1150
+ dts, counts, offsets = zip(*fields)
1151
+
1152
+ if n_elem != sum(counts):
1153
+ raise ValueError('The length of the last dimension of arr must '
1154
+ 'be equal to the number of fields in dtype')
1155
+ out_dtype = dtype
1156
+ if align and not out_dtype.isalignedstruct:
1157
+ raise ValueError("align was True but dtype is not aligned")
1158
+
1159
+ names = ['f{}'.format(n) for n in range(len(fields))]
1160
+
1161
+ # Use a series of views and casts to convert to a structured array:
1162
+
1163
+ # first view as a packed structured array of one dtype
1164
+ packed_fields = np.dtype({'names': names,
1165
+ 'formats': [(arr.dtype, dt.shape) for dt in dts]})
1166
+ arr = np.ascontiguousarray(arr).view(packed_fields)
1167
+
1168
+ # next cast to an unpacked but flattened format with varied dtypes
1169
+ flattened_fields = np.dtype({'names': names,
1170
+ 'formats': dts,
1171
+ 'offsets': offsets,
1172
+ 'itemsize': out_dtype.itemsize})
1173
+ arr = arr.astype(flattened_fields, copy=copy, casting=casting)
1174
+
1175
+ # finally view as the final nested dtype and remove the last axis
1176
+ return arr.view(out_dtype)[..., 0]
1177
+
1178
+ def _apply_along_fields_dispatcher(func, arr):
1179
+ return (arr,)
1180
+
1181
+ @array_function_dispatch(_apply_along_fields_dispatcher)
1182
+ def apply_along_fields(func, arr):
1183
+ """
1184
+ Apply function 'func' as a reduction across fields of a structured array.
1185
+
1186
+ This is similar to `apply_along_axis`, but treats the fields of a
1187
+ structured array as an extra axis. The fields are all first cast to a
1188
+ common type following the type-promotion rules from `numpy.result_type`
1189
+ applied to the field's dtypes.
1190
+
1191
+ Parameters
1192
+ ----------
1193
+ func : function
1194
+ Function to apply on the "field" dimension. This function must
1195
+ support an `axis` argument, like np.mean, np.sum, etc.
1196
+ arr : ndarray
1197
+ Structured array for which to apply func.
1198
+
1199
+ Returns
1200
+ -------
1201
+ out : ndarray
1202
+ Result of the recution operation
1203
+
1204
+ Examples
1205
+ --------
1206
+
1207
+ >>> from numpy.lib import recfunctions as rfn
1208
+ >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
1209
+ ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
1210
+ >>> rfn.apply_along_fields(np.mean, b)
1211
+ array([ 2.66666667, 5.33333333, 8.66666667, 11. ])
1212
+ >>> rfn.apply_along_fields(np.mean, b[['x', 'z']])
1213
+ array([ 3. , 5.5, 9. , 11. ])
1214
+
1215
+ """
1216
+ if arr.dtype.names is None:
1217
+ raise ValueError('arr must be a structured array')
1218
+
1219
+ uarr = structured_to_unstructured(arr)
1220
+ return func(uarr, axis=-1)
1221
+ # works and avoids axis requirement, but very, very slow:
1222
+ #return np.apply_along_axis(func, -1, uarr)
1223
+
1224
+ def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None):
1225
+ return dst, src
1226
+
1227
+ @array_function_dispatch(_assign_fields_by_name_dispatcher)
1228
+ def assign_fields_by_name(dst, src, zero_unassigned=True):
1229
+ """
1230
+ Assigns values from one structured array to another by field name.
1231
+
1232
+ Normally in numpy >= 1.14, assignment of one structured array to another
1233
+ copies fields "by position", meaning that the first field from the src is
1234
+ copied to the first field of the dst, and so on, regardless of field name.
1235
+
1236
+ This function instead copies "by field name", such that fields in the dst
1237
+ are assigned from the identically named field in the src. This applies
1238
+ recursively for nested structures. This is how structure assignment worked
1239
+ in numpy >= 1.6 to <= 1.13.
1240
+
1241
+ Parameters
1242
+ ----------
1243
+ dst : ndarray
1244
+ src : ndarray
1245
+ The source and destination arrays during assignment.
1246
+ zero_unassigned : bool, optional
1247
+ If True, fields in the dst for which there was no matching
1248
+ field in the src are filled with the value 0 (zero). This
1249
+ was the behavior of numpy <= 1.13. If False, those fields
1250
+ are not modified.
1251
+ """
1252
+
1253
+ if dst.dtype.names is None:
1254
+ dst[...] = src
1255
+ return
1256
+
1257
+ for name in dst.dtype.names:
1258
+ if name not in src.dtype.names:
1259
+ if zero_unassigned:
1260
+ dst[name] = 0
1261
+ else:
1262
+ assign_fields_by_name(dst[name], src[name],
1263
+ zero_unassigned)
1264
+
1265
+ def _require_fields_dispatcher(array, required_dtype):
1266
+ return (array,)
1267
+
1268
+ @array_function_dispatch(_require_fields_dispatcher)
1269
+ def require_fields(array, required_dtype):
1270
+ """
1271
+ Casts a structured array to a new dtype using assignment by field-name.
1272
+
1273
+ This function assigns from the old to the new array by name, so the
1274
+ value of a field in the output array is the value of the field with the
1275
+ same name in the source array. This has the effect of creating a new
1276
+ ndarray containing only the fields "required" by the required_dtype.
1277
+
1278
+ If a field name in the required_dtype does not exist in the
1279
+ input array, that field is created and set to 0 in the output array.
1280
+
1281
+ Parameters
1282
+ ----------
1283
+ a : ndarray
1284
+ array to cast
1285
+ required_dtype : dtype
1286
+ datatype for output array
1287
+
1288
+ Returns
1289
+ -------
1290
+ out : ndarray
1291
+ array with the new dtype, with field values copied from the fields in
1292
+ the input array with the same name
1293
+
1294
+ Examples
1295
+ --------
1296
+
1297
+ >>> from numpy.lib import recfunctions as rfn
1298
+ >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
1299
+ >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
1300
+ array([(1., 1), (1., 1), (1., 1), (1., 1)],
1301
+ dtype=[('b', '<f4'), ('c', 'u1')])
1302
+ >>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
1303
+ array([(1., 0), (1., 0), (1., 0), (1., 0)],
1304
+ dtype=[('b', '<f4'), ('newf', 'u1')])
1305
+
1306
+ """
1307
+ out = np.empty(array.shape, dtype=required_dtype)
1308
+ assign_fields_by_name(out, array)
1309
+ return out
1310
+
1311
+
1312
+ def _stack_arrays_dispatcher(arrays, defaults=None, usemask=None,
1313
+ asrecarray=None, autoconvert=None):
1314
+ return arrays
1315
+
1316
+
1317
+ @array_function_dispatch(_stack_arrays_dispatcher)
1318
+ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
1319
+ autoconvert=False):
1320
+ """
1321
+ Superposes arrays fields by fields
1322
+
1323
+ Parameters
1324
+ ----------
1325
+ arrays : array or sequence
1326
+ Sequence of input arrays.
1327
+ defaults : dictionary, optional
1328
+ Dictionary mapping field names to the corresponding default values.
1329
+ usemask : {True, False}, optional
1330
+ Whether to return a MaskedArray (or MaskedRecords is
1331
+ `asrecarray==True`) or a ndarray.
1332
+ asrecarray : {False, True}, optional
1333
+ Whether to return a recarray (or MaskedRecords if `usemask==True`)
1334
+ or just a flexible-type ndarray.
1335
+ autoconvert : {False, True}, optional
1336
+ Whether automatically cast the type of the field to the maximum.
1337
+
1338
+ Examples
1339
+ --------
1340
+ >>> from numpy.lib import recfunctions as rfn
1341
+ >>> x = np.array([1, 2,])
1342
+ >>> rfn.stack_arrays(x) is x
1343
+ True
1344
+ >>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
1345
+ >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
1346
+ ... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
1347
+ >>> test = rfn.stack_arrays((z,zz))
1348
+ >>> test
1349
+ masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
1350
+ (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
1351
+ mask=[(False, False, True), (False, False, True),
1352
+ (False, False, False), (False, False, False),
1353
+ (False, False, False)],
1354
+ fill_value=(b'N/A', 1.e+20, 1.e+20),
1355
+ dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')])
1356
+
1357
+ """
1358
+ if isinstance(arrays, ndarray):
1359
+ return arrays
1360
+ elif len(arrays) == 1:
1361
+ return arrays[0]
1362
+ seqarrays = [np.asanyarray(a).ravel() for a in arrays]
1363
+ nrecords = [len(a) for a in seqarrays]
1364
+ ndtype = [a.dtype for a in seqarrays]
1365
+ fldnames = [d.names for d in ndtype]
1366
+ #
1367
+ dtype_l = ndtype[0]
1368
+ newdescr = _get_fieldspec(dtype_l)
1369
+ names = [n for n, d in newdescr]
1370
+ for dtype_n in ndtype[1:]:
1371
+ for fname, fdtype in _get_fieldspec(dtype_n):
1372
+ if fname not in names:
1373
+ newdescr.append((fname, fdtype))
1374
+ names.append(fname)
1375
+ else:
1376
+ nameidx = names.index(fname)
1377
+ _, cdtype = newdescr[nameidx]
1378
+ if autoconvert:
1379
+ newdescr[nameidx] = (fname, max(fdtype, cdtype))
1380
+ elif fdtype != cdtype:
1381
+ raise TypeError("Incompatible type '%s' <> '%s'" %
1382
+ (cdtype, fdtype))
1383
+ # Only one field: use concatenate
1384
+ if len(newdescr) == 1:
1385
+ output = ma.concatenate(seqarrays)
1386
+ else:
1387
+ #
1388
+ output = ma.masked_all((np.sum(nrecords),), newdescr)
1389
+ offset = np.cumsum(np.r_[0, nrecords])
1390
+ seen = []
1391
+ for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
1392
+ names = a.dtype.names
1393
+ if names is None:
1394
+ output['f%i' % len(seen)][i:j] = a
1395
+ else:
1396
+ for name in n:
1397
+ output[name][i:j] = a[name]
1398
+ if name not in seen:
1399
+ seen.append(name)
1400
+ #
1401
+ return _fix_output(_fix_defaults(output, defaults),
1402
+ usemask=usemask, asrecarray=asrecarray)
1403
+
1404
+
1405
+ def _find_duplicates_dispatcher(
1406
+ a, key=None, ignoremask=None, return_index=None):
1407
+ return (a,)
1408
+
1409
+
1410
+ @array_function_dispatch(_find_duplicates_dispatcher)
1411
+ def find_duplicates(a, key=None, ignoremask=True, return_index=False):
1412
+ """
1413
+ Find the duplicates in a structured array along a given key
1414
+
1415
+ Parameters
1416
+ ----------
1417
+ a : array-like
1418
+ Input array
1419
+ key : {string, None}, optional
1420
+ Name of the fields along which to check the duplicates.
1421
+ If None, the search is performed by records
1422
+ ignoremask : {True, False}, optional
1423
+ Whether masked data should be discarded or considered as duplicates.
1424
+ return_index : {False, True}, optional
1425
+ Whether to return the indices of the duplicated values.
1426
+
1427
+ Examples
1428
+ --------
1429
+ >>> from numpy.lib import recfunctions as rfn
1430
+ >>> ndtype = [('a', int)]
1431
+ >>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
1432
+ ... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
1433
+ >>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
1434
+ (masked_array(data=[(1,), (1,), (2,), (2,)],
1435
+ mask=[(False,), (False,), (False,), (False,)],
1436
+ fill_value=(999999,),
1437
+ dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
1438
+ """
1439
+ a = np.asanyarray(a).ravel()
1440
+ # Get a dictionary of fields
1441
+ fields = get_fieldstructure(a.dtype)
1442
+ # Get the sorting data (by selecting the corresponding field)
1443
+ base = a
1444
+ if key:
1445
+ for f in fields[key]:
1446
+ base = base[f]
1447
+ base = base[key]
1448
+ # Get the sorting indices and the sorted data
1449
+ sortidx = base.argsort()
1450
+ sortedbase = base[sortidx]
1451
+ sorteddata = sortedbase.filled()
1452
+ # Compare the sorting data
1453
+ flag = (sorteddata[:-1] == sorteddata[1:])
1454
+ # If masked data must be ignored, set the flag to false where needed
1455
+ if ignoremask:
1456
+ sortedmask = sortedbase.recordmask
1457
+ flag[sortedmask[1:]] = False
1458
+ flag = np.concatenate(([False], flag))
1459
+ # We need to take the point on the left as well (else we're missing it)
1460
+ flag[:-1] = flag[:-1] + flag[1:]
1461
+ duplicates = a[sortidx][flag]
1462
+ if return_index:
1463
+ return (duplicates, sortidx[flag])
1464
+ else:
1465
+ return duplicates
1466
+
1467
+
1468
+ def _join_by_dispatcher(
1469
+ key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
1470
+ defaults=None, usemask=None, asrecarray=None):
1471
+ return (r1, r2)
1472
+
1473
+
1474
+ @array_function_dispatch(_join_by_dispatcher)
1475
+ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
1476
+ defaults=None, usemask=True, asrecarray=False):
1477
+ """
1478
+ Join arrays `r1` and `r2` on key `key`.
1479
+
1480
+ The key should be either a string or a sequence of string corresponding
1481
+ to the fields used to join the array. An exception is raised if the
1482
+ `key` field cannot be found in the two input arrays. Neither `r1` nor
1483
+ `r2` should have any duplicates along `key`: the presence of duplicates
1484
+ will make the output quite unreliable. Note that duplicates are not
1485
+ looked for by the algorithm.
1486
+
1487
+ Parameters
1488
+ ----------
1489
+ key : {string, sequence}
1490
+ A string or a sequence of strings corresponding to the fields used
1491
+ for comparison.
1492
+ r1, r2 : arrays
1493
+ Structured arrays.
1494
+ jointype : {'inner', 'outer', 'leftouter'}, optional
1495
+ If 'inner', returns the elements common to both r1 and r2.
1496
+ If 'outer', returns the common elements as well as the elements of
1497
+ r1 not in r2 and the elements of not in r2.
1498
+ If 'leftouter', returns the common elements and the elements of r1
1499
+ not in r2.
1500
+ r1postfix : string, optional
1501
+ String appended to the names of the fields of r1 that are present
1502
+ in r2 but absent of the key.
1503
+ r2postfix : string, optional
1504
+ String appended to the names of the fields of r2 that are present
1505
+ in r1 but absent of the key.
1506
+ defaults : {dictionary}, optional
1507
+ Dictionary mapping field names to the corresponding default values.
1508
+ usemask : {True, False}, optional
1509
+ Whether to return a MaskedArray (or MaskedRecords is
1510
+ `asrecarray==True`) or a ndarray.
1511
+ asrecarray : {False, True}, optional
1512
+ Whether to return a recarray (or MaskedRecords if `usemask==True`)
1513
+ or just a flexible-type ndarray.
1514
+
1515
+ Notes
1516
+ -----
1517
+ * The output is sorted along the key.
1518
+ * A temporary array is formed by dropping the fields not in the key for
1519
+ the two arrays and concatenating the result. This array is then
1520
+ sorted, and the common entries selected. The output is constructed by
1521
+ filling the fields with the selected entries. Matching is not
1522
+ preserved if there are some duplicates...
1523
+
1524
+ """
1525
+ # Check jointype
1526
+ if jointype not in ('inner', 'outer', 'leftouter'):
1527
+ raise ValueError(
1528
+ "The 'jointype' argument should be in 'inner', "
1529
+ "'outer' or 'leftouter' (got '%s' instead)" % jointype
1530
+ )
1531
+ # If we have a single key, put it in a tuple
1532
+ if isinstance(key, str):
1533
+ key = (key,)
1534
+
1535
+ # Check the keys
1536
+ if len(set(key)) != len(key):
1537
+ dup = next(x for n,x in enumerate(key) if x in key[n+1:])
1538
+ raise ValueError("duplicate join key %r" % dup)
1539
+ for name in key:
1540
+ if name not in r1.dtype.names:
1541
+ raise ValueError('r1 does not have key field %r' % name)
1542
+ if name not in r2.dtype.names:
1543
+ raise ValueError('r2 does not have key field %r' % name)
1544
+
1545
+ # Make sure we work with ravelled arrays
1546
+ r1 = r1.ravel()
1547
+ r2 = r2.ravel()
1548
+ # Fixme: nb2 below is never used. Commenting out for pyflakes.
1549
+ # (nb1, nb2) = (len(r1), len(r2))
1550
+ nb1 = len(r1)
1551
+ (r1names, r2names) = (r1.dtype.names, r2.dtype.names)
1552
+
1553
+ # Check the names for collision
1554
+ collisions = (set(r1names) & set(r2names)) - set(key)
1555
+ if collisions and not (r1postfix or r2postfix):
1556
+ msg = "r1 and r2 contain common names, r1postfix and r2postfix "
1557
+ msg += "can't both be empty"
1558
+ raise ValueError(msg)
1559
+
1560
+ # Make temporary arrays of just the keys
1561
+ # (use order of keys in `r1` for back-compatibility)
1562
+ key1 = [ n for n in r1names if n in key ]
1563
+ r1k = _keep_fields(r1, key1)
1564
+ r2k = _keep_fields(r2, key1)
1565
+
1566
+ # Concatenate the two arrays for comparison
1567
+ aux = ma.concatenate((r1k, r2k))
1568
+ idx_sort = aux.argsort(order=key)
1569
+ aux = aux[idx_sort]
1570
+ #
1571
+ # Get the common keys
1572
+ flag_in = ma.concatenate(([False], aux[1:] == aux[:-1]))
1573
+ flag_in[:-1] = flag_in[1:] + flag_in[:-1]
1574
+ idx_in = idx_sort[flag_in]
1575
+ idx_1 = idx_in[(idx_in < nb1)]
1576
+ idx_2 = idx_in[(idx_in >= nb1)] - nb1
1577
+ (r1cmn, r2cmn) = (len(idx_1), len(idx_2))
1578
+ if jointype == 'inner':
1579
+ (r1spc, r2spc) = (0, 0)
1580
+ elif jointype == 'outer':
1581
+ idx_out = idx_sort[~flag_in]
1582
+ idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
1583
+ idx_2 = np.concatenate((idx_2, idx_out[(idx_out >= nb1)] - nb1))
1584
+ (r1spc, r2spc) = (len(idx_1) - r1cmn, len(idx_2) - r2cmn)
1585
+ elif jointype == 'leftouter':
1586
+ idx_out = idx_sort[~flag_in]
1587
+ idx_1 = np.concatenate((idx_1, idx_out[(idx_out < nb1)]))
1588
+ (r1spc, r2spc) = (len(idx_1) - r1cmn, 0)
1589
+ # Select the entries from each input
1590
+ (s1, s2) = (r1[idx_1], r2[idx_2])
1591
+ #
1592
+ # Build the new description of the output array .......
1593
+ # Start with the key fields
1594
+ ndtype = _get_fieldspec(r1k.dtype)
1595
+
1596
+ # Add the fields from r1
1597
+ for fname, fdtype in _get_fieldspec(r1.dtype):
1598
+ if fname not in key:
1599
+ ndtype.append((fname, fdtype))
1600
+
1601
+ # Add the fields from r2
1602
+ for fname, fdtype in _get_fieldspec(r2.dtype):
1603
+ # Have we seen the current name already ?
1604
+ # we need to rebuild this list every time
1605
+ names = list(name for name, dtype in ndtype)
1606
+ try:
1607
+ nameidx = names.index(fname)
1608
+ except ValueError:
1609
+ #... we haven't: just add the description to the current list
1610
+ ndtype.append((fname, fdtype))
1611
+ else:
1612
+ # collision
1613
+ _, cdtype = ndtype[nameidx]
1614
+ if fname in key:
1615
+ # The current field is part of the key: take the largest dtype
1616
+ ndtype[nameidx] = (fname, max(fdtype, cdtype))
1617
+ else:
1618
+ # The current field is not part of the key: add the suffixes,
1619
+ # and place the new field adjacent to the old one
1620
+ ndtype[nameidx:nameidx + 1] = [
1621
+ (fname + r1postfix, cdtype),
1622
+ (fname + r2postfix, fdtype)
1623
+ ]
1624
+ # Rebuild a dtype from the new fields
1625
+ ndtype = np.dtype(ndtype)
1626
+ # Find the largest nb of common fields :
1627
+ # r1cmn and r2cmn should be equal, but...
1628
+ cmn = max(r1cmn, r2cmn)
1629
+ # Construct an empty array
1630
+ output = ma.masked_all((cmn + r1spc + r2spc,), dtype=ndtype)
1631
+ names = output.dtype.names
1632
+ for f in r1names:
1633
+ selected = s1[f]
1634
+ if f not in names or (f in r2names and not r2postfix and f not in key):
1635
+ f += r1postfix
1636
+ current = output[f]
1637
+ current[:r1cmn] = selected[:r1cmn]
1638
+ if jointype in ('outer', 'leftouter'):
1639
+ current[cmn:cmn + r1spc] = selected[r1cmn:]
1640
+ for f in r2names:
1641
+ selected = s2[f]
1642
+ if f not in names or (f in r1names and not r1postfix and f not in key):
1643
+ f += r2postfix
1644
+ current = output[f]
1645
+ current[:r2cmn] = selected[:r2cmn]
1646
+ if (jointype == 'outer') and r2spc:
1647
+ current[-r2spc:] = selected[r2cmn:]
1648
+ # Sort and finalize the output
1649
+ output.sort(order=key)
1650
+ kwargs = dict(usemask=usemask, asrecarray=asrecarray)
1651
+ return _fix_output(_fix_defaults(output, defaults), **kwargs)
1652
+
1653
+
1654
+ def _rec_join_dispatcher(
1655
+ key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
1656
+ defaults=None):
1657
+ return (r1, r2)
1658
+
1659
+
1660
+ @array_function_dispatch(_rec_join_dispatcher)
1661
+ def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
1662
+ defaults=None):
1663
+ """
1664
+ Join arrays `r1` and `r2` on keys.
1665
+ Alternative to join_by, that always returns a np.recarray.
1666
+
1667
+ See Also
1668
+ --------
1669
+ join_by : equivalent function
1670
+ """
1671
+ kwargs = dict(jointype=jointype, r1postfix=r1postfix, r2postfix=r2postfix,
1672
+ defaults=defaults, usemask=False, asrecarray=True)
1673
+ return join_by(key, r1, r2, **kwargs)
env-llmeval/lib/python3.10/site-packages/numpy/lib/scimath.py ADDED
@@ -0,0 +1,625 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Wrapper functions to more user-friendly calling of certain math functions
3
+ whose output data-type is different than the input data-type in certain
4
+ domains of the input.
5
+
6
+ For example, for functions like `log` with branch cuts, the versions in this
7
+ module provide the mathematically valid answers in the complex plane::
8
+
9
+ >>> import math
10
+ >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi)
11
+ True
12
+
13
+ Similarly, `sqrt`, other base logarithms, `power` and trig functions are
14
+ correctly handled. See their respective docstrings for specific examples.
15
+
16
+ Functions
17
+ ---------
18
+
19
+ .. autosummary::
20
+ :toctree: generated/
21
+
22
+ sqrt
23
+ log
24
+ log2
25
+ logn
26
+ log10
27
+ power
28
+ arccos
29
+ arcsin
30
+ arctanh
31
+
32
+ """
33
+ import numpy.core.numeric as nx
34
+ import numpy.core.numerictypes as nt
35
+ from numpy.core.numeric import asarray, any
36
+ from numpy.core.overrides import array_function_dispatch
37
+ from numpy.lib.type_check import isreal
38
+
39
+
40
+ __all__ = [
41
+ 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
42
+ 'arctanh'
43
+ ]
44
+
45
+
46
+ _ln2 = nx.log(2.0)
47
+
48
+
49
+ def _tocomplex(arr):
50
+ """Convert its input `arr` to a complex array.
51
+
52
+ The input is returned as a complex array of the smallest type that will fit
53
+ the original data: types like single, byte, short, etc. become csingle,
54
+ while others become cdouble.
55
+
56
+ A copy of the input is always made.
57
+
58
+ Parameters
59
+ ----------
60
+ arr : array
61
+
62
+ Returns
63
+ -------
64
+ array
65
+ An array with the same input data as the input but in complex form.
66
+
67
+ Examples
68
+ --------
69
+
70
+ First, consider an input of type short:
71
+
72
+ >>> a = np.array([1,2,3],np.short)
73
+
74
+ >>> ac = np.lib.scimath._tocomplex(a); ac
75
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
76
+
77
+ >>> ac.dtype
78
+ dtype('complex64')
79
+
80
+ If the input is of type double, the output is correspondingly of the
81
+ complex double type as well:
82
+
83
+ >>> b = np.array([1,2,3],np.double)
84
+
85
+ >>> bc = np.lib.scimath._tocomplex(b); bc
86
+ array([1.+0.j, 2.+0.j, 3.+0.j])
87
+
88
+ >>> bc.dtype
89
+ dtype('complex128')
90
+
91
+ Note that even if the input was complex to begin with, a copy is still
92
+ made, since the astype() method always copies:
93
+
94
+ >>> c = np.array([1,2,3],np.csingle)
95
+
96
+ >>> cc = np.lib.scimath._tocomplex(c); cc
97
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
98
+
99
+ >>> c *= 2; c
100
+ array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)
101
+
102
+ >>> cc
103
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
104
+ """
105
+ if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,
106
+ nt.ushort, nt.csingle)):
107
+ return arr.astype(nt.csingle)
108
+ else:
109
+ return arr.astype(nt.cdouble)
110
+
111
+
112
+ def _fix_real_lt_zero(x):
113
+ """Convert `x` to complex if it has real, negative components.
114
+
115
+ Otherwise, output is just the array version of the input (via asarray).
116
+
117
+ Parameters
118
+ ----------
119
+ x : array_like
120
+
121
+ Returns
122
+ -------
123
+ array
124
+
125
+ Examples
126
+ --------
127
+ >>> np.lib.scimath._fix_real_lt_zero([1,2])
128
+ array([1, 2])
129
+
130
+ >>> np.lib.scimath._fix_real_lt_zero([-1,2])
131
+ array([-1.+0.j, 2.+0.j])
132
+
133
+ """
134
+ x = asarray(x)
135
+ if any(isreal(x) & (x < 0)):
136
+ x = _tocomplex(x)
137
+ return x
138
+
139
+
140
+ def _fix_int_lt_zero(x):
141
+ """Convert `x` to double if it has real, negative components.
142
+
143
+ Otherwise, output is just the array version of the input (via asarray).
144
+
145
+ Parameters
146
+ ----------
147
+ x : array_like
148
+
149
+ Returns
150
+ -------
151
+ array
152
+
153
+ Examples
154
+ --------
155
+ >>> np.lib.scimath._fix_int_lt_zero([1,2])
156
+ array([1, 2])
157
+
158
+ >>> np.lib.scimath._fix_int_lt_zero([-1,2])
159
+ array([-1., 2.])
160
+ """
161
+ x = asarray(x)
162
+ if any(isreal(x) & (x < 0)):
163
+ x = x * 1.0
164
+ return x
165
+
166
+
167
+ def _fix_real_abs_gt_1(x):
168
+ """Convert `x` to complex if it has real components x_i with abs(x_i)>1.
169
+
170
+ Otherwise, output is just the array version of the input (via asarray).
171
+
172
+ Parameters
173
+ ----------
174
+ x : array_like
175
+
176
+ Returns
177
+ -------
178
+ array
179
+
180
+ Examples
181
+ --------
182
+ >>> np.lib.scimath._fix_real_abs_gt_1([0,1])
183
+ array([0, 1])
184
+
185
+ >>> np.lib.scimath._fix_real_abs_gt_1([0,2])
186
+ array([0.+0.j, 2.+0.j])
187
+ """
188
+ x = asarray(x)
189
+ if any(isreal(x) & (abs(x) > 1)):
190
+ x = _tocomplex(x)
191
+ return x
192
+
193
+
194
+ def _unary_dispatcher(x):
195
+ return (x,)
196
+
197
+
198
+ @array_function_dispatch(_unary_dispatcher)
199
+ def sqrt(x):
200
+ """
201
+ Compute the square root of x.
202
+
203
+ For negative input elements, a complex value is returned
204
+ (unlike `numpy.sqrt` which returns NaN).
205
+
206
+ Parameters
207
+ ----------
208
+ x : array_like
209
+ The input value(s).
210
+
211
+ Returns
212
+ -------
213
+ out : ndarray or scalar
214
+ The square root of `x`. If `x` was a scalar, so is `out`,
215
+ otherwise an array is returned.
216
+
217
+ See Also
218
+ --------
219
+ numpy.sqrt
220
+
221
+ Examples
222
+ --------
223
+ For real, non-negative inputs this works just like `numpy.sqrt`:
224
+
225
+ >>> np.emath.sqrt(1)
226
+ 1.0
227
+ >>> np.emath.sqrt([1, 4])
228
+ array([1., 2.])
229
+
230
+ But it automatically handles negative inputs:
231
+
232
+ >>> np.emath.sqrt(-1)
233
+ 1j
234
+ >>> np.emath.sqrt([-1,4])
235
+ array([0.+1.j, 2.+0.j])
236
+
237
+ Different results are expected because:
238
+ floating point 0.0 and -0.0 are distinct.
239
+
240
+ For more control, explicitly use complex() as follows:
241
+
242
+ >>> np.emath.sqrt(complex(-4.0, 0.0))
243
+ 2j
244
+ >>> np.emath.sqrt(complex(-4.0, -0.0))
245
+ -2j
246
+ """
247
+ x = _fix_real_lt_zero(x)
248
+ return nx.sqrt(x)
249
+
250
+
251
+ @array_function_dispatch(_unary_dispatcher)
252
+ def log(x):
253
+ """
254
+ Compute the natural logarithm of `x`.
255
+
256
+ Return the "principal value" (for a description of this, see `numpy.log`)
257
+ of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
258
+ returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
259
+ complex principle value is returned.
260
+
261
+ Parameters
262
+ ----------
263
+ x : array_like
264
+ The value(s) whose log is (are) required.
265
+
266
+ Returns
267
+ -------
268
+ out : ndarray or scalar
269
+ The log of the `x` value(s). If `x` was a scalar, so is `out`,
270
+ otherwise an array is returned.
271
+
272
+ See Also
273
+ --------
274
+ numpy.log
275
+
276
+ Notes
277
+ -----
278
+ For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
279
+ (note, however, that otherwise `numpy.log` and this `log` are identical,
280
+ i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
281
+ notably, the complex principle value if ``x.imag != 0``).
282
+
283
+ Examples
284
+ --------
285
+ >>> np.emath.log(np.exp(1))
286
+ 1.0
287
+
288
+ Negative arguments are handled "correctly" (recall that
289
+ ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):
290
+
291
+ >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
292
+ True
293
+
294
+ """
295
+ x = _fix_real_lt_zero(x)
296
+ return nx.log(x)
297
+
298
+
299
+ @array_function_dispatch(_unary_dispatcher)
300
+ def log10(x):
301
+ """
302
+ Compute the logarithm base 10 of `x`.
303
+
304
+ Return the "principal value" (for a description of this, see
305
+ `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
306
+ is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
307
+ returns ``inf``). Otherwise, the complex principle value is returned.
308
+
309
+ Parameters
310
+ ----------
311
+ x : array_like or scalar
312
+ The value(s) whose log base 10 is (are) required.
313
+
314
+ Returns
315
+ -------
316
+ out : ndarray or scalar
317
+ The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
318
+ otherwise an array object is returned.
319
+
320
+ See Also
321
+ --------
322
+ numpy.log10
323
+
324
+ Notes
325
+ -----
326
+ For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
327
+ (note, however, that otherwise `numpy.log10` and this `log10` are
328
+ identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
329
+ and, notably, the complex principle value if ``x.imag != 0``).
330
+
331
+ Examples
332
+ --------
333
+
334
+ (We set the printing precision so the example can be auto-tested)
335
+
336
+ >>> np.set_printoptions(precision=4)
337
+
338
+ >>> np.emath.log10(10**1)
339
+ 1.0
340
+
341
+ >>> np.emath.log10([-10**1, -10**2, 10**2])
342
+ array([1.+1.3644j, 2.+1.3644j, 2.+0.j ])
343
+
344
+ """
345
+ x = _fix_real_lt_zero(x)
346
+ return nx.log10(x)
347
+
348
+
349
+ def _logn_dispatcher(n, x):
350
+ return (n, x,)
351
+
352
+
353
+ @array_function_dispatch(_logn_dispatcher)
354
+ def logn(n, x):
355
+ """
356
+ Take log base n of x.
357
+
358
+ If `x` contains negative inputs, the answer is computed and returned in the
359
+ complex domain.
360
+
361
+ Parameters
362
+ ----------
363
+ n : array_like
364
+ The integer base(s) in which the log is taken.
365
+ x : array_like
366
+ The value(s) whose log base `n` is (are) required.
367
+
368
+ Returns
369
+ -------
370
+ out : ndarray or scalar
371
+ The log base `n` of the `x` value(s). If `x` was a scalar, so is
372
+ `out`, otherwise an array is returned.
373
+
374
+ Examples
375
+ --------
376
+ >>> np.set_printoptions(precision=4)
377
+
378
+ >>> np.emath.logn(2, [4, 8])
379
+ array([2., 3.])
380
+ >>> np.emath.logn(2, [-4, -8, 8])
381
+ array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
382
+
383
+ """
384
+ x = _fix_real_lt_zero(x)
385
+ n = _fix_real_lt_zero(n)
386
+ return nx.log(x)/nx.log(n)
387
+
388
+
389
+ @array_function_dispatch(_unary_dispatcher)
390
+ def log2(x):
391
+ """
392
+ Compute the logarithm base 2 of `x`.
393
+
394
+ Return the "principal value" (for a description of this, see
395
+ `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
396
+ a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
397
+ ``inf``). Otherwise, the complex principle value is returned.
398
+
399
+ Parameters
400
+ ----------
401
+ x : array_like
402
+ The value(s) whose log base 2 is (are) required.
403
+
404
+ Returns
405
+ -------
406
+ out : ndarray or scalar
407
+ The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
408
+ otherwise an array is returned.
409
+
410
+ See Also
411
+ --------
412
+ numpy.log2
413
+
414
+ Notes
415
+ -----
416
+ For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
417
+ (note, however, that otherwise `numpy.log2` and this `log2` are
418
+ identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
419
+ and, notably, the complex principle value if ``x.imag != 0``).
420
+
421
+ Examples
422
+ --------
423
+ We set the printing precision so the example can be auto-tested:
424
+
425
+ >>> np.set_printoptions(precision=4)
426
+
427
+ >>> np.emath.log2(8)
428
+ 3.0
429
+ >>> np.emath.log2([-4, -8, 8])
430
+ array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
431
+
432
+ """
433
+ x = _fix_real_lt_zero(x)
434
+ return nx.log2(x)
435
+
436
+
437
+ def _power_dispatcher(x, p):
438
+ return (x, p)
439
+
440
+
441
+ @array_function_dispatch(_power_dispatcher)
442
+ def power(x, p):
443
+ """
444
+ Return x to the power p, (x**p).
445
+
446
+ If `x` contains negative values, the output is converted to the
447
+ complex domain.
448
+
449
+ Parameters
450
+ ----------
451
+ x : array_like
452
+ The input value(s).
453
+ p : array_like of ints
454
+ The power(s) to which `x` is raised. If `x` contains multiple values,
455
+ `p` has to either be a scalar, or contain the same number of values
456
+ as `x`. In the latter case, the result is
457
+ ``x[0]**p[0], x[1]**p[1], ...``.
458
+
459
+ Returns
460
+ -------
461
+ out : ndarray or scalar
462
+ The result of ``x**p``. If `x` and `p` are scalars, so is `out`,
463
+ otherwise an array is returned.
464
+
465
+ See Also
466
+ --------
467
+ numpy.power
468
+
469
+ Examples
470
+ --------
471
+ >>> np.set_printoptions(precision=4)
472
+
473
+ >>> np.emath.power([2, 4], 2)
474
+ array([ 4, 16])
475
+ >>> np.emath.power([2, 4], -2)
476
+ array([0.25 , 0.0625])
477
+ >>> np.emath.power([-2, 4], 2)
478
+ array([ 4.-0.j, 16.+0.j])
479
+
480
+ """
481
+ x = _fix_real_lt_zero(x)
482
+ p = _fix_int_lt_zero(p)
483
+ return nx.power(x, p)
484
+
485
+
486
+ @array_function_dispatch(_unary_dispatcher)
487
+ def arccos(x):
488
+ """
489
+ Compute the inverse cosine of x.
490
+
491
+ Return the "principal value" (for a description of this, see
492
+ `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
493
+ `abs(x) <= 1`, this is a real number in the closed interval
494
+ :math:`[0, \\pi]`. Otherwise, the complex principle value is returned.
495
+
496
+ Parameters
497
+ ----------
498
+ x : array_like or scalar
499
+ The value(s) whose arccos is (are) required.
500
+
501
+ Returns
502
+ -------
503
+ out : ndarray or scalar
504
+ The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
505
+ is `out`, otherwise an array object is returned.
506
+
507
+ See Also
508
+ --------
509
+ numpy.arccos
510
+
511
+ Notes
512
+ -----
513
+ For an arccos() that returns ``NAN`` when real `x` is not in the
514
+ interval ``[-1,1]``, use `numpy.arccos`.
515
+
516
+ Examples
517
+ --------
518
+ >>> np.set_printoptions(precision=4)
519
+
520
+ >>> np.emath.arccos(1) # a scalar is returned
521
+ 0.0
522
+
523
+ >>> np.emath.arccos([1,2])
524
+ array([0.-0.j , 0.-1.317j])
525
+
526
+ """
527
+ x = _fix_real_abs_gt_1(x)
528
+ return nx.arccos(x)
529
+
530
+
531
+ @array_function_dispatch(_unary_dispatcher)
532
+ def arcsin(x):
533
+ """
534
+ Compute the inverse sine of x.
535
+
536
+ Return the "principal value" (for a description of this, see
537
+ `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
538
+ `abs(x) <= 1`, this is a real number in the closed interval
539
+ :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principle value is
540
+ returned.
541
+
542
+ Parameters
543
+ ----------
544
+ x : array_like or scalar
545
+ The value(s) whose arcsin is (are) required.
546
+
547
+ Returns
548
+ -------
549
+ out : ndarray or scalar
550
+ The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
551
+ is `out`, otherwise an array object is returned.
552
+
553
+ See Also
554
+ --------
555
+ numpy.arcsin
556
+
557
+ Notes
558
+ -----
559
+ For an arcsin() that returns ``NAN`` when real `x` is not in the
560
+ interval ``[-1,1]``, use `numpy.arcsin`.
561
+
562
+ Examples
563
+ --------
564
+ >>> np.set_printoptions(precision=4)
565
+
566
+ >>> np.emath.arcsin(0)
567
+ 0.0
568
+
569
+ >>> np.emath.arcsin([0,1])
570
+ array([0. , 1.5708])
571
+
572
+ """
573
+ x = _fix_real_abs_gt_1(x)
574
+ return nx.arcsin(x)
575
+
576
+
577
+ @array_function_dispatch(_unary_dispatcher)
578
+ def arctanh(x):
579
+ """
580
+ Compute the inverse hyperbolic tangent of `x`.
581
+
582
+ Return the "principal value" (for a description of this, see
583
+ `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
584
+ ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is
585
+ complex, the result is complex. Finally, `x = 1` returns``inf`` and
586
+ ``x=-1`` returns ``-inf``.
587
+
588
+ Parameters
589
+ ----------
590
+ x : array_like
591
+ The value(s) whose arctanh is (are) required.
592
+
593
+ Returns
594
+ -------
595
+ out : ndarray or scalar
596
+ The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
597
+ a scalar so is `out`, otherwise an array is returned.
598
+
599
+
600
+ See Also
601
+ --------
602
+ numpy.arctanh
603
+
604
+ Notes
605
+ -----
606
+ For an arctanh() that returns ``NAN`` when real `x` is not in the
607
+ interval ``(-1,1)``, use `numpy.arctanh` (this latter, however, does
608
+ return +/-inf for ``x = +/-1``).
609
+
610
+ Examples
611
+ --------
612
+ >>> np.set_printoptions(precision=4)
613
+
614
+ >>> from numpy.testing import suppress_warnings
615
+ >>> with suppress_warnings() as sup:
616
+ ... sup.filter(RuntimeWarning)
617
+ ... np.emath.arctanh(np.eye(2))
618
+ array([[inf, 0.],
619
+ [ 0., inf]])
620
+ >>> np.emath.arctanh([1j])
621
+ array([0.+0.7854j])
622
+
623
+ """
624
+ x = _fix_real_abs_gt_1(x)
625
+ return nx.arctanh(x)
env-llmeval/lib/python3.10/site-packages/numpy/lib/scimath.pyi ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import overload, Any
2
+
3
+ from numpy import complexfloating
4
+
5
+ from numpy._typing import (
6
+ NDArray,
7
+ _ArrayLikeFloat_co,
8
+ _ArrayLikeComplex_co,
9
+ _ComplexLike_co,
10
+ _FloatLike_co,
11
+ )
12
+
13
+ __all__: list[str]
14
+
15
+ @overload
16
+ def sqrt(x: _FloatLike_co) -> Any: ...
17
+ @overload
18
+ def sqrt(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
19
+ @overload
20
+ def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
21
+ @overload
22
+ def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
23
+
24
+ @overload
25
+ def log(x: _FloatLike_co) -> Any: ...
26
+ @overload
27
+ def log(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
28
+ @overload
29
+ def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
30
+ @overload
31
+ def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
32
+
33
+ @overload
34
+ def log10(x: _FloatLike_co) -> Any: ...
35
+ @overload
36
+ def log10(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
37
+ @overload
38
+ def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
39
+ @overload
40
+ def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
41
+
42
+ @overload
43
+ def log2(x: _FloatLike_co) -> Any: ...
44
+ @overload
45
+ def log2(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
46
+ @overload
47
+ def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
48
+ @overload
49
+ def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
50
+
51
+ @overload
52
+ def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ...
53
+ @overload
54
+ def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
55
+ @overload
56
+ def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
57
+ @overload
58
+ def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
59
+
60
+ @overload
61
+ def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ...
62
+ @overload
63
+ def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating[Any, Any]: ...
64
+ @overload
65
+ def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ...
66
+ @overload
67
+ def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
68
+
69
+ @overload
70
+ def arccos(x: _FloatLike_co) -> Any: ...
71
+ @overload
72
+ def arccos(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
73
+ @overload
74
+ def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
75
+ @overload
76
+ def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
77
+
78
+ @overload
79
+ def arcsin(x: _FloatLike_co) -> Any: ...
80
+ @overload
81
+ def arcsin(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
82
+ @overload
83
+ def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
84
+ @overload
85
+ def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
86
+
87
+ @overload
88
+ def arctanh(x: _FloatLike_co) -> Any: ...
89
+ @overload
90
+ def arctanh(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
91
+ @overload
92
+ def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
93
+ @overload
94
+ def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
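A hypothetical snippet showing how a static type checker (mypy/pyright) would resolve these overloads; `reveal_type` is checker-only and the revealed forms shown here are approximate:

```python
import numpy as np

reveal_type(np.emath.sqrt(1.0))          # Any            (float-like scalar)
reveal_type(np.emath.sqrt(1.0 + 0j))     # complexfloating[Any, Any]
reveal_type(np.emath.sqrt([1.0, -1.0]))  # ndarray[Any, dtype[Any]]
```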
env-llmeval/lib/python3.10/site-packages/numpy/lib/shape_base.py ADDED
@@ -0,0 +1,1274 @@
1
+ import functools
2
+
3
+ import numpy.core.numeric as _nx
4
+ from numpy.core.numeric import asarray, zeros, array, asanyarray
5
+ from numpy.core.fromnumeric import reshape, transpose
6
+ from numpy.core.multiarray import normalize_axis_index
7
+ from numpy.core import overrides
8
+ from numpy.core import vstack, atleast_3d
9
+ from numpy.core.numeric import normalize_axis_tuple
10
+ from numpy.core.shape_base import _arrays_for_stack_dispatcher
11
+ from numpy.lib.index_tricks import ndindex
12
+ from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells
13
+
14
+
15
+ __all__ = [
16
+ 'column_stack', 'row_stack', 'dstack', 'array_split', 'split',
17
+ 'hsplit', 'vsplit', 'dsplit', 'apply_over_axes', 'expand_dims',
18
+ 'apply_along_axis', 'kron', 'tile', 'get_array_wrap', 'take_along_axis',
19
+ 'put_along_axis'
20
+ ]
21
+
22
+
23
+ array_function_dispatch = functools.partial(
24
+ overrides.array_function_dispatch, module='numpy')
25
+
26
+
27
+ def _make_along_axis_idx(arr_shape, indices, axis):
28
+ # compute dimensions to iterate over
29
+ if not _nx.issubdtype(indices.dtype, _nx.integer):
30
+ raise IndexError('`indices` must be an integer array')
31
+ if len(arr_shape) != indices.ndim:
32
+ raise ValueError(
33
+ "`indices` and `arr` must have the same number of dimensions")
34
+ shape_ones = (1,) * indices.ndim
35
+ dest_dims = list(range(axis)) + [None] + list(range(axis+1, indices.ndim))
36
+
37
+ # build a fancy index, consisting of orthogonal aranges, with the
38
+ # requested index inserted at the right location
39
+ fancy_index = []
40
+ for dim, n in zip(dest_dims, arr_shape):
41
+ if dim is None:
42
+ fancy_index.append(indices)
43
+ else:
44
+ ind_shape = shape_ones[:dim] + (-1,) + shape_ones[dim+1:]
45
+ fancy_index.append(_nx.arange(n).reshape(ind_shape))
46
+
47
+ return tuple(fancy_index)
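The tuple built here is just `indices` at position `axis` plus broadcast-shaped aranges everywhere else; a minimal sketch (not from the file) of the 2-D, `axis=1` case:

```python
import numpy as np

a = np.array([[10, 30, 20],
              [60, 40, 50]])
idx = np.argsort(a, axis=1)        # shape (2, 3), integer dtype

rows = np.arange(2).reshape(2, 1)  # the orthogonal arange for axis 0
assert np.array_equal(a[rows, idx], np.take_along_axis(a, idx, axis=1))
```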
48
+
49
+
50
+ def _take_along_axis_dispatcher(arr, indices, axis):
51
+ return (arr, indices)
52
+
53
+
54
+ @array_function_dispatch(_take_along_axis_dispatcher)
55
+ def take_along_axis(arr, indices, axis):
56
+ """
57
+ Take values from the input array by matching 1d index and data slices.
58
+
59
+ This iterates over matching 1d slices oriented along the specified axis in
60
+ the index and data arrays, and uses the former to look up values in the
61
+ latter. These slices can be different lengths.
62
+
63
+ Functions returning an index along an axis, like `argsort` and
64
+ `argpartition`, produce suitable indices for this function.
65
+
66
+ .. versionadded:: 1.15.0
67
+
68
+ Parameters
69
+ ----------
70
+ arr : ndarray (Ni..., M, Nk...)
71
+ Source array
72
+ indices : ndarray (Ni..., J, Nk...)
73
+ Indices to take along each 1d slice of `arr`. This must match the
74
+ dimension of arr, but dimensions Ni and Nj only need to broadcast
75
+ against `arr`.
76
+ axis : int
77
+ The axis to take 1d slices along. If axis is None, the input array is
78
+ treated as if it had first been flattened to 1d, for consistency with
79
+ `sort` and `argsort`.
80
+
81
+ Returns
82
+ -------
83
+ out: ndarray (Ni..., J, Nk...)
84
+ The indexed result.
85
+
86
+ Notes
87
+ -----
88
+ This is equivalent to (but faster than) the following use of `ndindex` and
89
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
90
+
91
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
92
+ J = indices.shape[axis] # Need not equal M
93
+ out = np.empty(Ni + (J,) + Nk)
94
+
95
+ for ii in ndindex(Ni):
96
+ for kk in ndindex(Nk):
97
+ a_1d = a [ii + s_[:,] + kk]
98
+ indices_1d = indices[ii + s_[:,] + kk]
99
+ out_1d = out [ii + s_[:,] + kk]
100
+ for j in range(J):
101
+ out_1d[j] = a_1d[indices_1d[j]]
102
+
103
+ Equivalently, eliminating the inner loop, the last two lines would be::
104
+
105
+ out_1d[:] = a_1d[indices_1d]
106
+
107
+ See Also
108
+ --------
109
+ take : Take along an axis, using the same indices for every 1d slice
110
+ put_along_axis :
111
+ Put values into the destination array by matching 1d index and data slices
112
+
113
+ Examples
114
+ --------
115
+
116
+ For this sample array
117
+
118
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
119
+
120
+ We can sort either by using sort directly, or argsort and this function
121
+
122
+ >>> np.sort(a, axis=1)
123
+ array([[10, 20, 30],
124
+ [40, 50, 60]])
125
+ >>> ai = np.argsort(a, axis=1)
126
+ >>> ai
127
+ array([[0, 2, 1],
128
+ [1, 2, 0]])
129
+ >>> np.take_along_axis(a, ai, axis=1)
130
+ array([[10, 20, 30],
131
+ [40, 50, 60]])
132
+
133
+ The same works for max and min, if you maintain the trivial dimension
134
+ with ``keepdims``:
135
+
136
+ >>> np.max(a, axis=1, keepdims=True)
137
+ array([[30],
138
+ [60]])
139
+ >>> ai = np.argmax(a, axis=1, keepdims=True)
140
+ >>> ai
141
+ array([[1],
142
+ [0]])
143
+ >>> np.take_along_axis(a, ai, axis=1)
144
+ array([[30],
145
+ [60]])
146
+
147
+ If we want to get the max and min at the same time, we can stack the
148
+ indices first
149
+
150
+ >>> ai_min = np.argmin(a, axis=1, keepdims=True)
151
+ >>> ai_max = np.argmax(a, axis=1, keepdims=True)
152
+ >>> ai = np.concatenate([ai_min, ai_max], axis=1)
153
+ >>> ai
154
+ array([[0, 1],
155
+ [1, 0]])
156
+ >>> np.take_along_axis(a, ai, axis=1)
157
+ array([[10, 30],
158
+ [40, 60]])
159
+ """
160
+ # normalize inputs
161
+ if axis is None:
162
+ arr = arr.flat
163
+ arr_shape = (len(arr),) # flatiter has no .shape
164
+ axis = 0
165
+ else:
166
+ axis = normalize_axis_index(axis, arr.ndim)
167
+ arr_shape = arr.shape
168
+
169
+ # use the fancy index
170
+ return arr[_make_along_axis_idx(arr_shape, indices, axis)]
171
+
172
+
173
+ def _put_along_axis_dispatcher(arr, indices, values, axis):
174
+ return (arr, indices, values)
175
+
176
+
177
+ @array_function_dispatch(_put_along_axis_dispatcher)
178
+ def put_along_axis(arr, indices, values, axis):
179
+ """
180
+ Put values into the destination array by matching 1d index and data slices.
181
+
182
+ This iterates over matching 1d slices oriented along the specified axis in
183
+ the index and data arrays, and uses the former to place values into the
184
+ latter. These slices can be different lengths.
185
+
186
+ Functions returning an index along an axis, like `argsort` and
187
+ `argpartition`, produce suitable indices for this function.
188
+
189
+ .. versionadded:: 1.15.0
190
+
191
+ Parameters
192
+ ----------
193
+ arr : ndarray (Ni..., M, Nk...)
194
+ Destination array.
195
+ indices : ndarray (Ni..., J, Nk...)
196
+ Indices to change along each 1d slice of `arr`. This must match the
197
+ dimension of arr, but dimensions in Ni and Nj may be 1 to broadcast
198
+ against `arr`.
199
+ values : array_like (Ni..., J, Nk...)
200
+ values to insert at those indices. Its shape and dimension are
201
+ broadcast to match that of `indices`.
202
+ axis : int
203
+ The axis to take 1d slices along. If axis is None, the destination
204
+ array is treated as if a flattened 1d view of it had been created.
205
+
206
+ Notes
207
+ -----
208
+ This is equivalent to (but faster than) the following use of `ndindex` and
209
+ `s_`, which sets each of ``ii`` and ``kk`` to a tuple of indices::
210
+
211
+ Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
212
+ J = indices.shape[axis] # Need not equal M
213
+
214
+ for ii in ndindex(Ni):
215
+ for kk in ndindex(Nk):
216
+ a_1d = a [ii + s_[:,] + kk]
217
+ indices_1d = indices[ii + s_[:,] + kk]
218
+ values_1d = values [ii + s_[:,] + kk]
219
+ for j in range(J):
220
+ a_1d[indices_1d[j]] = values_1d[j]
221
+
222
+ Equivalently, eliminating the inner loop, the last two lines would be::
223
+
224
+ a_1d[indices_1d] = values_1d
225
+
226
+ See Also
227
+ --------
228
+ take_along_axis :
229
+ Take values from the input array by matching 1d index and data slices
230
+
231
+ Examples
232
+ --------
233
+
234
+ For this sample array
235
+
236
+ >>> a = np.array([[10, 30, 20], [60, 40, 50]])
237
+
238
+ We can replace the maximum values with:
239
+
240
+ >>> ai = np.argmax(a, axis=1, keepdims=True)
241
+ >>> ai
242
+ array([[1],
243
+ [0]])
244
+ >>> np.put_along_axis(a, ai, 99, axis=1)
245
+ >>> a
246
+ array([[10, 99, 20],
247
+ [99, 40, 50]])
248
+
249
+ """
250
+ # normalize inputs
251
+ if axis is None:
252
+ arr = arr.flat
253
+ axis = 0
254
+ arr_shape = (len(arr),) # flatiter has no .shape
255
+ else:
256
+ axis = normalize_axis_index(axis, arr.ndim)
257
+ arr_shape = arr.shape
258
+
259
+ # use the fancy index
260
+ arr[_make_along_axis_idx(arr_shape, indices, axis)] = values
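A small usage sketch (not part of the file; `keepdims` for `argmin` assumes NumPy >= 1.22), mirroring the `argmax` example in the docstring:

```python
import numpy as np

a = np.array([[10, 30, 20],
              [60, 40, 50]])
ai = np.argmin(a, axis=1, keepdims=True)  # one index per row
np.put_along_axis(a, ai, 0, axis=1)       # zero out each row's minimum
print(a)
# [[ 0 30 20]
#  [60  0 50]]
```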
261
+
262
+
263
+ def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs):
264
+ return (arr,)
265
+
266
+
267
+ @array_function_dispatch(_apply_along_axis_dispatcher)
268
+ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
269
+ """
270
+ Apply a function to 1-D slices along the given axis.
271
+
272
+ Execute `func1d(a, *args, **kwargs)` where `func1d` operates on 1-D arrays
273
+ and `a` is a 1-D slice of `arr` along `axis`.
274
+
275
+ This is equivalent to (but faster than) the following use of `ndindex` and
276
+ `s_`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of indices::
277
+
278
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
279
+ for ii in ndindex(Ni):
280
+ for kk in ndindex(Nk):
281
+ f = func1d(arr[ii + s_[:,] + kk])
282
+ Nj = f.shape
283
+ for jj in ndindex(Nj):
284
+ out[ii + jj + kk] = f[jj]
285
+
286
+ Equivalently, eliminating the inner loop, this can be expressed as::
287
+
288
+ Ni, Nk = a.shape[:axis], a.shape[axis+1:]
289
+ for ii in ndindex(Ni):
290
+ for kk in ndindex(Nk):
291
+ out[ii + s_[...,] + kk] = func1d(arr[ii + s_[:,] + kk])
292
+
293
+ Parameters
294
+ ----------
295
+ func1d : function (M,) -> (Nj...)
296
+ This function should accept 1-D arrays. It is applied to 1-D
297
+ slices of `arr` along the specified axis.
298
+ axis : integer
299
+ Axis along which `arr` is sliced.
300
+ arr : ndarray (Ni..., M, Nk...)
301
+ Input array.
302
+ args : any
303
+ Additional arguments to `func1d`.
304
+ kwargs : any
305
+ Additional named arguments to `func1d`.
306
+
307
+ .. versionadded:: 1.9.0
308
+
309
+
310
+ Returns
311
+ -------
312
+ out : ndarray (Ni..., Nj..., Nk...)
313
+ The output array. The shape of `out` is identical to the shape of
314
+ `arr`, except along the `axis` dimension. This axis is removed, and
315
+ replaced with new dimensions equal to the shape of the return value
316
+ of `func1d`. So if `func1d` returns a scalar, `out` will have one
317
+ fewer dimension than `arr`.
318
+
319
+ See Also
320
+ --------
321
+ apply_over_axes : Apply a function repeatedly over multiple axes.
322
+
323
+ Examples
324
+ --------
325
+ >>> def my_func(a):
326
+ ... \"\"\"Average first and last element of a 1-D array\"\"\"
327
+ ... return (a[0] + a[-1]) * 0.5
328
+ >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
329
+ >>> np.apply_along_axis(my_func, 0, b)
330
+ array([4., 5., 6.])
331
+ >>> np.apply_along_axis(my_func, 1, b)
332
+ array([2., 5., 8.])
333
+
334
+ For a function that returns a 1D array, the number of dimensions in
335
+ `outarr` is the same as `arr`.
336
+
337
+ >>> b = np.array([[8,1,7], [4,3,9], [5,2,6]])
338
+ >>> np.apply_along_axis(sorted, 1, b)
339
+ array([[1, 7, 8],
340
+ [3, 4, 9],
341
+ [2, 5, 6]])
342
+
343
+ For a function that returns a higher dimensional array, those dimensions
344
+ are inserted in place of the `axis` dimension.
345
+
346
+ >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
347
+ >>> np.apply_along_axis(np.diag, -1, b)
348
+ array([[[1, 0, 0],
349
+ [0, 2, 0],
350
+ [0, 0, 3]],
351
+ [[4, 0, 0],
352
+ [0, 5, 0],
353
+ [0, 0, 6]],
354
+ [[7, 0, 0],
355
+ [0, 8, 0],
356
+ [0, 0, 9]]])
357
+ """
358
+ # handle negative axes
359
+ arr = asanyarray(arr)
360
+ nd = arr.ndim
361
+ axis = normalize_axis_index(axis, nd)
362
+
363
+ # arr, with the iteration axis at the end
364
+ in_dims = list(range(nd))
365
+ inarr_view = transpose(arr, in_dims[:axis] + in_dims[axis+1:] + [axis])
366
+
367
+ # compute indices for the iteration axes, and append a trailing ellipsis to
368
+ # prevent 0d arrays decaying to scalars, which fixes gh-8642
369
+ inds = ndindex(inarr_view.shape[:-1])
370
+ inds = (ind + (Ellipsis,) for ind in inds)
371
+
372
+ # invoke the function on the first item
373
+ try:
374
+ ind0 = next(inds)
375
+ except StopIteration:
376
+ raise ValueError(
377
+ 'Cannot apply_along_axis when any iteration dimensions are 0'
378
+ ) from None
379
+ res = asanyarray(func1d(inarr_view[ind0], *args, **kwargs))
380
+
381
+ # build a buffer for storing evaluations of func1d.
382
+ # remove the requested axis, and add the new ones on the end.
383
+ # laid out so that each write is contiguous.
384
+ # for a tuple index inds, buff[inds] = func1d(inarr_view[inds])
385
+ buff = zeros(inarr_view.shape[:-1] + res.shape, res.dtype)
386
+
387
+ # permutation of axes such that out = buff.transpose(buff_permute)
388
+ buff_dims = list(range(buff.ndim))
389
+ buff_permute = (
390
+ buff_dims[0 : axis] +
391
+ buff_dims[buff.ndim-res.ndim : buff.ndim] +
392
+ buff_dims[axis : buff.ndim-res.ndim]
393
+ )
394
+
395
+ # matrices have a nasty __array_prepare__ and __array_wrap__
396
+ if not isinstance(res, matrix):
397
+ buff = res.__array_prepare__(buff)
398
+
399
+ # save the first result, then compute and save all remaining results
400
+ buff[ind0] = res
401
+ for ind in inds:
402
+ buff[ind] = asanyarray(func1d(inarr_view[ind], *args, **kwargs))
403
+
404
+ if not isinstance(res, matrix):
405
+ # wrap the array, to preserve subclasses
406
+ buff = res.__array_wrap__(buff)
407
+
408
+ # finally, rotate the inserted axes back to where they belong
409
+ return transpose(buff, buff_permute)
410
+
411
+ else:
412
+ # matrices have to be transposed first, because they collapse dimensions!
413
+ out_arr = transpose(buff, buff_permute)
414
+ return res.__array_wrap__(out_arr)
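A short sketch (not from the file) showing how extra positional and keyword arguments are forwarded to `func1d`:

```python
import numpy as np

def peak_to_peak(v, scale=1.0):
    # operates on one 1-D slice at a time
    return (v.max() - v.min()) * scale

b = np.array([[1, 2, 3],
              [7, 5, 6]])
print(np.apply_along_axis(peak_to_peak, 1, b))            # [2. 2.]
print(np.apply_along_axis(peak_to_peak, 1, b, scale=10))  # [20. 20.]
```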
415
+
416
+
417
+ def _apply_over_axes_dispatcher(func, a, axes):
418
+ return (a,)
419
+
420
+
421
+ @array_function_dispatch(_apply_over_axes_dispatcher)
422
+ def apply_over_axes(func, a, axes):
423
+ """
424
+ Apply a function repeatedly over multiple axes.
425
+
426
+ `func` is called as `res = func(a, axis)`, where `axis` is the first
427
+ element of `axes`. The result `res` of the function call must have
428
+ either the same dimensions as `a` or one less dimension. If `res`
429
+ has one less dimension than `a`, a dimension is inserted before
430
+ `axis`. The call to `func` is then repeated for each axis in `axes`,
431
+ with `res` as the first argument.
432
+
433
+ Parameters
434
+ ----------
435
+ func : function
436
+ This function must take two arguments, `func(a, axis)`.
437
+ a : array_like
438
+ Input array.
439
+ axes : array_like
440
+ Axes over which `func` is applied; the elements must be integers.
441
+
442
+ Returns
443
+ -------
444
+ apply_over_axis : ndarray
445
+ The output array. The number of dimensions is the same as `a`,
446
+ but the shape can be different. This depends on whether `func`
447
+ changes the shape of its output with respect to its input.
448
+
449
+ See Also
450
+ --------
451
+ apply_along_axis :
452
+ Apply a function to 1-D slices of an array along the given axis.
453
+
454
+ Notes
455
+ -----
456
+ This function is equivalent to tuple axis arguments to reorderable ufuncs
457
+ with keepdims=True. Tuple axis arguments to ufuncs have been available since
458
+ version 1.7.0.
459
+
460
+ Examples
461
+ --------
462
+ >>> a = np.arange(24).reshape(2,3,4)
463
+ >>> a
464
+ array([[[ 0, 1, 2, 3],
465
+ [ 4, 5, 6, 7],
466
+ [ 8, 9, 10, 11]],
467
+ [[12, 13, 14, 15],
468
+ [16, 17, 18, 19],
469
+ [20, 21, 22, 23]]])
470
+
471
+ Sum over axes 0 and 2. The result has same number of dimensions
472
+ as the original array:
473
+
474
+ >>> np.apply_over_axes(np.sum, a, [0,2])
475
+ array([[[ 60],
476
+ [ 92],
477
+ [124]]])
478
+
479
+ Tuple axis arguments to ufuncs are equivalent:
480
+
481
+ >>> np.sum(a, axis=(0,2), keepdims=True)
482
+ array([[[ 60],
483
+ [ 92],
484
+ [124]]])
485
+
486
+ """
487
+ val = asarray(a)
488
+ N = a.ndim
489
+ if array(axes).ndim == 0:
490
+ axes = (axes,)
491
+ for axis in axes:
492
+ if axis < 0:
493
+ axis = N + axis
494
+ args = (val, axis)
495
+ res = func(*args)
496
+ if res.ndim == val.ndim:
497
+ val = res
498
+ else:
499
+ res = expand_dims(res, axis)
500
+ if res.ndim == val.ndim:
501
+ val = res
502
+ else:
503
+ raise ValueError("function is not returning "
504
+ "an array of the correct shape")
505
+ return val
506
+
507
+
508
+ def _expand_dims_dispatcher(a, axis):
509
+ return (a,)
510
+
511
+
512
+ @array_function_dispatch(_expand_dims_dispatcher)
513
+ def expand_dims(a, axis):
514
+ """
515
+ Expand the shape of an array.
516
+
517
+ Insert a new axis that will appear at the `axis` position in the expanded
518
+ array shape.
519
+
520
+ Parameters
521
+ ----------
522
+ a : array_like
523
+ Input array.
524
+ axis : int or tuple of ints
525
+ Position in the expanded axes where the new axis (or axes) is placed.
526
+
527
+ .. deprecated:: 1.13.0
528
+ Passing an axis where ``axis > a.ndim`` will be treated as
529
+ ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will
530
+ be treated as ``axis == 0``. This behavior is deprecated.
531
+
532
+ .. versionchanged:: 1.18.0
533
+ A tuple of axes is now supported. Out of range axes as
534
+ described above are now forbidden and raise an `AxisError`.
535
+
536
+ Returns
537
+ -------
538
+ result : ndarray
539
+ View of `a` with the number of dimensions increased.
540
+
541
+ See Also
542
+ --------
543
+ squeeze : The inverse operation, removing singleton dimensions
544
+ reshape : Insert, remove, and combine dimensions, and resize existing ones
545
+ doc.indexing, atleast_1d, atleast_2d, atleast_3d
546
+
547
+ Examples
548
+ --------
549
+ >>> x = np.array([1, 2])
550
+ >>> x.shape
551
+ (2,)
552
+
553
+ The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``:
554
+
555
+ >>> y = np.expand_dims(x, axis=0)
556
+ >>> y
557
+ array([[1, 2]])
558
+ >>> y.shape
559
+ (1, 2)
560
+
561
+ The following is equivalent to ``x[:, np.newaxis]``:
562
+
563
+ >>> y = np.expand_dims(x, axis=1)
564
+ >>> y
565
+ array([[1],
566
+ [2]])
567
+ >>> y.shape
568
+ (2, 1)
569
+
570
+ ``axis`` may also be a tuple:
571
+
572
+ >>> y = np.expand_dims(x, axis=(0, 1))
573
+ >>> y
574
+ array([[[1, 2]]])
575
+
576
+ >>> y = np.expand_dims(x, axis=(2, 0))
577
+ >>> y
578
+ array([[[1],
579
+ [2]]])
580
+
581
+ Note that some examples may use ``None`` instead of ``np.newaxis``. These
582
+ are the same objects:
583
+
584
+ >>> np.newaxis is None
585
+ True
586
+
587
+ """
588
+ if isinstance(a, matrix):
589
+ a = asarray(a)
590
+ else:
591
+ a = asanyarray(a)
592
+
593
+ if type(axis) not in (tuple, list):
594
+ axis = (axis,)
595
+
596
+ out_ndim = len(axis) + a.ndim
597
+ axis = normalize_axis_tuple(axis, out_ndim)
598
+
599
+ shape_it = iter(a.shape)
600
+ shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
601
+
602
+ return a.reshape(shape)
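A by-hand replay (a sketch, not part of the file) of the shape construction above, for ``a.shape == (2, 3)`` and ``axis == (0, 3)``:

```python
import numpy as np

out_ndim = 2 + 2              # len(axis) + a.ndim
axis = (0, 3)                 # already normalized in this example
shape_it = iter((2, 3))
shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
print(shape)                                          # [1, 2, 3, 1]
print(np.expand_dims(np.ones((2, 3)), (0, 3)).shape)  # (1, 2, 3, 1)
```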
603
+
604
+
605
+ row_stack = vstack
606
+
607
+
608
+ def _column_stack_dispatcher(tup):
609
+ return _arrays_for_stack_dispatcher(tup)
610
+
611
+
612
+ @array_function_dispatch(_column_stack_dispatcher)
613
+ def column_stack(tup):
614
+ """
615
+ Stack 1-D arrays as columns into a 2-D array.
616
+
617
+ Take a sequence of 1-D arrays and stack them as columns
618
+ to make a single 2-D array. 2-D arrays are stacked as-is,
619
+ just like with `hstack`. 1-D arrays are turned into 2-D columns
620
+ first.
621
+
622
+ Parameters
623
+ ----------
624
+ tup : sequence of 1-D or 2-D arrays.
625
+ Arrays to stack. All of them must have the same first dimension.
626
+
627
+ Returns
628
+ -------
629
+ stacked : 2-D array
630
+ The array formed by stacking the given arrays.
631
+
632
+ See Also
633
+ --------
634
+ stack, hstack, vstack, concatenate
635
+
636
+ Examples
637
+ --------
638
+ >>> a = np.array((1,2,3))
639
+ >>> b = np.array((2,3,4))
640
+ >>> np.column_stack((a,b))
641
+ array([[1, 2],
642
+ [2, 3],
643
+ [3, 4]])
644
+
645
+ """
646
+ arrays = []
647
+ for v in tup:
648
+ arr = asanyarray(v)
649
+ if arr.ndim < 2:
650
+ arr = array(arr, copy=False, subok=True, ndmin=2).T
651
+ arrays.append(arr)
652
+ return _nx.concatenate(arrays, 1)
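The 1-D branch above is just "promote to 2-D, then transpose"; a minimal sketch (not from the file):

```python
import numpy as np

v = np.array([1, 2, 3])
col = np.array(v, copy=False, subok=True, ndmin=2).T  # (1, 3) -> (3, 1)
print(col.shape)                      # (3, 1)
print(np.column_stack((v, v)).shape)  # (3, 2)
```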
653
+
654
+
655
+ def _dstack_dispatcher(tup):
656
+ return _arrays_for_stack_dispatcher(tup)
657
+
658
+
659
+ @array_function_dispatch(_dstack_dispatcher)
660
+ def dstack(tup):
661
+ """
662
+ Stack arrays in sequence depth wise (along third axis).
663
+
664
+ This is equivalent to concatenation along the third axis after 2-D arrays
665
+ of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
666
+ `(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
667
+ `dsplit`.
668
+
669
+ This function makes most sense for arrays with up to 3 dimensions. For
670
+ instance, for pixel-data with a height (first axis), width (second axis),
671
+ and r/g/b channels (third axis). The functions `concatenate`, `stack` and
672
+ `block` provide more general stacking and concatenation operations.
673
+
674
+ Parameters
675
+ ----------
676
+ tup : sequence of arrays
677
+ The arrays must have the same shape along all but the third axis.
678
+ 1-D or 2-D arrays must have the same shape.
679
+
680
+ Returns
681
+ -------
682
+ stacked : ndarray
683
+ The array formed by stacking the given arrays; it will be at least 3-D.
684
+
685
+ See Also
686
+ --------
687
+ concatenate : Join a sequence of arrays along an existing axis.
688
+ stack : Join a sequence of arrays along a new axis.
689
+ block : Assemble an nd-array from nested lists of blocks.
690
+ vstack : Stack arrays in sequence vertically (row wise).
691
+ hstack : Stack arrays in sequence horizontally (column wise).
692
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
693
+ dsplit : Split array along third axis.
694
+
695
+ Examples
696
+ --------
697
+ >>> a = np.array((1,2,3))
698
+ >>> b = np.array((2,3,4))
699
+ >>> np.dstack((a,b))
700
+ array([[[1, 2],
701
+ [2, 3],
702
+ [3, 4]]])
703
+
704
+ >>> a = np.array([[1],[2],[3]])
705
+ >>> b = np.array([[2],[3],[4]])
706
+ >>> np.dstack((a,b))
707
+ array([[[1, 2]],
708
+ [[2, 3]],
709
+ [[3, 4]]])
710
+
711
+ """
712
+ arrs = atleast_3d(*tup)
713
+ if not isinstance(arrs, list):
714
+ arrs = [arrs]
715
+ return _nx.concatenate(arrs, 2)
716
+
717
+
718
+ def _replace_zero_by_x_arrays(sub_arys):
719
+ for i in range(len(sub_arys)):
720
+ if _nx.ndim(sub_arys[i]) == 0:
721
+ sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
722
+ elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]), 0)):
723
+ sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
724
+ return sub_arys
725
+
726
+
727
+ def _array_split_dispatcher(ary, indices_or_sections, axis=None):
728
+ return (ary, indices_or_sections)
729
+
730
+
731
+ @array_function_dispatch(_array_split_dispatcher)
732
+ def array_split(ary, indices_or_sections, axis=0):
733
+ """
734
+ Split an array into multiple sub-arrays.
735
+
736
+ Please refer to the ``split`` documentation. The only difference
737
+ between these functions is that ``array_split`` allows
738
+ `indices_or_sections` to be an integer that does *not* equally
739
+ divide the axis. For an array of length l that should be split
740
+ into n sections, it returns l % n sub-arrays of size l//n + 1
741
+ and the rest of size l//n.
742
+
743
+ See Also
744
+ --------
745
+ split : Split array into multiple sub-arrays of equal size.
746
+
747
+ Examples
748
+ --------
749
+ >>> x = np.arange(8.0)
750
+ >>> np.array_split(x, 3)
751
+ [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
752
+
753
+ >>> x = np.arange(9)
754
+ >>> np.array_split(x, 4)
755
+ [array([0, 1, 2]), array([3, 4]), array([5, 6]), array([7, 8])]
756
+
757
+ """
758
+ try:
759
+ Ntotal = ary.shape[axis]
760
+ except AttributeError:
761
+ Ntotal = len(ary)
762
+ try:
763
+ # handle array case.
764
+ Nsections = len(indices_or_sections) + 1
765
+ div_points = [0] + list(indices_or_sections) + [Ntotal]
766
+ except TypeError:
767
+ # indices_or_sections is a scalar, not an array.
768
+ Nsections = int(indices_or_sections)
769
+ if Nsections <= 0:
770
+ raise ValueError('number of sections must be larger than 0.') from None
771
+ Neach_section, extras = divmod(Ntotal, Nsections)
772
+ section_sizes = ([0] +
773
+ extras * [Neach_section+1] +
774
+ (Nsections-extras) * [Neach_section])
775
+ div_points = _nx.array(section_sizes, dtype=_nx.intp).cumsum()
776
+
777
+ sub_arys = []
778
+ sary = _nx.swapaxes(ary, axis, 0)
779
+ for i in range(Nsections):
780
+ st = div_points[i]
781
+ end = div_points[i + 1]
782
+ sub_arys.append(_nx.swapaxes(sary[st:end], axis, 0))
783
+
784
+ return sub_arys
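A worked check of the size rule in the docstring (a sketch, not part of the file): for l = 10 and n = 4, the l % n = 2 leading sections get size l//n + 1 = 3 and the remaining two get size l//n = 2.

```python
import numpy as np

parts = np.array_split(np.arange(10), 4)
print([len(p) for p in parts])  # [3, 3, 2, 2]
```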
785
+
786
+
787
+ def _split_dispatcher(ary, indices_or_sections, axis=None):
788
+ return (ary, indices_or_sections)
789
+
790
+
791
+ @array_function_dispatch(_split_dispatcher)
792
+ def split(ary, indices_or_sections, axis=0):
793
+ """
794
+ Split an array into multiple sub-arrays as views into `ary`.
795
+
796
+ Parameters
797
+ ----------
798
+ ary : ndarray
799
+ Array to be divided into sub-arrays.
800
+ indices_or_sections : int or 1-D array
801
+ If `indices_or_sections` is an integer, N, the array will be divided
802
+ into N equal arrays along `axis`. If such a split is not possible,
803
+ an error is raised.
804
+
805
+ If `indices_or_sections` is a 1-D array of sorted integers, the entries
806
+ indicate where along `axis` the array is split. For example,
807
+ ``[2, 3]`` would, for ``axis=0``, result in
808
+
809
+ - ary[:2]
810
+ - ary[2:3]
811
+ - ary[3:]
812
+
813
+ If an index exceeds the dimension of the array along `axis`,
814
+ an empty sub-array is returned correspondingly.
815
+ axis : int, optional
816
+ The axis along which to split, default is 0.
817
+
818
+ Returns
819
+ -------
820
+ sub-arrays : list of ndarrays
821
+ A list of sub-arrays as views into `ary`.
822
+
823
+ Raises
824
+ ------
825
+ ValueError
826
+ If `indices_or_sections` is given as an integer, but
827
+ a split does not result in equal division.
828
+
829
+ See Also
830
+ --------
831
+ array_split : Split an array into multiple sub-arrays of equal or
832
+ near-equal size. Does not raise an exception if
833
+ an equal division cannot be made.
834
+ hsplit : Split array into multiple sub-arrays horizontally (column-wise).
835
+ vsplit : Split array into multiple sub-arrays vertically (row wise).
836
+ dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
837
+ concatenate : Join a sequence of arrays along an existing axis.
838
+ stack : Join a sequence of arrays along a new axis.
839
+ hstack : Stack arrays in sequence horizontally (column wise).
840
+ vstack : Stack arrays in sequence vertically (row wise).
841
+ dstack : Stack arrays in sequence depth wise (along third dimension).
842
+
843
+ Examples
844
+ --------
845
+ >>> x = np.arange(9.0)
846
+ >>> np.split(x, 3)
847
+ [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
848
+
849
+ >>> x = np.arange(8.0)
850
+ >>> np.split(x, [3, 5, 6, 10])
851
+ [array([0., 1., 2.]),
852
+ array([3., 4.]),
853
+ array([5.]),
854
+ array([6., 7.]),
855
+ array([], dtype=float64)]
856
+
857
+ """
858
+ try:
859
+ len(indices_or_sections)
860
+ except TypeError:
861
+ sections = indices_or_sections
862
+ N = ary.shape[axis]
863
+ if N % sections:
864
+ raise ValueError(
865
+ 'array split does not result in an equal division') from None
866
+ return array_split(ary, indices_or_sections, axis)
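The only behavioural difference from `array_split` is the equal-division check above; a quick sketch (not from the file):

```python
import numpy as np

x = np.arange(7)
print([len(p) for p in np.array_split(x, 3)])  # [3, 2, 2] -- tolerated
try:
    np.split(x, 3)
except ValueError as e:
    print(e)  # array split does not result in an equal division
```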
867
+
868
+
869
+ def _hvdsplit_dispatcher(ary, indices_or_sections):
870
+ return (ary, indices_or_sections)
871
+
872
+
873
+ @array_function_dispatch(_hvdsplit_dispatcher)
874
+ def hsplit(ary, indices_or_sections):
875
+ """
876
+ Split an array into multiple sub-arrays horizontally (column-wise).
877
+
878
+ Please refer to the `split` documentation. `hsplit` is equivalent
879
+ to `split` with ``axis=1``; the array is always split along the second
880
+ axis except for 1-D arrays, where it is split at ``axis=0``.
881
+
882
+ See Also
883
+ --------
884
+ split : Split an array into multiple sub-arrays of equal size.
885
+
886
+ Examples
887
+ --------
888
+ >>> x = np.arange(16.0).reshape(4, 4)
889
+ >>> x
890
+ array([[ 0., 1., 2., 3.],
891
+ [ 4., 5., 6., 7.],
892
+ [ 8., 9., 10., 11.],
893
+ [12., 13., 14., 15.]])
894
+ >>> np.hsplit(x, 2)
895
+ [array([[ 0., 1.],
896
+ [ 4., 5.],
897
+ [ 8., 9.],
898
+ [12., 13.]]),
899
+ array([[ 2., 3.],
900
+ [ 6., 7.],
901
+ [10., 11.],
902
+ [14., 15.]])]
903
+ >>> np.hsplit(x, np.array([3, 6]))
904
+ [array([[ 0., 1., 2.],
905
+ [ 4., 5., 6.],
906
+ [ 8., 9., 10.],
907
+ [12., 13., 14.]]),
908
+ array([[ 3.],
909
+ [ 7.],
910
+ [11.],
911
+ [15.]]),
912
+ array([], shape=(4, 0), dtype=float64)]
913
+
914
+ With a higher dimensional array the split is still along the second axis.
915
+
916
+ >>> x = np.arange(8.0).reshape(2, 2, 2)
917
+ >>> x
918
+ array([[[0., 1.],
919
+ [2., 3.]],
920
+ [[4., 5.],
921
+ [6., 7.]]])
922
+ >>> np.hsplit(x, 2)
923
+ [array([[[0., 1.]],
924
+ [[4., 5.]]]),
925
+ array([[[2., 3.]],
926
+ [[6., 7.]]])]
927
+
928
+ With a 1-D array, the split is along axis 0.
929
+
930
+ >>> x = np.array([0, 1, 2, 3, 4, 5])
931
+ >>> np.hsplit(x, 2)
932
+ [array([0, 1, 2]), array([3, 4, 5])]
933
+
934
+ """
935
+ if _nx.ndim(ary) == 0:
936
+ raise ValueError('hsplit only works on arrays of 1 or more dimensions')
937
+ if ary.ndim > 1:
938
+ return split(ary, indices_or_sections, 1)
939
+ else:
940
+ return split(ary, indices_or_sections, 0)
941
+
942
+
943
+ @array_function_dispatch(_hvdsplit_dispatcher)
944
+ def vsplit(ary, indices_or_sections):
945
+ """
946
+ Split an array into multiple sub-arrays vertically (row-wise).
947
+
948
+ Please refer to the ``split`` documentation. ``vsplit`` is equivalent
949
+ to ``split`` with ``axis=0`` (default); the array is always split along the
950
+ first axis regardless of the array dimension.
951
+
952
+ See Also
953
+ --------
954
+ split : Split an array into multiple sub-arrays of equal size.
955
+
956
+ Examples
957
+ --------
958
+ >>> x = np.arange(16.0).reshape(4, 4)
959
+ >>> x
960
+ array([[ 0., 1., 2., 3.],
961
+ [ 4., 5., 6., 7.],
962
+ [ 8., 9., 10., 11.],
963
+ [12., 13., 14., 15.]])
964
+ >>> np.vsplit(x, 2)
965
+ [array([[0., 1., 2., 3.],
966
+ [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],
967
+ [12., 13., 14., 15.]])]
968
+ >>> np.vsplit(x, np.array([3, 6]))
969
+ [array([[ 0., 1., 2., 3.],
970
+ [ 4., 5., 6., 7.],
971
+ [ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)]
972
+
973
+ With a higher dimensional array the split is still along the first axis.
974
+
975
+ >>> x = np.arange(8.0).reshape(2, 2, 2)
976
+ >>> x
977
+ array([[[0., 1.],
978
+ [2., 3.]],
979
+ [[4., 5.],
980
+ [6., 7.]]])
981
+ >>> np.vsplit(x, 2)
982
+ [array([[[0., 1.],
983
+ [2., 3.]]]), array([[[4., 5.],
984
+ [6., 7.]]])]
985
+
986
+ """
987
+ if _nx.ndim(ary) < 2:
988
+ raise ValueError('vsplit only works on arrays of 2 or more dimensions')
989
+ return split(ary, indices_or_sections, 0)
990
+
991
+
992
+ @array_function_dispatch(_hvdsplit_dispatcher)
993
+ def dsplit(ary, indices_or_sections):
994
+ """
995
+ Split array into multiple sub-arrays along the 3rd axis (depth).
996
+
997
+ Please refer to the `split` documentation. `dsplit` is equivalent
998
+ to `split` with ``axis=2``; the array is always split along the third
999
+ axis provided the array dimension is greater than or equal to 3.
1000
+
1001
+ See Also
1002
+ --------
1003
+ split : Split an array into multiple sub-arrays of equal size.
1004
+
1005
+ Examples
1006
+ --------
1007
+ >>> x = np.arange(16.0).reshape(2, 2, 4)
1008
+ >>> x
1009
+ array([[[ 0., 1., 2., 3.],
1010
+ [ 4., 5., 6., 7.]],
1011
+ [[ 8., 9., 10., 11.],
1012
+ [12., 13., 14., 15.]]])
1013
+ >>> np.dsplit(x, 2)
1014
+ [array([[[ 0., 1.],
1015
+ [ 4., 5.]],
1016
+ [[ 8., 9.],
1017
+ [12., 13.]]]), array([[[ 2., 3.],
1018
+ [ 6., 7.]],
1019
+ [[10., 11.],
1020
+ [14., 15.]]])]
1021
+ >>> np.dsplit(x, np.array([3, 6]))
1022
+ [array([[[ 0., 1., 2.],
1023
+ [ 4., 5., 6.]],
1024
+ [[ 8., 9., 10.],
1025
+ [12., 13., 14.]]]),
1026
+ array([[[ 3.],
1027
+ [ 7.]],
1028
+ [[11.],
1029
+ [15.]]]),
1030
+ array([], shape=(2, 2, 0), dtype=float64)]
1031
+ """
1032
+ if _nx.ndim(ary) < 3:
1033
+ raise ValueError('dsplit only works on arrays of 3 or more dimensions')
1034
+ return split(ary, indices_or_sections, 2)
1035
+
1036
+
1037
+ def get_array_prepare(*args):
1038
+ """Find the wrapper for the array with the highest priority.
1039
+
1040
+ In case of ties, leftmost wins. If no wrapper is found, return None.
1041
+ """
1042
+ wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
1043
+ x.__array_prepare__) for i, x in enumerate(args)
1044
+ if hasattr(x, '__array_prepare__'))
1045
+ if wrappers:
1046
+ return wrappers[-1][-1]
1047
+ return None
1048
+
1049
+
1050
+ def get_array_wrap(*args):
1051
+ """Find the wrapper for the array with the highest priority.
1052
+
1053
+ In case of ties, leftmost wins. If no wrapper is found, return None.
1054
+ """
1055
+ wrappers = sorted((getattr(x, '__array_priority__', 0), -i,
1056
+ x.__array_wrap__) for i, x in enumerate(args)
1057
+ if hasattr(x, '__array_wrap__'))
1058
+ if wrappers:
1059
+ return wrappers[-1][-1]
1060
+ return None
1061
+
1062
+
1063
+ def _kron_dispatcher(a, b):
1064
+ return (a, b)
1065
+
1066
+
1067
+ @array_function_dispatch(_kron_dispatcher)
1068
+ def kron(a, b):
1069
+ """
1070
+ Kronecker product of two arrays.
1071
+
1072
+ Computes the Kronecker product, a composite array made of blocks of the
1073
+ second array scaled by the first.
1074
+
1075
+ Parameters
1076
+ ----------
1077
+ a, b : array_like
1078
+
1079
+ Returns
1080
+ -------
1081
+ out : ndarray
1082
+
1083
+ See Also
1084
+ --------
1085
+ outer : The outer product
1086
+
1087
+ Notes
1088
+ -----
1089
+ The function assumes that the number of dimensions of `a` and `b`
1090
+ are the same, if necessary prepending the smallest with ones.
1091
+ If ``a.shape = (r0,r1,..,rN)`` and ``b.shape = (s0,s1,...,sN)``,
1092
+ the Kronecker product has shape ``(r0*s0, r1*s1, ..., rN*sN)``.
1093
+ The elements are products of elements from `a` and `b`, organized
1094
+ explicitly by::
1095
+
1096
+ kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
1097
+
1098
+ where::
1099
+
1100
+ kt = it * st + jt, t = 0,...,N
1101
+
1102
+ In the common 2-D case (N=1), the block structure can be visualized::
1103
+
1104
+ [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
1105
+ [ ... ... ],
1106
+ [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
1107
+
1108
+
1109
+ Examples
1110
+ --------
1111
+ >>> np.kron([1,10,100], [5,6,7])
1112
+ array([ 5, 6, 7, ..., 500, 600, 700])
1113
+ >>> np.kron([5,6,7], [1,10,100])
1114
+ array([ 5, 50, 500, ..., 7, 70, 700])
1115
+
1116
+ >>> np.kron(np.eye(2), np.ones((2,2)))
1117
+ array([[1., 1., 0., 0.],
1118
+ [1., 1., 0., 0.],
1119
+ [0., 0., 1., 1.],
1120
+ [0., 0., 1., 1.]])
1121
+
1122
+ >>> a = np.arange(100).reshape((2,5,2,5))
1123
+ >>> b = np.arange(24).reshape((2,3,4))
1124
+ >>> c = np.kron(a,b)
1125
+ >>> c.shape
1126
+ (2, 10, 6, 20)
1127
+ >>> I = (1,3,0,2)
1128
+ >>> J = (0,2,1)
1129
+ >>> J1 = (0,) + J # extend to ndim=4
1130
+ >>> S1 = (1,) + b.shape
1131
+ >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
1132
+ >>> c[K] == a[I]*b[J]
1133
+ True
1134
+
1135
+ """
1136
+ # Working:
1137
+ # 1. Equalise the shapes by prepending smaller array with 1s
1138
+ # 2. Expand shapes of both the arrays by adding new axes at
1139
+ # odd positions for 1st array and even positions for 2nd
1140
+ # 3. Compute the product of the modified array
1141
+ # 4. The innermost array elements now contain the rows of
1142
+ # the Kronecker product
1143
+ # 5. Reshape the result to kron's shape, which is the same as
1144
+ # the product of the shapes of the two arrays.
1145
+ b = asanyarray(b)
1146
+ a = array(a, copy=False, subok=True, ndmin=b.ndim)
1147
+ is_any_mat = isinstance(a, matrix) or isinstance(b, matrix)
1148
+ ndb, nda = b.ndim, a.ndim
1149
+ nd = max(ndb, nda)
1150
+
1151
+ if (nda == 0 or ndb == 0):
1152
+ return _nx.multiply(a, b)
1153
+
1154
+ as_ = a.shape
1155
+ bs = b.shape
1156
+ if not a.flags.contiguous:
1157
+ a = reshape(a, as_)
1158
+ if not b.flags.contiguous:
1159
+ b = reshape(b, bs)
1160
+
1161
+ # Equalise the shapes by prepending smaller one with 1s
1162
+ as_ = (1,)*max(0, ndb-nda) + as_
1163
+ bs = (1,)*max(0, nda-ndb) + bs
1164
+
1165
+ # Insert empty dimensions
1166
+ a_arr = expand_dims(a, axis=tuple(range(ndb-nda)))
1167
+ b_arr = expand_dims(b, axis=tuple(range(nda-ndb)))
1168
+
1169
+ # Compute the product
1170
+ a_arr = expand_dims(a_arr, axis=tuple(range(1, nd*2, 2)))
1171
+ b_arr = expand_dims(b_arr, axis=tuple(range(0, nd*2, 2)))
1172
+ # In case of `mat`, convert result to `array`
1173
+ result = _nx.multiply(a_arr, b_arr, subok=(not is_any_mat))
1174
+
1175
+ # Reshape back
1176
+ result = result.reshape(_nx.multiply(as_, bs))
1177
+
1178
+ return result if not is_any_mat else matrix(result, copy=False)
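A spot check (a sketch, not part of the file) of the index identity ``kt = it*st + jt`` from the Notes:

```python
import numpy as np

a = np.array([[1, 10],
              [100, 1000]])
b = np.arange(6).reshape(2, 3)
c = np.kron(a, b)

i, j = (1, 0), (1, 2)  # arbitrary multi-indices into a and b
k = (i[0] * b.shape[0] + j[0], i[1] * b.shape[1] + j[1])
assert c[k] == a[i] * b[j]  # 100 * 5 == 500
```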
1179
+
1180
+
1181
+ def _tile_dispatcher(A, reps):
1182
+ return (A, reps)
1183
+
1184
+
1185
+ @array_function_dispatch(_tile_dispatcher)
1186
+ def tile(A, reps):
1187
+ """
1188
+ Construct an array by repeating A the number of times given by reps.
1189
+
1190
+ If `reps` has length ``d``, the result will have dimension of
1191
+ ``max(d, A.ndim)``.
1192
+
1193
+ If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
1194
+ axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
1195
+ or shape (1, 1, 3) for 3-D replication. If this is not the desired
1196
+ behavior, promote `A` to d-dimensions manually before calling this
1197
+ function.
1198
+
1199
+ If ``A.ndim > d``, `reps` is promoted to `A`.ndim by prepending 1's to it.
1200
+ Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
1201
+ (1, 1, 2, 2).
1202
+
1203
+ Note: Although tile may be used for broadcasting, it is strongly
1204
+ recommended to use numpy's broadcasting operations and functions.
1205
+
1206
+ Parameters
1207
+ ----------
1208
+ A : array_like
1209
+ The input array.
1210
+ reps : array_like
1211
+ The number of repetitions of `A` along each axis.
1212
+
1213
+ Returns
1214
+ -------
1215
+ c : ndarray
1216
+ The tiled output array.
1217
+
1218
+ See Also
1219
+ --------
1220
+ repeat : Repeat elements of an array.
1221
+ broadcast_to : Broadcast an array to a new shape
1222
+
1223
+ Examples
1224
+ --------
1225
+ >>> a = np.array([0, 1, 2])
1226
+ >>> np.tile(a, 2)
1227
+ array([0, 1, 2, 0, 1, 2])
1228
+ >>> np.tile(a, (2, 2))
1229
+ array([[0, 1, 2, 0, 1, 2],
1230
+ [0, 1, 2, 0, 1, 2]])
1231
+ >>> np.tile(a, (2, 1, 2))
1232
+ array([[[0, 1, 2, 0, 1, 2]],
1233
+ [[0, 1, 2, 0, 1, 2]]])
1234
+
1235
+ >>> b = np.array([[1, 2], [3, 4]])
1236
+ >>> np.tile(b, 2)
1237
+ array([[1, 2, 1, 2],
1238
+ [3, 4, 3, 4]])
1239
+ >>> np.tile(b, (2, 1))
1240
+ array([[1, 2],
1241
+ [3, 4],
1242
+ [1, 2],
1243
+ [3, 4]])
1244
+
1245
+ >>> c = np.array([1,2,3,4])
1246
+ >>> np.tile(c,(4,1))
1247
+ array([[1, 2, 3, 4],
1248
+ [1, 2, 3, 4],
1249
+ [1, 2, 3, 4],
1250
+ [1, 2, 3, 4]])
1251
+ """
1252
+ try:
1253
+ tup = tuple(reps)
1254
+ except TypeError:
1255
+ tup = (reps,)
1256
+ d = len(tup)
1257
+ if all(x == 1 for x in tup) and isinstance(A, _nx.ndarray):
1258
+ # Fixes the problem that the function does not make a copy if A is a
1259
+ # numpy array and the repetitions are 1 in all dimensions
1260
+ return _nx.array(A, copy=True, subok=True, ndmin=d)
1261
+ else:
1262
+ # Note that no copy of zero-sized arrays is made. However since they
1263
+ # have no data there is no risk of an inadvertent overwrite.
1264
+ c = _nx.array(A, copy=False, subok=True, ndmin=d)
1265
+ if (d < c.ndim):
1266
+ tup = (1,)*(c.ndim-d) + tup
1267
+ shape_out = tuple(s*t for s, t in zip(c.shape, tup))
1268
+ n = c.size
1269
+ if n > 0:
1270
+ for dim_in, nrep in zip(c.shape, tup):
1271
+ if nrep != 1:
1272
+ c = c.reshape(-1, n).repeat(nrep, 0)
1273
+ n //= dim_in
1274
+ return c.reshape(shape_out)
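A quick sketch (not from the file) of the `reps` promotion described in the docstring: for ``A.ndim > d``, ones are prepended to `reps`:

```python
import numpy as np

A = np.zeros((2, 3, 4, 5))
print(np.tile(A, (2, 2)).shape)        # (2, 3, 8, 10)
print(np.tile(A, (1, 1, 2, 2)).shape)  # (2, 3, 8, 10) -- identical
```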
env-llmeval/lib/python3.10/site-packages/numpy/lib/twodim_base.py ADDED
@@ -0,0 +1,1183 @@
1
+ """ Basic functions for manipulating 2d arrays
2
+
3
+ """
4
+ import functools
5
+ import operator
6
+
7
+ from numpy.core.numeric import (
8
+ asanyarray, arange, zeros, greater_equal, multiply, ones,
9
+ asarray, where, int8, int16, int32, int64, intp, empty, promote_types,
10
+ diagonal, nonzero, indices
11
+ )
12
+ from numpy.core.overrides import set_array_function_like_doc, set_module
13
+ from numpy.core import overrides
14
+ from numpy.core import iinfo
15
+ from numpy.lib.stride_tricks import broadcast_to
16
+
17
+
18
+ __all__ = [
19
+ 'diag', 'diagflat', 'eye', 'fliplr', 'flipud', 'tri', 'triu',
20
+ 'tril', 'vander', 'histogram2d', 'mask_indices', 'tril_indices',
21
+ 'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
22
+
23
+
24
+ array_function_dispatch = functools.partial(
25
+ overrides.array_function_dispatch, module='numpy')
26
+
27
+
28
+ i1 = iinfo(int8)
29
+ i2 = iinfo(int16)
30
+ i4 = iinfo(int32)
31
+
32
+
33
+ def _min_int(low, high):
34
+ """ get small int that fits the range """
35
+ if high <= i1.max and low >= i1.min:
36
+ return int8
37
+ if high <= i2.max and low >= i2.min:
38
+ return int16
39
+ if high <= i4.max and low >= i4.min:
40
+ return int32
41
+ return int64
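A hypothetical use of this private helper (only meaningful inside this module): it picks the narrowest signed dtype covering ``[low, high]``, which `tri` below uses to keep its index aranges small:

```python
print(_min_int(0, 100))     # <class 'numpy.int8'>
print(_min_int(-200, 200))  # <class 'numpy.int16'>
print(_min_int(0, 2**40))   # <class 'numpy.int64'>
```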
42
+
43
+
44
+ def _flip_dispatcher(m):
45
+ return (m,)
46
+
47
+
48
+ @array_function_dispatch(_flip_dispatcher)
49
+ def fliplr(m):
50
+ """
51
+ Reverse the order of elements along axis 1 (left/right).
52
+
53
+ For a 2-D array, this flips the entries in each row in the left/right
54
+ direction. Columns are preserved, but appear in a different order than
55
+ before.
56
+
57
+ Parameters
58
+ ----------
59
+ m : array_like
60
+ Input array, must be at least 2-D.
61
+
62
+ Returns
63
+ -------
64
+ f : ndarray
65
+ A view of `m` with the columns reversed. Since a view
66
+ is returned, this operation is :math:`\\mathcal O(1)`.
67
+
68
+ See Also
69
+ --------
70
+ flipud : Flip array in the up/down direction.
71
+ flip : Flip array in one or more dimensions.
72
+ rot90 : Rotate array counterclockwise.
73
+
74
+ Notes
75
+ -----
76
+ Equivalent to ``m[:,::-1]`` or ``np.flip(m, axis=1)``.
77
+ Requires the array to be at least 2-D.
78
+
79
+ Examples
80
+ --------
81
+ >>> A = np.diag([1.,2.,3.])
82
+ >>> A
83
+ array([[1., 0., 0.],
84
+ [0., 2., 0.],
85
+ [0., 0., 3.]])
86
+ >>> np.fliplr(A)
87
+ array([[0., 0., 1.],
88
+ [0., 2., 0.],
89
+ [3., 0., 0.]])
90
+
91
+ >>> A = np.random.randn(2,3,5)
92
+ >>> np.all(np.fliplr(A) == A[:,::-1,...])
93
+ True
94
+
95
+ """
96
+ m = asanyarray(m)
97
+ if m.ndim < 2:
98
+ raise ValueError("Input must be >= 2-d.")
99
+ return m[:, ::-1]
100
+
101
+
102
+ @array_function_dispatch(_flip_dispatcher)
103
+ def flipud(m):
104
+ """
105
+ Reverse the order of elements along axis 0 (up/down).
106
+
107
+ For a 2-D array, this flips the entries in each column in the up/down
108
+ direction. Rows are preserved, but appear in a different order than before.
109
+
110
+ Parameters
111
+ ----------
112
+ m : array_like
113
+ Input array.
114
+
115
+ Returns
116
+ -------
117
+ out : array_like
118
+ A view of `m` with the rows reversed. Since a view is
119
+ returned, this operation is :math:`\\mathcal O(1)`.
120
+
121
+ See Also
122
+ --------
123
+ fliplr : Flip array in the left/right direction.
124
+ flip : Flip array in one or more dimensions.
125
+ rot90 : Rotate array counterclockwise.
126
+
127
+ Notes
128
+ -----
129
+ Equivalent to ``m[::-1, ...]`` or ``np.flip(m, axis=0)``.
130
+ Requires the array to be at least 1-D.
131
+
132
+ Examples
133
+ --------
134
+ >>> A = np.diag([1.0, 2, 3])
135
+ >>> A
136
+ array([[1., 0., 0.],
137
+ [0., 2., 0.],
138
+ [0., 0., 3.]])
139
+ >>> np.flipud(A)
140
+ array([[0., 0., 3.],
141
+ [0., 2., 0.],
142
+ [1., 0., 0.]])
143
+
144
+ >>> A = np.random.randn(2,3,5)
145
+ >>> np.all(np.flipud(A) == A[::-1,...])
146
+ True
147
+
148
+ >>> np.flipud([1,2])
149
+ array([2, 1])
150
+
151
+ """
152
+ m = asanyarray(m)
153
+ if m.ndim < 1:
154
+ raise ValueError("Input must be >= 1-d.")
155
+ return m[::-1, ...]
156
+
157
+
158
+ @set_array_function_like_doc
159
+ @set_module('numpy')
160
+ def eye(N, M=None, k=0, dtype=float, order='C', *, like=None):
161
+ """
162
+ Return a 2-D array with ones on the diagonal and zeros elsewhere.
163
+
164
+ Parameters
165
+ ----------
166
+ N : int
167
+ Number of rows in the output.
168
+ M : int, optional
169
+ Number of columns in the output. If None, defaults to `N`.
170
+ k : int, optional
171
+ Index of the diagonal: 0 (the default) refers to the main diagonal,
172
+ a positive value refers to an upper diagonal, and a negative value
173
+ to a lower diagonal.
174
+ dtype : data-type, optional
175
+ Data-type of the returned array.
176
+ order : {'C', 'F'}, optional
177
+ Whether the output should be stored in row-major (C-style) or
178
+ column-major (Fortran-style) order in memory.
179
+
180
+ .. versionadded:: 1.14.0
181
+ ${ARRAY_FUNCTION_LIKE}
182
+
183
+ .. versionadded:: 1.20.0
184
+
185
+ Returns
186
+ -------
187
+ I : ndarray of shape (N,M)
188
+ An array where all elements are equal to zero, except for the `k`-th
189
+ diagonal, whose values are equal to one.
190
+
191
+ See Also
192
+ --------
193
+ identity : (almost) equivalent function
194
+ diag : diagonal 2-D array from a 1-D array specified by the user.
195
+
196
+ Examples
197
+ --------
198
+ >>> np.eye(2, dtype=int)
199
+ array([[1, 0],
200
+ [0, 1]])
201
+ >>> np.eye(3, k=1)
202
+ array([[0., 1., 0.],
203
+ [0., 0., 1.],
204
+ [0., 0., 0.]])
205
+
206
+ """
207
+ if like is not None:
208
+ return _eye_with_like(like, N, M=M, k=k, dtype=dtype, order=order)
209
+ if M is None:
210
+ M = N
211
+ m = zeros((N, M), dtype=dtype, order=order)
212
+ if k >= M:
213
+ return m
214
+ # Ensure M and k are integers, so we don't get any surprise casting
215
+ # results in the expressions `M-k` and `M+1` used below. This avoids
216
+ # a problem with inputs with type (for example) np.uint64.
217
+ M = operator.index(M)
218
+ k = operator.index(k)
219
+ if k >= 0:
220
+ i = k
221
+ else:
222
+ i = (-k) * M
223
+ m[:M-k].flat[i::M+1] = 1
224
+ return m
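A replay (a sketch, not part of the file) of the strided flat-index trick above, which walks the flattened view with stride ``M + 1`` so each step moves one row down and one column right:

```python
import numpy as np

N = M = 4
k = 1
m = np.zeros((N, M))
i = k                          # k >= 0: start k columns in on row 0
m[:M - k].flat[i::M + 1] = 1   # hits (0, 1), (1, 2), (2, 3)
assert np.array_equal(m, np.eye(4, k=1))
```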
225
+
226
+
227
+ _eye_with_like = array_function_dispatch()(eye)
228
+
229
+
230
+ def _diag_dispatcher(v, k=None):
231
+ return (v,)
232
+
233
+
234
+ @array_function_dispatch(_diag_dispatcher)
235
+ def diag(v, k=0):
236
+ """
237
+ Extract a diagonal or construct a diagonal array.
238
+
239
+ See the more detailed documentation for ``numpy.diagonal`` if you use this
240
+ function to extract a diagonal and wish to write to the resulting array;
241
+ whether it returns a copy or a view depends on what version of numpy you
242
+ are using.
243
+
244
+ Parameters
245
+ ----------
246
+ v : array_like
247
+ If `v` is a 2-D array, return a copy of its `k`-th diagonal.
248
+ If `v` is a 1-D array, return a 2-D array with `v` on the `k`-th
249
+ diagonal.
250
+ k : int, optional
251
+ Diagonal in question. The default is 0. Use `k>0` for diagonals
252
+ above the main diagonal, and `k<0` for diagonals below the main
253
+ diagonal.
254
+
255
+ Returns
256
+ -------
257
+ out : ndarray
258
+ The extracted diagonal or constructed diagonal array.
259
+
260
+ See Also
261
+ --------
262
+ diagonal : Return specified diagonals.
263
+ diagflat : Create a 2-D array with the flattened input as a diagonal.
264
+ trace : Sum along diagonals.
265
+ triu : Upper triangle of an array.
266
+ tril : Lower triangle of an array.
267
+
268
+ Examples
269
+ --------
270
+ >>> x = np.arange(9).reshape((3,3))
271
+ >>> x
272
+ array([[0, 1, 2],
273
+ [3, 4, 5],
274
+ [6, 7, 8]])
275
+
276
+ >>> np.diag(x)
277
+ array([0, 4, 8])
278
+ >>> np.diag(x, k=1)
279
+ array([1, 5])
280
+ >>> np.diag(x, k=-1)
281
+ array([3, 7])
282
+
283
+ >>> np.diag(np.diag(x))
284
+ array([[0, 0, 0],
285
+ [0, 4, 0],
286
+ [0, 0, 8]])
287
+
288
+ """
289
+ v = asanyarray(v)
290
+ s = v.shape
291
+ if len(s) == 1:
292
+ n = s[0]+abs(k)
293
+ res = zeros((n, n), v.dtype)
294
+ if k >= 0:
295
+ i = k
296
+ else:
297
+ i = (-k) * n
298
+ res[:n-k].flat[i::n+1] = v
299
+ return res
300
+ elif len(s) == 2:
301
+ return diagonal(v, k)
302
+ else:
303
+ raise ValueError("Input must be 1- or 2-d.")
304
+
305
+
306
+ @array_function_dispatch(_diag_dispatcher)
307
+ def diagflat(v, k=0):
308
+ """
309
+ Create a two-dimensional array with the flattened input as a diagonal.
310
+
311
+ Parameters
312
+ ----------
313
+ v : array_like
314
+ Input data, which is flattened and set as the `k`-th
315
+ diagonal of the output.
316
+ k : int, optional
317
+ Diagonal to set; 0, the default, corresponds to the "main" diagonal,
318
+ a positive (negative) `k` giving the number of the diagonal above
319
+ (below) the main.
320
+
321
+ Returns
322
+ -------
323
+ out : ndarray
324
+ The 2-D output array.
325
+
326
+ See Also
327
+ --------
328
+ diag : MATLAB work-alike for 1-D and 2-D arrays.
329
+ diagonal : Return specified diagonals.
330
+ trace : Sum along diagonals.
331
+
332
+ Examples
333
+ --------
334
+ >>> np.diagflat([[1,2], [3,4]])
335
+ array([[1, 0, 0, 0],
336
+ [0, 2, 0, 0],
337
+ [0, 0, 3, 0],
338
+ [0, 0, 0, 4]])
339
+
340
+ >>> np.diagflat([1,2], 1)
341
+ array([[0, 1, 0],
342
+ [0, 0, 2],
343
+ [0, 0, 0]])
344
+
345
+ """
346
+ try:
347
+ wrap = v.__array_wrap__
348
+ except AttributeError:
349
+ wrap = None
350
+ v = asarray(v).ravel()
351
+ s = len(v)
352
+ n = s + abs(k)
353
+ res = zeros((n, n), v.dtype)
354
+ if (k >= 0):
355
+ i = arange(0, n-k, dtype=intp)
356
+ fi = i+k+i*n
357
+ else:
358
+ i = arange(0, n+k, dtype=intp)
359
+ fi = i+(i-k)*n
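+ # The flat index of entry (r, c) in an (n, n) array is r*n + c, so the
+ # targets are (i, i+k) -> i + k + i*n for k >= 0 and (i-k, i) ->
+ # i + (i-k)*n for k < 0.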
360
+ res.flat[fi] = v
361
+ if not wrap:
362
+ return res
363
+ return wrap(res)
364
+
365
+
366
+ @set_array_function_like_doc
367
+ @set_module('numpy')
368
+ def tri(N, M=None, k=0, dtype=float, *, like=None):
369
+ """
370
+ An array with ones at and below the given diagonal and zeros elsewhere.
371
+
372
+ Parameters
373
+ ----------
374
+ N : int
375
+ Number of rows in the array.
376
+ M : int, optional
377
+ Number of columns in the array.
378
+ By default, `M` is taken equal to `N`.
379
+ k : int, optional
380
+ The sub-diagonal at and below which the array is filled.
381
+ `k` = 0 is the main diagonal, while `k` < 0 is below it,
382
+ and `k` > 0 is above. The default is 0.
383
+ dtype : dtype, optional
384
+ Data type of the returned array. The default is float.
385
+ ${ARRAY_FUNCTION_LIKE}
386
+
387
+ .. versionadded:: 1.20.0
388
+
389
+ Returns
390
+ -------
391
+ tri : ndarray of shape (N, M)
392
+ Array with its lower triangle filled with ones and zero elsewhere;
393
+ in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
394
+
395
+ Examples
396
+ --------
397
+ >>> np.tri(3, 5, 2, dtype=int)
398
+ array([[1, 1, 1, 0, 0],
399
+ [1, 1, 1, 1, 0],
400
+ [1, 1, 1, 1, 1]])
401
+
402
+ >>> np.tri(3, 5, -1)
403
+ array([[0., 0., 0., 0., 0.],
404
+ [1., 0., 0., 0., 0.],
405
+ [1., 1., 0., 0., 0.]])
406
+
407
+ """
408
+ if like is not None:
409
+ return _tri_with_like(like, N, M=M, k=k, dtype=dtype)
410
+
411
+ if M is None:
412
+ M = N
413
+
414
+ m = greater_equal.outer(arange(N, dtype=_min_int(0, N)),
415
+ arange(-k, M-k, dtype=_min_int(-k, M - k)))
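+ # The outer comparison builds an (N, M) boolean mask with
+ # mask[i, j] = (i >= j - k), i.e. True at and below diagonal k.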
416
+
417
+ # Avoid making a copy if the requested type is already bool
418
+ m = m.astype(dtype, copy=False)
419
+
420
+ return m
421
+
422
+
423
+ _tri_with_like = array_function_dispatch()(tri)
424
+
425
+
426
+ def _trilu_dispatcher(m, k=None):
427
+ return (m,)
428
+
429
+
430
+ @array_function_dispatch(_trilu_dispatcher)
431
+ def tril(m, k=0):
432
+ """
433
+ Lower triangle of an array.
434
+
435
+ Return a copy of an array with elements above the `k`-th diagonal zeroed.
436
+ For arrays with ``ndim`` exceeding 2, `tril` will apply to the final two
437
+ axes.
438
+
439
+ Parameters
440
+ ----------
441
+ m : array_like, shape (..., M, N)
442
+ Input array.
443
+ k : int, optional
444
+ Diagonal above which to zero elements. `k = 0` (the default) is the
445
+ main diagonal, `k < 0` is below it and `k > 0` is above.
446
+
447
+ Returns
448
+ -------
449
+ tril : ndarray, shape (..., M, N)
450
+ Lower triangle of `m`, of same shape and data-type as `m`.
451
+
452
+ See Also
453
+ --------
454
+ triu : same thing, only for the upper triangle
455
+
456
+ Examples
457
+ --------
458
+ >>> np.tril([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
459
+ array([[ 0, 0, 0],
460
+ [ 4, 0, 0],
461
+ [ 7, 8, 0],
462
+ [10, 11, 12]])
463
+
464
+ >>> np.tril(np.arange(3*4*5).reshape(3, 4, 5))
465
+ array([[[ 0, 0, 0, 0, 0],
466
+ [ 5, 6, 0, 0, 0],
467
+ [10, 11, 12, 0, 0],
468
+ [15, 16, 17, 18, 0]],
469
+ [[20, 0, 0, 0, 0],
470
+ [25, 26, 0, 0, 0],
471
+ [30, 31, 32, 0, 0],
472
+ [35, 36, 37, 38, 0]],
473
+ [[40, 0, 0, 0, 0],
474
+ [45, 46, 0, 0, 0],
475
+ [50, 51, 52, 0, 0],
476
+ [55, 56, 57, 58, 0]]])
477
+
478
+ """
479
+ m = asanyarray(m)
480
+ mask = tri(*m.shape[-2:], k=k, dtype=bool)
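+ # zeros(1, m.dtype) below broadcasts against m, so no full-size array
+ # of zeros is allocated; the (M, N) mask applies to the last two axes
+ # of any stacked input.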
481
+
482
+ return where(mask, m, zeros(1, m.dtype))
483
+
484
+
485
+ @array_function_dispatch(_trilu_dispatcher)
486
+ def triu(m, k=0):
487
+ """
488
+ Upper triangle of an array.
489
+
490
+ Return a copy of an array with the elements below the `k`-th diagonal
491
+ zeroed. For arrays with ``ndim`` exceeding 2, `triu` will apply to the
492
+ final two axes.
493
+
494
+ Please refer to the documentation for `tril` for further details.
495
+
496
+ See Also
497
+ --------
498
+ tril : lower triangle of an array
499
+
500
+ Examples
501
+ --------
502
+ >>> np.triu([[1,2,3],[4,5,6],[7,8,9],[10,11,12]], -1)
503
+ array([[ 1, 2, 3],
504
+ [ 4, 5, 6],
505
+ [ 0, 8, 9],
506
+ [ 0, 0, 12]])
507
+
508
+ >>> np.triu(np.arange(3*4*5).reshape(3, 4, 5))
509
+ array([[[ 0, 1, 2, 3, 4],
510
+ [ 0, 6, 7, 8, 9],
511
+ [ 0, 0, 12, 13, 14],
512
+ [ 0, 0, 0, 18, 19]],
513
+ [[20, 21, 22, 23, 24],
514
+ [ 0, 26, 27, 28, 29],
515
+ [ 0, 0, 32, 33, 34],
516
+ [ 0, 0, 0, 38, 39]],
517
+ [[40, 41, 42, 43, 44],
518
+ [ 0, 46, 47, 48, 49],
519
+ [ 0, 0, 52, 53, 54],
520
+ [ 0, 0, 0, 58, 59]]])
521
+
522
+ """
523
+ m = asanyarray(m)
524
+ mask = tri(*m.shape[-2:], k=k-1, dtype=bool)
525
+
526
+ return where(mask, zeros(1, m.dtype), m)
527
+
528
+
529
+ def _vander_dispatcher(x, N=None, increasing=None):
530
+ return (x,)
531
+
532
+
533
+ # Originally borrowed from John Hunter and matplotlib
534
+ @array_function_dispatch(_vander_dispatcher)
535
+ def vander(x, N=None, increasing=False):
536
+ """
537
+ Generate a Vandermonde matrix.
538
+
539
+ The columns of the output matrix are powers of the input vector. The
540
+ order of the powers is determined by the `increasing` boolean argument.
541
+ Specifically, when `increasing` is False, the `i`-th output column is
542
+ the input vector raised element-wise to the power of ``N - i - 1``. Such
543
+ a matrix with a geometric progression in each row is named for Alexandre-
544
+ Theophile Vandermonde.
545
+
546
+ Parameters
547
+ ----------
548
+ x : array_like
549
+ 1-D input array.
550
+ N : int, optional
551
+ Number of columns in the output. If `N` is not specified, a square
552
+ array is returned (``N = len(x)``).
553
+ increasing : bool, optional
554
+ Order of the powers of the columns. If True, the powers increase
555
+ from left to right, if False (the default) they are reversed.
556
+
557
+ .. versionadded:: 1.9.0
558
+
559
+ Returns
560
+ -------
561
+ out : ndarray
562
+ Vandermonde matrix. If `increasing` is False, the first column is
563
+ ``x^(N-1)``, the second ``x^(N-2)`` and so forth. If `increasing` is
564
+ True, the columns are ``x^0, x^1, ..., x^(N-1)``.
565
+
566
+ See Also
567
+ --------
568
+ polynomial.polynomial.polyvander
569
+
570
+ Examples
571
+ --------
572
+ >>> x = np.array([1, 2, 3, 5])
573
+ >>> N = 3
574
+ >>> np.vander(x, N)
575
+ array([[ 1, 1, 1],
576
+ [ 4, 2, 1],
577
+ [ 9, 3, 1],
578
+ [25, 5, 1]])
579
+
580
+ >>> np.column_stack([x**(N-1-i) for i in range(N)])
581
+ array([[ 1, 1, 1],
582
+ [ 4, 2, 1],
583
+ [ 9, 3, 1],
584
+ [25, 5, 1]])
585
+
586
+ >>> x = np.array([1, 2, 3, 5])
587
+ >>> np.vander(x)
588
+ array([[ 1, 1, 1, 1],
589
+ [ 8, 4, 2, 1],
590
+ [ 27, 9, 3, 1],
591
+ [125, 25, 5, 1]])
592
+ >>> np.vander(x, increasing=True)
593
+ array([[ 1, 1, 1, 1],
594
+ [ 1, 2, 4, 8],
595
+ [ 1, 3, 9, 27],
596
+ [ 1, 5, 25, 125]])
597
+
598
+ The determinant of a square Vandermonde matrix is the product
599
+ of the differences between the values of the input vector:
600
+
601
+ >>> np.linalg.det(np.vander(x))
602
+ 48.000000000000043 # may vary
603
+ >>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
604
+ 48
605
+
606
+ """
607
+ x = asarray(x)
608
+ if x.ndim != 1:
609
+ raise ValueError("x must be a one-dimensional array or sequence.")
610
+ if N is None:
611
+ N = len(x)
612
+
613
+ v = empty((len(x), N), dtype=promote_types(x.dtype, int))
614
+ tmp = v[:, ::-1] if not increasing else v
615
+
616
+ if N > 0:
617
+ tmp[:, 0] = 1
618
+ if N > 1:
619
+ tmp[:, 1:] = x[:, None]
620
+ multiply.accumulate(tmp[:, 1:], out=tmp[:, 1:], axis=1)
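+ # Every column of tmp[:, 1:] starts as x; the running product along
+ # axis 1 turns column j of tmp into x**j. When increasing is False,
+ # tmp views v with its columns reversed, giving decreasing powers.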
621
+
622
+ return v
623
+
624
+
625
+ def _histogram2d_dispatcher(x, y, bins=None, range=None, density=None,
626
+ weights=None):
627
+ yield x
628
+ yield y
629
+
630
+ # This terrible logic is adapted from the checks in histogram2d
631
+ try:
632
+ N = len(bins)
633
+ except TypeError:
634
+ N = 1
635
+ if N == 2:
636
+ yield from bins # bins=[x, y]
637
+ else:
638
+ yield bins
639
+
640
+ yield weights
641
+
642
+
643
+ @array_function_dispatch(_histogram2d_dispatcher)
644
+ def histogram2d(x, y, bins=10, range=None, density=None, weights=None):
645
+ """
646
+ Compute the bi-dimensional histogram of two data samples.
647
+
648
+ Parameters
649
+ ----------
650
+ x : array_like, shape (N,)
651
+ An array containing the x coordinates of the points to be
652
+ histogrammed.
653
+ y : array_like, shape (N,)
654
+ An array containing the y coordinates of the points to be
655
+ histogrammed.
656
+ bins : int or array_like or [int, int] or [array, array], optional
657
+ The bin specification:
658
+
659
+ * If int, the number of bins for the two dimensions (nx=ny=bins).
660
+ * If array_like, the bin edges for the two dimensions
661
+ (x_edges=y_edges=bins).
662
+ * If [int, int], the number of bins in each dimension
663
+ (nx, ny = bins).
664
+ * If [array, array], the bin edges in each dimension
665
+ (x_edges, y_edges = bins).
666
+ * A combination [int, array] or [array, int], where int
667
+ is the number of bins and array is the bin edges.
668
+
669
+ range : array_like, shape(2,2), optional
670
+ The leftmost and rightmost edges of the bins along each dimension
671
+ (if not specified explicitly in the `bins` parameters):
672
+ ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range
673
+ will be considered outliers and not tallied in the histogram.
674
+ density : bool, optional
675
+ If False, the default, returns the number of samples in each bin.
676
+ If True, returns the probability *density* function at the bin,
677
+ ``bin_count / sample_count / bin_area``.
678
+ weights : array_like, shape(N,), optional
679
+ An array of values ``w_i`` weighing each sample ``(x_i, y_i)``.
680
+ Weights are normalized to 1 if `density` is True. If `density` is
681
+ False, the values of the returned histogram are equal to the sum of
682
+ the weights belonging to the samples falling into each bin.
683
+
684
+ Returns
685
+ -------
686
+ H : ndarray, shape(nx, ny)
687
+ The bi-dimensional histogram of samples `x` and `y`. Values in `x`
688
+ are histogrammed along the first dimension and values in `y` are
689
+ histogrammed along the second dimension.
690
+ xedges : ndarray, shape(nx+1,)
691
+ The bin edges along the first dimension.
692
+ yedges : ndarray, shape(ny+1,)
693
+ The bin edges along the second dimension.
694
+
695
+ See Also
696
+ --------
697
+ histogram : 1D histogram
698
+ histogramdd : Multidimensional histogram
699
+
700
+ Notes
701
+ -----
702
+ When `density` is True, then the returned histogram is the sample
703
+ density, defined such that the sum over bins of the product
704
+ ``bin_value * bin_area`` is 1.
705
+
706
+ Please note that the histogram does not follow the Cartesian convention
707
+ where `x` values are on the abscissa and `y` values on the ordinate
708
+ axis. Rather, `x` is histogrammed along the first dimension of the
709
+ array (vertical), and `y` along the second dimension of the array
710
+ (horizontal). This ensures compatibility with `histogramdd`.
711
+
712
+ Examples
713
+ --------
714
+ >>> from matplotlib.image import NonUniformImage
715
+ >>> import matplotlib.pyplot as plt
716
+
717
+ Construct a 2-D histogram with variable bin width. First define the bin
718
+ edges:
719
+
720
+ >>> xedges = [0, 1, 3, 5]
721
+ >>> yedges = [0, 2, 3, 4, 6]
722
+
723
+ Next we create a histogram H with random bin content:
724
+
725
+ >>> x = np.random.normal(2, 1, 100)
726
+ >>> y = np.random.normal(1, 1, 100)
727
+ >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))
728
+ >>> # Histogram does not follow Cartesian convention (see Notes),
729
+ >>> # therefore transpose H for visualization purposes.
730
+ >>> H = H.T
731
+
732
+ :func:`imshow <matplotlib.pyplot.imshow>` can only display square bins:
733
+
734
+ >>> fig = plt.figure(figsize=(7, 3))
735
+ >>> ax = fig.add_subplot(131, title='imshow: square bins')
736
+ >>> plt.imshow(H, interpolation='nearest', origin='lower',
737
+ ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
738
+ <matplotlib.image.AxesImage object at 0x...>
739
+
740
+ :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
741
+
742
+ >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges',
743
+ ... aspect='equal')
744
+ >>> X, Y = np.meshgrid(xedges, yedges)
745
+ >>> ax.pcolormesh(X, Y, H)
746
+ <matplotlib.collections.QuadMesh object at 0x...>
747
+
748
+ :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
749
+ display actual bin edges with interpolation:
750
+
751
+ >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
752
+ ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
753
+ >>> im = NonUniformImage(ax, interpolation='bilinear')
754
+ >>> xcenters = (xedges[:-1] + xedges[1:]) / 2
755
+ >>> ycenters = (yedges[:-1] + yedges[1:]) / 2
756
+ >>> im.set_data(xcenters, ycenters, H)
757
+ >>> ax.add_image(im)
758
+ >>> plt.show()
759
+
760
+ It is also possible to construct a 2-D histogram without specifying bin
761
+ edges:
762
+
763
+ >>> # Generate non-symmetric test data
764
+ >>> n = 10000
765
+ >>> x = np.linspace(1, 100, n)
766
+ >>> y = 2*np.log(x) + np.random.rand(n) - 0.5
767
+ >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges
768
+ >>> H, yedges, xedges = np.histogram2d(y, x, bins=20)
769
+
770
+ Now we can plot the histogram using
771
+ :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`, and a
772
+ :func:`hexbin <matplotlib.pyplot.hexbin>` for comparison.
773
+
774
+ >>> # Plot histogram using pcolormesh
775
+ >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True)
776
+ >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow')
777
+ >>> ax1.plot(x, 2*np.log(x), 'k-')
778
+ >>> ax1.set_xlim(x.min(), x.max())
779
+ >>> ax1.set_ylim(y.min(), y.max())
780
+ >>> ax1.set_xlabel('x')
781
+ >>> ax1.set_ylabel('y')
782
+ >>> ax1.set_title('histogram2d')
783
+ >>> ax1.grid()
784
+
785
+ >>> # Create hexbin plot for comparison
786
+ >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow')
787
+ >>> ax2.plot(x, 2*np.log(x), 'k-')
788
+ >>> ax2.set_title('hexbin')
789
+ >>> ax2.set_xlim(x.min(), x.max())
790
+ >>> ax2.set_xlabel('x')
791
+ >>> ax2.grid()
792
+
793
+ >>> plt.show()
794
+ """
795
+ from numpy import histogramdd
796
+
797
+ if len(x) != len(y):
798
+ raise ValueError('x and y must have the same length.')
799
+
800
+ try:
801
+ N = len(bins)
802
+ except TypeError:
803
+ N = 1
804
+
805
+ if N != 1 and N != 2:
806
+ xedges = yedges = asarray(bins)
807
+ bins = [xedges, yedges]
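+ # A single array of edges is shared by both dimensions, putting
+ # bins into the [array, array] form that histogramdd expects.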
808
+ hist, edges = histogramdd([x, y], bins, range, density, weights)
809
+ return hist, edges[0], edges[1]
810
+
811
+
812
+ @set_module('numpy')
813
+ def mask_indices(n, mask_func, k=0):
814
+ """
815
+ Return the indices to access (n, n) arrays, given a masking function.
816
+
817
+ Assume `mask_func` is a function that, for a square array a of size
818
+ ``(n, n)`` with a possible offset argument `k`, when called as
819
+ ``mask_func(a, k)`` returns a new array with zeros in certain locations
820
+ (functions like `triu` or `tril` do precisely this). Then this function
821
+ returns the indices where the non-zero values would be located.
822
+
823
+ Parameters
824
+ ----------
825
+ n : int
826
+ The returned indices will be valid to access arrays of shape (n, n).
827
+ mask_func : callable
828
+ A function whose call signature is similar to that of `triu`, `tril`.
829
+ That is, ``mask_func(x, k)`` returns a boolean array, shaped like `x`.
830
+ `k` is an optional argument to the function.
831
+ k : scalar
832
+ An optional argument which is passed through to `mask_func`. Functions
833
+ like `triu`, `tril` take a second argument that is interpreted as an
834
+ offset.
835
+
836
+ Returns
837
+ -------
838
+ indices : tuple of arrays.
839
+ The `n` arrays of indices corresponding to the locations where
840
+ ``mask_func(np.ones((n, n)), k)`` is True.
841
+
842
+ See Also
843
+ --------
844
+ triu, tril, triu_indices, tril_indices
845
+
846
+ Notes
847
+ -----
848
+ .. versionadded:: 1.4.0
849
+
850
+ Examples
851
+ --------
852
+ These are the indices that would allow you to access the upper triangular
853
+ part of any 3x3 array:
854
+
855
+ >>> iu = np.mask_indices(3, np.triu)
856
+
857
+ For example, if `a` is a 3x3 array:
858
+
859
+ >>> a = np.arange(9).reshape(3, 3)
860
+ >>> a
861
+ array([[0, 1, 2],
862
+ [3, 4, 5],
863
+ [6, 7, 8]])
864
+ >>> a[iu]
865
+ array([0, 1, 2, 4, 5, 8])
866
+
867
+ An offset can be passed also to the masking function. This gets us the
868
+ indices starting on the first diagonal right of the main one:
869
+
870
+ >>> iu1 = np.mask_indices(3, np.triu, 1)
871
+
872
+ with which we now extract only three elements:
873
+
874
+ >>> a[iu1]
875
+ array([1, 2, 5])
876
+
877
+ """
878
+ m = ones((n, n), int)
879
+ a = mask_func(m, k)
880
+ return nonzero(a != 0)
881
+
882
+
883
+ @set_module('numpy')
884
+ def tril_indices(n, k=0, m=None):
885
+ """
886
+ Return the indices for the lower-triangle of an (n, m) array.
887
+
888
+ Parameters
889
+ ----------
890
+ n : int
891
+ The row dimension of the arrays for which the returned
892
+ indices will be valid.
893
+ k : int, optional
894
+ Diagonal offset (see `tril` for details).
895
+ m : int, optional
896
+ .. versionadded:: 1.9.0
897
+
898
+ The column dimension of the arrays for which the returned
899
+ arrays will be valid.
900
+ By default `m` is taken equal to `n`.
901
+
902
+
903
+ Returns
904
+ -------
905
+ inds : tuple of arrays
906
+ The indices for the triangle. The returned tuple contains two arrays,
907
+ each with the indices along one dimension of the array.
908
+
909
+ See also
910
+ --------
911
+ triu_indices : similar function, for upper-triangular.
912
+ mask_indices : generic function accepting an arbitrary mask function.
913
+ tril, triu
914
+
915
+ Notes
916
+ -----
917
+ .. versionadded:: 1.4.0
918
+
919
+ Examples
920
+ --------
921
+ Compute two different sets of indices to access 4x4 arrays, one for the
922
+ lower triangular part starting at the main diagonal, and one starting two
923
+ diagonals further right:
924
+
925
+ >>> il1 = np.tril_indices(4)
926
+ >>> il2 = np.tril_indices(4, 2)
927
+
928
+ Here is how they can be used with a sample array:
929
+
930
+ >>> a = np.arange(16).reshape(4, 4)
931
+ >>> a
932
+ array([[ 0, 1, 2, 3],
933
+ [ 4, 5, 6, 7],
934
+ [ 8, 9, 10, 11],
935
+ [12, 13, 14, 15]])
936
+
937
+ Both for indexing:
938
+
939
+ >>> a[il1]
940
+ array([ 0, 4, 5, ..., 13, 14, 15])
941
+
942
+ And for assigning values:
943
+
944
+ >>> a[il1] = -1
945
+ >>> a
946
+ array([[-1, 1, 2, 3],
947
+ [-1, -1, 6, 7],
948
+ [-1, -1, -1, 11],
949
+ [-1, -1, -1, -1]])
950
+
951
+ These cover almost the whole array (two diagonals right of the main one):
952
+
953
+ >>> a[il2] = -10
954
+ >>> a
955
+ array([[-10, -10, -10, 3],
956
+ [-10, -10, -10, -10],
957
+ [-10, -10, -10, -10],
958
+ [-10, -10, -10, -10]])
959
+
960
+ """
961
+ tri_ = tri(n, m, k=k, dtype=bool)
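+ # indices(..., sparse=True) returns open row/column grids; broadcasting
+ # them to the mask's shape and indexing with the boolean mask yields
+ # the (row, col) coordinates of the True entries.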
962
+
963
+ return tuple(broadcast_to(inds, tri_.shape)[tri_]
964
+ for inds in indices(tri_.shape, sparse=True))
965
+
966
+
967
+ def _trilu_indices_form_dispatcher(arr, k=None):
968
+ return (arr,)
969
+
970
+
971
+ @array_function_dispatch(_trilu_indices_form_dispatcher)
972
+ def tril_indices_from(arr, k=0):
973
+ """
974
+ Return the indices for the lower-triangle of arr.
975
+
976
+ See `tril_indices` for full details.
977
+
978
+ Parameters
979
+ ----------
980
+ arr : array_like
981
+ The indices will be valid for square arrays whose dimensions are
982
+ the same as arr.
983
+ k : int, optional
984
+ Diagonal offset (see `tril` for details).
985
+
986
+ Examples
987
+ --------
988
+
989
+ Create a 4 by 4 array.
990
+
991
+ >>> a = np.arange(16).reshape(4, 4)
992
+ >>> a
993
+ array([[ 0, 1, 2, 3],
994
+ [ 4, 5, 6, 7],
995
+ [ 8, 9, 10, 11],
996
+ [12, 13, 14, 15]])
997
+
998
+ Pass the array to get the indices of the lower triangular elements.
999
+
1000
+ >>> trili = np.tril_indices_from(a)
1001
+ >>> trili
1002
+ (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
1003
+
1004
+ >>> a[trili]
1005
+ array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
1006
+
1007
+ This is syntactic sugar for tril_indices().
1008
+
1009
+ >>> np.tril_indices(a.shape[0])
1010
+ (array([0, 1, 1, 2, 2, 2, 3, 3, 3, 3]), array([0, 0, 1, 0, 1, 2, 0, 1, 2, 3]))
1011
+
1012
+ Use the `k` parameter to return the indices for the lower triangular array
1013
+ up to the k-th diagonal.
1014
+
1015
+ >>> trili1 = np.tril_indices_from(a, k=1)
1016
+ >>> a[trili1]
1017
+ array([ 0, 1, 4, 5, 6, 8, 9, 10, 11, 12, 13, 14, 15])
1018
+
1019
+ See Also
1020
+ --------
1021
+ tril_indices, tril, triu_indices_from
1022
+
1023
+ Notes
1024
+ -----
1025
+ .. versionadded:: 1.4.0
1026
+
1027
+ """
1028
+ if arr.ndim != 2:
1029
+ raise ValueError("input array must be 2-d")
1030
+ return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
1031
+
1032
+
1033
+ @set_module('numpy')
1034
+ def triu_indices(n, k=0, m=None):
1035
+ """
1036
+ Return the indices for the upper-triangle of an (n, m) array.
1037
+
1038
+ Parameters
1039
+ ----------
1040
+ n : int
1041
+ The size of the arrays for which the returned indices will
1042
+ be valid.
1043
+ k : int, optional
1044
+ Diagonal offset (see `triu` for details).
1045
+ m : int, optional
1046
+ .. versionadded:: 1.9.0
1047
+
1048
+ The column dimension of the arrays for which the returned
1049
+ arrays will be valid.
1050
+ By default `m` is taken equal to `n`.
1051
+
1052
+
1053
+ Returns
1054
+ -------
1055
+ inds : tuple, shape(2) of ndarrays, shape(`n`)
1056
+ The indices for the triangle. The returned tuple contains two arrays,
1057
+ each with the indices along one dimension of the array. Can be used
1058
+ to slice a ndarray of shape(`n`, `n`).
1059
+
1060
+ See also
1061
+ --------
1062
+ tril_indices : similar function, for lower-triangular.
1063
+ mask_indices : generic function accepting an arbitrary mask function.
1064
+ triu, tril
1065
+
1066
+ Notes
1067
+ -----
1068
+ .. versionadded:: 1.4.0
1069
+
1070
+ Examples
1071
+ --------
1072
+ Compute two different sets of indices to access 4x4 arrays, one for the
1073
+ upper triangular part starting at the main diagonal, and one starting two
1074
+ diagonals further right:
1075
+
1076
+ >>> iu1 = np.triu_indices(4)
1077
+ >>> iu2 = np.triu_indices(4, 2)
1078
+
1079
+ Here is how they can be used with a sample array:
1080
+
1081
+ >>> a = np.arange(16).reshape(4, 4)
1082
+ >>> a
1083
+ array([[ 0, 1, 2, 3],
1084
+ [ 4, 5, 6, 7],
1085
+ [ 8, 9, 10, 11],
1086
+ [12, 13, 14, 15]])
1087
+
1088
+ Both for indexing:
1089
+
1090
+ >>> a[iu1]
1091
+ array([ 0, 1, 2, ..., 10, 11, 15])
1092
+
1093
+ And for assigning values:
1094
+
1095
+ >>> a[iu1] = -1
1096
+ >>> a
1097
+ array([[-1, -1, -1, -1],
1098
+ [ 4, -1, -1, -1],
1099
+ [ 8, 9, -1, -1],
1100
+ [12, 13, 14, -1]])
1101
+
1102
+ These cover only a small part of the whole array (two diagonals right
1103
+ of the main one):
1104
+
1105
+ >>> a[iu2] = -10
1106
+ >>> a
1107
+ array([[ -1, -1, -10, -10],
1108
+ [ 4, -1, -1, -10],
1109
+ [ 8, 9, -1, -1],
1110
+ [ 12, 13, 14, -1]])
1111
+
1112
+ """
1113
+ tri_ = ~tri(n, m, k=k - 1, dtype=bool)
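+ # tri(..., k=k-1) is True for j <= i + k - 1, so its complement is
+ # True exactly at and above diagonal k.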
1114
+
1115
+ return tuple(broadcast_to(inds, tri_.shape)[tri_]
1116
+ for inds in indices(tri_.shape, sparse=True))
1117
+
1118
+
1119
+ @array_function_dispatch(_trilu_indices_form_dispatcher)
1120
+ def triu_indices_from(arr, k=0):
1121
+ """
1122
+ Return the indices for the upper-triangle of arr.
1123
+
1124
+ See `triu_indices` for full details.
1125
+
1126
+ Parameters
1127
+ ----------
1128
+ arr : ndarray, shape(N, N)
1129
+ The indices will be valid for square arrays.
1130
+ k : int, optional
1131
+ Diagonal offset (see `triu` for details).
1132
+
1133
+ Returns
1134
+ -------
1135
+ triu_indices_from : tuple, shape(2) of ndarray, shape(N)
1136
+ Indices for the upper-triangle of `arr`.
1137
+
1138
+ Examples
1139
+ --------
1140
+
1141
+ Create a 4 by 4 array.
1142
+
1143
+ >>> a = np.arange(16).reshape(4, 4)
1144
+ >>> a
1145
+ array([[ 0, 1, 2, 3],
1146
+ [ 4, 5, 6, 7],
1147
+ [ 8, 9, 10, 11],
1148
+ [12, 13, 14, 15]])
1149
+
1150
+ Pass the array to get the indices of the upper triangular elements.
1151
+
1152
+ >>> triui = np.triu_indices_from(a)
1153
+ >>> triui
1154
+ (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
1155
+
1156
+ >>> a[triui]
1157
+ array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
1158
+
1159
+ This is syntactic sugar for triu_indices().
1160
+
1161
+ >>> np.triu_indices(a.shape[0])
1162
+ (array([0, 0, 0, 0, 1, 1, 1, 2, 2, 3]), array([0, 1, 2, 3, 1, 2, 3, 2, 3, 3]))
1163
+
1164
+ Use the `k` parameter to return the indices for the upper triangular array
1165
+ from the k-th diagonal.
1166
+
1167
+ >>> triuim1 = np.triu_indices_from(a, k=1)
1168
+ >>> a[triuim1]
1169
+ array([ 1, 2, 3, 6, 7, 11])
1170
+
1171
+
1172
+ See Also
1173
+ --------
1174
+ triu_indices, triu, tril_indices_from
1175
+
1176
+ Notes
1177
+ -----
1178
+ .. versionadded:: 1.4.0
1179
+
1180
+ """
1181
+ if arr.ndim != 2:
1182
+ raise ValueError("input array must be 2-d")
1183
+ return triu_indices(arr.shape[-2], k=k, m=arr.shape[-1])
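A minimal standalone sketch (not part of the diffed file; it assumes only the public NumPy API defined above) showing that the mask-based and index-based triangle helpers select the same elements:

    import numpy as np

    a = np.arange(16).reshape(4, 4)

    # Mask-based: zero the strict upper triangle.
    lower = np.tril(a)

    # Index-based: the surviving entries are exactly the ones addressed
    # by the lower-triangle coordinate arrays.
    rows, cols = np.tril_indices_from(a)
    assert np.array_equal(lower[rows, cols], a[rows, cols])

    # eye/diag round-trip: the k-th diagonal of eye(N, M, k) is all ones.
    assert np.array_equal(np.diag(np.eye(4, 5, k=1), k=1), np.ones(4))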
env-llmeval/lib/python3.10/site-packages/numpy/lib/twodim_base.pyi ADDED
@@ -0,0 +1,239 @@
1
+ from collections.abc import Callable, Sequence
2
+ from typing import (
3
+ Any,
4
+ overload,
5
+ TypeVar,
6
+ Union,
7
+ )
8
+
9
+ from numpy import (
10
+ generic,
11
+ number,
12
+ bool_,
13
+ timedelta64,
14
+ datetime64,
15
+ int_,
16
+ intp,
17
+ float64,
18
+ signedinteger,
19
+ floating,
20
+ complexfloating,
21
+ object_,
22
+ _OrderCF,
23
+ )
24
+
25
+ from numpy._typing import (
26
+ DTypeLike,
27
+ _DTypeLike,
28
+ ArrayLike,
29
+ _ArrayLike,
30
+ NDArray,
31
+ _SupportsArrayFunc,
32
+ _ArrayLikeInt_co,
33
+ _ArrayLikeFloat_co,
34
+ _ArrayLikeComplex_co,
35
+ _ArrayLikeObject_co,
36
+ )
37
+
38
+ _T = TypeVar("_T")
39
+ _SCT = TypeVar("_SCT", bound=generic)
40
+
41
+ # The returned arrays dtype must be compatible with `np.equal`
42
+ _MaskFunc = Callable[
43
+ [NDArray[int_], _T],
44
+ NDArray[Union[number[Any], bool_, timedelta64, datetime64, object_]],
45
+ ]
46
+
47
+ __all__: list[str]
48
+
49
+ @overload
50
+ def fliplr(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
51
+ @overload
52
+ def fliplr(m: ArrayLike) -> NDArray[Any]: ...
53
+
54
+ @overload
55
+ def flipud(m: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
56
+ @overload
57
+ def flipud(m: ArrayLike) -> NDArray[Any]: ...
58
+
59
+ @overload
60
+ def eye(
61
+ N: int,
62
+ M: None | int = ...,
63
+ k: int = ...,
64
+ dtype: None = ...,
65
+ order: _OrderCF = ...,
66
+ *,
67
+ like: None | _SupportsArrayFunc = ...,
68
+ ) -> NDArray[float64]: ...
69
+ @overload
70
+ def eye(
71
+ N: int,
72
+ M: None | int = ...,
73
+ k: int = ...,
74
+ dtype: _DTypeLike[_SCT] = ...,
75
+ order: _OrderCF = ...,
76
+ *,
77
+ like: None | _SupportsArrayFunc = ...,
78
+ ) -> NDArray[_SCT]: ...
79
+ @overload
80
+ def eye(
81
+ N: int,
82
+ M: None | int = ...,
83
+ k: int = ...,
84
+ dtype: DTypeLike = ...,
85
+ order: _OrderCF = ...,
86
+ *,
87
+ like: None | _SupportsArrayFunc = ...,
88
+ ) -> NDArray[Any]: ...
89
+
90
+ @overload
91
+ def diag(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
92
+ @overload
93
+ def diag(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
94
+
95
+ @overload
96
+ def diagflat(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
97
+ @overload
98
+ def diagflat(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
99
+
100
+ @overload
101
+ def tri(
102
+ N: int,
103
+ M: None | int = ...,
104
+ k: int = ...,
105
+ dtype: None = ...,
106
+ *,
107
+ like: None | _SupportsArrayFunc = ...
108
+ ) -> NDArray[float64]: ...
109
+ @overload
110
+ def tri(
111
+ N: int,
112
+ M: None | int = ...,
113
+ k: int = ...,
114
+ dtype: _DTypeLike[_SCT] = ...,
115
+ *,
116
+ like: None | _SupportsArrayFunc = ...
117
+ ) -> NDArray[_SCT]: ...
118
+ @overload
119
+ def tri(
120
+ N: int,
121
+ M: None | int = ...,
122
+ k: int = ...,
123
+ dtype: DTypeLike = ...,
124
+ *,
125
+ like: None | _SupportsArrayFunc = ...
126
+ ) -> NDArray[Any]: ...
127
+
128
+ @overload
129
+ def tril(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
130
+ @overload
131
+ def tril(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
132
+
133
+ @overload
134
+ def triu(v: _ArrayLike[_SCT], k: int = ...) -> NDArray[_SCT]: ...
135
+ @overload
136
+ def triu(v: ArrayLike, k: int = ...) -> NDArray[Any]: ...
137
+
138
+ @overload
139
+ def vander( # type: ignore[misc]
140
+ x: _ArrayLikeInt_co,
141
+ N: None | int = ...,
142
+ increasing: bool = ...,
143
+ ) -> NDArray[signedinteger[Any]]: ...
144
+ @overload
145
+ def vander( # type: ignore[misc]
146
+ x: _ArrayLikeFloat_co,
147
+ N: None | int = ...,
148
+ increasing: bool = ...,
149
+ ) -> NDArray[floating[Any]]: ...
150
+ @overload
151
+ def vander(
152
+ x: _ArrayLikeComplex_co,
153
+ N: None | int = ...,
154
+ increasing: bool = ...,
155
+ ) -> NDArray[complexfloating[Any, Any]]: ...
156
+ @overload
157
+ def vander(
158
+ x: _ArrayLikeObject_co,
159
+ N: None | int = ...,
160
+ increasing: bool = ...,
161
+ ) -> NDArray[object_]: ...
162
+
163
+ @overload
164
+ def histogram2d( # type: ignore[misc]
165
+ x: _ArrayLikeFloat_co,
166
+ y: _ArrayLikeFloat_co,
167
+ bins: int | Sequence[int] = ...,
168
+ range: None | _ArrayLikeFloat_co = ...,
169
+ density: None | bool = ...,
170
+ weights: None | _ArrayLikeFloat_co = ...,
171
+ ) -> tuple[
172
+ NDArray[float64],
173
+ NDArray[floating[Any]],
174
+ NDArray[floating[Any]],
175
+ ]: ...
176
+ @overload
177
+ def histogram2d(
178
+ x: _ArrayLikeComplex_co,
179
+ y: _ArrayLikeComplex_co,
180
+ bins: int | Sequence[int] = ...,
181
+ range: None | _ArrayLikeFloat_co = ...,
182
+ density: None | bool = ...,
183
+ weights: None | _ArrayLikeFloat_co = ...,
184
+ ) -> tuple[
185
+ NDArray[float64],
186
+ NDArray[complexfloating[Any, Any]],
187
+ NDArray[complexfloating[Any, Any]],
188
+ ]: ...
189
+ @overload # TODO: Sort out `bins`
190
+ def histogram2d(
191
+ x: _ArrayLikeComplex_co,
192
+ y: _ArrayLikeComplex_co,
193
+ bins: Sequence[_ArrayLikeInt_co],
194
+ range: None | _ArrayLikeFloat_co = ...,
195
+ density: None | bool = ...,
196
+ weights: None | _ArrayLikeFloat_co = ...,
197
+ ) -> tuple[
198
+ NDArray[float64],
199
+ NDArray[Any],
200
+ NDArray[Any],
201
+ ]: ...
202
+
203
+ # NOTE: we're assuming/demanding here the `mask_func` returns
204
+ # an ndarray of shape `(n, n)`; otherwise there is the possibility
205
+ # of the output tuple having more or less than 2 elements
206
+ @overload
207
+ def mask_indices(
208
+ n: int,
209
+ mask_func: _MaskFunc[int],
210
+ k: int = ...,
211
+ ) -> tuple[NDArray[intp], NDArray[intp]]: ...
212
+ @overload
213
+ def mask_indices(
214
+ n: int,
215
+ mask_func: _MaskFunc[_T],
216
+ k: _T,
217
+ ) -> tuple[NDArray[intp], NDArray[intp]]: ...
218
+
219
+ def tril_indices(
220
+ n: int,
221
+ k: int = ...,
222
+ m: None | int = ...,
223
+ ) -> tuple[NDArray[int_], NDArray[int_]]: ...
224
+
225
+ def tril_indices_from(
226
+ arr: NDArray[Any],
227
+ k: int = ...,
228
+ ) -> tuple[NDArray[int_], NDArray[int_]]: ...
229
+
230
+ def triu_indices(
231
+ n: int,
232
+ k: int = ...,
233
+ m: None | int = ...,
234
+ ) -> tuple[NDArray[int_], NDArray[int_]]: ...
235
+
236
+ def triu_indices_from(
237
+ arr: NDArray[Any],
238
+ k: int = ...,
239
+ ) -> tuple[NDArray[int_], NDArray[int_]]: ...
env-llmeval/lib/python3.10/site-packages/numpy/lib/type_check.py ADDED
@@ -0,0 +1,735 @@
1
+ """Automatically adapted for numpy Sep 19, 2005 by convertcode.py
2
+
3
+ """
4
+ import functools
5
+
6
+ __all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
7
+ 'isreal', 'nan_to_num', 'real', 'real_if_close',
8
+ 'typename', 'asfarray', 'mintypecode',
9
+ 'common_type']
10
+
11
+ from .._utils import set_module
12
+ import numpy.core.numeric as _nx
13
+ from numpy.core.numeric import asarray, asanyarray, isnan, zeros
14
+ from numpy.core import overrides, getlimits
15
+ from .ufunclike import isneginf, isposinf
16
+
17
+
18
+ array_function_dispatch = functools.partial(
19
+ overrides.array_function_dispatch, module='numpy')
20
+
21
+
22
+ _typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
23
+
24
+
25
+ @set_module('numpy')
26
+ def mintypecode(typechars, typeset='GDFgdf', default='d'):
27
+ """
28
+ Return the character for the minimum-size type to which given types can
29
+ be safely cast.
30
+
31
+ The returned type character must represent the smallest size dtype such
32
+ that an array of the returned type can handle the data from an array of
33
+ all types in `typechars` (or if `typechars` is an array, then its
34
+ dtype.char).
35
+
36
+ Parameters
37
+ ----------
38
+ typechars : list of str or array_like
39
+ If a list of strings, each string should represent a dtype.
40
+ If array_like, the character representation of the array dtype is used.
41
+ typeset : str or list of str, optional
42
+ The set of characters that the returned character is chosen from.
43
+ The default set is 'GDFgdf'.
44
+ default : str, optional
45
+ The default character, this is returned if none of the characters in
46
+ `typechars` matches a character in `typeset`.
47
+
48
+ Returns
49
+ -------
50
+ typechar : str
51
+ The character representing the minimum-size type that was found.
52
+
53
+ See Also
54
+ --------
55
+ dtype, sctype2char, maximum_sctype
56
+
57
+ Examples
58
+ --------
59
+ >>> np.mintypecode(['d', 'f', 'S'])
60
+ 'd'
61
+ >>> x = np.array([1.1, 2-3.j])
62
+ >>> np.mintypecode(x)
63
+ 'D'
64
+
65
+ >>> np.mintypecode('abceh', default='G')
66
+ 'G'
67
+
68
+ """
69
+ typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char
70
+ for t in typechars)
71
+ intersection = set(t for t in typecodes if t in typeset)
72
+ if not intersection:
73
+ return default
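+ # Special case: single-precision complex ('F') cannot hold a double
+ # ('d') without losing precision, so that pairing promotes to 'D'.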
74
+ if 'F' in intersection and 'd' in intersection:
75
+ return 'D'
76
+ return min(intersection, key=_typecodes_by_elsize.index)
77
+
78
+
79
+ def _asfarray_dispatcher(a, dtype=None):
80
+ return (a,)
81
+
82
+
83
+ @array_function_dispatch(_asfarray_dispatcher)
84
+ def asfarray(a, dtype=_nx.float_):
85
+ """
86
+ Return an array converted to a float type.
87
+
88
+ Parameters
89
+ ----------
90
+ a : array_like
91
+ The input array.
92
+ dtype : str or dtype object, optional
93
+ Float type code to coerce input array `a`. If `dtype` is one of the
94
+ 'int' dtypes, it is replaced with float64.
95
+
96
+ Returns
97
+ -------
98
+ out : ndarray
99
+ The input `a` as a float ndarray.
100
+
101
+ Examples
102
+ --------
103
+ >>> np.asfarray([2, 3])
104
+ array([2., 3.])
105
+ >>> np.asfarray([2, 3], dtype='float')
106
+ array([2., 3.])
107
+ >>> np.asfarray([2, 3], dtype='int8')
108
+ array([2., 3.])
109
+
110
+ """
111
+ if not _nx.issubdtype(dtype, _nx.inexact):
112
+ dtype = _nx.float_
113
+ return asarray(a, dtype=dtype)
114
+
115
+
116
+ def _real_dispatcher(val):
117
+ return (val,)
118
+
119
+
120
+ @array_function_dispatch(_real_dispatcher)
121
+ def real(val):
122
+ """
123
+ Return the real part of the complex argument.
124
+
125
+ Parameters
126
+ ----------
127
+ val : array_like
128
+ Input array.
129
+
130
+ Returns
131
+ -------
132
+ out : ndarray or scalar
133
+ The real component of the complex argument. If `val` is real, the type
134
+ of `val` is used for the output. If `val` has complex elements, the
135
+ returned type is float.
136
+
137
+ See Also
138
+ --------
139
+ real_if_close, imag, angle
140
+
141
+ Examples
142
+ --------
143
+ >>> a = np.array([1+2j, 3+4j, 5+6j])
144
+ >>> a.real
145
+ array([1., 3., 5.])
146
+ >>> a.real = 9
147
+ >>> a
148
+ array([9.+2.j, 9.+4.j, 9.+6.j])
149
+ >>> a.real = np.array([9, 8, 7])
150
+ >>> a
151
+ array([9.+2.j, 8.+4.j, 7.+6.j])
152
+ >>> np.real(1 + 1j)
153
+ 1.0
154
+
155
+ """
156
+ try:
157
+ return val.real
158
+ except AttributeError:
159
+ return asanyarray(val).real
160
+
161
+
162
+ def _imag_dispatcher(val):
163
+ return (val,)
164
+
165
+
166
+ @array_function_dispatch(_imag_dispatcher)
167
+ def imag(val):
168
+ """
169
+ Return the imaginary part of the complex argument.
170
+
171
+ Parameters
172
+ ----------
173
+ val : array_like
174
+ Input array.
175
+
176
+ Returns
177
+ -------
178
+ out : ndarray or scalar
179
+ The imaginary component of the complex argument. If `val` is real,
180
+ the type of `val` is used for the output. If `val` has complex
181
+ elements, the returned type is float.
182
+
183
+ See Also
184
+ --------
185
+ real, angle, real_if_close
186
+
187
+ Examples
188
+ --------
189
+ >>> a = np.array([1+2j, 3+4j, 5+6j])
190
+ >>> a.imag
191
+ array([2., 4., 6.])
192
+ >>> a.imag = np.array([8, 10, 12])
193
+ >>> a
194
+ array([1. +8.j, 3.+10.j, 5.+12.j])
195
+ >>> np.imag(1 + 1j)
196
+ 1.0
197
+
198
+ """
199
+ try:
200
+ return val.imag
201
+ except AttributeError:
202
+ return asanyarray(val).imag
203
+
204
+
205
+ def _is_type_dispatcher(x):
206
+ return (x,)
207
+
208
+
209
+ @array_function_dispatch(_is_type_dispatcher)
210
+ def iscomplex(x):
211
+ """
212
+ Return a bool array that is True where the input element is complex.
213
+
214
+ What is tested is whether the input has a non-zero imaginary part, not if
215
+ the input type is complex.
216
+
217
+ Parameters
218
+ ----------
219
+ x : array_like
220
+ Input array.
221
+
222
+ Returns
223
+ -------
224
+ out : ndarray of bools
225
+ Output array.
226
+
227
+ See Also
228
+ --------
229
+ isreal
230
+ iscomplexobj : Return True if x is a complex type or an array of complex
231
+ numbers.
232
+
233
+ Examples
234
+ --------
235
+ >>> np.iscomplex([1+1j, 1+0j, 4.5, 3, 2, 2j])
236
+ array([ True, False, False, False, False, True])
237
+
238
+ """
239
+ ax = asanyarray(x)
240
+ if issubclass(ax.dtype.type, _nx.complexfloating):
241
+ return ax.imag != 0
242
+ res = zeros(ax.shape, bool)
243
+ return res[()] # convert to scalar if needed
244
+
245
+
246
+ @array_function_dispatch(_is_type_dispatcher)
247
+ def isreal(x):
248
+ """
249
+ Return a bool array that is True where the input element is real.
250
+
251
+ If element has complex type with zero complex part, the return value
252
+ for that element is True.
253
+
254
+ Parameters
255
+ ----------
256
+ x : array_like
257
+ Input array.
258
+
259
+ Returns
260
+ -------
261
+ out : ndarray, bool
262
+ Boolean array of same shape as `x`.
263
+
264
+ Notes
265
+ -----
266
+ `isreal` may behave unexpectedly for string or object arrays (see examples)
267
+
268
+ See Also
269
+ --------
270
+ iscomplex
271
+ isrealobj : Return True if x is not a complex type.
272
+
273
+ Examples
274
+ --------
275
+ >>> a = np.array([1+1j, 1+0j, 4.5, 3, 2, 2j], dtype=complex)
276
+ >>> np.isreal(a)
277
+ array([False, True, True, True, True, False])
278
+
279
+ The function does not work on string arrays.
280
+
281
+ >>> a = np.array([2j, "a"], dtype="U")
282
+ >>> np.isreal(a) # Warns about non-elementwise comparison
283
+ False
284
+
285
+ Returns True for all elements in input array of ``dtype=object`` even if
286
+ any of the elements is complex.
287
+
288
+ >>> a = np.array([1, "2", 3+4j], dtype=object)
289
+ >>> np.isreal(a)
290
+ array([ True, True, True])
291
+
292
+ isreal should not be used with object arrays
293
+
294
+ >>> a = np.array([1+2j, 2+1j], dtype=object)
295
+ >>> np.isreal(a)
296
+ array([ True, True])
297
+
298
+ """
299
+ return imag(x) == 0
300
+
301
+
302
+ @array_function_dispatch(_is_type_dispatcher)
303
+ def iscomplexobj(x):
304
+ """
305
+ Check for a complex type or an array of complex numbers.
306
+
307
+ The type of the input is checked, not the value. Even if the input
308
+ has an imaginary part equal to zero, `iscomplexobj` evaluates to True.
309
+
310
+ Parameters
311
+ ----------
312
+ x : any
313
+ The input can be of any type and shape.
314
+
315
+ Returns
316
+ -------
317
+ iscomplexobj : bool
318
+ The return value, True if `x` is of a complex type or has at least
319
+ one complex element.
320
+
321
+ See Also
322
+ --------
323
+ isrealobj, iscomplex
324
+
325
+ Examples
326
+ --------
327
+ >>> np.iscomplexobj(1)
328
+ False
329
+ >>> np.iscomplexobj(1+0j)
330
+ True
331
+ >>> np.iscomplexobj([3, 1+0j, True])
332
+ True
333
+
334
+ """
335
+ try:
336
+ dtype = x.dtype
337
+ type_ = dtype.type
338
+ except AttributeError:
339
+ type_ = asarray(x).dtype.type
340
+ return issubclass(type_, _nx.complexfloating)
341
+
342
+
343
+ @array_function_dispatch(_is_type_dispatcher)
344
+ def isrealobj(x):
345
+ """
346
+ Return True if x is not a complex type nor an array of complex numbers.
347
+
348
+ The type of the input is checked, not the value. So even if the input
349
+ has an imaginary part equal to zero, `isrealobj` evaluates to False
350
+ if the data type is complex.
351
+
352
+ Parameters
353
+ ----------
354
+ x : any
355
+ The input can be of any type and shape.
356
+
357
+ Returns
358
+ -------
359
+ y : bool
360
+ The return value, False if `x` is of a complex type.
361
+
362
+ See Also
363
+ --------
364
+ iscomplexobj, isreal
365
+
366
+ Notes
367
+ -----
368
+ The function is only meant for arrays with numerical values but it
369
+ accepts all other objects. Since it assumes array input, the return
370
+ value of other objects may be True.
371
+
372
+ >>> np.isrealobj('A string')
373
+ True
374
+ >>> np.isrealobj(False)
375
+ True
376
+ >>> np.isrealobj(None)
377
+ True
378
+
379
+ Examples
380
+ --------
381
+ >>> np.isrealobj(1)
382
+ True
383
+ >>> np.isrealobj(1+0j)
384
+ False
385
+ >>> np.isrealobj([3, 1+0j, True])
386
+ False
387
+
388
+ """
389
+ return not iscomplexobj(x)
390
+
391
+ #-----------------------------------------------------------------------------
392
+
393
+ def _getmaxmin(t):
395
+ f = getlimits.finfo(t)
396
+ return f.max, f.min
397
+
398
+
399
+ def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None):
400
+ return (x,)
401
+
402
+
403
+ @array_function_dispatch(_nan_to_num_dispatcher)
404
+ def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
405
+ """
406
+ Replace NaN with zero and infinity with large finite numbers (default
407
+ behaviour) or with the numbers defined by the user using the `nan`,
408
+ `posinf` and/or `neginf` keywords.
409
+
410
+ If `x` is inexact, NaN is replaced by zero or by the user defined value in
411
+ `nan` keyword, infinity is replaced by the largest finite floating point
412
+ values representable by ``x.dtype`` or by the user defined value in
413
+ `posinf` keyword and -infinity is replaced by the most negative finite
414
+ floating point values representable by ``x.dtype`` or by the user defined
415
+ value in `neginf` keyword.
416
+
417
+ For complex dtypes, the above is applied to each of the real and
418
+ imaginary components of `x` separately.
419
+
420
+ If `x` is not inexact, then no replacements are made.
421
+
422
+ Parameters
423
+ ----------
424
+ x : scalar or array_like
425
+ Input data.
426
+ copy : bool, optional
427
+ Whether to create a copy of `x` (True) or to replace values
428
+ in-place (False). The in-place operation only occurs if
429
+ casting to an array does not require a copy.
430
+ Default is True.
431
+
432
+ .. versionadded:: 1.13
433
+ nan : int, float, optional
434
+ Value to be used to fill NaN values. If no value is passed
435
+ then NaN values will be replaced with 0.0.
436
+
437
+ .. versionadded:: 1.17
438
+ posinf : int, float, optional
439
+ Value to be used to fill positive infinity values. If no value is
440
+ passed then positive infinity values will be replaced with a very
441
+ large number.
442
+
443
+ .. versionadded:: 1.17
444
+ neginf : int, float, optional
445
+ Value to be used to fill negative infinity values. If no value is
446
+ passed then negative infinity values will be replaced with a very
447
+ small (or negative) number.
448
+
449
+ .. versionadded:: 1.17
450
+
453
+ Returns
454
+ -------
455
+ out : ndarray
456
+ `x`, with the non-finite values replaced. If `copy` is False, this may
457
+ be `x` itself.
458
+
459
+ See Also
460
+ --------
461
+ isinf : Shows which elements are positive or negative infinity.
462
+ isneginf : Shows which elements are negative infinity.
463
+ isposinf : Shows which elements are positive infinity.
464
+ isnan : Shows which elements are Not a Number (NaN).
465
+ isfinite : Shows which elements are finite (not NaN, not infinity)
466
+
467
+ Notes
468
+ -----
469
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
470
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
471
+
472
+ Examples
473
+ --------
474
+ >>> np.nan_to_num(np.inf)
475
+ 1.7976931348623157e+308
476
+ >>> np.nan_to_num(-np.inf)
477
+ -1.7976931348623157e+308
478
+ >>> np.nan_to_num(np.nan)
479
+ 0.0
480
+ >>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
481
+ >>> np.nan_to_num(x)
482
+ array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary
483
+ -1.28000000e+002, 1.28000000e+002])
484
+ >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
485
+ array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03,
486
+ -1.2800000e+02, 1.2800000e+02])
487
+ >>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
490
+ >>> np.nan_to_num(y)
491
+ array([ 1.79769313e+308 +0.00000000e+000j, # may vary
492
+ 0.00000000e+000 +0.00000000e+000j,
493
+ 0.00000000e+000 +1.79769313e+308j])
494
+ >>> np.nan_to_num(y, nan=111111, posinf=222222)
495
+ array([222222.+111111.j, 111111. +0.j, 111111.+222222.j])
496
+ """
497
+ x = _nx.array(x, subok=True, copy=copy)
498
+ xtype = x.dtype.type
499
+
500
+ isscalar = (x.ndim == 0)
501
+
502
+ if not issubclass(xtype, _nx.inexact):
503
+ return x[()] if isscalar else x
504
+
505
+ iscomplex = issubclass(xtype, _nx.complexfloating)
506
+
507
+ dest = (x.real, x.imag) if iscomplex else (x,)
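+ # x.real and x.imag are views into x, so the copyto calls below edit
+ # x in place and handle real and complex inputs with one code path.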
508
+ maxf, minf = _getmaxmin(x.real.dtype)
509
+ if posinf is not None:
510
+ maxf = posinf
511
+ if neginf is not None:
512
+ minf = neginf
513
+ for d in dest:
514
+ idx_nan = isnan(d)
515
+ idx_posinf = isposinf(d)
516
+ idx_neginf = isneginf(d)
517
+ _nx.copyto(d, nan, where=idx_nan)
518
+ _nx.copyto(d, maxf, where=idx_posinf)
519
+ _nx.copyto(d, minf, where=idx_neginf)
520
+ return x[()] if isscalar else x
521
+
522
+ #-----------------------------------------------------------------------------
523
+
524
+ def _real_if_close_dispatcher(a, tol=None):
525
+ return (a,)
526
+
527
+
528
+ @array_function_dispatch(_real_if_close_dispatcher)
529
+ def real_if_close(a, tol=100):
530
+ """
531
+ If input is complex with all imaginary parts close to zero, return
532
+ real parts.
533
+
534
+ "Close to zero" is defined as `tol` * (machine epsilon of the type for
535
+ `a`).
536
+
537
+ Parameters
538
+ ----------
539
+ a : array_like
540
+ Input array.
541
+ tol : float
542
+ Tolerance in machine epsilons for the complex part of the elements
543
+ in the array. If the tolerance is <=1, then the absolute tolerance
544
+ is used.
545
+
546
+ Returns
547
+ -------
548
+ out : ndarray
549
+ If `a` is real, the type of `a` is used for the output. If `a`
550
+ has complex elements, the returned type is float.
551
+
552
+ See Also
553
+ --------
554
+ real, imag, angle
555
+
556
+ Notes
557
+ -----
558
+ Machine epsilon varies from machine to machine and between data types
559
+ but Python floats on most platforms have a machine epsilon equal to
560
+ 2.2204460492503131e-16. You can use 'np.finfo(float).eps' to print
561
+ out the machine epsilon for floats.
562
+
563
+ Examples
564
+ --------
565
+ >>> np.finfo(float).eps
566
+ 2.2204460492503131e-16 # may vary
567
+
568
+ >>> np.real_if_close([2.1 + 4e-14j, 5.2 + 3e-15j], tol=1000)
569
+ array([2.1, 5.2])
570
+ >>> np.real_if_close([2.1 + 4e-13j, 5.2 + 3e-15j], tol=1000)
571
+ array([2.1+4.e-13j, 5.2+3.e-15j])
572
+
573
+ """
574
+ a = asanyarray(a)
575
+ type_ = a.dtype.type
576
+ if not issubclass(type_, _nx.complexfloating):
577
+ return a
578
+ if tol > 1:
579
+ f = getlimits.finfo(type_)
580
+ tol = f.eps * tol
581
+ if _nx.all(_nx.absolute(a.imag) < tol):
582
+ a = a.real
583
+ return a
584
+
585
+
586
+ #-----------------------------------------------------------------------------
587
+
588
+ _namefromtype = {'S1': 'character',
589
+ '?': 'bool',
590
+ 'b': 'signed char',
591
+ 'B': 'unsigned char',
592
+ 'h': 'short',
593
+ 'H': 'unsigned short',
594
+ 'i': 'integer',
595
+ 'I': 'unsigned integer',
596
+ 'l': 'long integer',
597
+ 'L': 'unsigned long integer',
598
+ 'q': 'long long integer',
599
+ 'Q': 'unsigned long long integer',
600
+ 'f': 'single precision',
601
+ 'd': 'double precision',
602
+ 'g': 'long precision',
603
+ 'F': 'complex single precision',
604
+ 'D': 'complex double precision',
605
+ 'G': 'complex long double precision',
606
+ 'S': 'string',
607
+ 'U': 'unicode',
608
+ 'V': 'void',
609
+ 'O': 'object'
610
+ }
611
+
612
+ @set_module('numpy')
613
+ def typename(char):
614
+ """
615
+ Return a description for the given data type code.
616
+
617
+ Parameters
618
+ ----------
619
+ char : str
620
+ Data type code.
621
+
622
+ Returns
623
+ -------
624
+ out : str
625
+ Description of the input data type code.
626
+
627
+ See Also
628
+ --------
629
+ dtype, typecodes
630
+
631
+ Examples
632
+ --------
633
+ >>> typechars = ['S1', '?', 'B', 'D', 'G', 'F', 'I', 'H', 'L', 'O', 'Q',
634
+ ... 'S', 'U', 'V', 'b', 'd', 'g', 'f', 'i', 'h', 'l', 'q']
635
+ >>> for typechar in typechars:
636
+ ... print(typechar, ' : ', np.typename(typechar))
637
+ ...
638
+ S1 : character
639
+ ? : bool
640
+ B : unsigned char
641
+ D : complex double precision
642
+ G : complex long double precision
643
+ F : complex single precision
644
+ I : unsigned integer
645
+ H : unsigned short
646
+ L : unsigned long integer
647
+ O : object
648
+ Q : unsigned long long integer
649
+ S : string
650
+ U : unicode
651
+ V : void
652
+ b : signed char
653
+ d : double precision
654
+ g : long precision
655
+ f : single precision
656
+ i : integer
657
+ h : short
658
+ l : long integer
659
+ q : long long integer
660
+
661
+ """
662
+ return _namefromtype[char]
663
+
664
+ #-----------------------------------------------------------------------------
665
+
666
+ #determine the "minimum common type" for a group of arrays.
667
+ array_type = [[_nx.half, _nx.single, _nx.double, _nx.longdouble],
668
+ [None, _nx.csingle, _nx.cdouble, _nx.clongdouble]]
669
+ array_precision = {_nx.half: 0,
670
+ _nx.single: 1,
671
+ _nx.double: 2,
672
+ _nx.longdouble: 3,
673
+ _nx.csingle: 1,
674
+ _nx.cdouble: 2,
675
+ _nx.clongdouble: 3}
676
+
677
+
678
+ def _common_type_dispatcher(*arrays):
679
+ return arrays
680
+
681
+
682
+ @array_function_dispatch(_common_type_dispatcher)
683
+ def common_type(*arrays):
684
+ """
685
+ Return a scalar type which is common to the input arrays.
686
+
687
+ The return type will always be an inexact (i.e. floating point) scalar
688
+ type, even if all the arrays are integer arrays. If one of the inputs is
689
+ an integer array, the minimum precision type that is returned is a
690
+ 64-bit floating point dtype.
691
+
692
+ All input arrays except int64 and uint64 can be safely cast to the
693
+ returned dtype without loss of information.
694
+
695
+ Parameters
696
+ ----------
697
+ array1, array2, ... : ndarrays
698
+ Input arrays.
699
+
700
+ Returns
701
+ -------
702
+ out : data type code
703
+ Data type code.
704
+
705
+ See Also
706
+ --------
707
+ dtype, mintypecode
708
+
709
+ Examples
710
+ --------
711
+ >>> np.common_type(np.arange(2, dtype=np.float32))
712
+ <class 'numpy.float32'>
713
+ >>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
714
+ <class 'numpy.float64'>
715
+ >>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
716
+ <class 'numpy.complex128'>
717
+
718
+ """
719
+ is_complex = False
720
+ precision = 0
721
+ for a in arrays:
722
+ t = a.dtype.type
723
+ if iscomplexobj(a):
724
+ is_complex = True
725
+ if issubclass(t, _nx.integer):
726
+ p = 2 # array_precision[_nx.double]
727
+ else:
728
+ p = array_precision.get(t, None)
729
+ if p is None:
730
+ raise TypeError("can't get common type for non-numeric array")
731
+ precision = max(precision, p)
732
+ if is_complex:
733
+ return array_type[1][precision]
734
+ else:
735
+ return array_type[0][precision]
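A minimal standalone sketch (not part of the diffed file; it assumes only the public np.mintypecode and np.common_type API defined above) contrasting the two promotion helpers:

    import numpy as np

    # mintypecode works on dtype characters drawn from the typeset 'GDFgdf'.
    assert np.mintypecode(['f', 'd']) == 'd'
    assert np.mintypecode(['F', 'd']) == 'D'   # the special-cased pairing

    # common_type works on arrays and always returns an inexact scalar
    # type; integer inputs are promoted to 64-bit floating point.
    assert np.common_type(np.arange(3)) is np.float64
    assert np.common_type(np.arange(3), np.array([1j])) is np.complex128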
env-llmeval/lib/python3.10/site-packages/numpy/lib/ufunclike.pyi ADDED
@@ -0,0 +1,66 @@
1
+ from typing import Any, overload, TypeVar
2
+
3
+ from numpy import floating, bool_, object_, ndarray
4
+ from numpy._typing import (
5
+ NDArray,
6
+ _FloatLike_co,
7
+ _ArrayLikeFloat_co,
8
+ _ArrayLikeObject_co,
9
+ )
10
+
11
+ _ArrayType = TypeVar("_ArrayType", bound=ndarray[Any, Any])
12
+
13
+ __all__: list[str]
14
+
15
+ @overload
16
+ def fix( # type: ignore[misc]
17
+ x: _FloatLike_co,
18
+ out: None = ...,
19
+ ) -> floating[Any]: ...
20
+ @overload
21
+ def fix(
22
+ x: _ArrayLikeFloat_co,
23
+ out: None = ...,
24
+ ) -> NDArray[floating[Any]]: ...
25
+ @overload
26
+ def fix(
27
+ x: _ArrayLikeObject_co,
28
+ out: None = ...,
29
+ ) -> NDArray[object_]: ...
30
+ @overload
31
+ def fix(
32
+ x: _ArrayLikeFloat_co | _ArrayLikeObject_co,
33
+ out: _ArrayType,
34
+ ) -> _ArrayType: ...
35
+
36
+ @overload
37
+ def isposinf( # type: ignore[misc]
38
+ x: _FloatLike_co,
39
+ out: None = ...,
40
+ ) -> bool_: ...
41
+ @overload
42
+ def isposinf(
43
+ x: _ArrayLikeFloat_co,
44
+ out: None = ...,
45
+ ) -> NDArray[bool_]: ...
46
+ @overload
47
+ def isposinf(
48
+ x: _ArrayLikeFloat_co,
49
+ out: _ArrayType,
50
+ ) -> _ArrayType: ...
51
+
52
+ @overload
53
+ def isneginf( # type: ignore[misc]
54
+ x: _FloatLike_co,
55
+ out: None = ...,
56
+ ) -> bool_: ...
57
+ @overload
58
+ def isneginf(
59
+ x: _ArrayLikeFloat_co,
60
+ out: None = ...,
61
+ ) -> NDArray[bool_]: ...
62
+ @overload
63
+ def isneginf(
64
+ x: _ArrayLikeFloat_co,
65
+ out: _ArrayType,
66
+ ) -> _ArrayType: ...
env-llmeval/lib/python3.10/site-packages/numpy/lib/user_array.py ADDED
@@ -0,0 +1,286 @@
+ """
+ Standard container-class for easy multiple-inheritance.
+
+ Try to inherit from the ndarray instead of using this class as this is not
+ complete.
+
+ """
+ from numpy.core import (
+     array, asarray, absolute, add, subtract, multiply, divide,
+     remainder, power, left_shift, right_shift, bitwise_and, bitwise_or,
+     bitwise_xor, invert, less, less_equal, not_equal, equal, greater,
+     greater_equal, shape, reshape, arange, sin, sqrt, transpose
+ )
+
+
+ class container:
+     """
+     container(data, dtype=None, copy=True)
+
+     Standard container-class for easy multiple-inheritance.
+
+     Methods
+     -------
+     copy
+     tostring
+     byteswap
+     astype
+
+     """
+     def __init__(self, data, dtype=None, copy=True):
+         self.array = array(data, dtype, copy=copy)
+
+     def __repr__(self):
+         if self.ndim > 0:
+             return self.__class__.__name__ + repr(self.array)[len("array"):]
+         else:
+             return self.__class__.__name__ + "(" + repr(self.array) + ")"
+
+     def __array__(self, t=None):
+         if t:
+             return self.array.astype(t)
+         return self.array
+
+     # Array as sequence
+     def __len__(self):
+         return len(self.array)
+
+     def __getitem__(self, index):
+         return self._rc(self.array[index])
+
+     def __setitem__(self, index, value):
+         self.array[index] = asarray(value, self.dtype)
+
+     def __abs__(self):
+         return self._rc(absolute(self.array))
+
+     def __neg__(self):
+         return self._rc(-self.array)
+
+     def __add__(self, other):
+         return self._rc(self.array + asarray(other))
+
+     __radd__ = __add__
+
+     def __iadd__(self, other):
+         add(self.array, other, self.array)
+         return self
+
+     def __sub__(self, other):
+         return self._rc(self.array - asarray(other))
+
+     def __rsub__(self, other):
+         return self._rc(asarray(other) - self.array)
+
+     def __isub__(self, other):
+         subtract(self.array, other, self.array)
+         return self
+
+     def __mul__(self, other):
+         return self._rc(multiply(self.array, asarray(other)))
+
+     __rmul__ = __mul__
+
+     def __imul__(self, other):
+         multiply(self.array, other, self.array)
+         return self
+
+     def __div__(self, other):
+         return self._rc(divide(self.array, asarray(other)))
+
+     def __rdiv__(self, other):
+         return self._rc(divide(asarray(other), self.array))
+
+     def __idiv__(self, other):
+         divide(self.array, other, self.array)
+         return self
+
+     def __mod__(self, other):
+         return self._rc(remainder(self.array, other))
+
+     def __rmod__(self, other):
+         return self._rc(remainder(other, self.array))
+
+     def __imod__(self, other):
+         remainder(self.array, other, self.array)
+         return self
+
+     def __divmod__(self, other):
+         return (self._rc(divide(self.array, other)),
+                 self._rc(remainder(self.array, other)))
+
+     def __rdivmod__(self, other):
+         return (self._rc(divide(other, self.array)),
+                 self._rc(remainder(other, self.array)))
+
+     def __pow__(self, other):
+         return self._rc(power(self.array, asarray(other)))
+
+     def __rpow__(self, other):
+         return self._rc(power(asarray(other), self.array))
+
+     def __ipow__(self, other):
+         power(self.array, other, self.array)
+         return self
+
+     def __lshift__(self, other):
+         return self._rc(left_shift(self.array, other))
+
+     def __rshift__(self, other):
+         return self._rc(right_shift(self.array, other))
+
+     def __rlshift__(self, other):
+         return self._rc(left_shift(other, self.array))
+
+     def __rrshift__(self, other):
+         return self._rc(right_shift(other, self.array))
+
+     def __ilshift__(self, other):
+         left_shift(self.array, other, self.array)
+         return self
+
+     def __irshift__(self, other):
+         right_shift(self.array, other, self.array)
+         return self
+
+     def __and__(self, other):
+         return self._rc(bitwise_and(self.array, other))
+
+     def __rand__(self, other):
+         return self._rc(bitwise_and(other, self.array))
+
+     def __iand__(self, other):
+         bitwise_and(self.array, other, self.array)
+         return self
+
+     def __xor__(self, other):
+         return self._rc(bitwise_xor(self.array, other))
+
+     def __rxor__(self, other):
+         return self._rc(bitwise_xor(other, self.array))
+
+     def __ixor__(self, other):
+         bitwise_xor(self.array, other, self.array)
+         return self
+
+     def __or__(self, other):
+         return self._rc(bitwise_or(self.array, other))
+
+     def __ror__(self, other):
+         return self._rc(bitwise_or(other, self.array))
+
+     def __ior__(self, other):
+         bitwise_or(self.array, other, self.array)
+         return self
+
+     def __pos__(self):
+         return self._rc(self.array)
+
+     def __invert__(self):
+         return self._rc(invert(self.array))
+
+     def _scalarfunc(self, func):
+         if self.ndim == 0:
+             return func(self[0])
+         else:
+             raise TypeError(
+                 "only rank-0 arrays can be converted to Python scalars.")
+
+     def __complex__(self):
+         return self._scalarfunc(complex)
+
+     def __float__(self):
+         return self._scalarfunc(float)
+
+     def __int__(self):
+         return self._scalarfunc(int)
+
+     def __hex__(self):
+         return self._scalarfunc(hex)
+
+     def __oct__(self):
+         return self._scalarfunc(oct)
+
+     def __lt__(self, other):
+         return self._rc(less(self.array, other))
+
+     def __le__(self, other):
+         return self._rc(less_equal(self.array, other))
+
+     def __eq__(self, other):
+         return self._rc(equal(self.array, other))
+
+     def __ne__(self, other):
+         return self._rc(not_equal(self.array, other))
+
+     def __gt__(self, other):
+         return self._rc(greater(self.array, other))
+
+     def __ge__(self, other):
+         return self._rc(greater_equal(self.array, other))
+
+     def copy(self):
+         ""
+         return self._rc(self.array.copy())
+
+     def tostring(self):
+         ""
+         return self.array.tostring()
+
+     def tobytes(self):
+         ""
+         return self.array.tobytes()
+
+     def byteswap(self):
+         ""
+         return self._rc(self.array.byteswap())
+
+     def astype(self, typecode):
+         ""
+         return self._rc(self.array.astype(typecode))
+
+     def _rc(self, a):
+         if len(shape(a)) == 0:
+             return a
+         else:
+             return self.__class__(a)
+
+     def __array_wrap__(self, *args):
+         return self.__class__(args[0])
+
+     def __setattr__(self, attr, value):
+         if attr == 'array':
+             object.__setattr__(self, attr, value)
+             return
+         try:
+             self.array.__setattr__(attr, value)
+         except AttributeError:
+             object.__setattr__(self, attr, value)
+
+     # Only called after other approaches fail.
+     def __getattr__(self, attr):
+         if (attr == 'array'):
+             return object.__getattribute__(self, attr)
+         return self.array.__getattribute__(attr)
+
+
+ #############################################################
+ # Test of class container
+ #############################################################
+ if __name__ == '__main__':
+     temp = reshape(arange(10000), (100, 100))
+
+     ua = container(temp)
+     # new object created begin test
+     print(dir(ua))
+     print(shape(ua), ua.shape)  # I have changed Numeric.py
+
+     ua_small = ua[:3, :5]
+     print(ua_small)
+     # this did not change ua[0,0], which is not normal behavior
+     ua_small[0, 0] = 10
+     print(ua_small[0, 0], ua[0, 0])
+     print(sin(ua_small) / 3. * 6. + sqrt(ua_small ** 2))
+     print(less(ua_small, 103), type(less(ua_small, 103)))
+     print(type(ua_small * reshape(arange(15), shape(ua_small))))
+     print(reshape(ua_small, (5, 3)))
+     print(transpose(ua_small))
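
A minimal sketch of the intended multiple-inheritance use, relying only on the file above (the subclass name is illustrative): arithmetic results come back wrapped in the subclass, because `_rc` reconstructs `self.__class__` around every non-scalar result.

>>> import numpy as np
>>> from numpy.lib.user_array import container
>>> class Wrapped(container):     # hypothetical subclass for demonstration
...     pass
>>> v = Wrapped(np.array([20.0, 25.0]))
>>> type(v + 1.0) is Wrapped      # _rc() re-wraps the ndarray result
True
>>> (v + 1.0).array
array([21., 26.])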
env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/__init__.py ADDED
@@ -0,0 +1,11 @@
+ """Sub-package containing the matrix class and related functions.
+
+ """
+ from . import defmatrix
+ from .defmatrix import *
+
+ __all__ = defmatrix.__all__
+
+ from numpy._pytesttester import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
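
The `PytestTester` hook attached here gives the subpackage a self-test entry point; assuming `pytest` is installed alongside this NumPy build, it can be driven directly:

>>> import numpy.matrixlib
>>> numpy.matrixlib.test()  # doctest: +SKIP  (runs the matrixlib test suite)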
env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/__init__.pyi ADDED
@@ -0,0 +1,15 @@
+ from numpy._pytesttester import PytestTester
+
+ from numpy import (
+     matrix as matrix,
+ )
+
+ from numpy.matrixlib.defmatrix import (
+     bmat as bmat,
+     mat as mat,
+     asmatrix as asmatrix,
+ )
+
+ __all__: list[str]
+ __path__: list[str]
+ test: PytestTester
env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/defmatrix.py ADDED
@@ -0,0 +1,1114 @@
+ __all__ = ['matrix', 'bmat', 'mat', 'asmatrix']
+
+ import sys
+ import warnings
+ import ast
+
+ from .._utils import set_module
+ import numpy.core.numeric as N
+ from numpy.core.numeric import concatenate, isscalar
+ # While not in __all__, matrix_power used to be defined here, so we import
+ # it for backward compatibility.
+ from numpy.linalg import matrix_power
+
+
+ def _convert_from_string(data):
+     for char in '[]':
+         data = data.replace(char, '')
+
+     rows = data.split(';')
+     newdata = []
+     count = 0
+     for row in rows:
+         trow = row.split(',')
+         newrow = []
+         for col in trow:
+             temp = col.split()
+             newrow.extend(map(ast.literal_eval, temp))
+         if count == 0:
+             Ncols = len(newrow)
+         elif len(newrow) != Ncols:
+             raise ValueError("Rows not the same size.")
+         count += 1
+         newdata.append(newrow)
+     return newdata
+
+
+ @set_module('numpy')
+ def asmatrix(data, dtype=None):
+     """
+     Interpret the input as a matrix.
+
+     Unlike `matrix`, `asmatrix` does not make a copy if the input is already
+     a matrix or an ndarray. Equivalent to ``matrix(data, copy=False)``.
+
+     Parameters
+     ----------
+     data : array_like
+         Input data.
+     dtype : data-type
+         Data-type of the output matrix.
+
+     Returns
+     -------
+     mat : matrix
+         `data` interpreted as a matrix.
+
+     Examples
+     --------
+     >>> x = np.array([[1, 2], [3, 4]])
+
+     >>> m = np.asmatrix(x)
+
+     >>> x[0,0] = 5
+
+     >>> m
+     matrix([[5, 2],
+             [3, 4]])
+
+     """
+     return matrix(data, dtype=dtype, copy=False)
+
+
+ @set_module('numpy')
+ class matrix(N.ndarray):
+     """
+     matrix(data, dtype=None, copy=True)
+
+     .. note:: It is no longer recommended to use this class, even for linear
+               algebra. Instead use regular arrays. The class may be removed
+               in the future.
+
+     Returns a matrix from an array-like object, or from a string of data.
+     A matrix is a specialized 2-D array that retains its 2-D nature
+     through operations. It has certain special operators, such as ``*``
+     (matrix multiplication) and ``**`` (matrix power).
+
+     Parameters
+     ----------
+     data : array_like or string
+         If `data` is a string, it is interpreted as a matrix with commas
+         or spaces separating columns, and semicolons separating rows.
+     dtype : data-type
+         Data-type of the output matrix.
+     copy : bool
+         If `data` is already an `ndarray`, then this flag determines
+         whether the data is copied (the default), or whether a view is
+         constructed.
+
+     See Also
+     --------
+     array
+
+     Examples
+     --------
+     >>> a = np.matrix('1 2; 3 4')
+     >>> a
+     matrix([[1, 2],
+             [3, 4]])
+
+     >>> np.matrix([[1, 2], [3, 4]])
+     matrix([[1, 2],
+             [3, 4]])
+
+     """
+     __array_priority__ = 10.0
+     def __new__(subtype, data, dtype=None, copy=True):
+         warnings.warn('the matrix subclass is not the recommended way to '
+                       'represent matrices or deal with linear algebra (see '
+                       'https://docs.scipy.org/doc/numpy/user/'
+                       'numpy-for-matlab-users.html). '
+                       'Please adjust your code to use regular ndarray.',
+                       PendingDeprecationWarning, stacklevel=2)
+         if isinstance(data, matrix):
+             dtype2 = data.dtype
+             if (dtype is None):
+                 dtype = dtype2
+             if (dtype2 == dtype) and (not copy):
+                 return data
+             return data.astype(dtype)
+
+         if isinstance(data, N.ndarray):
+             if dtype is None:
+                 intype = data.dtype
+             else:
+                 intype = N.dtype(dtype)
+             new = data.view(subtype)
+             if intype != data.dtype:
+                 return new.astype(intype)
+             if copy: return new.copy()
+             else: return new
+
+         if isinstance(data, str):
+             data = _convert_from_string(data)
+
+         # now convert data to an array
+         arr = N.array(data, dtype=dtype, copy=copy)
+         ndim = arr.ndim
+         shape = arr.shape
+         if (ndim > 2):
+             raise ValueError("matrix must be 2-dimensional")
+         elif ndim == 0:
+             shape = (1, 1)
+         elif ndim == 1:
+             shape = (1, shape[0])
+
+         order = 'C'
+         if (ndim == 2) and arr.flags.fortran:
+             order = 'F'
+
+         if not (order or arr.flags.contiguous):
+             arr = arr.copy()
+
+         ret = N.ndarray.__new__(subtype, shape, arr.dtype,
+                                 buffer=arr,
+                                 order=order)
+         return ret
+
+     def __array_finalize__(self, obj):
+         self._getitem = False
+         if (isinstance(obj, matrix) and obj._getitem): return
+         ndim = self.ndim
+         if (ndim == 2):
+             return
+         if (ndim > 2):
+             newshape = tuple([x for x in self.shape if x > 1])
+             ndim = len(newshape)
+             if ndim == 2:
+                 self.shape = newshape
+                 return
+             elif (ndim > 2):
+                 raise ValueError("shape too large to be a matrix.")
+         else:
+             newshape = self.shape
+         if ndim == 0:
+             self.shape = (1, 1)
+         elif ndim == 1:
+             self.shape = (1, newshape[0])
+         return
+
+     def __getitem__(self, index):
+         self._getitem = True
+
+         try:
+             out = N.ndarray.__getitem__(self, index)
+         finally:
+             self._getitem = False
+
+         if not isinstance(out, N.ndarray):
+             return out
+
+         if out.ndim == 0:
+             return out[()]
+         if out.ndim == 1:
+             sh = out.shape[0]
+             # Determine when we should have a column array
+             try:
+                 n = len(index)
+             except Exception:
+                 n = 0
+             if n > 1 and isscalar(index[1]):
+                 out.shape = (sh, 1)
+             else:
+                 out.shape = (1, sh)
+         return out
+
+     def __mul__(self, other):
+         if isinstance(other, (N.ndarray, list, tuple)):
+             # This promotes 1-D vectors to row vectors
+             return N.dot(self, asmatrix(other))
+         if isscalar(other) or not hasattr(other, '__rmul__'):
+             return N.dot(self, other)
+         return NotImplemented
+
+     def __rmul__(self, other):
+         return N.dot(other, self)
+
+     def __imul__(self, other):
+         self[:] = self * other
+         return self
+
+     def __pow__(self, other):
+         return matrix_power(self, other)
+
+     def __ipow__(self, other):
+         self[:] = self ** other
+         return self
+
+     def __rpow__(self, other):
+         return NotImplemented
+
+     def _align(self, axis):
+         """A convenience function for operations that need to preserve axis
+         orientation.
+         """
+         if axis is None:
+             return self[0, 0]
+         elif axis == 0:
+             return self
+         elif axis == 1:
+             return self.transpose()
+         else:
+             raise ValueError("unsupported axis")
+
+     def _collapse(self, axis):
+         """A convenience function for operations that want to collapse
+         to a scalar like _align, but are using keepdims=True
+         """
+         if axis is None:
+             return self[0, 0]
+         else:
+             return self
+
+     # Necessary because base-class tolist expects dimension
+     # reduction by x[0]
+     def tolist(self):
+         """
+         Return the matrix as a (possibly nested) list.
+
+         See `ndarray.tolist` for full documentation.
+
+         See Also
+         --------
+         ndarray.tolist
+
+         Examples
+         --------
+         >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+         matrix([[ 0,  1,  2,  3],
+                 [ 4,  5,  6,  7],
+                 [ 8,  9, 10, 11]])
+         >>> x.tolist()
+         [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
+
+         """
+         return self.__array__().tolist()
+
+     # To preserve orientation of result...
+     def sum(self, axis=None, dtype=None, out=None):
+         """
+         Returns the sum of the matrix elements, along the given axis.
+
+         Refer to `numpy.sum` for full documentation.
+
+         See Also
+         --------
+         numpy.sum
+
+         Notes
+         -----
+         This is the same as `ndarray.sum`, except that where an `ndarray` would
+         be returned, a `matrix` object is returned instead.
+
+         Examples
+         --------
+         >>> x = np.matrix([[1, 2], [4, 3]])
+         >>> x.sum()
+         10
+         >>> x.sum(axis=1)
+         matrix([[3],
+                 [7]])
+         >>> x.sum(axis=1, dtype='float')
+         matrix([[3.],
+                 [7.]])
+         >>> out = np.zeros((2, 1), dtype='float')
+         >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out))
+         matrix([[3.],
+                 [7.]])
+
+         """
+         return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis)
+
+
+     # To update docstring from array to matrix...
+     def squeeze(self, axis=None):
+         """
+         Return a possibly reshaped matrix.
+
+         Refer to `numpy.squeeze` for more documentation.
+
+         Parameters
+         ----------
+         axis : None or int or tuple of ints, optional
+             Selects a subset of the axes of length one in the shape.
+             If an axis is selected with shape entry greater than one,
+             an error is raised.
+
+         Returns
+         -------
+         squeezed : matrix
+             The matrix, but as a (1, N) matrix if it had shape (N, 1).
+
+         See Also
+         --------
+         numpy.squeeze : related function
+
+         Notes
+         -----
+         If `m` has a single column then that column is returned
+         as the single row of a matrix.  Otherwise `m` is returned.
+         The returned matrix is always either `m` itself or a view into `m`.
+         Supplying an axis keyword argument will not affect the returned matrix
+         but it may cause an error to be raised.
+
+         Examples
+         --------
+         >>> c = np.matrix([[1], [2]])
+         >>> c
+         matrix([[1],
+                 [2]])
+         >>> c.squeeze()
+         matrix([[1, 2]])
+         >>> r = c.T
+         >>> r
+         matrix([[1, 2]])
+         >>> r.squeeze()
+         matrix([[1, 2]])
+         >>> m = np.matrix([[1, 2], [3, 4]])
+         >>> m.squeeze()
+         matrix([[1, 2],
+                 [3, 4]])
+
+         """
+         return N.ndarray.squeeze(self, axis=axis)
+
+
+     # To update docstring from array to matrix...
+     def flatten(self, order='C'):
+         """
+         Return a flattened copy of the matrix.
+
+         All `N` elements of the matrix are placed into a single row.
+
+         Parameters
+         ----------
+         order : {'C', 'F', 'A', 'K'}, optional
+             'C' means to flatten in row-major (C-style) order. 'F' means to
+             flatten in column-major (Fortran-style) order. 'A' means to
+             flatten in column-major order if `m` is Fortran *contiguous* in
+             memory, row-major order otherwise. 'K' means to flatten `m` in
+             the order the elements occur in memory. The default is 'C'.
+
+         Returns
+         -------
+         y : matrix
+             A copy of the matrix, flattened to a `(1, N)` matrix where `N`
+             is the number of elements in the original matrix.
+
+         See Also
+         --------
+         ravel : Return a flattened array.
+         flat : A 1-D flat iterator over the matrix.
+
+         Examples
+         --------
+         >>> m = np.matrix([[1,2], [3,4]])
+         >>> m.flatten()
+         matrix([[1, 2, 3, 4]])
+         >>> m.flatten('F')
+         matrix([[1, 3, 2, 4]])
+
+         """
+         return N.ndarray.flatten(self, order=order)
+
+     def mean(self, axis=None, dtype=None, out=None):
+         """
+         Returns the average of the matrix elements along the given axis.
+
+         Refer to `numpy.mean` for full documentation.
+
+         See Also
+         --------
+         numpy.mean
+
+         Notes
+         -----
+         Same as `ndarray.mean` except that, where that returns an `ndarray`,
+         this returns a `matrix` object.
+
+         Examples
+         --------
+         >>> x = np.matrix(np.arange(12).reshape((3, 4)))
+         >>> x
+         matrix([[ 0,  1,  2,  3],
+                 [ 4,  5,  6,  7],
+                 [ 8,  9, 10, 11]])
+         >>> x.mean()
+         5.5
+         >>> x.mean(0)
+         matrix([[4., 5., 6., 7.]])
+         >>> x.mean(1)
+         matrix([[ 1.5],
+                 [ 5.5],
+                 [ 9.5]])
+
+         """
+         return N.ndarray.mean(self, axis, dtype, out, keepdims=True)._collapse(axis)
+
+     def std(self, axis=None, dtype=None, out=None, ddof=0):
+         """
+         Return the standard deviation of the array elements along the given axis.
+
+         Refer to `numpy.std` for full documentation.
+
+         See Also
+         --------
+         numpy.std
+
+         Notes
+         -----
+         This is the same as `ndarray.std`, except that where an `ndarray` would
+         be returned, a `matrix` object is returned instead.
+
+         Examples
+         --------
+         >>> x = np.matrix(np.arange(12).reshape((3, 4)))
+         >>> x
+         matrix([[ 0,  1,  2,  3],
+                 [ 4,  5,  6,  7],
+                 [ 8,  9, 10, 11]])
+         >>> x.std()
+         3.4520525295346629 # may vary
+         >>> x.std(0)
+         matrix([[ 3.26598632,  3.26598632,  3.26598632,  3.26598632]]) # may vary
+         >>> x.std(1)
+         matrix([[ 1.11803399],
+                 [ 1.11803399],
+                 [ 1.11803399]])
+
+         """
+         return N.ndarray.std(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
+
+     def var(self, axis=None, dtype=None, out=None, ddof=0):
+         """
+         Returns the variance of the matrix elements, along the given axis.
+
+         Refer to `numpy.var` for full documentation.
+
+         See Also
+         --------
+         numpy.var
+
+         Notes
+         -----
+         This is the same as `ndarray.var`, except that where an `ndarray` would
+         be returned, a `matrix` object is returned instead.
+
+         Examples
+         --------
+         >>> x = np.matrix(np.arange(12).reshape((3, 4)))
+         >>> x
+         matrix([[ 0,  1,  2,  3],
+                 [ 4,  5,  6,  7],
+                 [ 8,  9, 10, 11]])
+         >>> x.var()
+         11.916666666666666
+         >>> x.var(0)
+         matrix([[ 10.66666667,  10.66666667,  10.66666667,  10.66666667]]) # may vary
+         >>> x.var(1)
+         matrix([[1.25],
+                 [1.25],
+                 [1.25]])
+
+         """
+         return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
+
+     def prod(self, axis=None, dtype=None, out=None):
+         """
+         Return the product of the array elements over the given axis.
+
+         Refer to `prod` for full documentation.
+
+         See Also
+         --------
+         prod, ndarray.prod
+
+         Notes
+         -----
+         Same as `ndarray.prod`, except, where that returns an `ndarray`, this
+         returns a `matrix` object instead.
+
+         Examples
+         --------
+         >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+         matrix([[ 0,  1,  2,  3],
+                 [ 4,  5,  6,  7],
+                 [ 8,  9, 10, 11]])
+         >>> x.prod()
+         0
+         >>> x.prod(0)
+         matrix([[  0,  45, 120, 231]])
+         >>> x.prod(1)
+         matrix([[   0],
+                 [ 840],
+                 [7920]])
+
+         """
+         return N.ndarray.prod(self, axis, dtype, out, keepdims=True)._collapse(axis)
+
+     def any(self, axis=None, out=None):
+         """
+         Test whether any array element along a given axis evaluates to True.
+
+         Refer to `numpy.any` for full documentation.
+
+         Parameters
+         ----------
+         axis : int, optional
+             Axis along which logical OR is performed
+         out : ndarray, optional
+             Output to existing array instead of creating new one, must have
+             same shape as expected output
+
+         Returns
+         -------
+         any : bool, ndarray
+             Returns a single bool if `axis` is ``None``; otherwise,
+             returns `ndarray`
+
+         """
+         return N.ndarray.any(self, axis, out, keepdims=True)._collapse(axis)
+
+     def all(self, axis=None, out=None):
+         """
+         Test whether all matrix elements along a given axis evaluate to True.
+
+         Parameters
+         ----------
+         See `numpy.all` for complete descriptions
+
+         See Also
+         --------
+         numpy.all
+
+         Notes
+         -----
+         This is the same as `ndarray.all`, but it returns a `matrix` object.
+
+         Examples
+         --------
+         >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+         matrix([[ 0,  1,  2,  3],
+                 [ 4,  5,  6,  7],
+                 [ 8,  9, 10, 11]])
+         >>> y = x[0]; y
+         matrix([[0, 1, 2, 3]])
+         >>> (x == y)
+         matrix([[ True,  True,  True,  True],
+                 [False, False, False, False],
+                 [False, False, False, False]])
+         >>> (x == y).all()
+         False
+         >>> (x == y).all(0)
+         matrix([[False, False, False, False]])
+         >>> (x == y).all(1)
+         matrix([[ True],
+                 [False],
+                 [False]])
+
+         """
+         return N.ndarray.all(self, axis, out, keepdims=True)._collapse(axis)
+
+     def max(self, axis=None, out=None):
+         """
+         Return the maximum value along an axis.
+
+         Parameters
+         ----------
+         See `amax` for complete descriptions
+
+         See Also
+         --------
+         amax, ndarray.max
+
+         Notes
+         -----
+         This is the same as `ndarray.max`, but returns a `matrix` object
+         where `ndarray.max` would return an ndarray.
+
+         Examples
+         --------
+         >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+         matrix([[ 0,  1,  2,  3],
+                 [ 4,  5,  6,  7],
+                 [ 8,  9, 10, 11]])
+         >>> x.max()
+         11
+         >>> x.max(0)
+         matrix([[ 8,  9, 10, 11]])
+         >>> x.max(1)
+         matrix([[ 3],
+                 [ 7],
+                 [11]])
+
+         """
+         return N.ndarray.max(self, axis, out, keepdims=True)._collapse(axis)
+
+     def argmax(self, axis=None, out=None):
+         """
+         Indexes of the maximum values along an axis.
+
+         Return the indexes of the first occurrences of the maximum values
+         along the specified axis.  If axis is None, the index is for the
+         flattened matrix.
+
+         Parameters
+         ----------
+         See `numpy.argmax` for complete descriptions
+
+         See Also
+         --------
+         numpy.argmax
+
+         Notes
+         -----
+         This is the same as `ndarray.argmax`, but returns a `matrix` object
+         where `ndarray.argmax` would return an `ndarray`.
+
+         Examples
+         --------
+         >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+         matrix([[ 0,  1,  2,  3],
+                 [ 4,  5,  6,  7],
+                 [ 8,  9, 10, 11]])
+         >>> x.argmax()
+         11
+         >>> x.argmax(0)
+         matrix([[2, 2, 2, 2]])
+         >>> x.argmax(1)
+         matrix([[3],
+                 [3],
+                 [3]])
+
+         """
+         return N.ndarray.argmax(self, axis, out)._align(axis)
+
+     def min(self, axis=None, out=None):
+         """
+         Return the minimum value along an axis.
+
+         Parameters
+         ----------
+         See `amin` for complete descriptions.
+
+         See Also
+         --------
+         amin, ndarray.min
+
+         Notes
+         -----
+         This is the same as `ndarray.min`, but returns a `matrix` object
+         where `ndarray.min` would return an ndarray.
+
+         Examples
+         --------
+         >>> x = -np.matrix(np.arange(12).reshape((3,4))); x
+         matrix([[  0,  -1,  -2,  -3],
+                 [ -4,  -5,  -6,  -7],
+                 [ -8,  -9, -10, -11]])
+         >>> x.min()
+         -11
+         >>> x.min(0)
+         matrix([[ -8,  -9, -10, -11]])
+         >>> x.min(1)
+         matrix([[ -3],
+                 [ -7],
+                 [-11]])
+
+         """
+         return N.ndarray.min(self, axis, out, keepdims=True)._collapse(axis)
+
+     def argmin(self, axis=None, out=None):
+         """
+         Indexes of the minimum values along an axis.
+
+         Return the indexes of the first occurrences of the minimum values
+         along the specified axis.  If axis is None, the index is for the
+         flattened matrix.
+
+         Parameters
+         ----------
+         See `numpy.argmin` for complete descriptions.
+
+         See Also
+         --------
+         numpy.argmin
+
+         Notes
+         -----
+         This is the same as `ndarray.argmin`, but returns a `matrix` object
+         where `ndarray.argmin` would return an `ndarray`.
+
+         Examples
+         --------
+         >>> x = -np.matrix(np.arange(12).reshape((3,4))); x
+         matrix([[  0,  -1,  -2,  -3],
+                 [ -4,  -5,  -6,  -7],
+                 [ -8,  -9, -10, -11]])
+         >>> x.argmin()
+         11
+         >>> x.argmin(0)
+         matrix([[2, 2, 2, 2]])
+         >>> x.argmin(1)
+         matrix([[3],
+                 [3],
+                 [3]])
+
+         """
+         return N.ndarray.argmin(self, axis, out)._align(axis)
+
+     def ptp(self, axis=None, out=None):
+         """
+         Peak-to-peak (maximum - minimum) value along the given axis.
+
+         Refer to `numpy.ptp` for full documentation.
+
+         See Also
+         --------
+         numpy.ptp
+
+         Notes
+         -----
+         Same as `ndarray.ptp`, except, where that would return an `ndarray` object,
+         this returns a `matrix` object.
+
+         Examples
+         --------
+         >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+         matrix([[ 0,  1,  2,  3],
+                 [ 4,  5,  6,  7],
+                 [ 8,  9, 10, 11]])
+         >>> x.ptp()
+         11
+         >>> x.ptp(0)
+         matrix([[8, 8, 8, 8]])
+         >>> x.ptp(1)
+         matrix([[3],
+                 [3],
+                 [3]])
+
+         """
+         return N.ndarray.ptp(self, axis, out)._align(axis)
+
+     @property
+     def I(self):
+         """
+         Returns the (multiplicative) inverse of invertible `self`.
+
+         Parameters
+         ----------
+         None
+
+         Returns
+         -------
+         ret : matrix object
+             If `self` is non-singular, `ret` is such that ``ret * self`` ==
+             ``self * ret`` == ``np.matrix(np.eye(self[0,:].size))`` all return
+             ``True``.
+
+         Raises
+         ------
+         numpy.linalg.LinAlgError: Singular matrix
+             If `self` is singular.
+
+         See Also
+         --------
+         linalg.inv
+
+         Examples
+         --------
+         >>> m = np.matrix('[1, 2; 3, 4]'); m
+         matrix([[1, 2],
+                 [3, 4]])
+         >>> m.getI()
+         matrix([[-2. ,  1. ],
+                 [ 1.5, -0.5]])
+         >>> m.getI() * m
+         matrix([[ 1.,  0.], # may vary
+                 [ 0.,  1.]])
+
+         """
+         M, N = self.shape
+         if M == N:
+             from numpy.linalg import inv as func
+         else:
+             from numpy.linalg import pinv as func
+         return asmatrix(func(self))
+
+     @property
+     def A(self):
+         """
+         Return `self` as an `ndarray` object.
+
+         Equivalent to ``np.asarray(self)``.
+
+         Parameters
+         ----------
+         None
+
+         Returns
+         -------
+         ret : ndarray
+             `self` as an `ndarray`
+
+         Examples
+         --------
+         >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+         matrix([[ 0,  1,  2,  3],
+                 [ 4,  5,  6,  7],
+                 [ 8,  9, 10, 11]])
+         >>> x.getA()
+         array([[ 0,  1,  2,  3],
+                [ 4,  5,  6,  7],
+                [ 8,  9, 10, 11]])
+
+         """
+         return self.__array__()
+
+     @property
+     def A1(self):
+         """
+         Return `self` as a flattened `ndarray`.
+
+         Equivalent to ``np.asarray(x).ravel()``
+
+         Parameters
+         ----------
+         None
+
+         Returns
+         -------
+         ret : ndarray
+             `self`, 1-D, as an `ndarray`
+
+         Examples
+         --------
+         >>> x = np.matrix(np.arange(12).reshape((3,4))); x
+         matrix([[ 0,  1,  2,  3],
+                 [ 4,  5,  6,  7],
+                 [ 8,  9, 10, 11]])
+         >>> x.getA1()
+         array([ 0,  1,  2, ...,  9, 10, 11])
+
+
+         """
+         return self.__array__().ravel()
+
+
+     def ravel(self, order='C'):
+         """
+         Return a flattened matrix.
+
+         Refer to `numpy.ravel` for more documentation.
+
+         Parameters
+         ----------
+         order : {'C', 'F', 'A', 'K'}, optional
+             The elements of `m` are read using this index order. 'C' means to
+             index the elements in C-like order, with the last axis index
+             changing fastest, back to the first axis index changing slowest.
+             'F' means to index the elements in Fortran-like index order, with
+             the first index changing fastest, and the last index changing
+             slowest. Note that the 'C' and 'F' options take no account of the
+             memory layout of the underlying array, and only refer to the order
+             of axis indexing. 'A' means to read the elements in Fortran-like
+             index order if `m` is Fortran *contiguous* in memory, C-like order
+             otherwise. 'K' means to read the elements in the order they occur
+             in memory, except for reversing the data when strides are negative.
+             By default, 'C' index order is used.
+
+         Returns
+         -------
+         ret : matrix
+             Return the matrix flattened to shape `(1, N)` where `N`
+             is the number of elements in the original matrix.
+             A copy is made only if necessary.
+
+         See Also
+         --------
+         matrix.flatten : returns a similar output matrix but always a copy
+         matrix.flat : a flat iterator on the array.
+         numpy.ravel : related function which returns an ndarray
+
+         """
+         return N.ndarray.ravel(self, order=order)
+
+     @property
+     def T(self):
+         """
+         Returns the transpose of the matrix.
+
+         Does *not* conjugate!  For the complex conjugate transpose, use ``.H``.
+
+         Parameters
+         ----------
+         None
+
+         Returns
+         -------
+         ret : matrix object
+             The (non-conjugated) transpose of the matrix.
+
+         See Also
+         --------
+         transpose, getH
+
+         Examples
+         --------
+         >>> m = np.matrix('[1, 2; 3, 4]')
+         >>> m
+         matrix([[1, 2],
+                 [3, 4]])
+         >>> m.getT()
+         matrix([[1, 3],
+                 [2, 4]])
+
+         """
+         return self.transpose()
+
+     @property
+     def H(self):
+         """
+         Returns the (complex) conjugate transpose of `self`.
+
+         Equivalent to ``np.transpose(self)`` if `self` is real-valued.
+
+         Parameters
+         ----------
+         None
+
+         Returns
+         -------
+         ret : matrix object
+             complex conjugate transpose of `self`
+
+         Examples
+         --------
+         >>> x = np.matrix(np.arange(12).reshape((3,4)))
+         >>> z = x - 1j*x; z
+         matrix([[  0. +0.j,   1. -1.j,   2. -2.j,   3. -3.j],
+                 [  4. -4.j,   5. -5.j,   6. -6.j,   7. -7.j],
+                 [  8. -8.j,   9. -9.j,  10.-10.j,  11.-11.j]])
+         >>> z.getH()
+         matrix([[ 0. -0.j,  4. +4.j,  8. +8.j],
+                 [ 1. +1.j,  5. +5.j,  9. +9.j],
+                 [ 2. +2.j,  6. +6.j, 10.+10.j],
+                 [ 3. +3.j,  7. +7.j, 11.+11.j]])
+
+         """
+         if issubclass(self.dtype.type, N.complexfloating):
+             return self.transpose().conjugate()
+         else:
+             return self.transpose()
+
+     # kept for compatibility
+     getT = T.fget
+     getA = A.fget
+     getA1 = A1.fget
+     getH = H.fget
+     getI = I.fget
+
+
+ def _from_string(str, gdict, ldict):
+     rows = str.split(';')
+     rowtup = []
+     for row in rows:
+         trow = row.split(',')
+         newrow = []
+         for x in trow:
+             newrow.extend(x.split())
+         trow = newrow
+         coltup = []
+         for col in trow:
+             col = col.strip()
+             try:
+                 thismat = ldict[col]
+             except KeyError:
+                 try:
+                     thismat = gdict[col]
+                 except KeyError as e:
+                     raise NameError(f"name {col!r} is not defined") from None
+
+             coltup.append(thismat)
+         rowtup.append(concatenate(coltup, axis=-1))
+     return concatenate(rowtup, axis=0)
+
+
+ @set_module('numpy')
+ def bmat(obj, ldict=None, gdict=None):
+     """
+     Build a matrix object from a string, nested sequence, or array.
+
+     Parameters
+     ----------
+     obj : str or array_like
+         Input data. If a string, variables in the current scope may be
+         referenced by name.
+     ldict : dict, optional
+         A dictionary that replaces local operands in current frame.
+         Ignored if `obj` is not a string or `gdict` is None.
+     gdict : dict, optional
+         A dictionary that replaces global operands in current frame.
+         Ignored if `obj` is not a string.
+
+     Returns
+     -------
+     out : matrix
+         Returns a matrix object, which is a specialized 2-D array.
+
+     See Also
+     --------
+     block :
+         A generalization of this function for N-d arrays, that returns normal
+         ndarrays.
+
+     Examples
+     --------
+     >>> A = np.mat('1 1; 1 1')
+     >>> B = np.mat('2 2; 2 2')
+     >>> C = np.mat('3 4; 5 6')
+     >>> D = np.mat('7 8; 9 0')
+
+     All the following expressions construct the same block matrix:
+
+     >>> np.bmat([[A, B], [C, D]])
+     matrix([[1, 1, 2, 2],
+             [1, 1, 2, 2],
+             [3, 4, 7, 8],
+             [5, 6, 9, 0]])
+     >>> np.bmat(np.r_[np.c_[A, B], np.c_[C, D]])
+     matrix([[1, 1, 2, 2],
+             [1, 1, 2, 2],
+             [3, 4, 7, 8],
+             [5, 6, 9, 0]])
+     >>> np.bmat('A,B; C,D')
+     matrix([[1, 1, 2, 2],
+             [1, 1, 2, 2],
+             [3, 4, 7, 8],
+             [5, 6, 9, 0]])
+
+     """
+     if isinstance(obj, str):
+         if gdict is None:
+             # get previous frame
+             frame = sys._getframe().f_back
+             glob_dict = frame.f_globals
+             loc_dict = frame.f_locals
+         else:
+             glob_dict = gdict
+             loc_dict = ldict
+
+         return matrix(_from_string(obj, glob_dict, loc_dict))
+
+     if isinstance(obj, (tuple, list)):
+         # [[A,B],[C,D]]
+         arr_rows = []
+         for row in obj:
+             if isinstance(row, N.ndarray):  # not 2-d
+                 return matrix(concatenate(obj, axis=-1))
+             else:
+                 arr_rows.append(concatenate(row, axis=-1))
+         return matrix(concatenate(arr_rows, axis=0))
+     if isinstance(obj, N.ndarray):
+         return matrix(obj)
+
+ mat = asmatrix
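
Every axis-reduction above follows one pattern: delegate to the `ndarray` method with `keepdims=True` so the result stays 2-D, then `_collapse` it to a scalar only when `axis is None` (`_align` plays the same role for `argmax`, `argmin` and `ptp`, which cannot use `keepdims`). A doctest-style sketch of the observable effect:

>>> import numpy as np
>>> m = np.matrix([[1, 2], [3, 4]])
>>> m.sum()          # axis=None collapses to a scalar
10
>>> m.sum(axis=0)    # keepdims keeps the row orientation
matrix([[4, 6]])
>>> m.sum(axis=1)    # ... and the column orientation
matrix([[3],
        [7]])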
env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/defmatrix.pyi ADDED
@@ -0,0 +1,16 @@
+ from collections.abc import Sequence, Mapping
+ from typing import Any
+ from numpy import matrix as matrix
+ from numpy._typing import ArrayLike, DTypeLike, NDArray
+
+ __all__: list[str]
+
+ def bmat(
+     obj: str | Sequence[ArrayLike] | NDArray[Any],
+     ldict: None | Mapping[str, Any] = ...,
+     gdict: None | Mapping[str, Any] = ...,
+ ) -> matrix[Any, Any]: ...
+
+ def asmatrix(data: ArrayLike, dtype: DTypeLike = ...) -> matrix[Any, Any]: ...
+
+ mat = asmatrix
env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/setup.py ADDED
@@ -0,0 +1,12 @@
+ #!/usr/bin/env python3
+ def configuration(parent_package='', top_path=None):
+     from numpy.distutils.misc_util import Configuration
+     config = Configuration('matrixlib', parent_package, top_path)
+     config.add_subpackage('tests')
+     config.add_data_files('*.pyi')
+     return config
+
+ if __name__ == "__main__":
+     from numpy.distutils.core import setup
+     config = configuration(top_path='').todict()
+     setup(**config)
env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/tests/__init__.py ADDED
File without changes
env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/tests/__pycache__/test_numeric.cpython-310.pyc ADDED
Binary file (938 Bytes).
env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/tests/test_defmatrix.py ADDED
@@ -0,0 +1,453 @@
1
+ import collections.abc
2
+
3
+ import numpy as np
4
+ from numpy import matrix, asmatrix, bmat
5
+ from numpy.testing import (
6
+ assert_, assert_equal, assert_almost_equal, assert_array_equal,
7
+ assert_array_almost_equal, assert_raises
8
+ )
9
+ from numpy.linalg import matrix_power
10
+ from numpy.matrixlib import mat
11
+
12
+ class TestCtor:
13
+ def test_basic(self):
14
+ A = np.array([[1, 2], [3, 4]])
15
+ mA = matrix(A)
16
+ assert_(np.all(mA.A == A))
17
+
18
+ B = bmat("A,A;A,A")
19
+ C = bmat([[A, A], [A, A]])
20
+ D = np.array([[1, 2, 1, 2],
21
+ [3, 4, 3, 4],
22
+ [1, 2, 1, 2],
23
+ [3, 4, 3, 4]])
24
+ assert_(np.all(B.A == D))
25
+ assert_(np.all(C.A == D))
26
+
27
+ E = np.array([[5, 6], [7, 8]])
28
+ AEresult = matrix([[1, 2, 5, 6], [3, 4, 7, 8]])
29
+ assert_(np.all(bmat([A, E]) == AEresult))
30
+
31
+ vec = np.arange(5)
32
+ mvec = matrix(vec)
33
+ assert_(mvec.shape == (1, 5))
34
+
35
+ def test_exceptions(self):
36
+ # Check for ValueError when called with invalid string data.
37
+ assert_raises(ValueError, matrix, "invalid")
38
+
39
+ def test_bmat_nondefault_str(self):
40
+ A = np.array([[1, 2], [3, 4]])
41
+ B = np.array([[5, 6], [7, 8]])
42
+ Aresult = np.array([[1, 2, 1, 2],
43
+ [3, 4, 3, 4],
44
+ [1, 2, 1, 2],
45
+ [3, 4, 3, 4]])
46
+ mixresult = np.array([[1, 2, 5, 6],
47
+ [3, 4, 7, 8],
48
+ [5, 6, 1, 2],
49
+ [7, 8, 3, 4]])
50
+ assert_(np.all(bmat("A,A;A,A") == Aresult))
51
+ assert_(np.all(bmat("A,A;A,A", ldict={'A':B}) == Aresult))
52
+ assert_raises(TypeError, bmat, "A,A;A,A", gdict={'A':B})
53
+ assert_(
54
+ np.all(bmat("A,A;A,A", ldict={'A':A}, gdict={'A':B}) == Aresult))
55
+ b2 = bmat("A,B;C,D", ldict={'A':A,'B':B}, gdict={'C':B,'D':A})
56
+ assert_(np.all(b2 == mixresult))
57
+
58
+
59
+ class TestProperties:
60
+ def test_sum(self):
61
+ """Test whether matrix.sum(axis=1) preserves orientation.
62
+ Fails in NumPy <= 0.9.6.2127.
63
+ """
64
+ M = matrix([[1, 2, 0, 0],
65
+ [3, 4, 0, 0],
66
+ [1, 2, 1, 2],
67
+ [3, 4, 3, 4]])
68
+ sum0 = matrix([8, 12, 4, 6])
69
+ sum1 = matrix([3, 7, 6, 14]).T
70
+ sumall = 30
71
+ assert_array_equal(sum0, M.sum(axis=0))
72
+ assert_array_equal(sum1, M.sum(axis=1))
73
+ assert_equal(sumall, M.sum())
74
+
75
+ assert_array_equal(sum0, np.sum(M, axis=0))
76
+ assert_array_equal(sum1, np.sum(M, axis=1))
77
+ assert_equal(sumall, np.sum(M))
78
+
79
+ def test_prod(self):
80
+ x = matrix([[1, 2, 3], [4, 5, 6]])
81
+ assert_equal(x.prod(), 720)
82
+ assert_equal(x.prod(0), matrix([[4, 10, 18]]))
83
+ assert_equal(x.prod(1), matrix([[6], [120]]))
84
+
85
+ assert_equal(np.prod(x), 720)
86
+ assert_equal(np.prod(x, axis=0), matrix([[4, 10, 18]]))
87
+ assert_equal(np.prod(x, axis=1), matrix([[6], [120]]))
88
+
89
+ y = matrix([0, 1, 3])
90
+ assert_(y.prod() == 0)
91
+
92
+ def test_max(self):
93
+ x = matrix([[1, 2, 3], [4, 5, 6]])
94
+ assert_equal(x.max(), 6)
95
+ assert_equal(x.max(0), matrix([[4, 5, 6]]))
96
+ assert_equal(x.max(1), matrix([[3], [6]]))
97
+
98
+ assert_equal(np.max(x), 6)
99
+ assert_equal(np.max(x, axis=0), matrix([[4, 5, 6]]))
100
+ assert_equal(np.max(x, axis=1), matrix([[3], [6]]))
101
+
102
+ def test_min(self):
103
+ x = matrix([[1, 2, 3], [4, 5, 6]])
104
+ assert_equal(x.min(), 1)
105
+ assert_equal(x.min(0), matrix([[1, 2, 3]]))
106
+ assert_equal(x.min(1), matrix([[1], [4]]))
107
+
108
+ assert_equal(np.min(x), 1)
109
+ assert_equal(np.min(x, axis=0), matrix([[1, 2, 3]]))
110
+ assert_equal(np.min(x, axis=1), matrix([[1], [4]]))
111
+
112
+ def test_ptp(self):
113
+ x = np.arange(4).reshape((2, 2))
114
+ assert_(x.ptp() == 3)
115
+ assert_(np.all(x.ptp(0) == np.array([2, 2])))
116
+ assert_(np.all(x.ptp(1) == np.array([1, 1])))
117
+
118
+ def test_var(self):
119
+ x = np.arange(9).reshape((3, 3))
120
+ mx = x.view(np.matrix)
121
+ assert_equal(x.var(ddof=0), mx.var(ddof=0))
122
+ assert_equal(x.var(ddof=1), mx.var(ddof=1))
123
+
124
+ def test_basic(self):
125
+ import numpy.linalg as linalg
126
+
127
+ A = np.array([[1., 2.],
128
+ [3., 4.]])
129
+ mA = matrix(A)
130
+ assert_(np.allclose(linalg.inv(A), mA.I))
131
+ assert_(np.all(np.array(np.transpose(A) == mA.T)))
132
+ assert_(np.all(np.array(np.transpose(A) == mA.H)))
133
+ assert_(np.all(A == mA.A))
134
+
135
+ B = A + 2j*A
136
+ mB = matrix(B)
137
+ assert_(np.allclose(linalg.inv(B), mB.I))
138
+ assert_(np.all(np.array(np.transpose(B) == mB.T)))
139
+ assert_(np.all(np.array(np.transpose(B).conj() == mB.H)))
140
+
141
+ def test_pinv(self):
142
+ x = matrix(np.arange(6).reshape(2, 3))
143
+ xpinv = matrix([[-0.77777778, 0.27777778],
144
+ [-0.11111111, 0.11111111],
145
+ [ 0.55555556, -0.05555556]])
146
+ assert_almost_equal(x.I, xpinv)
147
+
148
+ def test_comparisons(self):
149
+ A = np.arange(100).reshape(10, 10)
150
+ mA = matrix(A)
151
+ mB = matrix(A) + 0.1
152
+ assert_(np.all(mB == A+0.1))
153
+ assert_(np.all(mB == matrix(A+0.1)))
154
+ assert_(not np.any(mB == matrix(A-0.1)))
155
+ assert_(np.all(mA < mB))
156
+ assert_(np.all(mA <= mB))
157
+ assert_(np.all(mA <= mA))
158
+ assert_(not np.any(mA < mA))
159
+
160
+ assert_(not np.any(mB < mA))
161
+ assert_(np.all(mB >= mA))
162
+ assert_(np.all(mB >= mB))
163
+ assert_(not np.any(mB > mB))
164
+
165
+ assert_(np.all(mA == mA))
166
+ assert_(not np.any(mA == mB))
167
+ assert_(np.all(mB != mA))
168
+
169
+ assert_(not np.all(abs(mA) > 0))
170
+ assert_(np.all(abs(mB > 0)))
171
+
172
+ def test_asmatrix(self):
173
+ A = np.arange(100).reshape(10, 10)
174
+ mA = asmatrix(A)
175
+ A[0, 0] = -10
176
+ assert_(A[0, 0] == mA[0, 0])
177
+
178
+ def test_noaxis(self):
179
+ A = matrix([[1, 0], [0, 1]])
180
+ assert_(A.sum() == matrix(2))
181
+ assert_(A.mean() == matrix(0.5))
182
+
183
+ def test_repr(self):
184
+ A = matrix([[1, 0], [0, 1]])
185
+ assert_(repr(A) == "matrix([[1, 0],\n [0, 1]])")
186
+
187
+ def test_make_bool_matrix_from_str(self):
188
+ A = matrix('True; True; False')
189
+ B = matrix([[True], [True], [False]])
190
+ assert_array_equal(A, B)
191
+
192
+ class TestCasting:
193
+ def test_basic(self):
194
+ A = np.arange(100).reshape(10, 10)
195
+ mA = matrix(A)
196
+
197
+ mB = mA.copy()
198
+ O = np.ones((10, 10), np.float64) * 0.1
199
+ mB = mB + O
200
+ assert_(mB.dtype.type == np.float64)
201
+ assert_(np.all(mA != mB))
202
+ assert_(np.all(mB == mA+0.1))
203
+
204
+ mC = mA.copy()
205
+ O = np.ones((10, 10), np.complex128)
206
+ mC = mC * O
207
+ assert_(mC.dtype.type == np.complex128)
208
+ assert_(np.all(mA != mB))
209
+
210
+
211
+ class TestAlgebra:
212
+ def test_basic(self):
213
+ import numpy.linalg as linalg
214
+
215
+ A = np.array([[1., 2.], [3., 4.]])
216
+ mA = matrix(A)
217
+
218
+ B = np.identity(2)
219
+ for i in range(6):
220
+ assert_(np.allclose((mA ** i).A, B))
221
+ B = np.dot(B, A)
222
+
223
+ Ainv = linalg.inv(A)
224
+ B = np.identity(2)
225
+ for i in range(6):
226
+ assert_(np.allclose((mA ** -i).A, B))
227
+ B = np.dot(B, Ainv)
228
+
229
+ assert_(np.allclose((mA * mA).A, np.dot(A, A)))
230
+ assert_(np.allclose((mA + mA).A, (A + A)))
231
+ assert_(np.allclose((3*mA).A, (3*A)))
232
+
233
+ mA2 = matrix(A)
234
+ mA2 *= 3
235
+ assert_(np.allclose(mA2.A, 3*A))
236
+
237
+ def test_pow(self):
238
+ """Test raising a matrix to an integer power works as expected."""
239
+ m = matrix("1. 2.; 3. 4.")
240
+ m2 = m.copy()
241
+ m2 **= 2
242
+ mi = m.copy()
243
+ mi **= -1
244
+ m4 = m2.copy()
245
+ m4 **= 2
246
+ assert_array_almost_equal(m2, m**2)
247
+ assert_array_almost_equal(m4, np.dot(m2, m2))
248
+ assert_array_almost_equal(np.dot(mi, m), np.eye(2))
249
+
250
+ def test_scalar_type_pow(self):
251
+ m = matrix([[1, 2], [3, 4]])
252
+ for scalar_t in [np.int8, np.uint8]:
253
+ two = scalar_t(2)
254
+ assert_array_almost_equal(m ** 2, m ** two)
255
+
256
+ def test_notimplemented(self):
257
+ '''Check that 'not implemented' operations produce a failure.'''
258
+ A = matrix([[1., 2.],
259
+ [3., 4.]])
260
+
261
+ # __rpow__
262
+ with assert_raises(TypeError):
263
+ 1.0**A
264
+
265
+ # __mul__ with something not a list, ndarray, tuple, or scalar
266
+ with assert_raises(TypeError):
267
+ A*object()
268
+
269
+
270
+ class TestMatrixReturn:
271
+ def test_instance_methods(self):
272
+ a = matrix([1.0], dtype='f8')
273
+ methodargs = {
274
+ 'astype': ('intc',),
275
+ 'clip': (0.0, 1.0),
276
+ 'compress': ([1],),
277
+ 'repeat': (1,),
278
+ 'reshape': (1,),
279
+ 'swapaxes': (0, 0),
280
+ 'dot': np.array([1.0]),
281
+ }
282
+ excluded_methods = [
283
+ 'argmin', 'choose', 'dump', 'dumps', 'fill', 'getfield',
284
+ 'getA', 'getA1', 'item', 'nonzero', 'put', 'putmask', 'resize',
285
+ 'searchsorted', 'setflags', 'setfield', 'sort',
286
+ 'partition', 'argpartition',
287
+ 'take', 'tofile', 'tolist', 'tostring', 'tobytes', 'all', 'any',
288
+ 'sum', 'argmax', 'argmin', 'min', 'max', 'mean', 'var', 'ptp',
289
+ 'prod', 'std', 'ctypes', 'itemset',
290
+ ]
291
+ for attrib in dir(a):
292
+ if attrib.startswith('_') or attrib in excluded_methods:
293
+ continue
294
+ f = getattr(a, attrib)
295
+ if isinstance(f, collections.abc.Callable):
296
+ # reset contents of a
297
+ a.astype('f8')
298
+ a.fill(1.0)
299
+ if attrib in methodargs:
300
+ args = methodargs[attrib]
301
+ else:
302
+ args = ()
303
+ b = f(*args)
304
+ assert_(type(b) is matrix, "%s" % attrib)
305
+ assert_(type(a.real) is matrix)
306
+ assert_(type(a.imag) is matrix)
307
+ c, d = matrix([0.0]).nonzero()
308
+ assert_(type(c) is np.ndarray)
309
+ assert_(type(d) is np.ndarray)
310
+
311
+
312
+ class TestIndexing:
313
+ def test_basic(self):
314
+ x = asmatrix(np.zeros((3, 2), float))
315
+ y = np.zeros((3, 1), float)
316
+ y[:, 0] = [0.8, 0.2, 0.3]
317
+ x[:, 1] = y > 0.5
318
+ assert_equal(x, [[0, 1], [0, 0], [0, 0]])
319
+
320
+
321
+ class TestNewScalarIndexing:
322
+ a = matrix([[1, 2], [3, 4]])
323
+
324
+ def test_dimesions(self):
325
+ a = self.a
326
+ x = a[0]
327
+ assert_equal(x.ndim, 2)
328
+
329
+ def test_array_from_matrix_list(self):
330
+ a = self.a
331
+ x = np.array([a, a])
332
+ assert_equal(x.shape, [2, 2, 2])
333
+
334
+ def test_array_to_list(self):
335
+ a = self.a
336
+ assert_equal(a.tolist(), [[1, 2], [3, 4]])
337
+
338
+ def test_fancy_indexing(self):
339
+ a = self.a
340
+ x = a[1, [0, 1, 0]]
341
+ assert_(isinstance(x, matrix))
342
+ assert_equal(x, matrix([[3, 4, 3]]))
343
+        x = a[[1, 0]]
+        assert_(isinstance(x, matrix))
+        assert_equal(x, matrix([[3, 4], [1, 2]]))
+        x = a[[[1], [0]], [[1, 0], [0, 1]]]
+        assert_(isinstance(x, matrix))
+        assert_equal(x, matrix([[4, 3], [1, 2]]))
+
+    def test_matrix_element(self):
+        x = matrix([[1, 2, 3], [4, 5, 6]])
+        assert_equal(x[0][0], matrix([[1, 2, 3]]))
+        assert_equal(x[0][0].shape, (1, 3))
+        assert_equal(x[0].shape, (1, 3))
+        assert_equal(x[:, 0].shape, (2, 1))
+
+        x = matrix(0)
+        assert_equal(x[0, 0], 0)
+        assert_equal(x[0], 0)
+        assert_equal(x[:, 0].shape, x.shape)
+
+    def test_scalar_indexing(self):
+        x = asmatrix(np.zeros((3, 2), float))
+        assert_equal(x[0, 0], x[0][0])
+
+    def test_row_column_indexing(self):
+        x = asmatrix(np.eye(2))
+        assert_array_equal(x[0, :], [[1, 0]])
+        assert_array_equal(x[1, :], [[0, 1]])
+        assert_array_equal(x[:, 0], [[1], [0]])
+        assert_array_equal(x[:, 1], [[0], [1]])
+
+    def test_boolean_indexing(self):
+        A = np.arange(6)
+        A.shape = (3, 2)
+        x = asmatrix(A)
+        assert_array_equal(x[:, np.array([True, False])], x[:, 0])
+        assert_array_equal(x[np.array([True, False, False]), :], x[0, :])
+
+    def test_list_indexing(self):
+        A = np.arange(6)
+        A.shape = (3, 2)
+        x = asmatrix(A)
+        assert_array_equal(x[:, [1, 0]], x[:, ::-1])
+        assert_array_equal(x[[2, 1, 0], :], x[::-1, :])
+
+
+class TestPower:
+    def test_returntype(self):
+        a = np.array([[0, 1], [0, 0]])
+        assert_(type(matrix_power(a, 2)) is np.ndarray)
+        a = mat(a)
+        assert_(type(matrix_power(a, 2)) is matrix)
+
+    def test_list(self):
+        assert_array_equal(matrix_power([[0, 1], [0, 0]], 2), [[0, 0], [0, 0]])
+
+
+class TestShape:
+
+    a = np.array([[1], [2]])
+    m = matrix([[1], [2]])
+
+    def test_shape(self):
+        assert_equal(self.a.shape, (2, 1))
+        assert_equal(self.m.shape, (2, 1))
+
+    def test_numpy_ravel(self):
+        assert_equal(np.ravel(self.a).shape, (2,))
+        assert_equal(np.ravel(self.m).shape, (2,))
+
+    def test_member_ravel(self):
+        assert_equal(self.a.ravel().shape, (2,))
+        assert_equal(self.m.ravel().shape, (1, 2))
+
+    def test_member_flatten(self):
+        assert_equal(self.a.flatten().shape, (2,))
+        assert_equal(self.m.flatten().shape, (1, 2))
+
+    def test_numpy_ravel_order(self):
+        x = np.array([[1, 2, 3], [4, 5, 6]])
+        assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6])
+        assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6])
+        assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6])
+        assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6])
+        x = matrix([[1, 2, 3], [4, 5, 6]])
+        assert_equal(np.ravel(x), [1, 2, 3, 4, 5, 6])
+        assert_equal(np.ravel(x, order='F'), [1, 4, 2, 5, 3, 6])
+        assert_equal(np.ravel(x.T), [1, 4, 2, 5, 3, 6])
+        assert_equal(np.ravel(x.T, order='A'), [1, 2, 3, 4, 5, 6])
+
+    def test_matrix_ravel_order(self):
+        x = matrix([[1, 2, 3], [4, 5, 6]])
+        assert_equal(x.ravel(), [[1, 2, 3, 4, 5, 6]])
+        assert_equal(x.ravel(order='F'), [[1, 4, 2, 5, 3, 6]])
+        assert_equal(x.T.ravel(), [[1, 4, 2, 5, 3, 6]])
+        assert_equal(x.T.ravel(order='A'), [[1, 2, 3, 4, 5, 6]])
+
+    def test_array_memory_sharing(self):
+        assert_(np.may_share_memory(self.a, self.a.ravel()))
+        assert_(not np.may_share_memory(self.a, self.a.flatten()))
+
+    def test_matrix_memory_sharing(self):
+        assert_(np.may_share_memory(self.m, self.m.ravel()))
+        assert_(not np.may_share_memory(self.m, self.m.flatten()))
+
+    def test_expand_dims_matrix(self):
+        # matrices are always 2d - so expand_dims only makes sense when the
+        # type is changed away from matrix.
+        a = np.arange(10).reshape((2, 5)).view(np.matrix)
+        expanded = np.expand_dims(a, axis=1)
+        assert_equal(expanded.ndim, 3)
+        assert_(not isinstance(expanded, np.matrix))
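
A minimal standalone sketch (plain numpy, not part of the diff above) of the shape contract the TestShape cases pin down: the matrix .ravel()/.flatten() methods stay 2-D, while the free function np.ravel() returns 1-D for both types.

    import numpy as np

    a = np.array([[1], [2]])
    m = np.matrix([[1], [2]])

    # The methods honor the subtype: matrix stays 2-D, ndarray collapses.
    assert a.ravel().shape == (2,)
    assert m.ravel().shape == (1, 2)
    assert m.flatten().shape == (1, 2)

    # The free function does not preserve the matrix shape rule.
    assert np.ravel(m).shape == (2,)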
env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/tests/test_interaction.py ADDED
@@ -0,0 +1,354 @@
+"""Tests of interaction of matrix with other parts of numpy.
+
+Note that tests with MaskedArray and linalg are done in separate files.
+"""
+import pytest
+
+import textwrap
+import warnings
+
+import numpy as np
+from numpy.testing import (assert_, assert_equal, assert_raises,
+                           assert_raises_regex, assert_array_equal,
+                           assert_almost_equal, assert_array_almost_equal)
+
+
+def test_fancy_indexing():
+    # The matrix class messes with the shape. While this is always
+    # weird (getitem is not used, it does not have setitem nor knows
+    # about fancy indexing), this tests gh-3110
+    # 2018-04-29: moved here from core.tests.test_index.
+    m = np.matrix([[1, 2], [3, 4]])
+
+    assert_(isinstance(m[[0, 1, 0], :], np.matrix))
+
+    # gh-3110. Note the transpose currently because matrices do *not*
+    # support dimension fixing for fancy indexing correctly.
+    x = np.asmatrix(np.arange(50).reshape(5, 10))
+    assert_equal(x[:2, np.array(-1)], x[:2, -1].T)
+
+
+def test_polynomial_mapdomain():
+    # test that polynomial preserves the matrix subtype.
+    # 2018-04-29: moved here from polynomial.tests.polyutils.
+    dom1 = [0, 4]
+    dom2 = [1, 3]
+    x = np.matrix([dom1, dom1])
+    res = np.polynomial.polyutils.mapdomain(x, dom1, dom2)
+    assert_(isinstance(res, np.matrix))
+
+
+def test_sort_matrix_none():
+    # 2018-04-29: moved here from core.tests.test_multiarray
+    a = np.matrix([[2, 1, 0]])
+    actual = np.sort(a, axis=None)
+    expected = np.matrix([[0, 1, 2]])
+    assert_equal(actual, expected)
+    assert_(type(expected) is np.matrix)
+
+
+def test_partition_matrix_none():
+    # gh-4301
+    # 2018-04-29: moved here from core.tests.test_multiarray
+    a = np.matrix([[2, 1, 0]])
+    actual = np.partition(a, 1, axis=None)
+    expected = np.matrix([[0, 1, 2]])
+    assert_equal(actual, expected)
+    assert_(type(expected) is np.matrix)
+
+
+def test_dot_scalar_and_matrix_of_objects():
+    # Ticket #2469
+    # 2018-04-29: moved here from core.tests.test_multiarray
+    arr = np.matrix([1, 2], dtype=object)
+    desired = np.matrix([[3, 6]], dtype=object)
+    assert_equal(np.dot(arr, 3), desired)
+    assert_equal(np.dot(3, arr), desired)
+
+
+def test_inner_scalar_and_matrix():
+    # 2018-04-29: moved here from core.tests.test_multiarray
+    for dt in np.typecodes['AllInteger'] + np.typecodes['AllFloat'] + '?':
+        sca = np.array(3, dtype=dt)[()]
+        arr = np.matrix([[1, 2], [3, 4]], dtype=dt)
+        desired = np.matrix([[3, 6], [9, 12]], dtype=dt)
+        assert_equal(np.inner(arr, sca), desired)
+        assert_equal(np.inner(sca, arr), desired)
+
+
+def test_inner_scalar_and_matrix_of_objects():
+    # Ticket #4482
+    # 2018-04-29: moved here from core.tests.test_multiarray
+    arr = np.matrix([1, 2], dtype=object)
+    desired = np.matrix([[3, 6]], dtype=object)
+    assert_equal(np.inner(arr, 3), desired)
+    assert_equal(np.inner(3, arr), desired)
+
+
+def test_iter_allocate_output_subtype():
+    # Make sure that the subtype with priority wins
+    # 2018-04-29: moved here from core.tests.test_nditer, given the
+    # matrix specific shape test.
+
+    # matrix vs ndarray
+    a = np.matrix([[1, 2], [3, 4]])
+    b = np.arange(4).reshape(2, 2).T
+    i = np.nditer([a, b, None], [],
+                  [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+    assert_(type(i.operands[2]) is np.matrix)
+    assert_(type(i.operands[2]) is not np.ndarray)
+    assert_equal(i.operands[2].shape, (2, 2))
+
+    # matrix always wants things to be 2D
+    b = np.arange(4).reshape(1, 2, 2)
+    assert_raises(RuntimeError, np.nditer, [a, b, None], [],
+                  [['readonly'], ['readonly'], ['writeonly', 'allocate']])
+    # but if subtypes are disabled, the result can still work
+    i = np.nditer([a, b, None], [],
+                  [['readonly'], ['readonly'],
+                   ['writeonly', 'allocate', 'no_subtype']])
+    assert_(type(i.operands[2]) is np.ndarray)
+    assert_(type(i.operands[2]) is not np.matrix)
+    assert_equal(i.operands[2].shape, (1, 2, 2))
+
+
+def test_like_function():
+    # 2018-04-29: moved here from core.tests.test_numeric
+    # (renamed with a test_ prefix so pytest actually collects it)
+    a = np.matrix([[1, 2], [3, 4]])
+    for like_function in np.zeros_like, np.ones_like, np.empty_like:
+        b = like_function(a)
+        assert_(type(b) is np.matrix)
+
+        c = like_function(a, subok=False)
+        assert_(type(c) is not np.matrix)
+
+
+def test_array_astype():
+    # 2018-04-29: copied here from core.tests.test_api
+    # subok=True passes through a matrix
+    a = np.matrix([[0, 1, 2], [3, 4, 5]], dtype='f4')
+    b = a.astype('f4', subok=True, copy=False)
+    assert_(a is b)
+
+    # subok=True is default, and creates a subtype on a cast
+    b = a.astype('i4', copy=False)
+    assert_equal(a, b)
+    assert_equal(type(b), np.matrix)
+
+    # subok=False never returns a matrix
+    b = a.astype('f4', subok=False, copy=False)
+    assert_equal(a, b)
+    assert_(a is not b)
+    assert_(type(b) is not np.matrix)
+
+
+def test_stack():
+    # 2018-04-29: copied here from core.tests.test_shape_base
+    # check np.matrix cannot be stacked
+    m = np.matrix([[1, 2], [3, 4]])
+    assert_raises_regex(ValueError, 'shape too large to be a matrix',
+                        np.stack, [m, m])
+
+
+def test_object_scalar_multiply():
+    # Tickets #2469 and #4482
+    # 2018-04-29: moved here from core.tests.test_ufunc
+    arr = np.matrix([1, 2], dtype=object)
+    desired = np.matrix([[3, 6]], dtype=object)
+    assert_equal(np.multiply(arr, 3), desired)
+    assert_equal(np.multiply(3, arr), desired)
+
+
+def test_nanfunctions_matrices():
+    # Check that it works and that type and
+    # shape are preserved
+    # 2018-04-29: moved here from core.tests.test_nanfunctions
+    mat = np.matrix(np.eye(3))
+    for f in [np.nanmin, np.nanmax]:
+        res = f(mat, axis=0)
+        assert_(isinstance(res, np.matrix))
+        assert_(res.shape == (1, 3))
+        res = f(mat, axis=1)
+        assert_(isinstance(res, np.matrix))
+        assert_(res.shape == (3, 1))
+        res = f(mat)
+        assert_(np.isscalar(res))
+    # check that rows of nan are dealt with for subclasses (#4628)
+    mat[1] = np.nan
+    for f in [np.nanmin, np.nanmax]:
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            res = f(mat, axis=0)
+            assert_(isinstance(res, np.matrix))
+            assert_(not np.any(np.isnan(res)))
+            assert_(len(w) == 0)
+
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            res = f(mat, axis=1)
+            assert_(isinstance(res, np.matrix))
+            assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
+                    and not np.isnan(res[2, 0]))
+            assert_(len(w) == 1, 'no warning raised')
+            assert_(issubclass(w[0].category, RuntimeWarning))
+
+        with warnings.catch_warnings(record=True) as w:
+            warnings.simplefilter('always')
+            res = f(mat)
+            assert_(np.isscalar(res))
+            # `res != np.nan` is vacuously true; test the intended property
+            assert_(not np.isnan(res))
+            assert_(len(w) == 0)
+
+
+def test_nanfunctions_matrices_general():
+    # Check that it works and that type and
+    # shape are preserved
+    # 2018-04-29: moved here from core.tests.test_nanfunctions
+    mat = np.matrix(np.eye(3))
+    for f in (np.nanargmin, np.nanargmax, np.nansum, np.nanprod,
+              np.nanmean, np.nanvar, np.nanstd):
+        res = f(mat, axis=0)
+        assert_(isinstance(res, np.matrix))
+        assert_(res.shape == (1, 3))
+        res = f(mat, axis=1)
+        assert_(isinstance(res, np.matrix))
+        assert_(res.shape == (3, 1))
+        res = f(mat)
+        assert_(np.isscalar(res))
+
+    for f in np.nancumsum, np.nancumprod:
+        res = f(mat, axis=0)
+        assert_(isinstance(res, np.matrix))
+        assert_(res.shape == (3, 3))
+        res = f(mat, axis=1)
+        assert_(isinstance(res, np.matrix))
+        assert_(res.shape == (3, 3))
+        res = f(mat)
+        assert_(isinstance(res, np.matrix))
+        assert_(res.shape == (1, 3*3))
+
+
+def test_average_matrix():
+    # 2018-04-29: moved here from core.tests.test_function_base.
+    y = np.matrix(np.random.rand(5, 5))
+    assert_array_equal(y.mean(0), np.average(y, 0))
+
+    a = np.matrix([[1, 2], [3, 4]])
+    w = np.matrix([[1, 2], [3, 4]])
+
+    r = np.average(a, axis=0, weights=w)
+    assert_equal(type(r), np.matrix)
+    assert_equal(r, [[2.5, 10.0/3]])
+
+
+def test_trapz_matrix():
+    # Test to make sure matrices give the same answer as ndarrays
+    # 2018-04-29: moved here from core.tests.test_function_base.
+    x = np.linspace(0, 5)
+    y = x * x
+    r = np.trapz(y, x)
+    mx = np.matrix(x)
+    my = np.matrix(y)
+    mr = np.trapz(my, mx)
+    assert_almost_equal(mr, r)
+
+
+def test_ediff1d_matrix():
+    # 2018-04-29: moved here from core.tests.test_arraysetops.
+    assert_(isinstance(np.ediff1d(np.matrix(1)), np.matrix))
+    assert_(isinstance(np.ediff1d(np.matrix(1), to_begin=1), np.matrix))
+
+
+def test_apply_along_axis_matrix():
+    # this test is particularly malicious because matrix
+    # refuses to become 1d
+    # 2018-04-29: moved here from core.tests.test_shape_base.
+    def double(row):
+        return row * 2
+
+    m = np.matrix([[0, 1], [2, 3]])
+    expected = np.matrix([[0, 2], [4, 6]])
+
+    result = np.apply_along_axis(double, 0, m)
+    assert_(isinstance(result, np.matrix))
+    assert_array_equal(result, expected)
+
+    result = np.apply_along_axis(double, 1, m)
+    assert_(isinstance(result, np.matrix))
+    assert_array_equal(result, expected)
+
+
+def test_kron_matrix():
+    # 2018-04-29: moved here from core.tests.test_shape_base.
+    a = np.ones([2, 2])
+    m = np.asmatrix(a)
+    assert_equal(type(np.kron(a, a)), np.ndarray)
+    assert_equal(type(np.kron(m, m)), np.matrix)
+    assert_equal(type(np.kron(a, m)), np.matrix)
+    assert_equal(type(np.kron(m, a)), np.matrix)
+
+
+class TestConcatenatorMatrix:
+    # 2018-04-29: moved here from core.tests.test_index_tricks.
+    def test_matrix(self):
+        a = [1, 2]
+        b = [3, 4]
+
+        ab_r = np.r_['r', a, b]
+        ab_c = np.r_['c', a, b]
+
+        assert_equal(type(ab_r), np.matrix)
+        assert_equal(type(ab_c), np.matrix)
+
+        assert_equal(np.array(ab_r), [[1, 2, 3, 4]])
+        assert_equal(np.array(ab_c), [[1], [2], [3], [4]])
+
+        assert_raises(ValueError, lambda: np.r_['rc', a, b])
+
+    def test_matrix_scalar(self):
+        r = np.r_['r', [1, 2], 3]
+        assert_equal(type(r), np.matrix)
+        assert_equal(np.array(r), [[1, 2, 3]])
+
+    def test_matrix_builder(self):
+        a = np.array([1])
+        b = np.array([2])
+        c = np.array([3])
+        d = np.array([4])
+        actual = np.r_['a, b; c, d']
+        expected = np.bmat([[a, b], [c, d]])
+
+        assert_equal(actual, expected)
+        assert_equal(type(actual), type(expected))
+
+
+def test_array_equal_error_message_matrix():
+    # 2018-04-29: moved here from testing.tests.test_utils.
+    with pytest.raises(AssertionError) as exc_info:
+        assert_equal(np.array([1, 2]), np.matrix([1, 2]))
+    msg = str(exc_info.value)
+    msg_reference = textwrap.dedent("""\
+
+    Arrays are not equal
+
+    (shapes (2,), (1, 2) mismatch)
+     x: array([1, 2])
+     y: matrix([[1, 2]])""")
+    assert_equal(msg, msg_reference)
+
+
+def test_array_almost_equal_matrix():
+    # Matrix slicing keeps things 2-D, while array does not necessarily.
+    # See gh-8452.
+    # 2018-04-29: moved here from testing.tests.test_utils.
+    m1 = np.matrix([[1., 2.]])
+    m2 = np.matrix([[1., np.nan]])
+    m3 = np.matrix([[1., -np.inf]])
+    m4 = np.matrix([[np.nan, np.inf]])
+    m5 = np.matrix([[1., 2.], [np.nan, np.inf]])
+    for assert_func in assert_array_almost_equal, assert_almost_equal:
+        for m in m1, m2, m3, m4, m5:
+            assert_func(m, m)
+            a = np.array(m)
+            assert_func(a, m)
+            assert_func(m, a)
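
A short standalone sketch (plain numpy, not part of the diff) of the np.r_ string directives exercised by TestConcatenatorMatrix above: 'r' and 'c' return np.matrix row and column results.

    import numpy as np

    # 'r' concatenates into a 1 x N row matrix, 'c' into an N x 1 column matrix.
    ab_r = np.r_['r', [1, 2], [3, 4]]
    ab_c = np.r_['c', [1, 2], [3, 4]]
    assert type(ab_r) is np.matrix and ab_r.shape == (1, 4)
    assert type(ab_c) is np.matrix and ab_c.shape == (4, 1)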
env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/tests/test_masked_matrix.py ADDED
@@ -0,0 +1,231 @@
+import numpy as np
+from numpy.testing import assert_warns
+from numpy.ma.testutils import (assert_, assert_equal, assert_raises,
+                                assert_array_equal)
+from numpy.ma.core import (masked_array, masked_values, masked, allequal,
+                           MaskType, getmask, MaskedArray, nomask,
+                           log, add, hypot, divide)
+from numpy.ma.extras import mr_
+from numpy.compat import pickle
+
+
+class MMatrix(MaskedArray, np.matrix):
+
+    def __new__(cls, data, mask=nomask):
+        mat = np.matrix(data)
+        _data = MaskedArray.__new__(cls, data=mat, mask=mask)
+        return _data
+
+    def __array_finalize__(self, obj):
+        np.matrix.__array_finalize__(self, obj)
+        MaskedArray.__array_finalize__(self, obj)
+        return
+
+    @property
+    def _series(self):
+        _view = self.view(MaskedArray)
+        _view._sharedmask = False
+        return _view
+
+
+class TestMaskedMatrix:
+    def test_matrix_indexing(self):
+        # Tests conversions and indexing
+        x1 = np.matrix([[1, 2, 3], [4, 3, 2]])
+        x2 = masked_array(x1, mask=[[1, 0, 0], [0, 1, 0]])
+        x3 = masked_array(x1, mask=[[0, 1, 0], [1, 0, 0]])
+        x4 = masked_array(x1)
+        # test conversion to strings
+        str(x2)  # raises?
+        repr(x2)  # raises?
+        # tests of indexing
+        assert_(type(x2[1, 0]) is type(x1[1, 0]))
+        assert_(x1[1, 0] == x2[1, 0])
+        assert_(x2[1, 1] is masked)
+        assert_equal(x1[0, 2], x2[0, 2])
+        assert_equal(x1[0, 1:], x2[0, 1:])
+        assert_equal(x1[:, 2], x2[:, 2])
+        assert_equal(x1[:], x2[:])
+        assert_equal(x1[1:], x3[1:])
+        x1[0, 2] = 9
+        x2[0, 2] = 9
+        assert_equal(x1, x2)
+        x1[0, 1:] = 99
+        x2[0, 1:] = 99
+        assert_equal(x1, x2)
+        x2[0, 1] = masked
+        assert_equal(x1, x2)
+        x2[0, 1:] = masked
+        assert_equal(x1, x2)
+        x2[0, :] = x1[0, :]
+        x2[0, 1] = masked
+        assert_(allequal(getmask(x2), np.array([[0, 1, 0], [0, 1, 0]])))
+        x3[1, :] = masked_array([1, 2, 3], [1, 1, 0])
+        assert_(allequal(getmask(x3)[1], masked_array([1, 1, 0])))
+        assert_(allequal(getmask(x3[1]), masked_array([1, 1, 0])))
+        x4[1, :] = masked_array([1, 2, 3], [1, 1, 0])
+        assert_(allequal(getmask(x4[1]), masked_array([1, 1, 0])))
+        assert_(allequal(x4[1], masked_array([1, 2, 3])))
+        x1 = np.matrix(np.arange(5) * 1.0)
+        x2 = masked_values(x1, 3.0)
+        assert_equal(x1, x2)
+        assert_(allequal(masked_array([0, 0, 0, 1, 0], dtype=MaskType),
+                         x2.mask))
+        assert_equal(3.0, x2.fill_value)
+
+    def test_pickling_subbaseclass(self):
+        # Test pickling w/ a subclass of ndarray
+        a = masked_array(np.matrix(list(range(10))), mask=[1, 0, 1, 0, 0] * 2)
+        for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+            a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+            assert_equal(a_pickled._mask, a._mask)
+            assert_equal(a_pickled, a)
+            assert_(isinstance(a_pickled._data, np.matrix))
+
+    def test_count_mean_with_matrix(self):
+        m = masked_array(np.matrix([[1, 2], [3, 4]]), mask=np.zeros((2, 2)))
+
+        assert_equal(m.count(axis=0).shape, (1, 2))
+        assert_equal(m.count(axis=1).shape, (2, 1))
+
+        # Make sure broadcasting inside mean and var work
+        assert_equal(m.mean(axis=0), [[2., 3.]])
+        assert_equal(m.mean(axis=1), [[1.5], [3.5]])
+
+    def test_flat(self):
+        # Test that flat can return items even for matrices [#4585, #4615]
+        # test simple access
+        test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+        assert_equal(test.flat[1], 2)
+        assert_equal(test.flat[2], masked)
+        assert_(np.all(test.flat[0:2] == test[0, 0:2]))
+        # Test flat on masked_matrices
+        test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+        test.flat = masked_array([3, 2, 1], mask=[1, 0, 0])
+        control = masked_array(np.matrix([[3, 2, 1]]), mask=[1, 0, 0])
+        assert_equal(test, control)
+        # Test setting
+        test = masked_array(np.matrix([[1, 2, 3]]), mask=[0, 0, 1])
+        testflat = test.flat
+        testflat[:] = testflat[[2, 1, 0]]
+        assert_equal(test, control)
+        testflat[0] = 9
+        # test that matrices keep the correct shape (#4615)
+        a = masked_array(np.matrix(np.eye(2)), mask=0)
+        b = a.flat
+        b01 = b[:2]
+        assert_equal(b01.data, np.array([[1., 0.]]))
+        assert_equal(b01.mask, np.array([[False, False]]))
+
+    def test_allany_onmatrices(self):
+        x = np.array([[0.13, 0.26, 0.90],
+                      [0.28, 0.33, 0.63],
+                      [0.31, 0.87, 0.70]])
+        X = np.matrix(x)
+        m = np.array([[True, False, False],
+                      [False, False, False],
+                      [True, True, False]], dtype=np.bool_)
+        mX = masked_array(X, mask=m)
+        mXbig = (mX > 0.5)
+        mXsmall = (mX < 0.5)
+
+        assert_(not mXbig.all())
+        assert_(mXbig.any())
+        assert_equal(mXbig.all(0), np.matrix([False, False, True]))
+        assert_equal(mXbig.all(1), np.matrix([False, False, True]).T)
+        assert_equal(mXbig.any(0), np.matrix([False, False, True]))
+        assert_equal(mXbig.any(1), np.matrix([True, True, True]).T)
+
+        assert_(not mXsmall.all())
+        assert_(mXsmall.any())
+        assert_equal(mXsmall.all(0), np.matrix([True, True, False]))
+        assert_equal(mXsmall.all(1), np.matrix([False, False, False]).T)
+        assert_equal(mXsmall.any(0), np.matrix([True, True, False]))
+        assert_equal(mXsmall.any(1), np.matrix([True, True, False]).T)
+
+    def test_compressed(self):
+        a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
+        b = a.compressed()
+        assert_equal(b, a)
+        assert_(isinstance(b, np.matrix))
+        a[0, 0] = masked
+        b = a.compressed()
+        assert_equal(b, [[2, 3, 4]])
+
+    def test_ravel(self):
+        a = masked_array(np.matrix([1, 2, 3, 4, 5]), mask=[[0, 1, 0, 0, 0]])
+        aravel = a.ravel()
+        assert_equal(aravel.shape, (1, 5))
+        assert_equal(aravel._mask.shape, a.shape)
+
+    def test_view(self):
+        # Test view w/ flexible dtype
+        iterator = list(zip(np.arange(10), np.random.rand(10)))
+        data = np.array(iterator)
+        a = masked_array(iterator, dtype=[('a', float), ('b', float)])
+        a.mask[0] = (1, 0)
+        test = a.view((float, 2), np.matrix)
+        assert_equal(test, data)
+        assert_(isinstance(test, np.matrix))
+        assert_(not isinstance(test, MaskedArray))
+
+
+class TestSubclassing:
+    # Test suite for masked subclasses of ndarray.
+
+    def setup_method(self):
+        x = np.arange(5, dtype='float')
+        mx = MMatrix(x, mask=[0, 1, 0, 0, 0])
+        self.data = (x, mx)
+
+    def test_maskedarray_subclassing(self):
+        # Tests subclassing MaskedArray
+        (x, mx) = self.data
+        assert_(isinstance(mx._data, np.matrix))
+
+    def test_masked_unary_operations(self):
+        # Tests masked_unary_operation
+        (x, mx) = self.data
+        with np.errstate(divide='ignore'):
+            assert_(isinstance(log(mx), MMatrix))
+            assert_equal(log(x), np.log(x))
+
+    def test_masked_binary_operations(self):
+        # Tests masked_binary_operation
+        (x, mx) = self.data
+        # Result should be a MMatrix
+        assert_(isinstance(add(mx, mx), MMatrix))
+        assert_(isinstance(add(mx, x), MMatrix))
+        # Result should work
+        assert_equal(add(mx, x), mx + x)
+        assert_(isinstance(add(mx, mx)._data, np.matrix))
+        with assert_warns(DeprecationWarning):
+            assert_(isinstance(add.outer(mx, mx), MMatrix))
+        assert_(isinstance(hypot(mx, mx), MMatrix))
+        assert_(isinstance(hypot(mx, x), MMatrix))
+
+    def test_masked_binary_operations2(self):
+        # Tests domained_masked_binary_operation
+        (x, mx) = self.data
+        xmx = masked_array(mx.data.__array__(), mask=mx.mask)
+        assert_(isinstance(divide(mx, mx), MMatrix))
+        assert_(isinstance(divide(mx, x), MMatrix))
+        assert_equal(divide(mx, mx), divide(xmx, xmx))
+
+
+class TestConcatenator:
+    # Tests for mr_, the equivalent of r_ for masked arrays.
+
+    def test_matrix_builder(self):
+        assert_raises(np.ma.MAError, lambda: mr_['1, 2; 3, 4'])
+
+    def test_matrix(self):
+        # Test consistency with unmasked version. If we ever deprecate
+        # matrix, this test should either still pass, or both actual and
+        # expected should fail to be built.
+        actual = mr_['r', 1, 2, 3]
+        expected = np.ma.array(np.r_['r', 1, 2, 3])
+        assert_array_equal(actual, expected)
+
+        # outer type is masked array, inner type is matrix
+        assert_equal(type(actual), type(expected))
+        assert_equal(type(actual.data), type(expected.data))
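
A small standalone sketch (plain numpy, not part of the diff) of the behavior TestMaskedMatrix.test_compressed checks above: compressed() drops masked entries but keeps the matrix subtype, so the result stays 2-D.

    import numpy as np
    from numpy.ma import masked, masked_array

    a = masked_array(np.matrix([1, 2, 3, 4]), mask=[0, 0, 0, 0])
    a[0, 0] = masked
    b = a.compressed()           # unmasked data only, still an np.matrix
    assert isinstance(b, np.matrix)
    assert (b == np.matrix([[2, 3, 4]])).all()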
env-llmeval/lib/python3.10/site-packages/numpy/matrixlib/tests/test_matrix_linalg.py ADDED
@@ -0,0 +1,93 @@
+"""Test functions for linalg module using the matrix class."""
+import numpy as np
+
+from numpy.linalg.tests.test_linalg import (
+    LinalgCase, apply_tag, TestQR as _TestQR, LinalgTestCase,
+    _TestNorm2D, _TestNormDoubleBase, _TestNormSingleBase, _TestNormInt64Base,
+    SolveCases, InvCases, EigvalsCases, EigCases, SVDCases, CondCases,
+    PinvCases, DetCases, LstsqCases)
+
+
+CASES = []
+
+# square test cases
+CASES += apply_tag('square', [
+    LinalgCase("0x0_matrix",
+               np.empty((0, 0), dtype=np.double).view(np.matrix),
+               np.empty((0, 1), dtype=np.double).view(np.matrix),
+               tags={'size-0'}),
+    LinalgCase("matrix_b_only",
+               np.array([[1., 2.], [3., 4.]]),
+               np.matrix([2., 1.]).T),
+    LinalgCase("matrix_a_and_b",
+               np.matrix([[1., 2.], [3., 4.]]),
+               np.matrix([2., 1.]).T),
+])
+
+# hermitian test-cases
+CASES += apply_tag('hermitian', [
+    LinalgCase("hmatrix_a_and_b",
+               np.matrix([[1., 2.], [2., 1.]]),
+               None),
+])
+# No need to make generalized or strided cases for matrices.
+
+
+class MatrixTestCase(LinalgTestCase):
+    TEST_CASES = CASES
+
+
+class TestSolveMatrix(SolveCases, MatrixTestCase):
+    pass
+
+
+class TestInvMatrix(InvCases, MatrixTestCase):
+    pass
+
+
+class TestEigvalsMatrix(EigvalsCases, MatrixTestCase):
+    pass
+
+
+class TestEigMatrix(EigCases, MatrixTestCase):
+    pass
+
+
+class TestSVDMatrix(SVDCases, MatrixTestCase):
+    pass
+
+
+class TestCondMatrix(CondCases, MatrixTestCase):
+    pass
+
+
+class TestPinvMatrix(PinvCases, MatrixTestCase):
+    pass
+
+
+class TestDetMatrix(DetCases, MatrixTestCase):
+    pass
+
+
+class TestLstsqMatrix(LstsqCases, MatrixTestCase):
+    pass
+
+
+class _TestNorm2DMatrix(_TestNorm2D):
+    array = np.matrix
+
+
+class TestNormDoubleMatrix(_TestNorm2DMatrix, _TestNormDoubleBase):
+    pass
+
+
+class TestNormSingleMatrix(_TestNorm2DMatrix, _TestNormSingleBase):
+    pass
+
+
+class TestNormInt64Matrix(_TestNorm2DMatrix, _TestNormInt64Base):
+    pass
+
+
+class TestQRMatrix(_TestQR):
+    array = np.matrix
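
The mixin layout above lets one set of generic case classes drive many linalg APIs, with MatrixTestCase only supplying TEST_CASES. A hypothetical, much-reduced sketch of the same pattern (CaseBase, MatrixCases, and run_all are illustrative names, not numpy test API):

    import numpy as np

    class CaseBase:
        # Shared test logic; subclasses only override TEST_CASES.
        TEST_CASES = []

        def run_all(self):
            for a, b in self.TEST_CASES:
                x = np.linalg.solve(a, b)
                # Verify the solution regardless of input subtype.
                assert np.allclose(np.asarray(a) @ np.asarray(x), np.asarray(b))

    class MatrixCases(CaseBase):
        TEST_CASES = [(np.matrix([[1., 2.], [3., 4.]]),
                       np.matrix([2., 1.]).T)]

    MatrixCases().run_all()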