applied-ai-018 committed on
Commit 57fe09e · verified · 1 parent: b295afd

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step40/zero/9.attention.dense.weight/exp_avg_sq.pt +3 -0
  3. venv/lib/python3.10/site-packages/numpy/lib/__init__.py +92 -0
  4. venv/lib/python3.10/site-packages/numpy/lib/__init__.pyi +245 -0
  5. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/__init__.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/_datasource.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/_iotools.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/_version.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/arraypad.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/arraysetops.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/arrayterator.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/format.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/function_base.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/histograms.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/index_tricks.cpython-310.pyc +0 -0
  16. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/mixins.cpython-310.pyc +0 -0
  17. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/nanfunctions.cpython-310.pyc +0 -0
  18. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/npyio.cpython-310.pyc +0 -0
  19. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/polynomial.cpython-310.pyc +0 -0
  20. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/recfunctions.cpython-310.pyc +0 -0
  21. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/scimath.cpython-310.pyc +0 -0
  22. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/setup.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/shape_base.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/twodim_base.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/type_check.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/ufunclike.cpython-310.pyc +0 -0
  28. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/user_array.cpython-310.pyc +0 -0
  29. venv/lib/python3.10/site-packages/numpy/lib/__pycache__/utils.cpython-310.pyc +0 -0
  30. venv/lib/python3.10/site-packages/numpy/lib/_datasource.py +704 -0
  31. venv/lib/python3.10/site-packages/numpy/lib/_iotools.py +897 -0
  32. venv/lib/python3.10/site-packages/numpy/lib/_version.py +155 -0
  33. venv/lib/python3.10/site-packages/numpy/lib/_version.pyi +17 -0
  34. venv/lib/python3.10/site-packages/numpy/lib/arraypad.pyi +85 -0
  35. venv/lib/python3.10/site-packages/numpy/lib/arraysetops.pyi +362 -0
  36. venv/lib/python3.10/site-packages/numpy/lib/arrayterator.py +219 -0
  37. venv/lib/python3.10/site-packages/numpy/lib/format.py +976 -0
  38. venv/lib/python3.10/site-packages/numpy/lib/function_base.pyi +697 -0
  39. venv/lib/python3.10/site-packages/numpy/lib/histograms.pyi +47 -0
  40. venv/lib/python3.10/site-packages/numpy/lib/index_tricks.py +1046 -0
  41. venv/lib/python3.10/site-packages/numpy/lib/mixins.pyi +74 -0
  42. venv/lib/python3.10/site-packages/numpy/lib/nanfunctions.py +1887 -0
  43. venv/lib/python3.10/site-packages/numpy/lib/npyio.py +2547 -0
  44. venv/lib/python3.10/site-packages/numpy/lib/npyio.pyi +330 -0
  45. venv/lib/python3.10/site-packages/numpy/lib/polynomial.py +1453 -0
  46. venv/lib/python3.10/site-packages/numpy/lib/polynomial.pyi +303 -0
  47. venv/lib/python3.10/site-packages/numpy/lib/scimath.py +625 -0
  48. venv/lib/python3.10/site-packages/numpy/lib/scimath.pyi +94 -0
  49. venv/lib/python3.10/site-packages/numpy/lib/setup.py +12 -0
  50. venv/lib/python3.10/site-packages/numpy/lib/shape_base.pyi +220 -0
ckpts/universal/global_step40/zero/10.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:250eb61aeade057e4e711631ee909d5d140592a6f730066a6cc38b1c731f03cc
+ size 33555627
ckpts/universal/global_step40/zero/9.attention.dense.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6498b8c22595b3324278e4f2e6ecfb2847df09f4c35b51d41bfb6a09c6604387
+ size 16778411
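
Both checkpoint weights are stored through Git LFS, so the diff adds three-line pointer files rather than the tensors themselves. As a rough sketch (not part of this repo's tooling), a pointer in the spec/v1 format above can be parsed like this:

    # Minimal sketch: parse a Git LFS pointer file (spec v1) into a dict.
    # The "key value" per-line layout matches the hunks above; the local
    # path in the usage comment is hypothetical.
    def parse_lfs_pointer(path):
        fields = {}
        with open(path, encoding="utf-8") as fp:
            for line in fp:
                key, _, value = line.strip().partition(" ")
                if key:
                    fields[key] = value
        return fields

    # info = parse_lfs_pointer("exp_avg_sq.pt")  # hypothetical local checkout
    # print(info["oid"], int(info["size"]))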
venv/lib/python3.10/site-packages/numpy/lib/__init__.py ADDED
@@ -0,0 +1,92 @@
+ """
+ **Note:** almost all functions in the ``numpy.lib`` namespace
+ are also present in the main ``numpy`` namespace. Please use the
+ functions as ``np.<funcname>`` where possible.
+
+ ``numpy.lib`` is mostly a space for implementing functions that don't
+ belong in core or in another NumPy submodule with a clear purpose
+ (e.g. ``random``, ``fft``, ``linalg``, ``ma``).
+
+ Most of it contains basic functions that are used by several submodules
+ and are useful to have in the main name-space.
+
+ """
+
+ # Public submodules
+ # Note: recfunctions and (maybe) format are public too, but not imported
+ from . import mixins
+ from . import scimath as emath
+
+ # Private submodules
+ # load module names. See https://github.com/networkx/networkx/issues/5838
+ from . import type_check
+ from . import index_tricks
+ from . import function_base
+ from . import nanfunctions
+ from . import shape_base
+ from . import stride_tricks
+ from . import twodim_base
+ from . import ufunclike
+ from . import histograms
+ from . import polynomial
+ from . import utils
+ from . import arraysetops
+ from . import npyio
+ from . import arrayterator
+ from . import arraypad
+ from . import _version
+
+ from .type_check import *
+ from .index_tricks import *
+ from .function_base import *
+ from .nanfunctions import *
+ from .shape_base import *
+ from .stride_tricks import *
+ from .twodim_base import *
+ from .ufunclike import *
+ from .histograms import *
+
+ from .polynomial import *
+ from .utils import *
+ from .arraysetops import *
+ from .npyio import *
+ from .arrayterator import Arrayterator
+ from .arraypad import *
+ from ._version import *
+ from numpy.core._multiarray_umath import tracemalloc_domain
+
+ __all__ = ['emath', 'tracemalloc_domain', 'Arrayterator']
+ __all__ += type_check.__all__
+ __all__ += index_tricks.__all__
+ __all__ += function_base.__all__
+ __all__ += shape_base.__all__
+ __all__ += stride_tricks.__all__
+ __all__ += twodim_base.__all__
+ __all__ += ufunclike.__all__
+ __all__ += arraypad.__all__
+ __all__ += polynomial.__all__
+ __all__ += utils.__all__
+ __all__ += arraysetops.__all__
+ __all__ += npyio.__all__
+ __all__ += nanfunctions.__all__
+ __all__ += histograms.__all__
+
+ from numpy._pytesttester import PytestTester
+ test = PytestTester(__name__)
+ del PytestTester
+
+ def __getattr__(attr):
+     # Warn for deprecated attributes
+     import math
+     import warnings
+
+     if attr == 'math':
+         warnings.warn(
+             "`np.lib.math` is a deprecated alias for the standard library "
+             "`math` module (Deprecated NumPy 1.25). Replace usages of "
+             "`numpy.lib.math` with `math`", DeprecationWarning, stacklevel=2)
+         return math
+     else:
+         raise AttributeError("module {!r} has no attribute "
+                              "{!r}".format(__name__, attr))
+
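
The `__getattr__` hook at the end of this file relies on PEP 562 module-level attribute access: the warning is only emitted, and `math` only imported, when someone actually touches `np.lib.math`. A minimal sketch of the same pattern for a hypothetical module attribute `old_name`:

    # demo_module.py -- minimal sketch of the PEP 562 deprecation pattern
    # used above; `old_name` and `new_impl` are hypothetical names.
    import warnings

    def new_impl():
        return 42

    def __getattr__(attr):
        if attr == 'old_name':
            warnings.warn(
                "`old_name` is deprecated, use `new_impl` instead",
                DeprecationWarning, stacklevel=2)
            return new_impl
        raise AttributeError(f"module {__name__!r} has no attribute {attr!r}")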
venv/lib/python3.10/site-packages/numpy/lib/__init__.pyi ADDED
@@ -0,0 +1,245 @@
+ import math as math
+ from typing import Any
+
+ from numpy._pytesttester import PytestTester
+
+ from numpy import (
+     ndenumerate as ndenumerate,
+     ndindex as ndindex,
+ )
+
+ from numpy.version import version
+
+ from numpy.lib import (
+     format as format,
+     mixins as mixins,
+     scimath as scimath,
+     stride_tricks as stride_tricks,
+ )
+
+ from numpy.lib._version import (
+     NumpyVersion as NumpyVersion,
+ )
+
+ from numpy.lib.arraypad import (
+     pad as pad,
+ )
+
+ from numpy.lib.arraysetops import (
+     ediff1d as ediff1d,
+     intersect1d as intersect1d,
+     setxor1d as setxor1d,
+     union1d as union1d,
+     setdiff1d as setdiff1d,
+     unique as unique,
+     in1d as in1d,
+     isin as isin,
+ )
+
+ from numpy.lib.arrayterator import (
+     Arrayterator as Arrayterator,
+ )
+
+ from numpy.lib.function_base import (
+     select as select,
+     piecewise as piecewise,
+     trim_zeros as trim_zeros,
+     copy as copy,
+     iterable as iterable,
+     percentile as percentile,
+     diff as diff,
+     gradient as gradient,
+     angle as angle,
+     unwrap as unwrap,
+     sort_complex as sort_complex,
+     disp as disp,
+     flip as flip,
+     rot90 as rot90,
+     extract as extract,
+     place as place,
+     vectorize as vectorize,
+     asarray_chkfinite as asarray_chkfinite,
+     average as average,
+     bincount as bincount,
+     digitize as digitize,
+     cov as cov,
+     corrcoef as corrcoef,
+     median as median,
+     sinc as sinc,
+     hamming as hamming,
+     hanning as hanning,
+     bartlett as bartlett,
+     blackman as blackman,
+     kaiser as kaiser,
+     trapz as trapz,
+     i0 as i0,
+     add_newdoc as add_newdoc,
+     add_docstring as add_docstring,
+     meshgrid as meshgrid,
+     delete as delete,
+     insert as insert,
+     append as append,
+     interp as interp,
+     add_newdoc_ufunc as add_newdoc_ufunc,
+     quantile as quantile,
+ )
+
+ from numpy.lib.histograms import (
+     histogram_bin_edges as histogram_bin_edges,
+     histogram as histogram,
+     histogramdd as histogramdd,
+ )
+
+ from numpy.lib.index_tricks import (
+     ravel_multi_index as ravel_multi_index,
+     unravel_index as unravel_index,
+     mgrid as mgrid,
+     ogrid as ogrid,
+     r_ as r_,
+     c_ as c_,
+     s_ as s_,
+     index_exp as index_exp,
+     ix_ as ix_,
+     fill_diagonal as fill_diagonal,
+     diag_indices as diag_indices,
+     diag_indices_from as diag_indices_from,
+ )
+
+ from numpy.lib.nanfunctions import (
+     nansum as nansum,
+     nanmax as nanmax,
+     nanmin as nanmin,
+     nanargmax as nanargmax,
+     nanargmin as nanargmin,
+     nanmean as nanmean,
+     nanmedian as nanmedian,
+     nanpercentile as nanpercentile,
+     nanvar as nanvar,
+     nanstd as nanstd,
+     nanprod as nanprod,
+     nancumsum as nancumsum,
+     nancumprod as nancumprod,
+     nanquantile as nanquantile,
+ )
+
+ from numpy.lib.npyio import (
+     savetxt as savetxt,
+     loadtxt as loadtxt,
+     genfromtxt as genfromtxt,
+     recfromtxt as recfromtxt,
+     recfromcsv as recfromcsv,
+     load as load,
+     save as save,
+     savez as savez,
+     savez_compressed as savez_compressed,
+     packbits as packbits,
+     unpackbits as unpackbits,
+     fromregex as fromregex,
+     DataSource as DataSource,
+ )
+
+ from numpy.lib.polynomial import (
+     poly as poly,
+     roots as roots,
+     polyint as polyint,
+     polyder as polyder,
+     polyadd as polyadd,
+     polysub as polysub,
+     polymul as polymul,
+     polydiv as polydiv,
+     polyval as polyval,
+     polyfit as polyfit,
+     RankWarning as RankWarning,
+     poly1d as poly1d,
+ )
+
+ from numpy.lib.shape_base import (
+     column_stack as column_stack,
+     row_stack as row_stack,
+     dstack as dstack,
+     array_split as array_split,
+     split as split,
+     hsplit as hsplit,
+     vsplit as vsplit,
+     dsplit as dsplit,
+     apply_over_axes as apply_over_axes,
+     expand_dims as expand_dims,
+     apply_along_axis as apply_along_axis,
+     kron as kron,
+     tile as tile,
+     get_array_wrap as get_array_wrap,
+     take_along_axis as take_along_axis,
+     put_along_axis as put_along_axis,
+ )
+
+ from numpy.lib.stride_tricks import (
+     broadcast_to as broadcast_to,
+     broadcast_arrays as broadcast_arrays,
+     broadcast_shapes as broadcast_shapes,
+ )
+
+ from numpy.lib.twodim_base import (
+     diag as diag,
+     diagflat as diagflat,
+     eye as eye,
+     fliplr as fliplr,
+     flipud as flipud,
+     tri as tri,
+     triu as triu,
+     tril as tril,
+     vander as vander,
+     histogram2d as histogram2d,
+     mask_indices as mask_indices,
+     tril_indices as tril_indices,
+     tril_indices_from as tril_indices_from,
+     triu_indices as triu_indices,
+     triu_indices_from as triu_indices_from,
+ )
+
+ from numpy.lib.type_check import (
+     mintypecode as mintypecode,
+     asfarray as asfarray,
+     real as real,
+     imag as imag,
+     iscomplex as iscomplex,
+     isreal as isreal,
+     iscomplexobj as iscomplexobj,
+     isrealobj as isrealobj,
+     nan_to_num as nan_to_num,
+     real_if_close as real_if_close,
+     typename as typename,
+     common_type as common_type,
+ )
+
+ from numpy.lib.ufunclike import (
+     fix as fix,
+     isposinf as isposinf,
+     isneginf as isneginf,
+ )
+
+ from numpy.lib.utils import (
+     issubclass_ as issubclass_,
+     issubsctype as issubsctype,
+     issubdtype as issubdtype,
+     deprecate as deprecate,
+     deprecate_with_doc as deprecate_with_doc,
+     get_include as get_include,
+     info as info,
+     source as source,
+     who as who,
+     lookfor as lookfor,
+     byte_bounds as byte_bounds,
+     safe_eval as safe_eval,
+     show_runtime as show_runtime,
+ )
+
+ from numpy.core.multiarray import (
+     tracemalloc_domain as tracemalloc_domain,
+ )
+
+ __all__: list[str]
+ __path__: list[str]
+ test: PytestTester
+
+ __version__ = version
+ emath = scimath
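
Every import in this stub uses the `name as name` form. That is the standard typed-stub re-export idiom: type checkers treat a bare import in a `.pyi` file as private to the stub, while the redundant alias marks the symbol as part of the module's public interface. A tiny sketch with hypothetical names:

    # pkg/__init__.pyi -- sketch of the stub re-export idiom (hypothetical pkg)
    from pkg.core import (
        helper as helper,   # re-exported: `from pkg import helper` type-checks
    )
    from pkg import _internal  # bare import: treated as private to the stub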
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.3 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/_datasource.cpython-310.pyc ADDED
Binary file (20.4 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/_iotools.cpython-310.pyc ADDED
Binary file (25.9 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/_version.cpython-310.pyc ADDED
Binary file (4.84 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/arraypad.cpython-310.pyc ADDED
Binary file (22.4 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/arraysetops.cpython-310.pyc ADDED
Binary file (28 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/arrayterator.cpython-310.pyc ADDED
Binary file (7.03 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/format.cpython-310.pyc ADDED
Binary file (27 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/function_base.cpython-310.pyc ADDED
Binary file (165 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/histograms.cpython-310.pyc ADDED
Binary file (30.7 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/index_tricks.cpython-310.pyc ADDED
Binary file (29.2 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/mixins.cpython-310.pyc ADDED
Binary file (7.04 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/nanfunctions.cpython-310.pyc ADDED
Binary file (59 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/npyio.cpython-310.pyc ADDED
Binary file (74.7 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/polynomial.cpython-310.pyc ADDED
Binary file (41.5 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/recfunctions.cpython-310.pyc ADDED
Binary file (48.5 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/scimath.cpython-310.pyc ADDED
Binary file (15.7 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/setup.cpython-310.pyc ADDED
Binary file (627 Bytes)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/shape_base.cpython-310.pyc ADDED
Binary file (35.7 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/stride_tricks.cpython-310.pyc ADDED
Binary file (16.9 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/twodim_base.cpython-310.pyc ADDED
Binary file (33.1 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/type_check.cpython-310.pyc ADDED
Binary file (19.6 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/ufunclike.cpython-310.pyc ADDED
Binary file (6.27 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/user_array.cpython-310.pyc ADDED
Binary file (10.5 kB)
venv/lib/python3.10/site-packages/numpy/lib/__pycache__/utils.cpython-310.pyc ADDED
Binary file (29.2 kB)
venv/lib/python3.10/site-packages/numpy/lib/_datasource.py ADDED
@@ -0,0 +1,704 @@
+ """A file interface for handling local and remote data files.
+
+ The goal of datasource is to abstract some of the file system operations
+ when dealing with data files so the researcher doesn't have to know all the
+ low-level details. Through datasource, a researcher can obtain and use a
+ file with one function call, regardless of location of the file.
+
+ DataSource is meant to augment standard python libraries, not replace them.
+ It should work seamlessly with standard file IO operations and the os
+ module.
+
+ DataSource files can originate locally or remotely:
+
+ - local files : '/home/guido/src/local/data.txt'
+ - URLs (http, ftp, ...) : 'http://www.scipy.org/not/real/data.txt'
+
+ DataSource files can also be compressed or uncompressed. Currently only
+ gzip, bz2 and xz are supported.
+
+ Example::
+
+     >>> # Create a DataSource, use os.curdir (default) for local storage.
+     >>> from numpy import DataSource
+     >>> ds = DataSource()
+     >>>
+     >>> # Open a remote file.
+     >>> # DataSource downloads the file, stores it locally in:
+     >>> #     './www.google.com/index.html'
+     >>> # opens the file and returns a file object.
+     >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP
+     >>>
+     >>> # Use the file as you normally would
+     >>> fp.read() # doctest: +SKIP
+     >>> fp.close() # doctest: +SKIP
+
+ """
+ import os
+ import io
+
+ from .._utils import set_module
+
+
+ _open = open
+
+
+ def _check_mode(mode, encoding, newline):
+     """Check mode and that encoding and newline are compatible.
+
+     Parameters
+     ----------
+     mode : str
+         File open mode.
+     encoding : str
+         File encoding.
+     newline : str
+         Newline for text files.
+
+     """
+     if "t" in mode:
+         if "b" in mode:
+             raise ValueError("Invalid mode: %r" % (mode,))
+     else:
+         if encoding is not None:
+             raise ValueError("Argument 'encoding' not supported in binary mode")
+         if newline is not None:
+             raise ValueError("Argument 'newline' not supported in binary mode")
+
+
+ # Using a class instead of a module-level dictionary
+ # to reduce the initial 'import numpy' overhead by
+ # deferring the import of lzma, bz2 and gzip until needed
+
+ # TODO: .zip support, .tar support?
+ class _FileOpeners:
+     """
+     Container for different methods to open (un-)compressed files.
+
+     `_FileOpeners` contains a dictionary that holds one method for each
+     supported file format. Attribute lookup is implemented in such a way
+     that an instance of `_FileOpeners` itself can be indexed with the keys
+     of that dictionary. Currently uncompressed files as well as files
+     compressed with ``gzip``, ``bz2`` or ``xz`` compression are supported.
+
+     Notes
+     -----
+     `_file_openers`, an instance of `_FileOpeners`, is made available for
+     use in the `_datasource` module.
+
+     Examples
+     --------
+     >>> import gzip
+     >>> np.lib._datasource._file_openers.keys()
+     [None, '.bz2', '.gz', '.xz', '.lzma']
+     >>> np.lib._datasource._file_openers['.gz'] is gzip.open
+     True
+
+     """
+
+     def __init__(self):
+         self._loaded = False
+         self._file_openers = {None: io.open}
+
+     def _load(self):
+         if self._loaded:
+             return
+
+         try:
+             import bz2
+             self._file_openers[".bz2"] = bz2.open
+         except ImportError:
+             pass
+
+         try:
+             import gzip
+             self._file_openers[".gz"] = gzip.open
+         except ImportError:
+             pass
+
+         try:
+             import lzma
+             self._file_openers[".xz"] = lzma.open
+             self._file_openers[".lzma"] = lzma.open
+         except (ImportError, AttributeError):
+             # There are incompatible backports of lzma that do not have the
+             # lzma.open attribute, so catch that as well as ImportError.
+             pass
+
+         self._loaded = True
+
+     def keys(self):
+         """
+         Return the keys of currently supported file openers.
+
+         Parameters
+         ----------
+         None
+
+         Returns
+         -------
+         keys : list
+             The keys are None for uncompressed files and the file extension
+             strings (i.e. ``'.gz'``, ``'.xz'``) for supported compression
+             methods.
+
+         """
+         self._load()
+         return list(self._file_openers.keys())
+
+     def __getitem__(self, key):
+         self._load()
+         return self._file_openers[key]
+
+ _file_openers = _FileOpeners()
+
+ def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
+     """
+     Open `path` with `mode` and return the file object.
+
+     If ``path`` is an URL, it will be downloaded, stored in the
+     `DataSource` `destpath` directory and opened from there.
+
+     Parameters
+     ----------
+     path : str
+         Local file path or URL to open.
+     mode : str, optional
+         Mode to open `path`. Mode 'r' for reading, 'w' for writing, 'a' to
+         append. Available modes depend on the type of object specified by
+         path. Default is 'r'.
+     destpath : str, optional
+         Path to the directory where the source file gets downloaded to for
+         use. If `destpath` is None, a temporary directory will be created.
+         The default path is the current directory.
+     encoding : {None, str}, optional
+         Open text file with given encoding. The default encoding will be
+         what `io.open` uses.
+     newline : {None, str}, optional
+         Newline to use when reading text file.
+
+     Returns
+     -------
+     out : file object
+         The opened file.
+
+     Notes
+     -----
+     This is a convenience function that instantiates a `DataSource` and
+     returns the file object from ``DataSource.open(path)``.
+
+     """
+
+     ds = DataSource(destpath)
+     return ds.open(path, mode, encoding=encoding, newline=newline)
+
+
+ @set_module('numpy')
+ class DataSource:
+     """
+     DataSource(destpath='.')
+
+     A generic data source file (file, http, ftp, ...).
+
+     DataSources can be local files or remote files/URLs. The files may
+     also be compressed or uncompressed. DataSource hides some of the
+     low-level details of downloading the file, allowing you to simply pass
+     in a valid file path (or URL) and obtain a file object.
+
+     Parameters
+     ----------
+     destpath : str or None, optional
+         Path to the directory where the source file gets downloaded to for
+         use. If `destpath` is None, a temporary directory will be created.
+         The default path is the current directory.
+
+     Notes
+     -----
+     URLs require a scheme string (``http://``) to be used, without it they
+     will fail::
+
+         >>> repos = np.DataSource()
+         >>> repos.exists('www.google.com/index.html')
+         False
+         >>> repos.exists('http://www.google.com/index.html')
+         True
+
+     Temporary directories are deleted when the DataSource is deleted.
+
+     Examples
+     --------
+     ::
+
+         >>> ds = np.DataSource('/home/guido')
+         >>> urlname = 'http://www.google.com/'
+         >>> gfile = ds.open('http://www.google.com/')
+         >>> ds.abspath(urlname)
+         '/home/guido/www.google.com/index.html'
+
+         >>> ds = np.DataSource(None)  # use with temporary file
+         >>> ds.open('/home/guido/foobar.txt')
+         <open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
+         >>> ds.abspath('/home/guido/foobar.txt')
+         '/tmp/.../home/guido/foobar.txt'
+
+     """
+
+     def __init__(self, destpath=os.curdir):
+         """Create a DataSource with a local path at destpath."""
+         if destpath:
+             self._destpath = os.path.abspath(destpath)
+             self._istmpdest = False
+         else:
+             import tempfile  # deferring import to improve startup time
+             self._destpath = tempfile.mkdtemp()
+             self._istmpdest = True
+
+     def __del__(self):
+         # Remove temp directories
+         if hasattr(self, '_istmpdest') and self._istmpdest:
+             import shutil
+
+             shutil.rmtree(self._destpath)
+
+     def _iszip(self, filename):
+         """Test if the filename is a zip file by looking at the file extension.
+
+         """
+         fname, ext = os.path.splitext(filename)
+         return ext in _file_openers.keys()
+
+     def _iswritemode(self, mode):
+         """Test if the given mode will open a file for writing."""
+
+         # Currently only used to test the bz2 files.
+         _writemodes = ("w", "+")
+         for c in mode:
+             if c in _writemodes:
+                 return True
+         return False
+
+     def _splitzipext(self, filename):
+         """Split zip extension from filename and return filename.
+
+         Returns
+         -------
+         base, zip_ext : {tuple}
+
+         """
+
+         if self._iszip(filename):
+             return os.path.splitext(filename)
+         else:
+             return filename, None
+
+     def _possible_names(self, filename):
+         """Return a tuple containing compressed filename variations."""
+         names = [filename]
+         if not self._iszip(filename):
+             for zipext in _file_openers.keys():
+                 if zipext:
+                     names.append(filename+zipext)
+         return names
+
+     def _isurl(self, path):
+         """Test if path is a net location. Tests the scheme and netloc."""
+
+         # We do this here to reduce the 'import numpy' initial import time.
+         from urllib.parse import urlparse
+
+         # BUG : URLs require a scheme string ('http://') to be used.
+         #       www.google.com will fail.
+         #       Should we prepend the scheme for those that don't have it and
+         #       test that also? Similar to the way we append .gz and test for
+         #       for compressed versions of files.
+
+         scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
+         return bool(scheme and netloc)
+
+     def _cache(self, path):
+         """Cache the file specified by path.
+
+         Creates a copy of the file in the datasource cache.
+
+         """
+         # We import these here because importing them is slow and
+         # a significant fraction of numpy's total import time.
+         import shutil
+         from urllib.request import urlopen
+
+         upath = self.abspath(path)
+
+         # ensure directory exists
+         if not os.path.exists(os.path.dirname(upath)):
+             os.makedirs(os.path.dirname(upath))
+
+         # TODO: Doesn't handle compressed files!
+         if self._isurl(path):
+             with urlopen(path) as openedurl:
+                 with _open(upath, 'wb') as f:
+                     shutil.copyfileobj(openedurl, f)
+         else:
+             shutil.copyfile(path, upath)
+         return upath
+
+     def _findfile(self, path):
+         """Searches for ``path`` and returns full path if found.
+
+         If path is an URL, _findfile will cache a local copy and return the
+         path to the cached file. If path is a local file, _findfile will
+         return a path to that local file.
+
+         The search will include possible compressed versions of the file
+         and return the first occurrence found.
+
+         """
+
+         # Build list of possible local file paths
+         if not self._isurl(path):
+             # Valid local paths
+             filelist = self._possible_names(path)
+             # Paths in self._destpath
+             filelist += self._possible_names(self.abspath(path))
+         else:
+             # Cached URLs in self._destpath
+             filelist = self._possible_names(self.abspath(path))
+             # Remote URLs
+             filelist = filelist + self._possible_names(path)
+
+         for name in filelist:
+             if self.exists(name):
+                 if self._isurl(name):
+                     name = self._cache(name)
+                 return name
+         return None
+
+     def abspath(self, path):
+         """
+         Return absolute path of file in the DataSource directory.
+
+         If `path` is an URL, then `abspath` will return either the location
+         the file exists locally or the location it would exist when opened
+         using the `open` method.
+
+         Parameters
+         ----------
+         path : str
+             Can be a local file or a remote URL.
+
+         Returns
+         -------
+         out : str
+             Complete path, including the `DataSource` destination directory.
+
+         Notes
+         -----
+         The functionality is based on `os.path.abspath`.
+
+         """
+         # We do this here to reduce the 'import numpy' initial import time.
+         from urllib.parse import urlparse
+
+         # TODO: This should be more robust. Handles case where path includes
+         #       the destpath, but not other sub-paths. Failing case:
+         #       path = /home/guido/datafile.txt
+         #       destpath = /home/alex/
+         #       upath = self.abspath(path)
+         #       upath == '/home/alex/home/guido/datafile.txt'
+
+         # handle case where path includes self._destpath
+         splitpath = path.split(self._destpath, 2)
+         if len(splitpath) > 1:
+             path = splitpath[1]
+         scheme, netloc, upath, uparams, uquery, ufrag = urlparse(path)
+         netloc = self._sanitize_relative_path(netloc)
+         upath = self._sanitize_relative_path(upath)
+         return os.path.join(self._destpath, netloc, upath)
+
+     def _sanitize_relative_path(self, path):
+         """Return a sanitised relative path for which
+         os.path.abspath(os.path.join(base, path)).startswith(base)
+         """
+         last = None
+         path = os.path.normpath(path)
+         while path != last:
+             last = path
+             # Note: os.path.join treats '/' as os.sep on Windows
+             path = path.lstrip(os.sep).lstrip('/')
+             path = path.lstrip(os.pardir).lstrip('..')
+             drive, path = os.path.splitdrive(path)  # for Windows
+         return path
+
+     def exists(self, path):
+         """
+         Test if path exists.
+
+         Test if `path` exists as (and in this order):
+
+         - a local file.
+         - a remote URL that has been downloaded and stored locally in the
+           `DataSource` directory.
+         - a remote URL that has not been downloaded, but is valid and
+           accessible.
+
+         Parameters
+         ----------
+         path : str
+             Can be a local file or a remote URL.
+
+         Returns
+         -------
+         out : bool
+             True if `path` exists.
+
+         Notes
+         -----
+         When `path` is an URL, `exists` will return True if it's either
+         stored locally in the `DataSource` directory, or is a valid remote
+         URL. `DataSource` does not discriminate between the two, the file
+         is accessible if it exists in either location.
+
+         """
+
+         # First test for local path
+         if os.path.exists(path):
+             return True
+
+         # We import this here because importing urllib is slow and
+         # a significant fraction of numpy's total import time.
+         from urllib.request import urlopen
+         from urllib.error import URLError
+
+         # Test cached url
+         upath = self.abspath(path)
+         if os.path.exists(upath):
+             return True
+
+         # Test remote url
+         if self._isurl(path):
+             try:
+                 netfile = urlopen(path)
+                 netfile.close()
+                 del(netfile)
+                 return True
+             except URLError:
+                 return False
+         return False
+
+     def open(self, path, mode='r', encoding=None, newline=None):
+         """
+         Open and return file-like object.
+
+         If `path` is an URL, it will be downloaded, stored in the
+         `DataSource` directory and opened from there.
+
+         Parameters
+         ----------
+         path : str
+             Local file path or URL to open.
+         mode : {'r', 'w', 'a'}, optional
+             Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+             'a' to append. Available modes depend on the type of object
+             specified by `path`. Default is 'r'.
+         encoding : {None, str}, optional
+             Open text file with given encoding. The default encoding will be
+             what `io.open` uses.
+         newline : {None, str}, optional
+             Newline to use when reading text file.
+
+         Returns
+         -------
+         out : file object
+             File object.
+
+         """
+
+         # TODO: There is no support for opening a file for writing which
+         #       doesn't exist yet (creating a file). Should there be?
+
+         # TODO: Add a ``subdir`` parameter for specifying the subdirectory
+         #       used to store URLs in self._destpath.
+
+         if self._isurl(path) and self._iswritemode(mode):
+             raise ValueError("URLs are not writeable")
+
+         # NOTE: _findfile will fail on a new file opened for writing.
+         found = self._findfile(path)
+         if found:
+             _fname, ext = self._splitzipext(found)
+             if ext == 'bz2':
+                 mode.replace("+", "")
+             return _file_openers[ext](found, mode=mode,
+                                       encoding=encoding, newline=newline)
+         else:
+             raise FileNotFoundError(f"{path} not found.")
+
+
+ class Repository (DataSource):
+     """
+     Repository(baseurl, destpath='.')
+
+     A data repository where multiple DataSource's share a base
+     URL/directory.
+
+     `Repository` extends `DataSource` by prepending a base URL (or
+     directory) to all the files it handles. Use `Repository` when you will
+     be working with multiple files from one base URL. Initialize
+     `Repository` with the base URL, then refer to each file by its filename
+     only.
+
+     Parameters
+     ----------
+     baseurl : str
+         Path to the local directory or remote location that contains the
+         data files.
+     destpath : str or None, optional
+         Path to the directory where the source file gets downloaded to for
+         use. If `destpath` is None, a temporary directory will be created.
+         The default path is the current directory.
+
+     Examples
+     --------
+     To analyze all files in the repository, do something like this
+     (note: this is not self-contained code)::
+
+         >>> repos = np.lib._datasource.Repository('/home/user/data/dir/')
+         >>> for filename in filelist:
+         ...     fp = repos.open(filename)
+         ...     fp.analyze()
+         ...     fp.close()
+
+     Similarly you could use a URL for a repository::
+
+         >>> repos = np.lib._datasource.Repository('http://www.xyz.edu/data')
+
+     """
+
+     def __init__(self, baseurl, destpath=os.curdir):
+         """Create a Repository with a shared url or directory of baseurl."""
+         DataSource.__init__(self, destpath=destpath)
+         self._baseurl = baseurl
+
+     def __del__(self):
+         DataSource.__del__(self)
+
+     def _fullpath(self, path):
+         """Return complete path for path. Prepends baseurl if necessary."""
+         splitpath = path.split(self._baseurl, 2)
+         if len(splitpath) == 1:
+             result = os.path.join(self._baseurl, path)
+         else:
+             result = path  # path contains baseurl already
+         return result
+
+     def _findfile(self, path):
+         """Extend DataSource method to prepend baseurl to ``path``."""
+         return DataSource._findfile(self, self._fullpath(path))
+
+     def abspath(self, path):
+         """
+         Return absolute path of file in the Repository directory.
+
+         If `path` is an URL, then `abspath` will return either the location
+         the file exists locally or the location it would exist when opened
+         using the `open` method.
+
+         Parameters
+         ----------
+         path : str
+             Can be a local file or a remote URL. This may, but does not
+             have to, include the `baseurl` with which the `Repository` was
+             initialized.
+
+         Returns
+         -------
+         out : str
+             Complete path, including the `DataSource` destination directory.
+
+         """
+         return DataSource.abspath(self, self._fullpath(path))
+
+     def exists(self, path):
+         """
+         Test if path exists prepending Repository base URL to path.
+
+         Test if `path` exists as (and in this order):
+
+         - a local file.
+         - a remote URL that has been downloaded and stored locally in the
+           `DataSource` directory.
+         - a remote URL that has not been downloaded, but is valid and
+           accessible.
+
+         Parameters
+         ----------
+         path : str
+             Can be a local file or a remote URL. This may, but does not
+             have to, include the `baseurl` with which the `Repository` was
+             initialized.
+
+         Returns
+         -------
+         out : bool
+             True if `path` exists.
+
+         Notes
+         -----
+         When `path` is an URL, `exists` will return True if it's either
+         stored locally in the `DataSource` directory, or is a valid remote
+         URL. `DataSource` does not discriminate between the two, the file
+         is accessible if it exists in either location.
+
+         """
+         return DataSource.exists(self, self._fullpath(path))
+
+     def open(self, path, mode='r', encoding=None, newline=None):
+         """
+         Open and return file-like object prepending Repository base URL.
+
+         If `path` is an URL, it will be downloaded, stored in the
+         DataSource directory and opened from there.
+
+         Parameters
+         ----------
+         path : str
+             Local file path or URL to open. This may, but does not have to,
+             include the `baseurl` with which the `Repository` was
+             initialized.
+         mode : {'r', 'w', 'a'}, optional
+             Mode to open `path`. Mode 'r' for reading, 'w' for writing,
+             'a' to append. Available modes depend on the type of object
+             specified by `path`. Default is 'r'.
+         encoding : {None, str}, optional
+             Open text file with given encoding. The default encoding will be
+             what `io.open` uses.
+         newline : {None, str}, optional
+             Newline to use when reading text file.
+
+         Returns
+         -------
+         out : file object
+             File object.
+
+         """
+         return DataSource.open(self, self._fullpath(path), mode,
+                                encoding=encoding, newline=newline)
+
+     def listdir(self):
+         """
+         List files in the source Repository.
+
+         Returns
+         -------
+         files : list of str
+             List of file names (not containing a directory part).
+
+         Notes
+         -----
+         Does not currently work for remote repositories.
+
+         """
+         if self._isurl(self._baseurl):
+             raise NotImplementedError(
+                 "Directory listing of URLs, not supported yet.")
+         else:
+             return os.listdir(self._baseurl)
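
`_FileOpeners` exists to keep `import numpy` fast: the `bz2`, `gzip` and `lzma` imports are deferred until the first key lookup. A minimal sketch of the same lazy-registry pattern, with hypothetical names:

    # Minimal sketch of the deferred-import registry used by _FileOpeners;
    # LazyOpeners and `openers` are illustrative names, not numpy API.
    import io

    class LazyOpeners:
        def __init__(self):
            self._loaded = False
            self._openers = {None: io.open}

        def _load(self):
            if self._loaded:
                return
            try:
                import gzip  # deferred until the registry is first used
                self._openers['.gz'] = gzip.open
            except ImportError:
                pass
            self._loaded = True

        def __getitem__(self, ext):
            self._load()
            return self._openers[ext]

    openers = LazyOpeners()
    # with openers['.gz']('data.txt.gz', 'rt') as fp:  # hypothetical file
    #     print(fp.read())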
venv/lib/python3.10/site-packages/numpy/lib/_iotools.py ADDED
@@ -0,0 +1,897 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """A collection of functions designed to help I/O with ascii files.
2
+
3
+ """
4
+ __docformat__ = "restructuredtext en"
5
+
6
+ import numpy as np
7
+ import numpy.core.numeric as nx
8
+ from numpy.compat import asbytes, asunicode
9
+
10
+
11
+ def _decode_line(line, encoding=None):
12
+ """Decode bytes from binary input streams.
13
+
14
+ Defaults to decoding from 'latin1'. That differs from the behavior of
15
+ np.compat.asunicode that decodes from 'ascii'.
16
+
17
+ Parameters
18
+ ----------
19
+ line : str or bytes
20
+ Line to be decoded.
21
+ encoding : str
22
+ Encoding used to decode `line`.
23
+
24
+ Returns
25
+ -------
26
+ decoded_line : str
27
+
28
+ """
29
+ if type(line) is bytes:
30
+ if encoding is None:
31
+ encoding = "latin1"
32
+ line = line.decode(encoding)
33
+
34
+ return line
35
+
36
+
37
+ def _is_string_like(obj):
38
+ """
39
+ Check whether obj behaves like a string.
40
+ """
41
+ try:
42
+ obj + ''
43
+ except (TypeError, ValueError):
44
+ return False
45
+ return True
46
+
47
+
48
+ def _is_bytes_like(obj):
49
+ """
50
+ Check whether obj behaves like a bytes object.
51
+ """
52
+ try:
53
+ obj + b''
54
+ except (TypeError, ValueError):
55
+ return False
56
+ return True
57
+
58
+
59
+ def has_nested_fields(ndtype):
60
+ """
61
+ Returns whether one or several fields of a dtype are nested.
62
+
63
+ Parameters
64
+ ----------
65
+ ndtype : dtype
66
+ Data-type of a structured array.
67
+
68
+ Raises
69
+ ------
70
+ AttributeError
71
+ If `ndtype` does not have a `names` attribute.
72
+
73
+ Examples
74
+ --------
75
+ >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float)])
76
+ >>> np.lib._iotools.has_nested_fields(dt)
77
+ False
78
+
79
+ """
80
+ for name in ndtype.names or ():
81
+ if ndtype[name].names is not None:
82
+ return True
83
+ return False
84
+
85
+
86
+ def flatten_dtype(ndtype, flatten_base=False):
87
+ """
88
+ Unpack a structured data-type by collapsing nested fields and/or fields
89
+ with a shape.
90
+
91
+ Note that the field names are lost.
92
+
93
+ Parameters
94
+ ----------
95
+ ndtype : dtype
96
+ The datatype to collapse
97
+ flatten_base : bool, optional
98
+ If True, transform a field with a shape into several fields. Default is
99
+ False.
100
+
101
+ Examples
102
+ --------
103
+ >>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
104
+ ... ('block', int, (2, 3))])
105
+ >>> np.lib._iotools.flatten_dtype(dt)
106
+ [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')]
107
+ >>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
108
+ [dtype('S4'),
109
+ dtype('float64'),
110
+ dtype('float64'),
111
+ dtype('int64'),
112
+ dtype('int64'),
113
+ dtype('int64'),
114
+ dtype('int64'),
115
+ dtype('int64'),
116
+ dtype('int64')]
117
+
118
+ """
119
+ names = ndtype.names
120
+ if names is None:
121
+ if flatten_base:
122
+ return [ndtype.base] * int(np.prod(ndtype.shape))
123
+ return [ndtype.base]
124
+ else:
125
+ types = []
126
+ for field in names:
127
+ info = ndtype.fields[field]
128
+ flat_dt = flatten_dtype(info[0], flatten_base)
129
+ types.extend(flat_dt)
130
+ return types
131
+
132
+
133
+ class LineSplitter:
134
+ """
135
+ Object to split a string at a given delimiter or at given places.
136
+
137
+ Parameters
138
+ ----------
139
+ delimiter : str, int, or sequence of ints, optional
140
+ If a string, character used to delimit consecutive fields.
141
+ If an integer or a sequence of integers, width(s) of each field.
142
+ comments : str, optional
143
+ Character used to mark the beginning of a comment. Default is '#'.
144
+ autostrip : bool, optional
145
+ Whether to strip each individual field. Default is True.
146
+
147
+ """
148
+
149
+ def autostrip(self, method):
150
+ """
151
+ Wrapper to strip each member of the output of `method`.
152
+
153
+ Parameters
154
+ ----------
155
+ method : function
156
+ Function that takes a single argument and returns a sequence of
157
+ strings.
158
+
159
+ Returns
160
+ -------
161
+ wrapped : function
162
+ The result of wrapping `method`. `wrapped` takes a single input
163
+ argument and returns a list of strings that are stripped of
164
+ white-space.
165
+
166
+ """
167
+ return lambda input: [_.strip() for _ in method(input)]
168
+
169
+ def __init__(self, delimiter=None, comments='#', autostrip=True,
170
+ encoding=None):
171
+ delimiter = _decode_line(delimiter)
172
+ comments = _decode_line(comments)
173
+
174
+ self.comments = comments
175
+
176
+ # Delimiter is a character
177
+ if (delimiter is None) or isinstance(delimiter, str):
178
+ delimiter = delimiter or None
179
+ _handyman = self._delimited_splitter
180
+ # Delimiter is a list of field widths
181
+ elif hasattr(delimiter, '__iter__'):
182
+ _handyman = self._variablewidth_splitter
183
+ idx = np.cumsum([0] + list(delimiter))
184
+ delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
185
+ # Delimiter is a single integer
186
+ elif int(delimiter):
187
+ (_handyman, delimiter) = (
188
+ self._fixedwidth_splitter, int(delimiter))
189
+ else:
190
+ (_handyman, delimiter) = (self._delimited_splitter, None)
191
+ self.delimiter = delimiter
192
+ if autostrip:
193
+ self._handyman = self.autostrip(_handyman)
194
+ else:
195
+ self._handyman = _handyman
196
+ self.encoding = encoding
197
+
198
+ def _delimited_splitter(self, line):
199
+ """Chop off comments, strip, and split at delimiter. """
200
+ if self.comments is not None:
201
+ line = line.split(self.comments)[0]
202
+ line = line.strip(" \r\n")
203
+ if not line:
204
+ return []
205
+ return line.split(self.delimiter)
206
+
207
+ def _fixedwidth_splitter(self, line):
208
+ if self.comments is not None:
209
+ line = line.split(self.comments)[0]
210
+ line = line.strip("\r\n")
211
+ if not line:
212
+ return []
213
+ fixed = self.delimiter
214
+ slices = [slice(i, i + fixed) for i in range(0, len(line), fixed)]
215
+ return [line[s] for s in slices]
216
+
217
+ def _variablewidth_splitter(self, line):
218
+ if self.comments is not None:
219
+ line = line.split(self.comments)[0]
220
+ if not line:
221
+ return []
222
+ slices = self.delimiter
223
+ return [line[s] for s in slices]
224
+
225
+ def __call__(self, line):
226
+ return self._handyman(_decode_line(line, self.encoding))
227
+
228
+
229
+ class NameValidator:
230
+ """
231
+ Object to validate a list of strings to use as field names.
232
+
233
+ The strings are stripped of any non alphanumeric character, and spaces
234
+ are replaced by '_'. During instantiation, the user can define a list
235
+ of names to exclude, as well as a list of invalid characters. Names in
236
+ the exclusion list are appended a '_' character.
237
+
238
+ Once an instance has been created, it can be called with a list of
239
+ names, and a list of valid names will be created. The `__call__`
240
+ method accepts an optional keyword "default" that sets the default name
241
+ in case of ambiguity. By default this is 'f', so that names will
242
+ default to `f0`, `f1`, etc.
243
+
244
+ Parameters
245
+ ----------
246
+ excludelist : sequence, optional
247
+ A list of names to exclude. This list is appended to the default
248
+ list ['return', 'file', 'print']. Excluded names are appended an
249
+ underscore: for example, `file` becomes `file_` if supplied.
250
+ deletechars : str, optional
251
+ A string combining invalid characters that must be deleted from the
252
+ names.
253
+ case_sensitive : {True, False, 'upper', 'lower'}, optional
254
+ * If True, field names are case-sensitive.
255
+ * If False or 'upper', field names are converted to upper case.
256
+ * If 'lower', field names are converted to lower case.
257
+
258
+ The default value is True.
259
+ replace_space : '_', optional
260
+ Character(s) used in replacement of white spaces.
261
+
262
+ Notes
263
+ -----
264
+ Calling an instance of `NameValidator` is the same as calling its
265
+ method `validate`.
266
+
267
+ Examples
268
+ --------
269
+ >>> validator = np.lib._iotools.NameValidator()
270
+ >>> validator(['file', 'field2', 'with space', 'CaSe'])
271
+ ('file_', 'field2', 'with_space', 'CaSe')
272
+
273
+ >>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
274
+ ... deletechars='q',
275
+ ... case_sensitive=False)
276
+ >>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
277
+ ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')
278
+
279
+ """
280
+
281
+ defaultexcludelist = ['return', 'file', 'print']
282
+ defaultdeletechars = set(r"""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
283
+
284
+ def __init__(self, excludelist=None, deletechars=None,
285
+ case_sensitive=None, replace_space='_'):
286
+ # Process the exclusion list ..
287
+ if excludelist is None:
288
+ excludelist = []
289
+ excludelist.extend(self.defaultexcludelist)
290
+ self.excludelist = excludelist
291
+ # Process the list of characters to delete
292
+ if deletechars is None:
293
+ delete = self.defaultdeletechars
294
+ else:
295
+ delete = set(deletechars)
296
+ delete.add('"')
297
+ self.deletechars = delete
298
+ # Process the case option .....
299
+ if (case_sensitive is None) or (case_sensitive is True):
300
+ self.case_converter = lambda x: x
301
+ elif (case_sensitive is False) or case_sensitive.startswith('u'):
302
+ self.case_converter = lambda x: x.upper()
303
+ elif case_sensitive.startswith('l'):
304
+ self.case_converter = lambda x: x.lower()
305
+ else:
306
+ msg = 'unrecognized case_sensitive value %s.' % case_sensitive
307
+ raise ValueError(msg)
308
+
309
+ self.replace_space = replace_space
310
+
311
+ def validate(self, names, defaultfmt="f%i", nbfields=None):
312
+ """
313
+ Validate a list of strings as field names for a structured array.
314
+
315
+ Parameters
316
+ ----------
317
+ names : sequence of str
318
+ Strings to be validated.
319
+ defaultfmt : str, optional
320
+ Default format string, used if validating a given string
321
+ reduces its length to zero.
322
+ nbfields : integer, optional
323
+ Final number of validated names, used to expand or shrink the
324
+ initial list of names.
325
+
326
+ Returns
327
+ -------
328
+ validatednames : list of str
329
+ The list of validated field names.
330
+
331
+ Notes
332
+ -----
333
+ A `NameValidator` instance can be called directly, which is the
334
+ same as calling `validate`. For examples, see `NameValidator`.
335
+
336
+ """
337
+ # Initial checks ..............
338
+ if (names is None):
339
+ if (nbfields is None):
340
+ return None
341
+ names = []
342
+ if isinstance(names, str):
343
+ names = [names, ]
344
+ if nbfields is not None:
345
+ nbnames = len(names)
346
+ if (nbnames < nbfields):
347
+ names = list(names) + [''] * (nbfields - nbnames)
348
+ elif (nbnames > nbfields):
349
+ names = names[:nbfields]
350
+ # Set some shortcuts ...........
351
+ deletechars = self.deletechars
352
+ excludelist = self.excludelist
353
+ case_converter = self.case_converter
354
+ replace_space = self.replace_space
355
+ # Initializes some variables ...
356
+ validatednames = []
357
+ seen = dict()
358
+ nbempty = 0
359
+
360
+ for item in names:
361
+ item = case_converter(item).strip()
362
+ if replace_space:
363
+ item = item.replace(' ', replace_space)
364
+ item = ''.join([c for c in item if c not in deletechars])
365
+ if item == '':
366
+ item = defaultfmt % nbempty
367
+ while item in names:
368
+ nbempty += 1
369
+ item = defaultfmt % nbempty
370
+ nbempty += 1
371
+ elif item in excludelist:
372
+ item += '_'
373
+ cnt = seen.get(item, 0)
374
+ if cnt > 0:
375
+ validatednames.append(item + '_%d' % cnt)
376
+ else:
377
+ validatednames.append(item)
378
+ seen[item] = cnt + 1
379
+ return tuple(validatednames)
380
+
381
+ def __call__(self, names, defaultfmt="f%i", nbfields=None):
382
+ return self.validate(names, defaultfmt=defaultfmt, nbfields=nbfields)
383
+
384
+
385
+ def str2bool(value):
386
+ """
387
+ Tries to transform a string supposed to represent a boolean to a boolean.
388
+
389
+ Parameters
390
+ ----------
391
+ value : str
392
+ The string that is transformed to a boolean.
393
+
394
+ Returns
395
+ -------
396
+ boolval : bool
397
+ The boolean representation of `value`.
398
+
399
+ Raises
400
+ ------
401
+ ValueError
402
+ If the string is not 'True' or 'False' (case independent)
403
+
404
+ Examples
405
+ --------
406
+ >>> np.lib._iotools.str2bool('TRUE')
407
+ True
408
+ >>> np.lib._iotools.str2bool('false')
409
+ False
410
+
411
+ """
412
+ value = value.upper()
413
+ if value == 'TRUE':
414
+ return True
415
+ elif value == 'FALSE':
416
+ return False
417
+ else:
418
+ raise ValueError("Invalid boolean")
419
+
420
+
421
+ class ConverterError(Exception):
422
+ """
423
+ Exception raised when an error occurs in a converter for string values.
424
+
425
+ """
426
+ pass
427
+
428
+
429
+ class ConverterLockError(ConverterError):
430
+ """
431
+ Exception raised when an attempt is made to upgrade a locked converter.
432
+
433
+ """
434
+ pass
435
+
436
+
437
+ class ConversionWarning(UserWarning):
438
+ """
439
+ Warning issued when a string converter has a problem.
440
+
441
+ Notes
442
+ -----
443
+ In `genfromtxt` a `ConversionWarning` is issued if raising exceptions
444
+ is explicitly suppressed with the "invalid_raise" keyword.
445
+
446
+ """
447
+ pass
448
+
449
+
450
+ class StringConverter:
451
+ """
452
+ Factory class for functions transforming a string into another object
453
+ (int, float).
454
+
455
+ After initialization, an instance can be called to transform a string
456
+ into another object. If the string is recognized as representing a
457
+ missing value, a default value is returned.
458
+
459
+ Attributes
460
+ ----------
461
+ func : function
462
+ Function used for the conversion.
463
+ default : any
464
+ Default value to return when the input corresponds to a missing
465
+ value.
466
+ type : type
467
+ Type of the output.
468
+ _status : int
469
+ Integer representing the order of the conversion.
470
+ _mapper : sequence of tuples
471
+ Sequence of tuples (dtype, function, default value) to evaluate in
472
+ order.
473
+ _locked : bool
474
+ Holds `locked` parameter.
475
+
476
+ Parameters
477
+ ----------
478
+ dtype_or_func : {None, dtype, function}, optional
479
+ If a `dtype`, specifies the input data type, used to define a basic
480
+ function and a default value for missing data. For example, when
481
+ `dtype` is float, the `func` attribute is set to `float` and the
482
+ default value to `np.nan`. If a function, this function is used to
483
+ convert a string to another object. In this case, it is recommended
484
+ to give an associated default value as input.
485
+ default : any, optional
486
+ Value to return by default, that is, when the string to be
487
+ converted is flagged as missing. If not given, `StringConverter`
488
+ tries to supply a reasonable default value.
489
+ missing_values : {None, sequence of str}, optional
490
+ ``None`` or sequence of strings indicating a missing value. If ``None``
491
+ then missing values are indicated by empty entries. The default is
492
+ ``None``.
493
+ locked : bool, optional
494
+ Whether the StringConverter should be locked to prevent automatic
495
+ upgrade or not. Default is False.
496
+
497
+ """
498
+ _mapper = [(nx.bool_, str2bool, False),
499
+ (nx.int_, int, -1),]
500
+
501
+ # On 32-bit systems, we need to make sure that we explicitly include
502
+ # nx.int64 since nx.int_ is nx.int32.
503
+ if nx.dtype(nx.int_).itemsize < nx.dtype(nx.int64).itemsize:
504
+ _mapper.append((nx.int64, int, -1))
505
+
506
+ _mapper.extend([(nx.float64, float, nx.nan),
507
+ (nx.complex128, complex, nx.nan + 0j),
508
+ (nx.longdouble, nx.longdouble, nx.nan),
509
+ # If a non-default dtype is passed, fall back to generic
510
+ # ones (should only be used for the converter)
511
+ (nx.integer, int, -1),
512
+ (nx.floating, float, nx.nan),
513
+ (nx.complexfloating, complex, nx.nan + 0j),
514
+ # Last, try with the string types (must be last, because
515
+ # `_mapper[-1]` is used as default in some cases)
516
+ (nx.str_, asunicode, '???'),
517
+ (nx.bytes_, asbytes, '???'),
518
+ ])
519
+
520
+ @classmethod
521
+ def _getdtype(cls, val):
522
+ """Returns the dtype of the input variable."""
523
+ return np.array(val).dtype
524
+
525
+ @classmethod
526
+ def _getsubdtype(cls, val):
527
+ """Returns the type of the dtype of the input variable."""
528
+ return np.array(val).dtype.type
529
+
530
+ @classmethod
531
+ def _dtypeortype(cls, dtype):
532
+ """Returns dtype for datetime64 and type of dtype otherwise."""
533
+
534
+ # This is a bit annoying. We want to return the "general" type in most
535
+ # cases (ie. "string" rather than "S10"), but we want to return the
536
+ # specific type for datetime64 (ie. "datetime64[us]" rather than
537
+ # "datetime64").
538
+ if dtype.type == np.datetime64:
539
+ return dtype
540
+ return dtype.type
541
+
542
+ @classmethod
543
+ def upgrade_mapper(cls, func, default=None):
544
+ """
545
+ Upgrade the mapper of a StringConverter by adding a new function and
546
+ its corresponding default.
547
+
548
+ The input function (or sequence of functions) and its associated
549
+ default value (if any) are inserted in the penultimate position of the
550
+ mapper. The corresponding type is estimated from the dtype of the
551
+ default value.
552
+
553
+ Parameters
554
+ ----------
555
+ func : var
556
+ Function, or sequence of functions
557
+
558
+ Examples
559
+ --------
560
+ >>> import dateutil.parser
561
+ >>> import datetime
562
+ >>> dateparser = dateutil.parser.parse
563
+ >>> defaultdate = datetime.date(2000, 1, 1)
564
+ >>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
565
+ """
566
+ # Func is a single function
567
+ if hasattr(func, '__call__'):
568
+ cls._mapper.insert(-1, (cls._getsubdtype(default), func, default))
569
+ return
570
+ elif hasattr(func, '__iter__'):
571
+ if isinstance(func[0], (tuple, list)):
572
+ for _ in func:
573
+ cls._mapper.insert(-1, _)
574
+ return
575
+ if default is None:
576
+ default = [None] * len(func)
577
+ else:
578
+ default = list(default)
579
+ default.extend([None] * (len(func) - len(default)))  # pad to match func
580
+ for fct, dft in zip(func, default):
581
+ cls._mapper.insert(-1, (cls._getsubdtype(dft), fct, dft))
582
+
583
+ @classmethod
584
+ def _find_map_entry(cls, dtype):
585
+ # if a converter for the specific dtype is available use that
586
+ for i, (deftype, func, default_def) in enumerate(cls._mapper):
587
+ if dtype.type == deftype:
588
+ return i, (deftype, func, default_def)
589
+
590
+ # otherwise find an inexact match
591
+ for i, (deftype, func, default_def) in enumerate(cls._mapper):
592
+ if np.issubdtype(dtype.type, deftype):
593
+ return i, (deftype, func, default_def)
594
+
595
+ raise LookupError
596
+
597
+ def __init__(self, dtype_or_func=None, default=None, missing_values=None,
598
+ locked=False):
599
+ # Defines a lock for upgrade
600
+ self._locked = bool(locked)
601
+ # No input dtype: minimal initialization
602
+ if dtype_or_func is None:
603
+ self.func = str2bool
604
+ self._status = 0
605
+ self.default = default or False
606
+ dtype = np.dtype('bool')
607
+ else:
608
+ # Is the input a np.dtype?
609
+ try:
610
+ self.func = None
611
+ dtype = np.dtype(dtype_or_func)
612
+ except TypeError:
613
+ # dtype_or_func must be a function, then
614
+ if not hasattr(dtype_or_func, '__call__'):
615
+ errmsg = ("The input argument `dtype` is neither a"
616
+ " function nor a dtype (got '%s' instead)")
617
+ raise TypeError(errmsg % type(dtype_or_func))
618
+ # Set the function
619
+ self.func = dtype_or_func
620
+ # If we don't have a default, try to guess it or set it to
621
+ # None
622
+ if default is None:
623
+ try:
624
+ default = self.func('0')
625
+ except ValueError:
626
+ default = None
627
+ dtype = self._getdtype(default)
628
+
629
+ # find the best match in our mapper
630
+ try:
631
+ self._status, (_, func, default_def) = self._find_map_entry(dtype)
632
+ except LookupError:
633
+ # no match
634
+ self.default = default
635
+ _, func, _ = self._mapper[-1]
636
+ self._status = 0
637
+ else:
638
+ # use the found default only if we did not already have one
639
+ if default is None:
640
+ self.default = default_def
641
+ else:
642
+ self.default = default
643
+
644
+ # If the input was a dtype, set the function to the last we saw
645
+ if self.func is None:
646
+ self.func = func
647
+
648
+ # If the status is 1 (int), change the function to
649
+ # something more robust.
650
+ if self.func == self._mapper[1][1]:
651
+ if issubclass(dtype.type, np.uint64):
652
+ self.func = np.uint64
653
+ elif issubclass(dtype.type, np.int64):
654
+ self.func = np.int64
655
+ else:
656
+ self.func = lambda x: int(float(x))
657
+ # Store the list of strings corresponding to missing values.
658
+ if missing_values is None:
659
+ self.missing_values = {''}
660
+ else:
661
+ if isinstance(missing_values, str):
662
+ missing_values = missing_values.split(",")
663
+ self.missing_values = set(list(missing_values) + [''])
664
+
665
+ self._callingfunction = self._strict_call
666
+ self.type = self._dtypeortype(dtype)
667
+ self._checked = False
668
+ self._initial_default = default
669
+
670
+ def _loose_call(self, value):
671
+ try:
672
+ return self.func(value)
673
+ except ValueError:
674
+ return self.default
675
+
676
+ def _strict_call(self, value):
677
+ try:
678
+
679
+ # We check if we can convert the value using the current function
680
+ new_value = self.func(value)
681
+
682
+ # In addition to having to check whether func can convert the
683
+ # value, we also have to make sure that we don't get overflow
684
+ # errors for integers.
685
+ if self.func is int:
686
+ try:
687
+ np.array(value, dtype=self.type)
688
+ except OverflowError:
689
+ raise ValueError
690
+
691
+ # We're still here so we can now return the new value
692
+ return new_value
693
+
694
+ except ValueError:
695
+ if value.strip() in self.missing_values:
696
+ if not self._status:
697
+ self._checked = False
698
+ return self.default
699
+ raise ValueError("Cannot convert string '%s'" % value)
700
+
701
+ def __call__(self, value):
702
+ return self._callingfunction(value)
703
+
704
+ def _do_upgrade(self):
705
+ # Raise an exception if we locked the converter...
706
+ if self._locked:
707
+ errmsg = "Converter is locked and cannot be upgraded"
708
+ raise ConverterLockError(errmsg)
709
+ _statusmax = len(self._mapper)
710
+ # Complain if we try to upgrade past the maximum
711
+ _status = self._status
712
+ if _status == _statusmax:
713
+ errmsg = "Could not find a valid conversion function"
714
+ raise ConverterError(errmsg)
715
+ elif _status < _statusmax - 1:
716
+ _status += 1
717
+ self.type, self.func, default = self._mapper[_status]
718
+ self._status = _status
719
+ if self._initial_default is not None:
720
+ self.default = self._initial_default
721
+ else:
722
+ self.default = default
723
+
724
+ def upgrade(self, value):
725
+ """
726
+ Find the best converter for a given string, and return the result.
727
+
728
+ The supplied string `value` is converted by testing different
729
+ converters in order. First the `func` method of the
730
+ `StringConverter` instance is tried, if this fails other available
731
+ converters are tried. The order in which these other converters
732
+ are tried is determined by the `_status` attribute of the instance.
733
+
734
+ Parameters
735
+ ----------
736
+ value : str
737
+ The string to convert.
738
+
739
+ Returns
740
+ -------
741
+ out : any
742
+ The result of converting `value` with the appropriate converter.
743
+
744
+ """
745
+ self._checked = True
746
+ try:
747
+ return self._strict_call(value)
748
+ except ValueError:
749
+ self._do_upgrade()
750
+ return self.upgrade(value)
751
+
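A short sketch of how `upgrade` walks `_mapper`, assuming the default mapper order shown earlier (bool, int, float, complex, ..., str):

>>> import numpy as np
>>> from numpy.lib._iotools import StringConverter
>>> convert = StringConverter(float, default=-9999.0)
>>> convert('1.5')
1.5
>>> convert.upgrade('hello')  # float fails, so climb the mapper until str succeeds
'hello'
>>> convert.type is np.str_
True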
752
+ def iterupgrade(self, value):
753
+ self._checked = True
754
+ if not hasattr(value, '__iter__'):
755
+ value = (value,)
756
+ _strict_call = self._strict_call
757
+ try:
758
+ for _m in value:
759
+ _strict_call(_m)
760
+ except ValueError:
761
+ self._do_upgrade()
762
+ self.iterupgrade(value)
763
+
764
+ def update(self, func, default=None, testing_value=None,
765
+ missing_values='', locked=False):
766
+ """
767
+ Set StringConverter attributes directly.
768
+
769
+ Parameters
770
+ ----------
771
+ func : function
772
+ Conversion function.
773
+ default : any, optional
774
+ Value to return by default, that is, when the string to be
775
+ converted is flagged as missing. If not given,
776
+ `StringConverter` tries to supply a reasonable default value.
777
+ testing_value : str, optional
778
+ A string representing a standard input value of the converter.
779
+ This string is used to help defining a reasonable default
780
+ value.
781
+ missing_values : {sequence of str, None}, optional
782
+ Sequence of strings indicating a missing value. If ``None``, then
783
+ the existing `missing_values` are cleared. The default is `''`.
784
+ locked : bool, optional
785
+ Whether the StringConverter should be locked to prevent
786
+ automatic upgrade or not. Default is False.
787
+
788
+ Notes
789
+ -----
790
+ `update` takes the same parameters as the constructor of
791
+ `StringConverter`, except that `func` does not accept a `dtype`
792
+ whereas `dtype_or_func` in the constructor does.
793
+
794
+ """
795
+ self.func = func
796
+ self._locked = locked
797
+
798
+ # Don't reset the default to None if we can avoid it
799
+ if default is not None:
800
+ self.default = default
801
+ self.type = self._dtypeortype(self._getdtype(default))
802
+ else:
803
+ try:
804
+ tester = func(testing_value or '1')
805
+ except (TypeError, ValueError):
806
+ tester = None
807
+ self.type = self._dtypeortype(self._getdtype(tester))
808
+
809
+ # Add the missing values to the existing set or clear it.
810
+ if missing_values is None:
811
+ # Clear all missing values even though the ctor initializes it to
812
+ # set(['']) when the argument is None.
813
+ self.missing_values = set()
814
+ else:
815
+ if not np.iterable(missing_values):
816
+ missing_values = [missing_values]
817
+ if not all(isinstance(v, str) for v in missing_values):
818
+ raise TypeError("missing_values must be strings or unicode")
819
+ self.missing_values.update(missing_values)
820
+
821
+
822
+ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
823
+ """
824
+ Convenience function to create a `np.dtype` object.
825
+
826
+ The function processes the input `dtype` and matches it with the given
827
+ names.
828
+
829
+ Parameters
830
+ ----------
831
+ ndtype : var
832
+ Definition of the dtype. Can be any string or dictionary recognized
833
+ by the `np.dtype` function, or a sequence of types.
834
+ names : str or sequence, optional
835
+ Sequence of strings to use as field names for a structured dtype.
836
+ For convenience, `names` can be a string of a comma-separated list
837
+ of names.
838
+ defaultfmt : str, optional
839
+ Format string used to define missing names, such as ``"f%i"``
840
+ (default) or ``"fields_%02i"``.
841
+ validationargs : optional
842
+ A series of optional arguments used to initialize a
843
+ `NameValidator`.
844
+
845
+ Examples
846
+ --------
847
+ >>> np.lib._iotools.easy_dtype(float)
848
+ dtype('float64')
849
+ >>> np.lib._iotools.easy_dtype("i4, f8")
850
+ dtype([('f0', '<i4'), ('f1', '<f8')])
851
+ >>> np.lib._iotools.easy_dtype("i4, f8", defaultfmt="field_%03i")
852
+ dtype([('field_000', '<i4'), ('field_001', '<f8')])
853
+
854
+ >>> np.lib._iotools.easy_dtype((int, float, float), names="a,b,c")
855
+ dtype([('a', '<i8'), ('b', '<f8'), ('c', '<f8')])
856
+ >>> np.lib._iotools.easy_dtype(float, names="a,b,c")
857
+ dtype([('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
858
+
859
+ """
860
+ try:
861
+ ndtype = np.dtype(ndtype)
862
+ except TypeError:
863
+ validate = NameValidator(**validationargs)
864
+ nbfields = len(ndtype)
865
+ if names is None:
866
+ names = [''] * len(ndtype)
867
+ elif isinstance(names, str):
868
+ names = names.split(",")
869
+ names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
870
+ ndtype = np.dtype(dict(formats=ndtype, names=names))
871
+ else:
872
+ # Explicit names
873
+ if names is not None:
874
+ validate = NameValidator(**validationargs)
875
+ if isinstance(names, str):
876
+ names = names.split(",")
877
+ # Simple dtype: repeat to match the nb of names
878
+ if ndtype.names is None:
879
+ formats = tuple([ndtype.type] * len(names))
880
+ names = validate(names, defaultfmt=defaultfmt)
881
+ ndtype = np.dtype(list(zip(names, formats)))
882
+ # Structured dtype: just validate the names as needed
883
+ else:
884
+ ndtype.names = validate(names, nbfields=len(ndtype.names),
885
+ defaultfmt=defaultfmt)
886
+ # No implicit names
887
+ elif ndtype.names is not None:
888
+ validate = NameValidator(**validationargs)
889
+ # Default initial names: should we change the format?
890
+ numbered_names = tuple("f%i" % i for i in range(len(ndtype.names)))
891
+ if ((ndtype.names == numbered_names) and (defaultfmt != "f%i")):
892
+ ndtype.names = validate([''] * len(ndtype.names),
893
+ defaultfmt=defaultfmt)
894
+ # Explicit initial names: just validate
895
+ else:
896
+ ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
897
+ return ndtype
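One more hedged example of the `validationargs` pass-through (`case_sensitive` is a `NameValidator` keyword, forwarded untouched by `easy_dtype`):

>>> import numpy as np
>>> np.lib._iotools.easy_dtype(float, names="a, b", case_sensitive='upper')
dtype([('A', '<f8'), ('B', '<f8')])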
venv/lib/python3.10/site-packages/numpy/lib/_version.py ADDED
@@ -0,0 +1,155 @@
1
+ """Utility to compare (NumPy) version strings.
2
+
3
+ The NumpyVersion class allows properly comparing numpy version strings.
4
+ The LooseVersion and StrictVersion classes that distutils provides don't
5
+ work; they don't recognize anything like alpha/beta/rc/dev versions.
6
+
7
+ """
8
+ import re
9
+
10
+
11
+ __all__ = ['NumpyVersion']
12
+
13
+
14
+ class NumpyVersion():
15
+ """Parse and compare numpy version strings.
16
+
17
+ NumPy has the following versioning scheme (numbers given are examples; they
18
+ can be > 9 in principle):
19
+
20
+ - Released version: '1.8.0', '1.8.1', etc.
21
+ - Alpha: '1.8.0a1', '1.8.0a2', etc.
22
+ - Beta: '1.8.0b1', '1.8.0b2', etc.
23
+ - Release candidates: '1.8.0rc1', '1.8.0rc2', etc.
24
+ - Development versions: '1.8.0.dev-f1234afa' (git commit hash appended)
25
+ - Development versions after a1: '1.8.0a1.dev-f1234afa',
26
+ '1.8.0b2.dev-f1234afa',
27
+ '1.8.1rc1.dev-f1234afa', etc.
28
+ - Development versions (no git hash available): '1.8.0.dev-Unknown'
29
+
30
+ Comparing needs to be done against a valid version string or other
31
+ `NumpyVersion` instance. Note that all development versions of the same
32
+ (pre-)release compare equal.
33
+
34
+ .. versionadded:: 1.9.0
35
+
36
+ Parameters
37
+ ----------
38
+ vstring : str
39
+ NumPy version string (``np.__version__``).
40
+
41
+ Examples
42
+ --------
43
+ >>> from numpy.lib import NumpyVersion
44
+ >>> if NumpyVersion(np.__version__) < '1.7.0':
45
+ ... print('skip')
46
+ >>> # skip
47
+
48
+ >>> NumpyVersion('1.7') # raises ValueError, add ".0"
49
+ Traceback (most recent call last):
50
+ ...
51
+ ValueError: Not a valid numpy version string
52
+
53
+ """
54
+
55
+ def __init__(self, vstring):
56
+ self.vstring = vstring
57
+ ver_main = re.match(r'\d+\.\d+\.\d+', vstring)
58
+ if not ver_main:
59
+ raise ValueError("Not a valid numpy version string")
60
+
61
+ self.version = ver_main.group()
62
+ self.major, self.minor, self.bugfix = [int(x) for x in
63
+ self.version.split('.')]
64
+ if len(vstring) == ver_main.end():
65
+ self.pre_release = 'final'
66
+ else:
67
+ alpha = re.match(r'a\d', vstring[ver_main.end():])
68
+ beta = re.match(r'b\d', vstring[ver_main.end():])
69
+ rc = re.match(r'rc\d', vstring[ver_main.end():])
70
+ pre_rel = [m for m in [alpha, beta, rc] if m is not None]
71
+ if pre_rel:
72
+ self.pre_release = pre_rel[0].group()
73
+ else:
74
+ self.pre_release = ''
75
+
76
+ self.is_devversion = bool(re.search(r'.dev', vstring))
77
+
78
+ def _compare_version(self, other):
79
+ """Compare major.minor.bugfix"""
80
+ if self.major == other.major:
81
+ if self.minor == other.minor:
82
+ if self.bugfix == other.bugfix:
83
+ vercmp = 0
84
+ elif self.bugfix > other.bugfix:
85
+ vercmp = 1
86
+ else:
87
+ vercmp = -1
88
+ elif self.minor > other.minor:
89
+ vercmp = 1
90
+ else:
91
+ vercmp = -1
92
+ elif self.major > other.major:
93
+ vercmp = 1
94
+ else:
95
+ vercmp = -1
96
+
97
+ return vercmp
98
+
99
+ def _compare_pre_release(self, other):
100
+ """Compare alpha/beta/rc/final."""
101
+ if self.pre_release == other.pre_release:
102
+ vercmp = 0
103
+ elif self.pre_release == 'final':
104
+ vercmp = 1
105
+ elif other.pre_release == 'final':
106
+ vercmp = -1
107
+ elif self.pre_release > other.pre_release:
108
+ vercmp = 1
109
+ else:
110
+ vercmp = -1
111
+
112
+ return vercmp
113
+
114
+ def _compare(self, other):
115
+ if not isinstance(other, (str, NumpyVersion)):
116
+ raise ValueError("Invalid object to compare with NumpyVersion.")
117
+
118
+ if isinstance(other, str):
119
+ other = NumpyVersion(other)
120
+
121
+ vercmp = self._compare_version(other)
122
+ if vercmp == 0:
123
+ # Same x.y.z version, check for alpha/beta/rc
124
+ vercmp = self._compare_pre_release(other)
125
+ if vercmp == 0:
126
+ # Same version and same pre-release, check if dev version
127
+ if self.is_devversion is other.is_devversion:
128
+ vercmp = 0
129
+ elif self.is_devversion:
130
+ vercmp = -1
131
+ else:
132
+ vercmp = 1
133
+
134
+ return vercmp
135
+
136
+ def __lt__(self, other):
137
+ return self._compare(other) < 0
138
+
139
+ def __le__(self, other):
140
+ return self._compare(other) <= 0
141
+
142
+ def __eq__(self, other):
143
+ return self._compare(other) == 0
144
+
145
+ def __ne__(self, other):
146
+ return self._compare(other) != 0
147
+
148
+ def __gt__(self, other):
149
+ return self._compare(other) > 0
150
+
151
+ def __ge__(self, other):
152
+ return self._compare(other) >= 0
153
+
154
+ def __repr__(self):
155
+ return "NumpyVersion(%s)" % self.vstring
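A few comparison sketches that follow from the rules above (a dev build sorts below its release, and all dev builds of one pre-release compare equal):

>>> from numpy.lib import NumpyVersion
>>> NumpyVersion('1.8.0') > '1.8.0rc1'
True
>>> NumpyVersion('1.8.0.dev-f1234afa') < '1.8.0'
True
>>> NumpyVersion('1.8.0a1.dev-aaaa') == '1.8.0a1.dev-bbbb'
True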
venv/lib/python3.10/site-packages/numpy/lib/_version.pyi ADDED
@@ -0,0 +1,17 @@
1
+ __all__: list[str]
2
+
3
+ class NumpyVersion:
4
+ vstring: str
5
+ version: str
6
+ major: int
7
+ minor: int
8
+ bugfix: int
9
+ pre_release: str
10
+ is_devversion: bool
11
+ def __init__(self, vstring: str) -> None: ...
12
+ def __lt__(self, other: str | NumpyVersion) -> bool: ...
13
+ def __le__(self, other: str | NumpyVersion) -> bool: ...
14
+ def __eq__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override]
15
+ def __ne__(self, other: str | NumpyVersion) -> bool: ... # type: ignore[override]
16
+ def __gt__(self, other: str | NumpyVersion) -> bool: ...
17
+ def __ge__(self, other: str | NumpyVersion) -> bool: ...
venv/lib/python3.10/site-packages/numpy/lib/arraypad.pyi ADDED
@@ -0,0 +1,85 @@
1
+ from typing import (
2
+ Literal as L,
3
+ Any,
4
+ overload,
5
+ TypeVar,
6
+ Protocol,
7
+ )
8
+
9
+ from numpy import generic
10
+
11
+ from numpy._typing import (
12
+ ArrayLike,
13
+ NDArray,
14
+ _ArrayLikeInt,
15
+ _ArrayLike,
16
+ )
17
+
18
+ _SCT = TypeVar("_SCT", bound=generic)
19
+
20
+ class _ModeFunc(Protocol):
21
+ def __call__(
22
+ self,
23
+ vector: NDArray[Any],
24
+ iaxis_pad_width: tuple[int, int],
25
+ iaxis: int,
26
+ kwargs: dict[str, Any],
27
+ /,
28
+ ) -> None: ...
29
+
30
+ _ModeKind = L[
31
+ "constant",
32
+ "edge",
33
+ "linear_ramp",
34
+ "maximum",
35
+ "mean",
36
+ "median",
37
+ "minimum",
38
+ "reflect",
39
+ "symmetric",
40
+ "wrap",
41
+ "empty",
42
+ ]
43
+
44
+ __all__: list[str]
45
+
46
+ # TODO: In practice each keyword argument is exclusive to one or more
47
+ # specific modes. Consider adding more overloads to express this in the future.
48
+
49
+ # Expand `**kwargs` into explicit keyword-only arguments
50
+ @overload
51
+ def pad(
52
+ array: _ArrayLike[_SCT],
53
+ pad_width: _ArrayLikeInt,
54
+ mode: _ModeKind = ...,
55
+ *,
56
+ stat_length: None | _ArrayLikeInt = ...,
57
+ constant_values: ArrayLike = ...,
58
+ end_values: ArrayLike = ...,
59
+ reflect_type: L["odd", "even"] = ...,
60
+ ) -> NDArray[_SCT]: ...
61
+ @overload
62
+ def pad(
63
+ array: ArrayLike,
64
+ pad_width: _ArrayLikeInt,
65
+ mode: _ModeKind = ...,
66
+ *,
67
+ stat_length: None | _ArrayLikeInt = ...,
68
+ constant_values: ArrayLike = ...,
69
+ end_values: ArrayLike = ...,
70
+ reflect_type: L["odd", "even"] = ...,
71
+ ) -> NDArray[Any]: ...
72
+ @overload
73
+ def pad(
74
+ array: _ArrayLike[_SCT],
75
+ pad_width: _ArrayLikeInt,
76
+ mode: _ModeFunc,
77
+ **kwargs: Any,
78
+ ) -> NDArray[_SCT]: ...
79
+ @overload
80
+ def pad(
81
+ array: ArrayLike,
82
+ pad_width: _ArrayLikeInt,
83
+ mode: _ModeFunc,
84
+ **kwargs: Any,
85
+ ) -> NDArray[Any]: ...
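For orientation, a runnable sketch of the callable form that the `_ModeFunc` protocol above describes (`pad_with_mean` is a made-up name; the four-argument, modify-in-place signature is the one `np.pad` documents for callable modes):

import numpy as np

def pad_with_mean(vector, iaxis_pad_width, iaxis, kwargs):
    # np.pad hands over the already-padded 1-D slice along axis `iaxis`;
    # fill both pad regions in place with the mean of the interior values.
    left, right = iaxis_pad_width
    interior = vector[left:vector.size - right]
    if left:
        vector[:left] = interior.mean()
    if right:
        vector[-right:] = interior.mean()

print(np.pad(np.arange(6.0), 2, pad_with_mean))
# roughly: [2.5 2.5 0. 1. 2. 3. 4. 5. 2.5 2.5]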
venv/lib/python3.10/site-packages/numpy/lib/arraysetops.pyi ADDED
@@ -0,0 +1,362 @@
1
+ from typing import (
2
+ Literal as L,
3
+ Any,
4
+ TypeVar,
5
+ overload,
6
+ SupportsIndex,
7
+ )
8
+
9
+ from numpy import (
10
+ generic,
11
+ number,
12
+ bool_,
13
+ ushort,
14
+ ubyte,
15
+ uintc,
16
+ uint,
17
+ ulonglong,
18
+ short,
19
+ int8,
20
+ byte,
21
+ intc,
22
+ int_,
23
+ intp,
24
+ longlong,
25
+ half,
26
+ single,
27
+ double,
28
+ longdouble,
29
+ csingle,
30
+ cdouble,
31
+ clongdouble,
32
+ timedelta64,
33
+ datetime64,
34
+ object_,
35
+ str_,
36
+ bytes_,
37
+ void,
38
+ )
39
+
40
+ from numpy._typing import (
41
+ ArrayLike,
42
+ NDArray,
43
+ _ArrayLike,
44
+ _ArrayLikeBool_co,
45
+ _ArrayLikeDT64_co,
46
+ _ArrayLikeTD64_co,
47
+ _ArrayLikeObject_co,
48
+ _ArrayLikeNumber_co,
49
+ )
50
+
51
+ _SCT = TypeVar("_SCT", bound=generic)
52
+ _NumberType = TypeVar("_NumberType", bound=number[Any])
53
+
54
+ # Explicitly set all allowed values to prevent accidental castings to
55
+ # abstract dtypes (their common super-type).
56
+ #
57
+ # Only relevant if two or more arguments are parametrized, (e.g. `setdiff1d`)
58
+ # which could result in, for example, `int64` and `float64`producing a
59
+ # `number[_64Bit]` array
60
+ _SCTNoCast = TypeVar(
61
+ "_SCTNoCast",
62
+ bool_,
63
+ ushort,
64
+ ubyte,
65
+ uintc,
66
+ uint,
67
+ ulonglong,
68
+ short,
69
+ byte,
70
+ intc,
71
+ int_,
72
+ longlong,
73
+ half,
74
+ single,
75
+ double,
76
+ longdouble,
77
+ csingle,
78
+ cdouble,
79
+ clongdouble,
80
+ timedelta64,
81
+ datetime64,
82
+ object_,
83
+ str_,
84
+ bytes_,
85
+ void,
86
+ )
87
+
88
+ __all__: list[str]
89
+
90
+ @overload
91
+ def ediff1d(
92
+ ary: _ArrayLikeBool_co,
93
+ to_end: None | ArrayLike = ...,
94
+ to_begin: None | ArrayLike = ...,
95
+ ) -> NDArray[int8]: ...
96
+ @overload
97
+ def ediff1d(
98
+ ary: _ArrayLike[_NumberType],
99
+ to_end: None | ArrayLike = ...,
100
+ to_begin: None | ArrayLike = ...,
101
+ ) -> NDArray[_NumberType]: ...
102
+ @overload
103
+ def ediff1d(
104
+ ary: _ArrayLikeNumber_co,
105
+ to_end: None | ArrayLike = ...,
106
+ to_begin: None | ArrayLike = ...,
107
+ ) -> NDArray[Any]: ...
108
+ @overload
109
+ def ediff1d(
110
+ ary: _ArrayLikeDT64_co | _ArrayLikeTD64_co,
111
+ to_end: None | ArrayLike = ...,
112
+ to_begin: None | ArrayLike = ...,
113
+ ) -> NDArray[timedelta64]: ...
114
+ @overload
115
+ def ediff1d(
116
+ ary: _ArrayLikeObject_co,
117
+ to_end: None | ArrayLike = ...,
118
+ to_begin: None | ArrayLike = ...,
119
+ ) -> NDArray[object_]: ...
120
+
121
+ @overload
122
+ def unique(
123
+ ar: _ArrayLike[_SCT],
124
+ return_index: L[False] = ...,
125
+ return_inverse: L[False] = ...,
126
+ return_counts: L[False] = ...,
127
+ axis: None | SupportsIndex = ...,
128
+ *,
129
+ equal_nan: bool = ...,
130
+ ) -> NDArray[_SCT]: ...
131
+ @overload
132
+ def unique(
133
+ ar: ArrayLike,
134
+ return_index: L[False] = ...,
135
+ return_inverse: L[False] = ...,
136
+ return_counts: L[False] = ...,
137
+ axis: None | SupportsIndex = ...,
138
+ *,
139
+ equal_nan: bool = ...,
140
+ ) -> NDArray[Any]: ...
141
+ @overload
142
+ def unique(
143
+ ar: _ArrayLike[_SCT],
144
+ return_index: L[True] = ...,
145
+ return_inverse: L[False] = ...,
146
+ return_counts: L[False] = ...,
147
+ axis: None | SupportsIndex = ...,
148
+ *,
149
+ equal_nan: bool = ...,
150
+ ) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
151
+ @overload
152
+ def unique(
153
+ ar: ArrayLike,
154
+ return_index: L[True] = ...,
155
+ return_inverse: L[False] = ...,
156
+ return_counts: L[False] = ...,
157
+ axis: None | SupportsIndex = ...,
158
+ *,
159
+ equal_nan: bool = ...,
160
+ ) -> tuple[NDArray[Any], NDArray[intp]]: ...
161
+ @overload
162
+ def unique(
163
+ ar: _ArrayLike[_SCT],
164
+ return_index: L[False] = ...,
165
+ return_inverse: L[True] = ...,
166
+ return_counts: L[False] = ...,
167
+ axis: None | SupportsIndex = ...,
168
+ *,
169
+ equal_nan: bool = ...,
170
+ ) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
171
+ @overload
172
+ def unique(
173
+ ar: ArrayLike,
174
+ return_index: L[False] = ...,
175
+ return_inverse: L[True] = ...,
176
+ return_counts: L[False] = ...,
177
+ axis: None | SupportsIndex = ...,
178
+ *,
179
+ equal_nan: bool = ...,
180
+ ) -> tuple[NDArray[Any], NDArray[intp]]: ...
181
+ @overload
182
+ def unique(
183
+ ar: _ArrayLike[_SCT],
184
+ return_index: L[False] = ...,
185
+ return_inverse: L[False] = ...,
186
+ return_counts: L[True] = ...,
187
+ axis: None | SupportsIndex = ...,
188
+ *,
189
+ equal_nan: bool = ...,
190
+ ) -> tuple[NDArray[_SCT], NDArray[intp]]: ...
191
+ @overload
192
+ def unique(
193
+ ar: ArrayLike,
194
+ return_index: L[False] = ...,
195
+ return_inverse: L[False] = ...,
196
+ return_counts: L[True] = ...,
197
+ axis: None | SupportsIndex = ...,
198
+ *,
199
+ equal_nan: bool = ...,
200
+ ) -> tuple[NDArray[Any], NDArray[intp]]: ...
201
+ @overload
202
+ def unique(
203
+ ar: _ArrayLike[_SCT],
204
+ return_index: L[True] = ...,
205
+ return_inverse: L[True] = ...,
206
+ return_counts: L[False] = ...,
207
+ axis: None | SupportsIndex = ...,
208
+ *,
209
+ equal_nan: bool = ...,
210
+ ) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
211
+ @overload
212
+ def unique(
213
+ ar: ArrayLike,
214
+ return_index: L[True] = ...,
215
+ return_inverse: L[True] = ...,
216
+ return_counts: L[False] = ...,
217
+ axis: None | SupportsIndex = ...,
218
+ *,
219
+ equal_nan: bool = ...,
220
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
221
+ @overload
222
+ def unique(
223
+ ar: _ArrayLike[_SCT],
224
+ return_index: L[True] = ...,
225
+ return_inverse: L[False] = ...,
226
+ return_counts: L[True] = ...,
227
+ axis: None | SupportsIndex = ...,
228
+ *,
229
+ equal_nan: bool = ...,
230
+ ) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
231
+ @overload
232
+ def unique(
233
+ ar: ArrayLike,
234
+ return_index: L[True] = ...,
235
+ return_inverse: L[False] = ...,
236
+ return_counts: L[True] = ...,
237
+ axis: None | SupportsIndex = ...,
238
+ *,
239
+ equal_nan: bool = ...,
240
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
241
+ @overload
242
+ def unique(
243
+ ar: _ArrayLike[_SCT],
244
+ return_index: L[False] = ...,
245
+ return_inverse: L[True] = ...,
246
+ return_counts: L[True] = ...,
247
+ axis: None | SupportsIndex = ...,
248
+ *,
249
+ equal_nan: bool = ...,
250
+ ) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp]]: ...
251
+ @overload
252
+ def unique(
253
+ ar: ArrayLike,
254
+ return_index: L[False] = ...,
255
+ return_inverse: L[True] = ...,
256
+ return_counts: L[True] = ...,
257
+ axis: None | SupportsIndex = ...,
258
+ *,
259
+ equal_nan: bool = ...,
260
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
261
+ @overload
262
+ def unique(
263
+ ar: _ArrayLike[_SCT],
264
+ return_index: L[True] = ...,
265
+ return_inverse: L[True] = ...,
266
+ return_counts: L[True] = ...,
267
+ axis: None | SupportsIndex = ...,
268
+ *,
269
+ equal_nan: bool = ...,
270
+ ) -> tuple[NDArray[_SCT], NDArray[intp], NDArray[intp], NDArray[intp]]: ...
271
+ @overload
272
+ def unique(
273
+ ar: ArrayLike,
274
+ return_index: L[True] = ...,
275
+ return_inverse: L[True] = ...,
276
+ return_counts: L[True] = ...,
277
+ axis: None | SupportsIndex = ...,
278
+ *,
279
+ equal_nan: bool = ...,
280
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp], NDArray[intp]]: ...
281
+
282
+ @overload
283
+ def intersect1d(
284
+ ar1: _ArrayLike[_SCTNoCast],
285
+ ar2: _ArrayLike[_SCTNoCast],
286
+ assume_unique: bool = ...,
287
+ return_indices: L[False] = ...,
288
+ ) -> NDArray[_SCTNoCast]: ...
289
+ @overload
290
+ def intersect1d(
291
+ ar1: ArrayLike,
292
+ ar2: ArrayLike,
293
+ assume_unique: bool = ...,
294
+ return_indices: L[False] = ...,
295
+ ) -> NDArray[Any]: ...
296
+ @overload
297
+ def intersect1d(
298
+ ar1: _ArrayLike[_SCTNoCast],
299
+ ar2: _ArrayLike[_SCTNoCast],
300
+ assume_unique: bool = ...,
301
+ return_indices: L[True] = ...,
302
+ ) -> tuple[NDArray[_SCTNoCast], NDArray[intp], NDArray[intp]]: ...
303
+ @overload
304
+ def intersect1d(
305
+ ar1: ArrayLike,
306
+ ar2: ArrayLike,
307
+ assume_unique: bool = ...,
308
+ return_indices: L[True] = ...,
309
+ ) -> tuple[NDArray[Any], NDArray[intp], NDArray[intp]]: ...
310
+
311
+ @overload
312
+ def setxor1d(
313
+ ar1: _ArrayLike[_SCTNoCast],
314
+ ar2: _ArrayLike[_SCTNoCast],
315
+ assume_unique: bool = ...,
316
+ ) -> NDArray[_SCTNoCast]: ...
317
+ @overload
318
+ def setxor1d(
319
+ ar1: ArrayLike,
320
+ ar2: ArrayLike,
321
+ assume_unique: bool = ...,
322
+ ) -> NDArray[Any]: ...
323
+
324
+ def in1d(
325
+ ar1: ArrayLike,
326
+ ar2: ArrayLike,
327
+ assume_unique: bool = ...,
328
+ invert: bool = ...,
329
+ ) -> NDArray[bool_]: ...
330
+
331
+ def isin(
332
+ element: ArrayLike,
333
+ test_elements: ArrayLike,
334
+ assume_unique: bool = ...,
335
+ invert: bool = ...,
336
+ *,
337
+ kind: None | str = ...,
338
+ ) -> NDArray[bool_]: ...
339
+
340
+ @overload
341
+ def union1d(
342
+ ar1: _ArrayLike[_SCTNoCast],
343
+ ar2: _ArrayLike[_SCTNoCast],
344
+ ) -> NDArray[_SCTNoCast]: ...
345
+ @overload
346
+ def union1d(
347
+ ar1: ArrayLike,
348
+ ar2: ArrayLike,
349
+ ) -> NDArray[Any]: ...
350
+
351
+ @overload
352
+ def setdiff1d(
353
+ ar1: _ArrayLike[_SCTNoCast],
354
+ ar2: _ArrayLike[_SCTNoCast],
355
+ assume_unique: bool = ...,
356
+ ) -> NDArray[_SCTNoCast]: ...
357
+ @overload
358
+ def setdiff1d(
359
+ ar1: ArrayLike,
360
+ ar2: ArrayLike,
361
+ assume_unique: bool = ...,
362
+ ) -> NDArray[Any]: ...
venv/lib/python3.10/site-packages/numpy/lib/arrayterator.py ADDED
@@ -0,0 +1,219 @@
1
+ """
2
+ A buffered iterator for big arrays.
3
+
4
+ This module solves the problem of iterating over a big file-based array
5
+ without having to read it into memory. The `Arrayterator` class wraps
6
+ an array object, and when iterated it will return sub-arrays with at most
7
+ a user-specified number of elements.
8
+
9
+ """
10
+ from operator import mul
11
+ from functools import reduce
12
+
13
+ __all__ = ['Arrayterator']
14
+
15
+
16
+ class Arrayterator:
17
+ """
18
+ Buffered iterator for big arrays.
19
+
20
+ `Arrayterator` creates a buffered iterator for reading big arrays in small
21
+ contiguous blocks. The class is useful for objects stored in the
22
+ file system. It allows iteration over the object *without* reading
23
+ everything in memory; instead, small blocks are read and iterated over.
24
+
25
+ `Arrayterator` can be used with any object that supports multidimensional
26
+ slices. This includes NumPy arrays, but also variables from
27
+ Scientific.IO.NetCDF or pynetcdf for example.
28
+
29
+ Parameters
30
+ ----------
31
+ var : array_like
32
+ The object to iterate over.
33
+ buf_size : int, optional
34
+ The buffer size. If `buf_size` is supplied, the maximum amount of
35
+ data that will be read into memory is `buf_size` elements.
36
+ Default is None, which will read as many element as possible
37
+ into memory.
38
+
39
+ Attributes
40
+ ----------
41
+ var
42
+ buf_size
43
+ start
44
+ stop
45
+ step
46
+ shape
47
+ flat
48
+
49
+ See Also
50
+ --------
51
+ ndenumerate : Multidimensional array iterator.
52
+ flatiter : Flat array iterator.
53
+ memmap : Create a memory-map to an array stored in a binary file on disk.
54
+
55
+ Notes
56
+ -----
57
+ The algorithm works by first finding a "running dimension", along which
58
+ the blocks will be extracted. Given an array of dimensions
59
+ ``(d1, d2, ..., dn)``, e.g. if `buf_size` is smaller than ``d1``, the
60
+ first dimension will be used. If, on the other hand,
61
+ ``d1 < buf_size < d1*d2`` the second dimension will be used, and so on.
62
+ Blocks are extracted along this dimension, and when the last block is
63
+ returned the process continues from the next dimension, until all
64
+ elements have been read.
65
+
66
+ Examples
67
+ --------
68
+ >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
69
+ >>> a_itor = np.lib.Arrayterator(a, 2)
70
+ >>> a_itor.shape
71
+ (3, 4, 5, 6)
72
+
73
+ Now we can iterate over ``a_itor``, and it will return arrays of size
74
+ two. Since `buf_size` was smaller than any dimension, the first
75
+ dimension will be iterated over first:
76
+
77
+ >>> for subarr in a_itor:
78
+ ... if not subarr.all():
79
+ ... print(subarr, subarr.shape) # doctest: +SKIP
80
+ >>> # [[[[0 1]]]] (1, 1, 1, 2)
81
+
82
+ """
83
+
84
+ def __init__(self, var, buf_size=None):
85
+ self.var = var
86
+ self.buf_size = buf_size
87
+
88
+ self.start = [0 for dim in var.shape]
89
+ self.stop = [dim for dim in var.shape]
90
+ self.step = [1 for dim in var.shape]
91
+
92
+ def __getattr__(self, attr):
93
+ return getattr(self.var, attr)
94
+
95
+ def __getitem__(self, index):
96
+ """
97
+ Return a new arrayterator.
98
+
99
+ """
100
+ # Fix index, handling ellipsis and incomplete slices.
101
+ if not isinstance(index, tuple):
102
+ index = (index,)
103
+ fixed = []
104
+ length, dims = len(index), self.ndim
105
+ for slice_ in index:
106
+ if slice_ is Ellipsis:
107
+ fixed.extend([slice(None)] * (dims-length+1))
108
+ length = len(fixed)
109
+ elif isinstance(slice_, int):
110
+ fixed.append(slice(slice_, slice_+1, 1))
111
+ else:
112
+ fixed.append(slice_)
113
+ index = tuple(fixed)
114
+ if len(index) < dims:
115
+ index += (slice(None),) * (dims-len(index))
116
+
117
+ # Return a new arrayterator object.
118
+ out = self.__class__(self.var, self.buf_size)
119
+ for i, (start, stop, step, slice_) in enumerate(
120
+ zip(self.start, self.stop, self.step, index)):
121
+ out.start[i] = start + (slice_.start or 0)
122
+ out.step[i] = step * (slice_.step or 1)
123
+ out.stop[i] = start + (slice_.stop or stop-start)
124
+ out.stop[i] = min(stop, out.stop[i])
125
+ return out
126
+
127
+ def __array__(self):
128
+ """
129
+ Return corresponding data.
130
+
131
+ """
132
+ slice_ = tuple(slice(*t) for t in zip(
133
+ self.start, self.stop, self.step))
134
+ return self.var[slice_]
135
+
136
+ @property
137
+ def flat(self):
138
+ """
139
+ A 1-D flat iterator for Arrayterator objects.
140
+
141
+ This iterator returns elements of the array to be iterated over in
142
+ `Arrayterator` one by one. It is similar to `flatiter`.
143
+
144
+ See Also
145
+ --------
146
+ Arrayterator
147
+ flatiter
148
+
149
+ Examples
150
+ --------
151
+ >>> a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
152
+ >>> a_itor = np.lib.Arrayterator(a, 2)
153
+
154
+ >>> for subarr in a_itor.flat:
155
+ ... if not subarr:
156
+ ... print(subarr, type(subarr))
157
+ ...
158
+ 0 <class 'numpy.int64'>
159
+
160
+ """
161
+ for block in self:
162
+ yield from block.flat
163
+
164
+ @property
165
+ def shape(self):
166
+ """
167
+ The shape of the array to be iterated over.
168
+
169
+ For an example, see `Arrayterator`.
170
+
171
+ """
172
+ return tuple(((stop-start-1)//step+1) for start, stop, step in
173
+ zip(self.start, self.stop, self.step))
174
+
175
+ def __iter__(self):
176
+ # Skip arrays with degenerate dimensions
177
+ if [dim for dim in self.shape if dim <= 0]:
178
+ return
179
+
180
+ start = self.start[:]
181
+ stop = self.stop[:]
182
+ step = self.step[:]
183
+ ndims = self.var.ndim
184
+
185
+ while True:
186
+ count = self.buf_size or reduce(mul, self.shape)
187
+
188
+ # iterate over each dimension, looking for the
189
+ # running dimension (ie, the dimension along which
190
+ # the blocks will be built from)
191
+ rundim = 0
192
+ for i in range(ndims-1, -1, -1):
193
+ # if count is zero we ran out of elements to read
194
+ # along higher dimensions, so we read only a single position
195
+ if count == 0:
196
+ stop[i] = start[i]+1
197
+ elif count <= self.shape[i]:
198
+ # limit along this dimension
199
+ stop[i] = start[i] + count*step[i]
200
+ rundim = i
201
+ else:
202
+ # read everything along this dimension
203
+ stop[i] = self.stop[i]
204
+ stop[i] = min(self.stop[i], stop[i])
205
+ count = count//self.shape[i]
206
+
207
+ # yield a block
208
+ slice_ = tuple(slice(*t) for t in zip(start, stop, step))
209
+ yield self.var[slice_]
210
+
211
+ # Update start position, taking care of overflow to
212
+ # other dimensions
213
+ start[rundim] = stop[rundim] # start where we stopped
214
+ for i in range(ndims-1, 0, -1):
215
+ if start[i] >= self.stop[i]:
216
+ start[i] = self.start[i]
217
+ start[i-1] += self.step[i-1]
218
+ if start[0] >= self.stop[0]:
219
+ return
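A quick sanity check of the block walk above, assuming blocks come out in C order so their raveled concatenation reproduces the array:

import numpy as np
from numpy.lib import Arrayterator

a = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
blocks = list(Arrayterator(a, buf_size=24))
assert all(b.size <= 24 for b in blocks)          # no block exceeds the buffer
assert (np.concatenate([b.ravel() for b in blocks]) == a.ravel()).all()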
venv/lib/python3.10/site-packages/numpy/lib/format.py ADDED
@@ -0,0 +1,976 @@
1
+ """
2
+ Binary serialization
3
+
4
+ NPY format
5
+ ==========
6
+
7
+ A simple format for saving numpy arrays to disk with the full
8
+ information about them.
9
+
10
+ The ``.npy`` format is the standard binary file format in NumPy for
11
+ persisting a *single* arbitrary NumPy array on disk. The format stores all
12
+ of the shape and dtype information necessary to reconstruct the array
13
+ correctly even on another machine with a different architecture.
14
+ The format is designed to be as simple as possible while achieving
15
+ its limited goals.
16
+
17
+ The ``.npz`` format is the standard format for persisting *multiple* NumPy
18
+ arrays on disk. A ``.npz`` file is a zip file containing multiple ``.npy``
19
+ files, one for each array.
20
+
21
+ Capabilities
22
+ ------------
23
+
24
+ - Can represent all NumPy arrays including nested record arrays and
25
+ object arrays.
26
+
27
+ - Represents the data in its native binary form.
28
+
29
+ - Supports Fortran-contiguous arrays directly.
30
+
31
+ - Stores all of the necessary information to reconstruct the array
32
+ including shape and dtype on a machine of a different
33
+ architecture. Both little-endian and big-endian arrays are
34
+ supported, and a file with little-endian numbers will yield
35
+ a little-endian array on any machine reading the file. The
36
+ types are described in terms of their actual sizes. For example,
37
+ if a machine with a 64-bit C "long int" writes out an array with
38
+ "long ints", a reading machine with 32-bit C "long ints" will yield
39
+ an array with 64-bit integers.
40
+
41
+ - Is straightforward to reverse engineer. Datasets often live longer than
42
+ the programs that created them. A competent developer should be
43
+ able to create a solution in their preferred programming language to
44
+ read most ``.npy`` files that they have been given without much
45
+ documentation.
46
+
47
+ - Allows memory-mapping of the data. See `open_memmap`.
48
+
49
+ - Can be read from a filelike stream object instead of an actual file.
50
+
51
+ - Stores object arrays, i.e. arrays containing elements that are arbitrary
52
+ Python objects. Files with object arrays are not mmapable, but
53
+ can be read and written to disk.
54
+
55
+ Limitations
56
+ -----------
57
+
58
+ - Arbitrary subclasses of numpy.ndarray are not completely preserved.
59
+ Subclasses will be accepted for writing, but only the array data will
60
+ be written out. A regular numpy.ndarray object will be created
61
+ upon reading the file.
62
+
63
+ .. warning::
64
+
65
+ Due to limitations in the interpretation of structured dtypes, dtypes
66
+ with fields with empty names will have the names replaced by 'f0', 'f1',
67
+ etc. Such arrays will not round-trip through the format entirely
68
+ accurately. The data is intact; only the field names will differ. We are
69
+ working on a fix for this. This fix will not require a change in the
70
+ file format. The arrays with such structures can still be saved and
71
+ restored, and the correct dtype may be restored by using the
72
+ ``loadedarray.view(correct_dtype)`` method.
73
+
74
+ File extensions
75
+ ---------------
76
+
77
+ We recommend using the ``.npy`` and ``.npz`` extensions for files saved
78
+ in this format. This is by no means a requirement; applications may wish
79
+ to use these file formats but use an extension specific to the
80
+ application. In the absence of an obvious alternative, however,
81
+ we suggest using ``.npy`` and ``.npz``.
82
+
83
+ Version numbering
84
+ -----------------
85
+
86
+ The version numbering of these formats is independent of NumPy version
87
+ numbering. If the format is upgraded, the code in `numpy.io` will still
88
+ be able to read and write Version 1.0 files.
89
+
90
+ Format Version 1.0
91
+ ------------------
92
+
93
+ The first 6 bytes are a magic string: exactly ``\\x93NUMPY``.
94
+
95
+ The next 1 byte is an unsigned byte: the major version number of the file
96
+ format, e.g. ``\\x01``.
97
+
98
+ The next 1 byte is an unsigned byte: the minor version number of the file
99
+ format, e.g. ``\\x00``. Note: the version of the file format is not tied
100
+ to the version of the numpy package.
101
+
102
+ The next 2 bytes form a little-endian unsigned short int: the length of
103
+ the header data HEADER_LEN.
104
+
105
+ The next HEADER_LEN bytes form the header data describing the array's
106
+ format. It is an ASCII string which contains a Python literal expression
107
+ of a dictionary. It is terminated by a newline (``\\n``) and padded with
108
+ spaces (``\\x20``) to make the total of
109
+ ``len(magic string) + 2 + len(length) + HEADER_LEN`` be evenly divisible
110
+ by 64 for alignment purposes.
111
+
112
+ The dictionary contains three keys:
113
+
114
+ "descr" : dtype.descr
115
+ An object that can be passed as an argument to the `numpy.dtype`
116
+ constructor to create the array's dtype.
117
+ "fortran_order" : bool
118
+ Whether the array data is Fortran-contiguous or not. Since
119
+ Fortran-contiguous arrays are a common form of non-C-contiguity,
120
+ we allow them to be written directly to disk for efficiency.
121
+ "shape" : tuple of int
122
+ The shape of the array.
123
+
124
+ For repeatability and readability, the dictionary keys are sorted in
125
+ alphabetic order. This is for convenience only. A writer SHOULD implement
126
+ this if possible. A reader MUST NOT depend on this.
127
+
128
+ Following the header comes the array data. If the dtype contains Python
129
+ objects (i.e. ``dtype.hasobject is True``), then the data is a Python
130
+ pickle of the array. Otherwise the data is the contiguous (either C-
131
+ or Fortran-, depending on ``fortran_order``) bytes of the array.
132
+ Consumers can figure out the number of bytes by multiplying the number
133
+ of elements given by the shape (noting that ``shape=()`` means there is
134
+ 1 element) by ``dtype.itemsize``.
135
+
136
+ Format Version 2.0
137
+ ------------------
138
+
139
+ The version 1.0 format only allowed the array header to have a total size of
140
+ 65535 bytes. This can be exceeded by structured arrays with a large number of
141
+ columns. The version 2.0 format extends the header size to 4 GiB.
142
+ `numpy.save` will automatically save in 2.0 format if the data requires it,
143
+ else it will always use the more compatible 1.0 format.
144
+
145
+ The description of the fourth element of the header therefore has become:
146
+ "The next 4 bytes form a little-endian unsigned int: the length of the header
147
+ data HEADER_LEN."
148
+
149
+ Format Version 3.0
150
+ ------------------
151
+
152
+ This version replaces the ASCII string (which in practice was latin1) with
153
+ a utf8-encoded string, so supports structured types with any unicode field
154
+ names.
155
+
156
+ Notes
157
+ -----
158
+ The ``.npy`` format, including motivation for creating it and a comparison of
159
+ alternatives, is described in the
160
+ :doc:`"npy-format" NEP <neps:nep-0001-npy-format>`, however details have
161
+ evolved with time and this document is more current.
162
+
163
+ """
164
+ import numpy
165
+ import warnings
166
+ from numpy.lib.utils import safe_eval, drop_metadata
167
+ from numpy.compat import (
168
+ isfileobj, os_fspath, pickle
169
+ )
170
+
171
+
172
+ __all__ = []
173
+
174
+
175
+ EXPECTED_KEYS = {'descr', 'fortran_order', 'shape'}
176
+ MAGIC_PREFIX = b'\x93NUMPY'
177
+ MAGIC_LEN = len(MAGIC_PREFIX) + 2
178
+ ARRAY_ALIGN = 64 # plausible values are powers of 2 between 16 and 4096
179
+ BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
180
+ # allow growth within the address space of a 64 bit machine along one axis
181
+ GROWTH_AXIS_MAX_DIGITS = 21 # = len(str(8*2**64-1)) hypothetical int1 dtype
182
+
183
+ # difference between version 1.0 and 2.0 is a 4 byte (I) header length
184
+ # instead of 2 bytes (H) allowing storage of large structured arrays
185
+ _header_size_info = {
186
+ (1, 0): ('<H', 'latin1'),
187
+ (2, 0): ('<I', 'latin1'),
188
+ (3, 0): ('<I', 'utf8'),
189
+ }
190
+
191
+ # Python's literal_eval is not actually safe for large inputs, since parsing
192
+ # may become slow or even cause interpreter crashes.
193
+ # This is an arbitrary, low limit which should make it safe in practice.
194
+ _MAX_HEADER_SIZE = 10000
195
+
196
+ def _check_version(version):
197
+ if version not in [(1, 0), (2, 0), (3, 0), None]:
198
+ msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
199
+ raise ValueError(msg % (version,))
200
+
201
+ def magic(major, minor):
202
+ """ Return the magic string for the given file format version.
203
+
204
+ Parameters
205
+ ----------
206
+ major : int in [0, 255]
207
+ minor : int in [0, 255]
208
+
209
+ Returns
210
+ -------
211
+ magic : str
212
+
213
+ Raises
214
+ ------
215
+ ValueError if the version cannot be formatted.
216
+ """
217
+ if major < 0 or major > 255:
218
+ raise ValueError("major version must be 0 <= major < 256")
219
+ if minor < 0 or minor > 255:
220
+ raise ValueError("minor version must be 0 <= minor < 256")
221
+ return MAGIC_PREFIX + bytes([major, minor])
222
+
223
+ def read_magic(fp):
224
+ """ Read the magic string to get the version of the file format.
225
+
226
+ Parameters
227
+ ----------
228
+ fp : filelike object
229
+
230
+ Returns
231
+ -------
232
+ major : int
233
+ minor : int
234
+ """
235
+ magic_str = _read_bytes(fp, MAGIC_LEN, "magic string")
236
+ if magic_str[:-2] != MAGIC_PREFIX:
237
+ msg = "the magic string is not correct; expected %r, got %r"
238
+ raise ValueError(msg % (MAGIC_PREFIX, magic_str[:-2]))
239
+ major, minor = magic_str[-2:]
240
+ return major, minor
241
+
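A small sketch tying the magic/version/length layout together; version 1.0 stores the header length as a little-endian unsigned short right after the 8 magic/version bytes:

import io
import struct
import numpy as np

buf = io.BytesIO()
np.save(buf, np.arange(3))
raw = buf.getvalue()
assert raw[:6] == b'\x93NUMPY' and raw[6:8] == b'\x01\x00'
hlen = struct.unpack('<H', raw[8:10])[0]
print(raw[10:10 + hlen].decode('latin1'))
# e.g. {'descr': '<i8', 'fortran_order': False, 'shape': (3,), } plus padding
# (the exact descr depends on the platform's default integer)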
242
+
243
+ def dtype_to_descr(dtype):
244
+ """
245
+ Get a serializable descriptor from the dtype.
246
+
247
+ The .descr attribute of a dtype object cannot be round-tripped through
248
+ the dtype() constructor. Simple types, like dtype('float32'), have
249
+ a descr which looks like a record array with one field with '' as
250
+ a name. The dtype() constructor interprets this as a request to give
251
+ a default name. Instead, we construct descriptor that can be passed to
252
+ dtype().
253
+
254
+ Parameters
255
+ ----------
256
+ dtype : dtype
257
+ The dtype of the array that will be written to disk.
258
+
259
+ Returns
260
+ -------
261
+ descr : object
262
+ An object that can be passed to `numpy.dtype()` in order to
263
+ replicate the input dtype.
264
+
265
+ """
266
+ # NOTE: that drop_metadata may not return the right dtype e.g. for user
267
+ # dtypes. In that case our code below would fail the same, though.
268
+ new_dtype = drop_metadata(dtype)
269
+ if new_dtype is not dtype:
270
+ warnings.warn("metadata on a dtype is not saved to an npy/npz. "
271
+ "Use another format (such as pickle) to store it.",
272
+ UserWarning, stacklevel=2)
273
+ if dtype.names is not None:
274
+ # This is a record array. The .descr is fine. XXX: parts of the
275
+ # record array with an empty name, like padding bytes, still get
276
+ # fiddled with. This needs to be fixed in the C implementation of
277
+ # dtype().
278
+ return dtype.descr
279
+ else:
280
+ return dtype.str
281
+
282
+ def descr_to_dtype(descr):
283
+ """
284
+ Returns a dtype based off the given description.
285
+
286
+ This is essentially the reverse of `dtype_to_descr()`. It will remove
287
+ the valueless padding fields created by `.descr` for, e.g., simple fields like
288
+ dtype('float32'), and then convert the description to its corresponding
289
+ dtype.
290
+
291
+ Parameters
292
+ ----------
293
+ descr : object
294
+ The object retrieved by dtype.descr. Can be passed to
295
+ `numpy.dtype()` in order to replicate the input dtype.
296
+
297
+ Returns
298
+ -------
299
+ dtype : dtype
300
+ The dtype constructed by the description.
301
+
302
+ """
303
+ if isinstance(descr, str):
304
+ # No padding removal needed
305
+ return numpy.dtype(descr)
306
+ elif isinstance(descr, tuple):
307
+ # subtype, will always have a shape descr[1]
308
+ dt = descr_to_dtype(descr[0])
309
+ return numpy.dtype((dt, descr[1]))
310
+
311
+ titles = []
312
+ names = []
313
+ formats = []
314
+ offsets = []
315
+ offset = 0
316
+ for field in descr:
317
+ if len(field) == 2:
318
+ name, descr_str = field
319
+ dt = descr_to_dtype(descr_str)
320
+ else:
321
+ name, descr_str, shape = field
322
+ dt = numpy.dtype((descr_to_dtype(descr_str), shape))
323
+
324
+ # Ignore padding bytes, which will be void bytes with '' as name
325
+ # Once support for blank names is removed, only "if name == ''" is needed
326
+ is_pad = (name == '' and dt.type is numpy.void and dt.names is None)
327
+ if not is_pad:
328
+ title, name = name if isinstance(name, tuple) else (None, name)
329
+ titles.append(title)
330
+ names.append(name)
331
+ formats.append(dt)
332
+ offsets.append(offset)
333
+ offset += dt.itemsize
334
+
335
+ return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
336
+ 'offsets': offsets, 'itemsize': offset})
337
+
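A round-trip sketch of the two helpers above:

>>> import numpy as np
>>> from numpy.lib.format import dtype_to_descr, descr_to_dtype
>>> dt = np.dtype([('x', '<f4'), ('y', '<i2')])
>>> descr_to_dtype(dtype_to_descr(dt)) == dt
True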
338
+ def header_data_from_array_1_0(array):
339
+ """ Get the dictionary of header metadata from a numpy.ndarray.
340
+
341
+ Parameters
342
+ ----------
343
+ array : numpy.ndarray
344
+
345
+ Returns
346
+ -------
347
+ d : dict
348
+ This has the appropriate entries for writing its string representation
349
+ to the header of the file.
350
+ """
351
+ d = {'shape': array.shape}
352
+ if array.flags.c_contiguous:
353
+ d['fortran_order'] = False
354
+ elif array.flags.f_contiguous:
355
+ d['fortran_order'] = True
356
+ else:
357
+ # Totally non-contiguous data. We will have to make it C-contiguous
358
+ # before writing. Note that we need to test for C_CONTIGUOUS first
359
+ # because a 1-D array is both C_CONTIGUOUS and F_CONTIGUOUS.
360
+ d['fortran_order'] = False
361
+
362
+ d['descr'] = dtype_to_descr(array.dtype)
363
+ return d
364
+
365
+
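A minimal illustration of the dictionary produced above:

    import numpy as np
    from numpy.lib.format import header_data_from_array_1_0

    a = np.zeros((2, 3), dtype='<f8')
    assert header_data_from_array_1_0(a) == {
        'shape': (2, 3), 'fortran_order': False, 'descr': '<f8'}
    # Fortran-ordered input is flagged so it can be written without a copy.
    assert header_data_from_array_1_0(np.asfortranarray(a))['fortran_order'] is True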
366
+ def _wrap_header(header, version):
367
+ """
368
+ Takes a stringified header, and attaches the prefix and padding to it
369
+ """
370
+ import struct
371
+ assert version is not None
372
+ fmt, encoding = _header_size_info[version]
373
+ header = header.encode(encoding)
374
+ hlen = len(header) + 1
375
+ padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
376
+ try:
377
+ header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
378
+ except struct.error:
379
+ msg = "Header length {} too big for version={}".format(hlen, version)
380
+ raise ValueError(msg) from None
381
+
382
+ # Pad the header with spaces and a final newline such that the magic
383
+ # string, the header-length short and the header are aligned on a
384
+ # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
385
+ # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
386
+ # offset must be page-aligned (i.e. the beginning of the file).
387
+ return header_prefix + header + b' '*padlen + b'\n'
388
+
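A sketch of the padding arithmetic above; the constant values used here (MAGIC_LEN = 8, ARRAY_ALIGN = 64) are assumptions, since the constants are defined earlier in the module:

    import struct

    MAGIC_LEN = 8      # assumed: 6-byte magic string plus 2 version bytes
    ARRAY_ALIGN = 64   # assumed alignment target of the .npy format

    header = b"{'descr': '<f8', 'fortran_order': False, 'shape': (3,), }"
    hlen = len(header) + 1  # +1 for the final newline
    fmt = '<H'              # version (1, 0) stores the length as a little-endian short
    padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
    assert (MAGIC_LEN + struct.calcsize(fmt) + hlen + padlen) % ARRAY_ALIGN == 0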
389
+
390
+ def _wrap_header_guess_version(header):
391
+ """
392
+ Like `_wrap_header`, but chooses an appropriate version given the contents
393
+ """
394
+ try:
395
+ return _wrap_header(header, (1, 0))
396
+ except ValueError:
397
+ pass
398
+
399
+ try:
400
+ ret = _wrap_header(header, (2, 0))
401
+ except UnicodeEncodeError:
402
+ pass
403
+ else:
404
+ warnings.warn("Stored array in format 2.0. It can only be "
405
+ "read by NumPy >= 1.9", UserWarning, stacklevel=2)
406
+ return ret
407
+
408
+ header = _wrap_header(header, (3, 0))
409
+ warnings.warn("Stored array in format 3.0. It can only be "
410
+ "read by NumPy >= 1.17", UserWarning, stacklevel=2)
411
+ return header
412
+
413
+
414
+ def _write_array_header(fp, d, version=None):
415
+ """ Write the header for an array and returns the version used
416
+
417
+ Parameters
418
+ ----------
419
+ fp : filelike object
420
+ d : dict
421
+ This has the appropriate entries for writing its string representation
422
+ to the header of the file.
423
+ version : tuple or None
424
+ None means use oldest that works. Providing an explicit version will
425
+ raise a ValueError if the format does not allow saving this data.
426
+ Default: None
427
+ """
428
+ header = ["{"]
429
+ for key, value in sorted(d.items()):
430
+ # Need to use repr here, since we eval these when reading
431
+ header.append("'%s': %s, " % (key, repr(value)))
432
+ header.append("}")
433
+ header = "".join(header)
434
+
435
+ # Add some spare space so that the array header can be modified in-place
436
+ # when changing the array size, e.g. when growing it by appending data at
437
+ # the end.
438
+ shape = d['shape']
439
+ header += " " * ((GROWTH_AXIS_MAX_DIGITS - len(repr(
440
+ shape[-1 if d['fortran_order'] else 0]
441
+ ))) if len(shape) > 0 else 0)
442
+
443
+ if version is None:
444
+ header = _wrap_header_guess_version(header)
445
+ else:
446
+ header = _wrap_header(header, version)
447
+ fp.write(header)
448
+
449
+ def write_array_header_1_0(fp, d):
450
+ """ Write the header for an array using the 1.0 format.
451
+
452
+ Parameters
453
+ ----------
454
+ fp : filelike object
455
+ d : dict
456
+ This has the appropriate entries for writing its string
457
+ representation to the header of the file.
458
+ """
459
+ _write_array_header(fp, d, (1, 0))
460
+
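A minimal sketch of what this produces for an in-memory file:

    import io
    from numpy.lib import format as npy_format

    buf = io.BytesIO()
    npy_format.write_array_header_1_0(
        buf, {'descr': '<f8', 'fortran_order': False, 'shape': (3, 4)})
    raw = buf.getvalue()
    assert raw[:6] == b'\x93NUMPY' and raw[6:8] == b'\x01\x00'  # magic + version
    assert len(raw) % 64 == 0  # padded to the ARRAY_ALIGN boundary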
461
+
462
+ def write_array_header_2_0(fp, d):
463
+ """ Write the header for an array using the 2.0 format.
464
+ The 2.0 format allows storing very large structured arrays.
465
+
466
+ .. versionadded:: 1.9.0
467
+
468
+ Parameters
469
+ ----------
470
+ fp : filelike object
471
+ d : dict
472
+ This has the appropriate entries for writing its string
473
+ representation to the header of the file.
474
+ """
475
+ _write_array_header(fp, d, (2, 0))
476
+
477
+ def read_array_header_1_0(fp, max_header_size=_MAX_HEADER_SIZE):
478
+ """
479
+ Read an array header from a filelike object using the 1.0 file format
480
+ version.
481
+
482
+ This will leave the file object located just after the header.
483
+
484
+ Parameters
485
+ ----------
486
+ fp : filelike object
487
+ A file object or something with a `.read()` method like a file.
488
+ max_header_size : int, optional
490
+ Maximum allowed size of the header. Large headers may not be safe
491
+ to load securely and thus require explicitly passing a larger value.
492
+ See :py:func:`ast.literal_eval()` for details.
493
+
494
+ Returns
495
+ -------
496
+ shape : tuple of int
497
+ The shape of the array.
498
+ fortran_order : bool
499
+ The array data will be written out directly if it is either
500
+ C-contiguous or Fortran-contiguous. Otherwise, it will be made
501
+ contiguous before writing it out.
502
+ dtype : dtype
503
+ The dtype of the file's data.
503
+
504
+ Raises
505
+ ------
506
+ ValueError
507
+ If the data is invalid.
508
+
509
+ """
510
+ return _read_array_header(
511
+ fp, version=(1, 0), max_header_size=max_header_size)
512
+
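A round-trip sketch; the seek(8) assumes the 8-byte magic prefix (MAGIC_LEN), which the header readers expect to have been consumed already:

    import io
    from numpy.lib import format as npy_format

    buf = io.BytesIO()
    npy_format.write_array_header_1_0(
        buf, {'descr': '<i4', 'fortran_order': False, 'shape': (5,)})
    buf.seek(8)  # skip the magic string
    shape, fortran_order, dtype = npy_format.read_array_header_1_0(buf)
    assert shape == (5,) and fortran_order is False and dtype.str == '<i4'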
513
+ def read_array_header_2_0(fp, max_header_size=_MAX_HEADER_SIZE):
514
+ """
515
+ Read an array header from a filelike object using the 2.0 file format
516
+ version.
517
+
518
+ This will leave the file object located just after the header.
519
+
520
+ .. versionadded:: 1.9.0
521
+
522
+ Parameters
523
+ ----------
524
+ fp : filelike object
525
+ A file object or something with a `.read()` method like a file.
526
+ max_header_size : int, optional
527
+ Maximum allowed size of the header. Large headers may not be safe
528
+ to load securely and thus require explicitly passing a larger value.
529
+ See :py:func:`ast.literal_eval()` for details.
530
+
531
+ Returns
532
+ -------
533
+ shape : tuple of int
534
+ The shape of the array.
535
+ fortran_order : bool
536
+ The array data will be written out directly if it is either
537
+ C-contiguous or Fortran-contiguous. Otherwise, it will be made
538
+ contiguous before writing it out.
539
+ dtype : dtype
540
+ The dtype of the file's data.
541
+
542
+ Raises
543
+ ------
544
+ ValueError
545
+ If the data is invalid.
546
+
547
+ """
548
+ return _read_array_header(
549
+ fp, version=(2, 0), max_header_size=max_header_size)
550
+
551
+
552
+ def _filter_header(s):
553
+ """Clean up 'L' in npy/npz header ints.
554
+
555
+ Cleans up the 'L' in strings representing integers. Needed to allow npz
556
+ headers produced in Python2 to be read in Python3.
557
+
558
+ Parameters
559
+ ----------
560
+ s : string
561
+ Npy file header.
562
+
563
+ Returns
564
+ -------
565
+ header : str
566
+ Cleaned up header.
567
+
568
+ """
569
+ import tokenize
570
+ from io import StringIO
571
+
572
+ tokens = []
573
+ last_token_was_number = False
574
+ for token in tokenize.generate_tokens(StringIO(s).readline):
575
+ token_type = token[0]
576
+ token_string = token[1]
577
+ if (last_token_was_number and
578
+ token_type == tokenize.NAME and
579
+ token_string == "L"):
580
+ continue
581
+ else:
582
+ tokens.append(token)
583
+ last_token_was_number = (token_type == tokenize.NUMBER)
584
+ return tokenize.untokenize(tokens)
585
+
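For illustration (note that _filter_header is a private helper):

    import ast
    from numpy.lib.format import _filter_header

    py2_header = "{'descr': '<i8', 'fortran_order': False, 'shape': (10L, 3L), }"
    cleaned = _filter_header(py2_header)
    assert ast.literal_eval(cleaned)['shape'] == (10, 3)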
586
+
587
+ def _read_array_header(fp, version, max_header_size=_MAX_HEADER_SIZE):
588
+ """
589
+ see read_array_header_1_0
590
+ """
591
+ # Read an unsigned, little-endian short int which has the length of the
592
+ # header.
593
+ import struct
594
+ hinfo = _header_size_info.get(version)
595
+ if hinfo is None:
596
+ raise ValueError("Invalid version {!r}".format(version))
597
+ hlength_type, encoding = hinfo
598
+
599
+ hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
600
+ header_length = struct.unpack(hlength_type, hlength_str)[0]
601
+ header = _read_bytes(fp, header_length, "array header")
602
+ header = header.decode(encoding)
603
+ if len(header) > max_header_size:
604
+ raise ValueError(
605
+ f"Header info length ({len(header)}) is large and may not be safe "
606
+ "to load securely.\n"
607
+ "To allow loading, adjust `max_header_size` or fully trust "
608
+ "the `.npy` file using `allow_pickle=True`.\n"
609
+ "For safety against large resource use or crashes, sandboxing "
610
+ "may be necessary.")
611
+
612
+ # The header is a pretty-printed string representation of a literal
613
+ # Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte
614
+ # boundary. The keys are strings.
615
+ # "shape" : tuple of int
616
+ # "fortran_order" : bool
617
+ # "descr" : dtype.descr
618
+ # Versions (2, 0) and (1, 0) could have been created by a Python 2
619
+ # implementation before header filtering was implemented.
620
+ #
621
+ # For performance reasons, we try without _filter_header first though
622
+ try:
623
+ d = safe_eval(header)
624
+ except SyntaxError as e:
625
+ if version <= (2, 0):
626
+ header = _filter_header(header)
627
+ try:
628
+ d = safe_eval(header)
629
+ except SyntaxError as e2:
630
+ msg = "Cannot parse header: {!r}"
631
+ raise ValueError(msg.format(header)) from e2
632
+ else:
633
+ warnings.warn(
634
+ "Reading `.npy` or `.npz` file required additional "
635
+ "header parsing as it was created on Python 2. Save the "
636
+ "file again to speed up loading and avoid this warning.",
637
+ UserWarning, stacklevel=4)
638
+ else:
639
+ msg = "Cannot parse header: {!r}"
640
+ raise ValueError(msg.format(header)) from e
641
+ if not isinstance(d, dict):
642
+ msg = "Header is not a dictionary: {!r}"
643
+ raise ValueError(msg.format(d))
644
+
645
+ if EXPECTED_KEYS != d.keys():
646
+ keys = sorted(d.keys())
647
+ msg = "Header does not contain the correct keys: {!r}"
648
+ raise ValueError(msg.format(keys))
649
+
650
+ # Sanity-check the values.
651
+ if (not isinstance(d['shape'], tuple) or
652
+ not all(isinstance(x, int) for x in d['shape'])):
653
+ msg = "shape is not valid: {!r}"
654
+ raise ValueError(msg.format(d['shape']))
655
+ if not isinstance(d['fortran_order'], bool):
656
+ msg = "fortran_order is not a valid bool: {!r}"
657
+ raise ValueError(msg.format(d['fortran_order']))
658
+ try:
659
+ dtype = descr_to_dtype(d['descr'])
660
+ except TypeError as e:
661
+ msg = "descr is not a valid dtype descriptor: {!r}"
662
+ raise ValueError(msg.format(d['descr'])) from e
663
+
664
+ return d['shape'], d['fortran_order'], dtype
665
+
666
+ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
667
+ """
668
+ Write an array to an NPY file, including a header.
669
+
670
+ If the array is neither C-contiguous nor Fortran-contiguous AND the
671
+ file_like object is not a real file object, this function will have to
672
+ copy data in memory.
673
+
674
+ Parameters
675
+ ----------
676
+ fp : file_like object
677
+ An open, writable file object, or similar object with a
678
+ ``.write()`` method.
679
+ array : ndarray
680
+ The array to write to disk.
681
+ version : (int, int) or None, optional
682
+ The version number of the format. None means use the oldest
683
+ supported version that is able to store the data. Default: None
684
+ allow_pickle : bool, optional
685
+ Whether to allow writing pickled data. Default: True
686
+ pickle_kwargs : dict, optional
687
+ Additional keyword arguments to pass to pickle.dump, excluding
688
+ 'protocol'. These are only useful when pickling objects in object
689
+ arrays on Python 3 to Python 2 compatible format.
690
+
691
+ Raises
692
+ ------
693
+ ValueError
694
+ If the array cannot be persisted. This includes the case of
695
+ allow_pickle=False and array being an object array.
696
+ Various other errors
697
+ If the array contains Python objects as part of its dtype, the
698
+ process of pickling them may raise various errors if the objects
699
+ are not picklable.
700
+
701
+ """
702
+ _check_version(version)
703
+ _write_array_header(fp, header_data_from_array_1_0(array), version)
704
+
705
+ if array.itemsize == 0:
706
+ buffersize = 0
707
+ else:
708
+ # Set buffer size to 16 MiB to hide the Python loop overhead.
709
+ buffersize = max(16 * 1024 ** 2 // array.itemsize, 1)
710
+
711
+ if array.dtype.hasobject:
712
+ # We contain Python objects so we cannot write out the data
713
+ # directly. Instead, we will pickle it out
714
+ if not allow_pickle:
715
+ raise ValueError("Object arrays cannot be saved when "
716
+ "allow_pickle=False")
717
+ if pickle_kwargs is None:
718
+ pickle_kwargs = {}
719
+ pickle.dump(array, fp, protocol=3, **pickle_kwargs)
720
+ elif array.flags.f_contiguous and not array.flags.c_contiguous:
721
+ if isfileobj(fp):
722
+ array.T.tofile(fp)
723
+ else:
724
+ for chunk in numpy.nditer(
725
+ array, flags=['external_loop', 'buffered', 'zerosize_ok'],
726
+ buffersize=buffersize, order='F'):
727
+ fp.write(chunk.tobytes('C'))
728
+ else:
729
+ if isfileobj(fp):
730
+ array.tofile(fp)
731
+ else:
732
+ for chunk in numpy.nditer(
733
+ array, flags=['external_loop', 'buffered', 'zerosize_ok'],
734
+ buffersize=buffersize, order='C'):
735
+ fp.write(chunk.tobytes('C'))
736
+
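A round-trip sketch using an in-memory file; the Fortran-ordered input exercises the transposed write path above:

    import io
    import numpy as np
    from numpy.lib import format as npy_format

    arr = np.arange(12.0).reshape(3, 4).T   # Fortran-contiguous view
    buf = io.BytesIO()
    npy_format.write_array(buf, arr)        # header records fortran_order=True
    buf.seek(0)
    out = npy_format.read_array(buf)
    assert out.flags.f_contiguous and np.array_equal(out, arr)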
737
+
738
+ def read_array(fp, allow_pickle=False, pickle_kwargs=None, *,
739
+ max_header_size=_MAX_HEADER_SIZE):
740
+ """
741
+ Read an array from an NPY file.
742
+
743
+ Parameters
744
+ ----------
745
+ fp : file_like object
746
+ If this is not a real file object, then this may take extra memory
747
+ and time.
748
+ allow_pickle : bool, optional
749
+ Whether to allow reading pickled data. Default: False
750
+
751
+ .. versionchanged:: 1.16.3
752
+ Made default False in response to CVE-2019-6446.
753
+
754
+ pickle_kwargs : dict
755
+ Additional keyword arguments to pass to pickle.load. These are only
756
+ useful when loading object arrays saved on Python 2 when using
757
+ Python 3.
758
+ max_header_size : int, optional
759
+ Maximum allowed size of the header. Large headers may not be safe
760
+ to load securely and thus require explicitly passing a larger value.
761
+ See :py:func:`ast.literal_eval()` for details.
762
+ This option is ignored when `allow_pickle` is passed. In that case
763
+ the file is by definition trusted and the limit is unnecessary.
764
+
765
+ Returns
766
+ -------
767
+ array : ndarray
768
+ The array from the data on disk.
769
+
770
+ Raises
771
+ ------
772
+ ValueError
773
+ If the data is invalid, or allow_pickle=False and the file contains
774
+ an object array.
775
+
776
+ """
777
+ if allow_pickle:
778
+ # Effectively ignore max_header_size, since `allow_pickle` indicates
779
+ # that the input is fully trusted.
780
+ max_header_size = 2**64
781
+
782
+ version = read_magic(fp)
783
+ _check_version(version)
784
+ shape, fortran_order, dtype = _read_array_header(
785
+ fp, version, max_header_size=max_header_size)
786
+ if len(shape) == 0:
787
+ count = 1
788
+ else:
789
+ count = numpy.multiply.reduce(shape, dtype=numpy.int64)
790
+
791
+ # Now read the actual data.
792
+ if dtype.hasobject:
793
+ # The array contained Python objects. We need to unpickle the data.
794
+ if not allow_pickle:
795
+ raise ValueError("Object arrays cannot be loaded when "
796
+ "allow_pickle=False")
797
+ if pickle_kwargs is None:
798
+ pickle_kwargs = {}
799
+ try:
800
+ array = pickle.load(fp, **pickle_kwargs)
801
+ except UnicodeError as err:
802
+ # Friendlier error message
803
+ raise UnicodeError("Unpickling a python object failed: %r\n"
804
+ "You may need to pass the encoding= option "
805
+ "to numpy.load" % (err,)) from err
806
+ else:
807
+ if isfileobj(fp):
808
+ # We can use the fast fromfile() function.
809
+ array = numpy.fromfile(fp, dtype=dtype, count=count)
810
+ else:
811
+ # This is not a real file. We have to read it the
812
+ # memory-intensive way.
813
+ # crc32 module fails on reads greater than 2 ** 32 bytes,
814
+ # breaking large reads from gzip streams. Chunk reads to
815
+ # BUFFER_SIZE bytes to avoid issue and reduce memory overhead
816
+ # of the read. In non-chunked case count < max_read_count, so
817
+ # only one read is performed.
818
+
819
+ # Use np.ndarray instead of np.empty since the latter does
820
+ # not correctly instantiate zero-width string dtypes; see
821
+ # https://github.com/numpy/numpy/pull/6430
822
+ array = numpy.ndarray(count, dtype=dtype)
823
+
824
+ if dtype.itemsize > 0:
825
+ # If dtype.itemsize == 0 then there's nothing more to read
826
+ max_read_count = BUFFER_SIZE // min(BUFFER_SIZE, dtype.itemsize)
827
+
828
+ for i in range(0, count, max_read_count):
829
+ read_count = min(max_read_count, count - i)
830
+ read_size = int(read_count * dtype.itemsize)
831
+ data = _read_bytes(fp, read_size, "array data")
832
+ array[i:i+read_count] = numpy.frombuffer(data, dtype=dtype,
833
+ count=read_count)
834
+
835
+ if fortran_order:
836
+ array.shape = shape[::-1]
837
+ array = array.transpose()
838
+ else:
839
+ array.shape = shape
840
+
841
+ return array
842
+
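A sketch of the allow_pickle behavior for object arrays:

    import io
    import numpy as np
    from numpy.lib import format as npy_format

    buf = io.BytesIO()
    npy_format.write_array(buf, np.array([{'a': 1}, None], dtype=object))
    buf.seek(0)
    try:
        npy_format.read_array(buf)           # allow_pickle defaults to False
    except ValueError:
        buf.seek(0)
        out = npy_format.read_array(buf, allow_pickle=True)
        assert out[0] == {'a': 1}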
843
+
844
+ def open_memmap(filename, mode='r+', dtype=None, shape=None,
845
+ fortran_order=False, version=None, *,
846
+ max_header_size=_MAX_HEADER_SIZE):
847
+ """
848
+ Open a .npy file as a memory-mapped array.
849
+
850
+ This may be used to read an existing file or create a new one.
851
+
852
+ Parameters
853
+ ----------
854
+ filename : str or path-like
855
+ The name of the file on disk. This may *not* be a file-like
856
+ object.
857
+ mode : str, optional
858
+ The mode in which to open the file; the default is 'r+'. In
859
+ addition to the standard file modes, 'c' is also accepted to mean
860
+ "copy on write." See `memmap` for the available mode strings.
861
+ dtype : data-type, optional
862
+ The data type of the array if we are creating a new file in "write"
863
+ mode, if not, `dtype` is ignored. The default value is None, which
864
+ results in a data-type of `float64`.
865
+ shape : tuple of int
866
+ The shape of the array if we are creating a new file in "write"
867
+ mode, in which case this parameter is required. Otherwise, this
868
+ parameter is ignored and is thus optional.
869
+ fortran_order : bool, optional
870
+ Whether the array should be Fortran-contiguous (True) or
871
+ C-contiguous (False, the default) if we are creating a new file in
872
+ "write" mode.
873
+ version : tuple of int (major, minor) or None
874
+ If the mode is a "write" mode, then this is the version of the file
875
+ format used to create the file. None means use the oldest
876
+ supported version that is able to store the data. Default: None
877
+ max_header_size : int, optional
878
+ Maximum allowed size of the header. Large headers may not be safe
879
+ to load securely and thus require explicitly passing a larger value.
880
+ See :py:func:`ast.literal_eval()` for details.
881
+
882
+ Returns
883
+ -------
884
+ marray : memmap
885
+ The memory-mapped array.
886
+
887
+ Raises
888
+ ------
889
+ ValueError
890
+ If the data or the mode is invalid.
891
+ OSError
892
+ If the file is not found or cannot be opened correctly.
893
+
894
+ See Also
895
+ --------
896
+ numpy.memmap
897
+
898
+ """
899
+ if isfileobj(filename):
900
+ raise ValueError("Filename must be a string or a path-like object."
901
+ " Memmap cannot use existing file handles.")
902
+
903
+ if 'w' in mode:
904
+ # We are creating the file, not reading it.
905
+ # Check if we ought to create the file.
906
+ _check_version(version)
907
+ # Ensure that the given dtype is an authentic dtype object rather
908
+ # than just something that can be interpreted as a dtype object.
909
+ dtype = numpy.dtype(dtype)
910
+ if dtype.hasobject:
911
+ msg = "Array can't be memory-mapped: Python objects in dtype."
912
+ raise ValueError(msg)
913
+ d = dict(
914
+ descr=dtype_to_descr(dtype),
915
+ fortran_order=fortran_order,
916
+ shape=shape,
917
+ )
918
+ # If we got here, then it should be safe to create the file.
919
+ with open(os_fspath(filename), mode+'b') as fp:
920
+ _write_array_header(fp, d, version)
921
+ offset = fp.tell()
922
+ else:
923
+ # Read the header of the file first.
924
+ with open(os_fspath(filename), 'rb') as fp:
925
+ version = read_magic(fp)
926
+ _check_version(version)
927
+
928
+ shape, fortran_order, dtype = _read_array_header(
929
+ fp, version, max_header_size=max_header_size)
930
+ if dtype.hasobject:
931
+ msg = "Array can't be memory-mapped: Python objects in dtype."
932
+ raise ValueError(msg)
933
+ offset = fp.tell()
934
+
935
+ if fortran_order:
936
+ order = 'F'
937
+ else:
938
+ order = 'C'
939
+
940
+ # We need to change a write-only mode to a read-write mode since we've
941
+ # already written data to the file.
942
+ if mode == 'w+':
943
+ mode = 'r+'
944
+
945
+ marray = numpy.memmap(filename, dtype=dtype, shape=shape, order=order,
946
+ mode=mode, offset=offset)
947
+
948
+ return marray
949
+
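A minimal usage sketch; the temporary path is illustrative:

    import os
    import tempfile
    import numpy as np
    from numpy.lib.format import open_memmap

    path = os.path.join(tempfile.mkdtemp(), 'data.npy')
    m = open_memmap(path, mode='w+', dtype='float32', shape=(100, 3))
    m[:] = 1.0
    m.flush()
    # The result is an ordinary .npy file, readable with or without mmap.
    a = np.load(path, mmap_mode='r')
    assert a.shape == (100, 3) and float(a[0, 0]) == 1.0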
950
+
951
+ def _read_bytes(fp, size, error_template="ran out of data"):
952
+ """
953
+ Read from file-like object until size bytes are read.
954
+ Raises ValueError if EOF is encountered before size bytes are read.
955
+ Non-blocking objects only supported if they derive from io objects.
956
+
957
+ Required as e.g. ZipExtFile in python 2.6 can return less data than
958
+ requested.
959
+ """
960
+ data = bytes()
961
+ while True:
962
+ # io files (default in python3) return None or raise on
963
+ # would-block, python2 file will truncate, probably nothing can be
964
+ # done about that. note that regular files can't be non-blocking
965
+ try:
966
+ r = fp.read(size - len(data))
967
+ data += r
968
+ if len(r) == 0 or len(data) == size:
969
+ break
970
+ except BlockingIOError:
971
+ pass
972
+ if len(data) != size:
973
+ msg = "EOF: reading %s, expected %d bytes got %d"
974
+ raise ValueError(msg % (error_template, size, len(data)))
975
+ else:
976
+ return data
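For illustration (again a private helper):

    import io
    from numpy.lib.format import _read_bytes

    fp = io.BytesIO(b'abcdef')
    assert _read_bytes(fp, 4) == b'abcd'
    try:
        _read_bytes(fp, 10, 'array data')  # only 2 bytes remain
    except ValueError as e:
        print(e)  # EOF: reading array data, expected 10 bytes got 2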
venv/lib/python3.10/site-packages/numpy/lib/function_base.pyi ADDED
@@ -0,0 +1,697 @@
1
+ import sys
2
+ from collections.abc import Sequence, Iterator, Callable, Iterable
3
+ from typing import (
4
+ Literal as L,
5
+ Any,
6
+ TypeVar,
7
+ overload,
8
+ Protocol,
9
+ SupportsIndex,
10
+ SupportsInt,
11
+ )
12
+
13
+ if sys.version_info >= (3, 10):
14
+ from typing import TypeGuard
15
+ else:
16
+ from typing_extensions import TypeGuard
17
+
18
+ from numpy import (
19
+ vectorize as vectorize,
20
+ ufunc,
21
+ generic,
22
+ floating,
23
+ complexfloating,
24
+ intp,
25
+ float64,
26
+ complex128,
27
+ timedelta64,
28
+ datetime64,
29
+ object_,
30
+ _OrderKACF,
31
+ )
32
+
33
+ from numpy._typing import (
34
+ NDArray,
35
+ ArrayLike,
36
+ DTypeLike,
37
+ _ShapeLike,
38
+ _ScalarLike_co,
39
+ _DTypeLike,
40
+ _ArrayLike,
41
+ _ArrayLikeInt_co,
42
+ _ArrayLikeFloat_co,
43
+ _ArrayLikeComplex_co,
44
+ _ArrayLikeTD64_co,
45
+ _ArrayLikeDT64_co,
46
+ _ArrayLikeObject_co,
47
+ _FloatLike_co,
48
+ _ComplexLike_co,
49
+ )
50
+
51
+ from numpy.core.function_base import (
52
+ add_newdoc as add_newdoc,
53
+ )
54
+
55
+ from numpy.core.multiarray import (
56
+ add_docstring as add_docstring,
57
+ bincount as bincount,
58
+ )
59
+
60
+ from numpy.core.umath import _add_newdoc_ufunc
61
+
62
+ _T = TypeVar("_T")
63
+ _T_co = TypeVar("_T_co", covariant=True)
64
+ _SCT = TypeVar("_SCT", bound=generic)
65
+ _ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
66
+
67
+ _2Tuple = tuple[_T, _T]
68
+
69
+ class _TrimZerosSequence(Protocol[_T_co]):
70
+ def __len__(self) -> int: ...
71
+ def __getitem__(self, key: slice, /) -> _T_co: ...
72
+ def __iter__(self) -> Iterator[Any]: ...
73
+
74
+ class _SupportsWriteFlush(Protocol):
75
+ def write(self, s: str, /) -> object: ...
76
+ def flush(self) -> object: ...
77
+
78
+ __all__: list[str]
79
+
80
+ # NOTE: This is in reality a re-export of `np.core.umath._add_newdoc_ufunc`
81
+ def add_newdoc_ufunc(ufunc: ufunc, new_docstring: str, /) -> None: ...
82
+
83
+ @overload
84
+ def rot90(
85
+ m: _ArrayLike[_SCT],
86
+ k: int = ...,
87
+ axes: tuple[int, int] = ...,
88
+ ) -> NDArray[_SCT]: ...
89
+ @overload
90
+ def rot90(
91
+ m: ArrayLike,
92
+ k: int = ...,
93
+ axes: tuple[int, int] = ...,
94
+ ) -> NDArray[Any]: ...
95
+
96
+ @overload
97
+ def flip(m: _SCT, axis: None = ...) -> _SCT: ...
98
+ @overload
99
+ def flip(m: _ScalarLike_co, axis: None = ...) -> Any: ...
100
+ @overload
101
+ def flip(m: _ArrayLike[_SCT], axis: None | _ShapeLike = ...) -> NDArray[_SCT]: ...
102
+ @overload
103
+ def flip(m: ArrayLike, axis: None | _ShapeLike = ...) -> NDArray[Any]: ...
104
+
105
+ def iterable(y: object) -> TypeGuard[Iterable[Any]]: ...
106
+
107
+ @overload
108
+ def average(
109
+ a: _ArrayLikeFloat_co,
110
+ axis: None = ...,
111
+ weights: None | _ArrayLikeFloat_co = ...,
112
+ returned: L[False] = ...,
113
+ keepdims: L[False] = ...,
114
+ ) -> floating[Any]: ...
115
+ @overload
116
+ def average(
117
+ a: _ArrayLikeComplex_co,
118
+ axis: None = ...,
119
+ weights: None | _ArrayLikeComplex_co = ...,
120
+ returned: L[False] = ...,
121
+ keepdims: L[False] = ...,
122
+ ) -> complexfloating[Any, Any]: ...
123
+ @overload
124
+ def average(
125
+ a: _ArrayLikeObject_co,
126
+ axis: None = ...,
127
+ weights: None | Any = ...,
128
+ returned: L[False] = ...,
129
+ keepdims: L[False] = ...,
130
+ ) -> Any: ...
131
+ @overload
132
+ def average(
133
+ a: _ArrayLikeFloat_co,
134
+ axis: None = ...,
135
+ weights: None | _ArrayLikeFloat_co = ...,
136
+ returned: L[True] = ...,
137
+ keepdims: L[False] = ...,
138
+ ) -> _2Tuple[floating[Any]]: ...
139
+ @overload
140
+ def average(
141
+ a: _ArrayLikeComplex_co,
142
+ axis: None = ...,
143
+ weights: None | _ArrayLikeComplex_co = ...,
144
+ returned: L[True] = ...,
145
+ keepdims: L[False] = ...,
146
+ ) -> _2Tuple[complexfloating[Any, Any]]: ...
147
+ @overload
148
+ def average(
149
+ a: _ArrayLikeObject_co,
150
+ axis: None = ...,
151
+ weights: None | Any = ...,
152
+ returned: L[True] = ...,
153
+ keepdims: L[False] = ...,
154
+ ) -> _2Tuple[Any]: ...
155
+ @overload
156
+ def average(
157
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
158
+ axis: None | _ShapeLike = ...,
159
+ weights: None | Any = ...,
160
+ returned: L[False] = ...,
161
+ keepdims: bool = ...,
162
+ ) -> Any: ...
163
+ @overload
164
+ def average(
165
+ a: _ArrayLikeComplex_co | _ArrayLikeObject_co,
166
+ axis: None | _ShapeLike = ...,
167
+ weights: None | Any = ...,
168
+ returned: L[True] = ...,
169
+ keepdims: bool = ...,
170
+ ) -> _2Tuple[Any]: ...
171
+
172
+ @overload
173
+ def asarray_chkfinite(
174
+ a: _ArrayLike[_SCT],
175
+ dtype: None = ...,
176
+ order: _OrderKACF = ...,
177
+ ) -> NDArray[_SCT]: ...
178
+ @overload
179
+ def asarray_chkfinite(
180
+ a: object,
181
+ dtype: None = ...,
182
+ order: _OrderKACF = ...,
183
+ ) -> NDArray[Any]: ...
184
+ @overload
185
+ def asarray_chkfinite(
186
+ a: Any,
187
+ dtype: _DTypeLike[_SCT],
188
+ order: _OrderKACF = ...,
189
+ ) -> NDArray[_SCT]: ...
190
+ @overload
191
+ def asarray_chkfinite(
192
+ a: Any,
193
+ dtype: DTypeLike,
194
+ order: _OrderKACF = ...,
195
+ ) -> NDArray[Any]: ...
196
+
197
+ # TODO: Use PEP 612 `ParamSpec` once mypy supports `Concatenate`
198
+ # xref python/mypy#8645
199
+ @overload
200
+ def piecewise(
201
+ x: _ArrayLike[_SCT],
202
+ condlist: ArrayLike,
203
+ funclist: Sequence[Any | Callable[..., Any]],
204
+ *args: Any,
205
+ **kw: Any,
206
+ ) -> NDArray[_SCT]: ...
207
+ @overload
208
+ def piecewise(
209
+ x: ArrayLike,
210
+ condlist: ArrayLike,
211
+ funclist: Sequence[Any | Callable[..., Any]],
212
+ *args: Any,
213
+ **kw: Any,
214
+ ) -> NDArray[Any]: ...
215
+
216
+ def select(
217
+ condlist: Sequence[ArrayLike],
218
+ choicelist: Sequence[ArrayLike],
219
+ default: ArrayLike = ...,
220
+ ) -> NDArray[Any]: ...
221
+
222
+ @overload
223
+ def copy(
224
+ a: _ArrayType,
225
+ order: _OrderKACF,
226
+ subok: L[True],
227
+ ) -> _ArrayType: ...
228
+ @overload
229
+ def copy(
230
+ a: _ArrayType,
231
+ order: _OrderKACF = ...,
232
+ *,
233
+ subok: L[True],
234
+ ) -> _ArrayType: ...
235
+ @overload
236
+ def copy(
237
+ a: _ArrayLike[_SCT],
238
+ order: _OrderKACF = ...,
239
+ subok: L[False] = ...,
240
+ ) -> NDArray[_SCT]: ...
241
+ @overload
242
+ def copy(
243
+ a: ArrayLike,
244
+ order: _OrderKACF = ...,
245
+ subok: L[False] = ...,
246
+ ) -> NDArray[Any]: ...
247
+
248
+ def gradient(
249
+ f: ArrayLike,
250
+ *varargs: ArrayLike,
251
+ axis: None | _ShapeLike = ...,
252
+ edge_order: L[1, 2] = ...,
253
+ ) -> Any: ...
254
+
255
+ @overload
256
+ def diff(
257
+ a: _T,
258
+ n: L[0],
259
+ axis: SupportsIndex = ...,
260
+ prepend: ArrayLike = ...,
261
+ append: ArrayLike = ...,
262
+ ) -> _T: ...
263
+ @overload
264
+ def diff(
265
+ a: ArrayLike,
266
+ n: int = ...,
267
+ axis: SupportsIndex = ...,
268
+ prepend: ArrayLike = ...,
269
+ append: ArrayLike = ...,
270
+ ) -> NDArray[Any]: ...
271
+
272
+ @overload
273
+ def interp(
274
+ x: _ArrayLikeFloat_co,
275
+ xp: _ArrayLikeFloat_co,
276
+ fp: _ArrayLikeFloat_co,
277
+ left: None | _FloatLike_co = ...,
278
+ right: None | _FloatLike_co = ...,
279
+ period: None | _FloatLike_co = ...,
280
+ ) -> NDArray[float64]: ...
281
+ @overload
282
+ def interp(
283
+ x: _ArrayLikeFloat_co,
284
+ xp: _ArrayLikeFloat_co,
285
+ fp: _ArrayLikeComplex_co,
286
+ left: None | _ComplexLike_co = ...,
287
+ right: None | _ComplexLike_co = ...,
288
+ period: None | _FloatLike_co = ...,
289
+ ) -> NDArray[complex128]: ...
290
+
291
+ @overload
292
+ def angle(z: _ComplexLike_co, deg: bool = ...) -> floating[Any]: ...
293
+ @overload
294
+ def angle(z: object_, deg: bool = ...) -> Any: ...
295
+ @overload
296
+ def angle(z: _ArrayLikeComplex_co, deg: bool = ...) -> NDArray[floating[Any]]: ...
297
+ @overload
298
+ def angle(z: _ArrayLikeObject_co, deg: bool = ...) -> NDArray[object_]: ...
299
+
300
+ @overload
301
+ def unwrap(
302
+ p: _ArrayLikeFloat_co,
303
+ discont: None | float = ...,
304
+ axis: int = ...,
305
+ *,
306
+ period: float = ...,
307
+ ) -> NDArray[floating[Any]]: ...
308
+ @overload
309
+ def unwrap(
310
+ p: _ArrayLikeObject_co,
311
+ discont: None | float = ...,
312
+ axis: int = ...,
313
+ *,
314
+ period: float = ...,
315
+ ) -> NDArray[object_]: ...
316
+
317
+ def sort_complex(a: ArrayLike) -> NDArray[complexfloating[Any, Any]]: ...
318
+
319
+ def trim_zeros(
320
+ filt: _TrimZerosSequence[_T],
321
+ trim: L["f", "b", "fb", "bf"] = ...,
322
+ ) -> _T: ...
323
+
324
+ @overload
325
+ def extract(condition: ArrayLike, arr: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
326
+ @overload
327
+ def extract(condition: ArrayLike, arr: ArrayLike) -> NDArray[Any]: ...
328
+
329
+ def place(arr: NDArray[Any], mask: ArrayLike, vals: Any) -> None: ...
330
+
331
+ def disp(
332
+ mesg: object,
333
+ device: None | _SupportsWriteFlush = ...,
334
+ linefeed: bool = ...,
335
+ ) -> None: ...
336
+
337
+ @overload
338
+ def cov(
339
+ m: _ArrayLikeFloat_co,
340
+ y: None | _ArrayLikeFloat_co = ...,
341
+ rowvar: bool = ...,
342
+ bias: bool = ...,
343
+ ddof: None | SupportsIndex | SupportsInt = ...,
344
+ fweights: None | ArrayLike = ...,
345
+ aweights: None | ArrayLike = ...,
346
+ *,
347
+ dtype: None = ...,
348
+ ) -> NDArray[floating[Any]]: ...
349
+ @overload
350
+ def cov(
351
+ m: _ArrayLikeComplex_co,
352
+ y: None | _ArrayLikeComplex_co = ...,
353
+ rowvar: bool = ...,
354
+ bias: bool = ...,
355
+ ddof: None | SupportsIndex | SupportsInt = ...,
356
+ fweights: None | ArrayLike = ...,
357
+ aweights: None | ArrayLike = ...,
358
+ *,
359
+ dtype: None = ...,
360
+ ) -> NDArray[complexfloating[Any, Any]]: ...
361
+ @overload
362
+ def cov(
363
+ m: _ArrayLikeComplex_co,
364
+ y: None | _ArrayLikeComplex_co = ...,
365
+ rowvar: bool = ...,
366
+ bias: bool = ...,
367
+ ddof: None | SupportsIndex | SupportsInt = ...,
368
+ fweights: None | ArrayLike = ...,
369
+ aweights: None | ArrayLike = ...,
370
+ *,
371
+ dtype: _DTypeLike[_SCT],
372
+ ) -> NDArray[_SCT]: ...
373
+ @overload
374
+ def cov(
375
+ m: _ArrayLikeComplex_co,
376
+ y: None | _ArrayLikeComplex_co = ...,
377
+ rowvar: bool = ...,
378
+ bias: bool = ...,
379
+ ddof: None | SupportsIndex | SupportsInt = ...,
380
+ fweights: None | ArrayLike = ...,
381
+ aweights: None | ArrayLike = ...,
382
+ *,
383
+ dtype: DTypeLike,
384
+ ) -> NDArray[Any]: ...
385
+
386
+ # NOTE `bias` and `ddof` have been deprecated
387
+ @overload
388
+ def corrcoef(
389
+ m: _ArrayLikeFloat_co,
390
+ y: None | _ArrayLikeFloat_co = ...,
391
+ rowvar: bool = ...,
392
+ *,
393
+ dtype: None = ...,
394
+ ) -> NDArray[floating[Any]]: ...
395
+ @overload
396
+ def corrcoef(
397
+ m: _ArrayLikeComplex_co,
398
+ y: None | _ArrayLikeComplex_co = ...,
399
+ rowvar: bool = ...,
400
+ *,
401
+ dtype: None = ...,
402
+ ) -> NDArray[complexfloating[Any, Any]]: ...
403
+ @overload
404
+ def corrcoef(
405
+ m: _ArrayLikeComplex_co,
406
+ y: None | _ArrayLikeComplex_co = ...,
407
+ rowvar: bool = ...,
408
+ *,
409
+ dtype: _DTypeLike[_SCT],
410
+ ) -> NDArray[_SCT]: ...
411
+ @overload
412
+ def corrcoef(
413
+ m: _ArrayLikeComplex_co,
414
+ y: None | _ArrayLikeComplex_co = ...,
415
+ rowvar: bool = ...,
416
+ *,
417
+ dtype: DTypeLike,
418
+ ) -> NDArray[Any]: ...
419
+
420
+ def blackman(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
421
+
422
+ def bartlett(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
423
+
424
+ def hanning(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
425
+
426
+ def hamming(M: _FloatLike_co) -> NDArray[floating[Any]]: ...
427
+
428
+ def i0(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
429
+
430
+ def kaiser(
431
+ M: _FloatLike_co,
432
+ beta: _FloatLike_co,
433
+ ) -> NDArray[floating[Any]]: ...
434
+
435
+ @overload
436
+ def sinc(x: _FloatLike_co) -> floating[Any]: ...
437
+ @overload
438
+ def sinc(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
439
+ @overload
440
+ def sinc(x: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...
441
+ @overload
442
+ def sinc(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
443
+
444
+ # NOTE: Deprecated
445
+ # def msort(a: ArrayLike) -> NDArray[Any]: ...
446
+
447
+ @overload
448
+ def median(
449
+ a: _ArrayLikeFloat_co,
450
+ axis: None = ...,
451
+ out: None = ...,
452
+ overwrite_input: bool = ...,
453
+ keepdims: L[False] = ...,
454
+ ) -> floating[Any]: ...
455
+ @overload
456
+ def median(
457
+ a: _ArrayLikeComplex_co,
458
+ axis: None = ...,
459
+ out: None = ...,
460
+ overwrite_input: bool = ...,
461
+ keepdims: L[False] = ...,
462
+ ) -> complexfloating[Any, Any]: ...
463
+ @overload
464
+ def median(
465
+ a: _ArrayLikeTD64_co,
466
+ axis: None = ...,
467
+ out: None = ...,
468
+ overwrite_input: bool = ...,
469
+ keepdims: L[False] = ...,
470
+ ) -> timedelta64: ...
471
+ @overload
472
+ def median(
473
+ a: _ArrayLikeObject_co,
474
+ axis: None = ...,
475
+ out: None = ...,
476
+ overwrite_input: bool = ...,
477
+ keepdims: L[False] = ...,
478
+ ) -> Any: ...
479
+ @overload
480
+ def median(
481
+ a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
482
+ axis: None | _ShapeLike = ...,
483
+ out: None = ...,
484
+ overwrite_input: bool = ...,
485
+ keepdims: bool = ...,
486
+ ) -> Any: ...
487
+ @overload
488
+ def median(
489
+ a: _ArrayLikeFloat_co | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
490
+ axis: None | _ShapeLike = ...,
491
+ out: _ArrayType = ...,
492
+ overwrite_input: bool = ...,
493
+ keepdims: bool = ...,
494
+ ) -> _ArrayType: ...
495
+
496
+ _MethodKind = L[
497
+ "inverted_cdf",
498
+ "averaged_inverted_cdf",
499
+ "closest_observation",
500
+ "interpolated_inverted_cdf",
501
+ "hazen",
502
+ "weibull",
503
+ "linear",
504
+ "median_unbiased",
505
+ "normal_unbiased",
506
+ "lower",
507
+ "higher",
508
+ "midpoint",
509
+ "nearest",
510
+ ]
511
+
512
+ @overload
513
+ def percentile(
514
+ a: _ArrayLikeFloat_co,
515
+ q: _FloatLike_co,
516
+ axis: None = ...,
517
+ out: None = ...,
518
+ overwrite_input: bool = ...,
519
+ method: _MethodKind = ...,
520
+ keepdims: L[False] = ...,
521
+ ) -> floating[Any]: ...
522
+ @overload
523
+ def percentile(
524
+ a: _ArrayLikeComplex_co,
525
+ q: _FloatLike_co,
526
+ axis: None = ...,
527
+ out: None = ...,
528
+ overwrite_input: bool = ...,
529
+ method: _MethodKind = ...,
530
+ keepdims: L[False] = ...,
531
+ ) -> complexfloating[Any, Any]: ...
532
+ @overload
533
+ def percentile(
534
+ a: _ArrayLikeTD64_co,
535
+ q: _FloatLike_co,
536
+ axis: None = ...,
537
+ out: None = ...,
538
+ overwrite_input: bool = ...,
539
+ method: _MethodKind = ...,
540
+ keepdims: L[False] = ...,
541
+ ) -> timedelta64: ...
542
+ @overload
543
+ def percentile(
544
+ a: _ArrayLikeDT64_co,
545
+ q: _FloatLike_co,
546
+ axis: None = ...,
547
+ out: None = ...,
548
+ overwrite_input: bool = ...,
549
+ method: _MethodKind = ...,
550
+ keepdims: L[False] = ...,
551
+ ) -> datetime64: ...
552
+ @overload
553
+ def percentile(
554
+ a: _ArrayLikeObject_co,
555
+ q: _FloatLike_co,
556
+ axis: None = ...,
557
+ out: None = ...,
558
+ overwrite_input: bool = ...,
559
+ method: _MethodKind = ...,
560
+ keepdims: L[False] = ...,
561
+ ) -> Any: ...
562
+ @overload
563
+ def percentile(
564
+ a: _ArrayLikeFloat_co,
565
+ q: _ArrayLikeFloat_co,
566
+ axis: None = ...,
567
+ out: None = ...,
568
+ overwrite_input: bool = ...,
569
+ method: _MethodKind = ...,
570
+ keepdims: L[False] = ...,
571
+ ) -> NDArray[floating[Any]]: ...
572
+ @overload
573
+ def percentile(
574
+ a: _ArrayLikeComplex_co,
575
+ q: _ArrayLikeFloat_co,
576
+ axis: None = ...,
577
+ out: None = ...,
578
+ overwrite_input: bool = ...,
579
+ method: _MethodKind = ...,
580
+ keepdims: L[False] = ...,
581
+ ) -> NDArray[complexfloating[Any, Any]]: ...
582
+ @overload
583
+ def percentile(
584
+ a: _ArrayLikeTD64_co,
585
+ q: _ArrayLikeFloat_co,
586
+ axis: None = ...,
587
+ out: None = ...,
588
+ overwrite_input: bool = ...,
589
+ method: _MethodKind = ...,
590
+ keepdims: L[False] = ...,
591
+ ) -> NDArray[timedelta64]: ...
592
+ @overload
593
+ def percentile(
594
+ a: _ArrayLikeDT64_co,
595
+ q: _ArrayLikeFloat_co,
596
+ axis: None = ...,
597
+ out: None = ...,
598
+ overwrite_input: bool = ...,
599
+ method: _MethodKind = ...,
600
+ keepdims: L[False] = ...,
601
+ ) -> NDArray[datetime64]: ...
602
+ @overload
603
+ def percentile(
604
+ a: _ArrayLikeObject_co,
605
+ q: _ArrayLikeFloat_co,
606
+ axis: None = ...,
607
+ out: None = ...,
608
+ overwrite_input: bool = ...,
609
+ method: _MethodKind = ...,
610
+ keepdims: L[False] = ...,
611
+ ) -> NDArray[object_]: ...
612
+ @overload
613
+ def percentile(
614
+ a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
615
+ q: _ArrayLikeFloat_co,
616
+ axis: None | _ShapeLike = ...,
617
+ out: None = ...,
618
+ overwrite_input: bool = ...,
619
+ method: _MethodKind = ...,
620
+ keepdims: bool = ...,
621
+ ) -> Any: ...
622
+ @overload
623
+ def percentile(
624
+ a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeDT64_co | _ArrayLikeObject_co,
625
+ q: _ArrayLikeFloat_co,
626
+ axis: None | _ShapeLike = ...,
627
+ out: _ArrayType = ...,
628
+ overwrite_input: bool = ...,
629
+ method: _MethodKind = ...,
630
+ keepdims: bool = ...,
631
+ ) -> _ArrayType: ...
632
+
633
+ # NOTE: Not an alias, but they do have identical signatures
634
+ # (that we can reuse)
635
+ quantile = percentile
636
+
637
+ # TODO: Returns a scalar for <= 1D array-likes; returns an ndarray otherwise
638
+ def trapz(
639
+ y: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
640
+ x: None | _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co = ...,
641
+ dx: float = ...,
642
+ axis: SupportsIndex = ...,
643
+ ) -> Any: ...
644
+
645
+ def meshgrid(
646
+ *xi: ArrayLike,
647
+ copy: bool = ...,
648
+ sparse: bool = ...,
649
+ indexing: L["xy", "ij"] = ...,
650
+ ) -> list[NDArray[Any]]: ...
651
+
652
+ @overload
653
+ def delete(
654
+ arr: _ArrayLike[_SCT],
655
+ obj: slice | _ArrayLikeInt_co,
656
+ axis: None | SupportsIndex = ...,
657
+ ) -> NDArray[_SCT]: ...
658
+ @overload
659
+ def delete(
660
+ arr: ArrayLike,
661
+ obj: slice | _ArrayLikeInt_co,
662
+ axis: None | SupportsIndex = ...,
663
+ ) -> NDArray[Any]: ...
664
+
665
+ @overload
666
+ def insert(
667
+ arr: _ArrayLike[_SCT],
668
+ obj: slice | _ArrayLikeInt_co,
669
+ values: ArrayLike,
670
+ axis: None | SupportsIndex = ...,
671
+ ) -> NDArray[_SCT]: ...
672
+ @overload
673
+ def insert(
674
+ arr: ArrayLike,
675
+ obj: slice | _ArrayLikeInt_co,
676
+ values: ArrayLike,
677
+ axis: None | SupportsIndex = ...,
678
+ ) -> NDArray[Any]: ...
679
+
680
+ def append(
681
+ arr: ArrayLike,
682
+ values: ArrayLike,
683
+ axis: None | SupportsIndex = ...,
684
+ ) -> NDArray[Any]: ...
685
+
686
+ @overload
687
+ def digitize(
688
+ x: _FloatLike_co,
689
+ bins: _ArrayLikeFloat_co,
690
+ right: bool = ...,
691
+ ) -> intp: ...
692
+ @overload
693
+ def digitize(
694
+ x: _ArrayLikeFloat_co,
695
+ bins: _ArrayLikeFloat_co,
696
+ right: bool = ...,
697
+ ) -> NDArray[intp]: ...
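These stubs only affect static analysis; the calls below behave normally at runtime, and the comments show what a type checker is expected to infer from the overloads above:

    import numpy as np

    avg = np.average([1.0, 2.0, 3.0])                   # floating[Any]
    avg2, wsum = np.average([1.0, 2.0], returned=True)  # _2Tuple[floating[Any]]
    idx = np.digitize(0.5, [0.0, 1.0])                  # scalar input -> intp
    idxs = np.digitize([0.2, 0.8], [0.0, 0.5, 1.0])     # array input -> NDArray[intp]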
venv/lib/python3.10/site-packages/numpy/lib/histograms.pyi ADDED
@@ -0,0 +1,47 @@
1
+ from collections.abc import Sequence
2
+ from typing import (
3
+ Literal as L,
4
+ Any,
5
+ SupportsIndex,
6
+ )
7
+
8
+ from numpy._typing import (
9
+ NDArray,
10
+ ArrayLike,
11
+ )
12
+
13
+ _BinKind = L[
14
+ "stone",
15
+ "auto",
16
+ "doane",
17
+ "fd",
18
+ "rice",
19
+ "scott",
20
+ "sqrt",
21
+ "sturges",
22
+ ]
23
+
24
+ __all__: list[str]
25
+
26
+ def histogram_bin_edges(
27
+ a: ArrayLike,
28
+ bins: _BinKind | SupportsIndex | ArrayLike = ...,
29
+ range: None | tuple[float, float] = ...,
30
+ weights: None | ArrayLike = ...,
31
+ ) -> NDArray[Any]: ...
32
+
33
+ def histogram(
34
+ a: ArrayLike,
35
+ bins: _BinKind | SupportsIndex | ArrayLike = ...,
36
+ range: None | tuple[float, float] = ...,
37
+ density: bool = ...,
38
+ weights: None | ArrayLike = ...,
39
+ ) -> tuple[NDArray[Any], NDArray[Any]]: ...
40
+
41
+ def histogramdd(
42
+ sample: ArrayLike,
43
+ bins: SupportsIndex | ArrayLike = ...,
44
+ range: Sequence[tuple[float, float]] = ...,
45
+ density: None | bool = ...,
46
+ weights: None | ArrayLike = ...,
47
+ ) -> tuple[NDArray[Any], list[NDArray[Any]]]: ...
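A runtime sketch matching these signatures:

    import numpy as np

    data = np.random.default_rng(0).normal(size=1000)
    counts, edges = np.histogram(data, bins='auto')  # 'auto' is a _BinKind string
    assert len(edges) == len(counts) + 1
    hist, edge_list = np.histogramdd(np.column_stack([data, data]), bins=10)
    assert hist.shape == (10, 10) and len(edge_list) == 2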
venv/lib/python3.10/site-packages/numpy/lib/index_tricks.py ADDED
@@ -0,0 +1,1046 @@
1
+ import functools
2
+ import sys
3
+ import math
4
+ import warnings
5
+
6
+ import numpy as np
7
+ from .._utils import set_module
8
+ import numpy.core.numeric as _nx
9
+ from numpy.core.numeric import ScalarType, array
10
+ from numpy.core.numerictypes import issubdtype
11
+
12
+ import numpy.matrixlib as matrixlib
13
+ from .function_base import diff
14
+ from numpy.core.multiarray import ravel_multi_index, unravel_index
15
+ from numpy.core import overrides, linspace
16
+ from numpy.lib.stride_tricks import as_strided
17
+
18
+
19
+ array_function_dispatch = functools.partial(
20
+ overrides.array_function_dispatch, module='numpy')
21
+
22
+
23
+ __all__ = [
24
+ 'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
25
+ 's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
26
+ 'diag_indices', 'diag_indices_from'
27
+ ]
28
+
29
+
30
+ def _ix__dispatcher(*args):
31
+ return args
32
+
33
+
34
+ @array_function_dispatch(_ix__dispatcher)
35
+ def ix_(*args):
36
+ """
37
+ Construct an open mesh from multiple sequences.
38
+
39
+ This function takes N 1-D sequences and returns N outputs with N
40
+ dimensions each, such that the shape is 1 in all but one dimension
41
+ and the dimension with the non-unit shape value cycles through all
42
+ N dimensions.
43
+
44
+ Using `ix_` one can quickly construct index arrays that will index
45
+ the cross product. ``a[np.ix_([1,3],[2,5])]`` returns the array
46
+ ``[[a[1,2] a[1,5]], [a[3,2] a[3,5]]]``.
47
+
48
+ Parameters
49
+ ----------
50
+ args : 1-D sequences
51
+ Each sequence should be of integer or boolean type.
52
+ Boolean sequences will be interpreted as boolean masks for the
53
+ corresponding dimension (equivalent to passing in
54
+ ``np.nonzero(boolean_sequence)``).
55
+
56
+ Returns
57
+ -------
58
+ out : tuple of ndarrays
59
+ N arrays with N dimensions each, with N the number of input
60
+ sequences. Together these arrays form an open mesh.
61
+
62
+ See Also
63
+ --------
64
+ ogrid, mgrid, meshgrid
65
+
66
+ Examples
67
+ --------
68
+ >>> a = np.arange(10).reshape(2, 5)
69
+ >>> a
70
+ array([[0, 1, 2, 3, 4],
71
+ [5, 6, 7, 8, 9]])
72
+ >>> ixgrid = np.ix_([0, 1], [2, 4])
73
+ >>> ixgrid
74
+ (array([[0],
75
+ [1]]), array([[2, 4]]))
76
+ >>> ixgrid[0].shape, ixgrid[1].shape
77
+ ((2, 1), (1, 2))
78
+ >>> a[ixgrid]
79
+ array([[2, 4],
80
+ [7, 9]])
81
+
82
+ >>> ixgrid = np.ix_([True, True], [2, 4])
83
+ >>> a[ixgrid]
84
+ array([[2, 4],
85
+ [7, 9]])
86
+ >>> ixgrid = np.ix_([True, True], [False, False, True, False, True])
87
+ >>> a[ixgrid]
88
+ array([[2, 4],
89
+ [7, 9]])
90
+
91
+ """
92
+ out = []
93
+ nd = len(args)
94
+ for k, new in enumerate(args):
95
+ if not isinstance(new, _nx.ndarray):
96
+ new = np.asarray(new)
97
+ if new.size == 0:
98
+ # Explicitly type empty arrays to avoid float default
99
+ new = new.astype(_nx.intp)
100
+ if new.ndim != 1:
101
+ raise ValueError("Cross index must be 1 dimensional")
102
+ if issubdtype(new.dtype, _nx.bool_):
103
+ new, = new.nonzero()
104
+ new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1))
105
+ out.append(new)
106
+ return tuple(out)
107
+
108
+
109
+ class nd_grid:
110
+ """
111
+ Construct a multi-dimensional "meshgrid".
112
+
113
+ ``grid = nd_grid()`` creates an instance which will return a mesh-grid
114
+ when indexed. The dimension and number of the output arrays are equal
115
+ to the number of indexing dimensions. If the step length is not a
116
+ complex number, then the stop is not inclusive.
117
+
118
+ However, if the step length is a **complex number** (e.g. 5j), then the
119
+ integer part of its magnitude is interpreted as specifying the
120
+ number of points to create between the start and stop values, where
121
+ the stop value **is inclusive**.
122
+
123
+ If instantiated with an argument of ``sparse=True``, the mesh-grid is
124
+ open (or not fleshed out) so that only one-dimension of each returned
125
+ argument is greater than 1.
126
+
127
+ Parameters
128
+ ----------
129
+ sparse : bool, optional
130
+ Whether the grid is sparse or not. Default is False.
131
+
132
+ Notes
133
+ -----
134
+ Two instances of `nd_grid` are made available in the NumPy namespace,
135
+ `mgrid` and `ogrid`, approximately defined as::
136
+
137
+ mgrid = nd_grid(sparse=False)
138
+ ogrid = nd_grid(sparse=True)
139
+
140
+ Users should use these pre-defined instances instead of using `nd_grid`
141
+ directly.
142
+ """
143
+
144
+ def __init__(self, sparse=False):
145
+ self.sparse = sparse
146
+
147
+ def __getitem__(self, key):
148
+ try:
149
+ size = []
150
+ # Mimic the behavior of `np.arange` and use a data type
151
+ # which is at least as large as `np.int_`
152
+ num_list = [0]
153
+ for k in range(len(key)):
154
+ step = key[k].step
155
+ start = key[k].start
156
+ stop = key[k].stop
157
+ if start is None:
158
+ start = 0
159
+ if step is None:
160
+ step = 1
161
+ if isinstance(step, (_nx.complexfloating, complex)):
162
+ step = abs(step)
163
+ size.append(int(step))
164
+ else:
165
+ size.append(
166
+ int(math.ceil((stop - start) / (step*1.0))))
167
+ num_list += [start, stop, step]
168
+ typ = _nx.result_type(*num_list)
169
+ if self.sparse:
170
+ nn = [_nx.arange(_x, dtype=_t)
171
+ for _x, _t in zip(size, (typ,)*len(size))]
172
+ else:
173
+ nn = _nx.indices(size, typ)
174
+ for k, kk in enumerate(key):
175
+ step = kk.step
176
+ start = kk.start
177
+ if start is None:
178
+ start = 0
179
+ if step is None:
180
+ step = 1
181
+ if isinstance(step, (_nx.complexfloating, complex)):
182
+ step = int(abs(step))
183
+ if step != 1:
184
+ step = (kk.stop - start) / float(step - 1)
185
+ nn[k] = (nn[k]*step+start)
186
+ if self.sparse:
187
+ slobj = [_nx.newaxis]*len(size)
188
+ for k in range(len(size)):
189
+ slobj[k] = slice(None, None)
190
+ nn[k] = nn[k][tuple(slobj)]
191
+ slobj[k] = _nx.newaxis
192
+ return nn
193
+ except (IndexError, TypeError):
194
+ step = key.step
195
+ stop = key.stop
196
+ start = key.start
197
+ if start is None:
198
+ start = 0
199
+ if isinstance(step, (_nx.complexfloating, complex)):
200
+ # Prevent the (potential) creation of integer arrays
201
+ step_float = abs(step)
202
+ step = length = int(step_float)
203
+ if step != 1:
204
+ step = (key.stop-start)/float(step-1)
205
+ typ = _nx.result_type(start, stop, step_float)
206
+ return _nx.arange(0, length, 1, dtype=typ)*step + start
207
+ else:
208
+ return _nx.arange(start, stop, step)
209
+
210
+
211
+ class MGridClass(nd_grid):
212
+ """
213
+ An instance which returns a dense multi-dimensional "meshgrid".
214
+
215
+ An instance which returns a dense (or fleshed out) mesh-grid
216
+ when indexed, so that each returned argument has the same shape.
217
+ The dimensions and number of the output arrays are equal to the
218
+ number of indexing dimensions. If the step length is not a complex
219
+ number, then the stop is not inclusive.
220
+
221
+ However, if the step length is a **complex number** (e.g. 5j), then
222
+ the integer part of its magnitude is interpreted as specifying the
223
+ number of points to create between the start and stop values, where
224
+ the stop value **is inclusive**.
225
+
226
+ Returns
227
+ -------
228
+ mesh-grid `ndarrays` all of the same dimensions
229
+
230
+ See Also
231
+ --------
232
+ ogrid : like `mgrid` but returns open (not fleshed out) mesh grids
233
+ meshgrid: return coordinate matrices from coordinate vectors
234
+ r_ : array concatenator
235
+ :ref:`how-to-partition`
236
+
237
+ Examples
238
+ --------
239
+ >>> np.mgrid[0:5, 0:5]
240
+ array([[[0, 0, 0, 0, 0],
241
+ [1, 1, 1, 1, 1],
242
+ [2, 2, 2, 2, 2],
243
+ [3, 3, 3, 3, 3],
244
+ [4, 4, 4, 4, 4]],
245
+ [[0, 1, 2, 3, 4],
246
+ [0, 1, 2, 3, 4],
247
+ [0, 1, 2, 3, 4],
248
+ [0, 1, 2, 3, 4],
249
+ [0, 1, 2, 3, 4]]])
250
+ >>> np.mgrid[-1:1:5j]
251
+ array([-1. , -0.5, 0. , 0.5, 1. ])
252
+
253
+ """
254
+
255
+ def __init__(self):
256
+ super().__init__(sparse=False)
257
+
258
+
259
+ mgrid = MGridClass()
260
+
261
+
262
+ class OGridClass(nd_grid):
263
+ """
264
+ An instance which returns an open multi-dimensional "meshgrid".
265
+
266
+ An instance which returns an open (i.e. not fleshed out) mesh-grid
267
+ when indexed, so that only one dimension of each returned array is
268
+ greater than 1. The dimension and number of the output arrays are
269
+ equal to the number of indexing dimensions. If the step length is
270
+ not a complex number, then the stop is not inclusive.
271
+
272
+ However, if the step length is a **complex number** (e.g. 5j), then
273
+ the integer part of its magnitude is interpreted as specifying the
274
+ number of points to create between the start and stop values, where
275
+ the stop value **is inclusive**.
276
+
277
+ Returns
278
+ -------
279
+ mesh-grid
280
+ `ndarrays` with only one dimension not equal to 1
281
+
282
+ See Also
283
+ --------
284
+ mgrid : like `ogrid` but returns dense (or fleshed out) mesh grids
285
+ meshgrid: return coordinate matrices from coordinate vectors
286
+ r_ : array concatenator
287
+ :ref:`how-to-partition`
288
+
289
+ Examples
290
+ --------
291
+ >>> from numpy import ogrid
292
+ >>> ogrid[-1:1:5j]
293
+ array([-1. , -0.5, 0. , 0.5, 1. ])
294
+ >>> ogrid[0:5,0:5]
295
+ [array([[0],
296
+ [1],
297
+ [2],
298
+ [3],
299
+ [4]]), array([[0, 1, 2, 3, 4]])]
300
+
301
+ """
302
+
303
+ def __init__(self):
304
+ super().__init__(sparse=True)
305
+
306
+
307
+ ogrid = OGridClass()
308
+
309
+
310
+ class AxisConcatenator:
311
+ """
312
+ Translates slice objects to concatenation along an axis.
313
+
314
+ For detailed documentation on usage, see `r_`.
315
+ """
316
+ # allow ma.mr_ to override this
317
+ concatenate = staticmethod(_nx.concatenate)
318
+ makemat = staticmethod(matrixlib.matrix)
319
+
320
+ def __init__(self, axis=0, matrix=False, ndmin=1, trans1d=-1):
321
+ self.axis = axis
322
+ self.matrix = matrix
323
+ self.trans1d = trans1d
324
+ self.ndmin = ndmin
325
+
326
+ def __getitem__(self, key):
327
+ # handle matrix builder syntax
328
+ if isinstance(key, str):
329
+ frame = sys._getframe().f_back
330
+ mymat = matrixlib.bmat(key, frame.f_globals, frame.f_locals)
331
+ return mymat
332
+
333
+ if not isinstance(key, tuple):
334
+ key = (key,)
335
+
336
+ # copy attributes, since they can be overridden in the first argument
337
+ trans1d = self.trans1d
338
+ ndmin = self.ndmin
339
+ matrix = self.matrix
340
+ axis = self.axis
341
+
342
+ objs = []
343
+ # dtypes or scalars for weak scalar handling in result_type
344
+ result_type_objs = []
345
+
346
+ for k, item in enumerate(key):
347
+ scalar = False
348
+ if isinstance(item, slice):
349
+ step = item.step
350
+ start = item.start
351
+ stop = item.stop
352
+ if start is None:
353
+ start = 0
354
+ if step is None:
355
+ step = 1
356
+ if isinstance(step, (_nx.complexfloating, complex)):
357
+ size = int(abs(step))
358
+ newobj = linspace(start, stop, num=size)
359
+ else:
360
+ newobj = _nx.arange(start, stop, step)
361
+ if ndmin > 1:
362
+ newobj = array(newobj, copy=False, ndmin=ndmin)
363
+ if trans1d != -1:
364
+ newobj = newobj.swapaxes(-1, trans1d)
365
+ elif isinstance(item, str):
366
+ if k != 0:
367
+ raise ValueError("special directives must be the "
368
+ "first entry.")
369
+ if item in ('r', 'c'):
370
+ matrix = True
371
+ col = (item == 'c')
372
+ continue
373
+ if ',' in item:
374
+ vec = item.split(',')
375
+ try:
376
+ axis, ndmin = [int(x) for x in vec[:2]]
377
+ if len(vec) == 3:
378
+ trans1d = int(vec[2])
379
+ continue
380
+ except Exception as e:
381
+ raise ValueError(
382
+ "unknown special directive {!r}".format(item)
383
+ ) from e
384
+ try:
385
+ axis = int(item)
386
+ continue
387
+ except (ValueError, TypeError) as e:
388
+ raise ValueError("unknown special directive") from e
389
+ elif type(item) in ScalarType:
390
+ scalar = True
391
+ newobj = item
392
+ else:
393
+ item_ndim = np.ndim(item)
394
+ newobj = array(item, copy=False, subok=True, ndmin=ndmin)
395
+ if trans1d != -1 and item_ndim < ndmin:
396
+ k2 = ndmin - item_ndim
397
+ k1 = trans1d
398
+ if k1 < 0:
399
+ k1 += k2 + 1
400
+ defaxes = list(range(ndmin))
401
+ axes = defaxes[:k1] + defaxes[k2:] + defaxes[k1:k2]
402
+ newobj = newobj.transpose(axes)
403
+
404
+ objs.append(newobj)
405
+ if scalar:
406
+ result_type_objs.append(item)
407
+ else:
408
+ result_type_objs.append(newobj.dtype)
409
+
410
+ # Ensure that scalars won't up-cast unless warranted; with 0 objects this
411
+ # drops through to the error in concatenate.
412
+ if len(result_type_objs) != 0:
413
+ final_dtype = _nx.result_type(*result_type_objs)
414
+ # concatenate could do the cast, but that can be overridden:
415
+ objs = [array(obj, copy=False, subok=True,
416
+ ndmin=ndmin, dtype=final_dtype) for obj in objs]
417
+
418
+ res = self.concatenate(tuple(objs), axis=axis)
419
+
420
+ if matrix:
421
+ oldndim = res.ndim
422
+ res = self.makemat(res)
423
+ if oldndim == 1 and col:
424
+ res = res.T
425
+ return res
426
+
427
+ def __len__(self):
428
+ return 0
429
+
430
+ # separate classes are used here instead of just making r_ = concatenator(0),
431
+ # etc. because otherwise we couldn't get the doc string to come out right
432
+ # in help(r_)
433
+
434
+
435
+ class RClass(AxisConcatenator):
436
+ """
437
+ Translates slice objects to concatenation along the first axis.
438
+
439
+ This is a simple way to build up arrays quickly. There are two use cases.
440
+
441
+ 1. If the index expression contains comma separated arrays, then stack
442
+ them along their first axis.
443
+ 2. If the index expression contains slice notation or scalars then create
444
+ a 1-D array with a range indicated by the slice notation.
445
+
446
+ If slice notation is used, the syntax ``start:stop:step`` is equivalent
447
+ to ``np.arange(start, stop, step)`` inside of the brackets. However, if
448
+ ``step`` is an imaginary number (e.g. 100j) then its integer portion is
449
+ interpreted as a number-of-points desired and the start and stop are
450
+ inclusive. In other words ``start:stop:stepj`` is interpreted as
451
+ ``np.linspace(start, stop, step, endpoint=True)`` inside of the brackets.
452
+ After expansion of slice notation, all comma separated sequences are
453
+ concatenated together.
454
+
455
+ Optional character strings placed as the first element of the index
456
+ expression can be used to change the output. The strings 'r' or 'c' result
457
+ in matrix output. If the result is 1-D and 'r' is specified a 1 x N (row)
458
+ matrix is produced. If the result is 1-D and 'c' is specified, then a N x 1
459
+ (column) matrix is produced. If the result is 2-D then both provide the
460
+ same matrix result.
461
+
462
+ A string integer specifies which axis to stack multiple comma separated
463
+ arrays along. A string of two comma-separated integers allows indication
464
+ of the minimum number of dimensions to force each entry into as the
465
+ second integer (the axis to concatenate along is still the first integer).
466
+
467
+ A string with three comma-separated integers allows specification of the
468
+ axis to concatenate along, the minimum number of dimensions to force the
469
+ entries to, and which axis should contain the start of the arrays which
470
+ are less than the specified number of dimensions. In other words the third
471
+ integer allows you to specify where the 1's should be placed in the shape
472
+ of the arrays that have their shapes upgraded. By default, they are placed
473
+ in the front of the shape tuple. The third argument allows you to specify
474
+ where the start of the array should be instead. Thus, a third argument of
475
+ '0' would place the 1's at the end of the array shape. Negative integers
476
+ specify where in the new shape tuple the last dimension of upgraded arrays
477
+ should be placed, so the default is '-1'.
478
+
479
+ Parameters
480
+ ----------
481
+ Not a function, so takes no parameters
482
+
483
+
484
+ Returns
485
+ -------
486
+ A concatenated ndarray or matrix.
487
+
488
+ See Also
489
+ --------
490
+ concatenate : Join a sequence of arrays along an existing axis.
491
+ c_ : Translates slice objects to concatenation along the second axis.
492
+
493
+ Examples
494
+ --------
495
+ >>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
496
+ array([1, 2, 3, ..., 4, 5, 6])
497
+ >>> np.r_[-1:1:6j, [0]*3, 5, 6]
498
+ array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])
499
+
500
+ String integers specify the axis to concatenate along or the minimum
501
+ number of dimensions to force entries into.
502
+
503
+ >>> a = np.array([[0, 1, 2], [3, 4, 5]])
504
+ >>> np.r_['-1', a, a] # concatenate along last axis
505
+ array([[0, 1, 2, 0, 1, 2],
506
+ [3, 4, 5, 3, 4, 5]])
507
+ >>> np.r_['0,2', [1,2,3], [4,5,6]] # concatenate along first axis, dim>=2
508
+ array([[1, 2, 3],
509
+ [4, 5, 6]])
510
+
511
+ >>> np.r_['0,2,0', [1,2,3], [4,5,6]]
512
+ array([[1],
513
+ [2],
514
+ [3],
515
+ [4],
516
+ [5],
517
+ [6]])
518
+ >>> np.r_['1,2,0', [1,2,3], [4,5,6]]
519
+ array([[1, 4],
520
+ [2, 5],
521
+ [3, 6]])
522
+
523
+ Using 'r' or 'c' as a first string argument creates a matrix.
524
+
525
+ >>> np.r_['r',[1,2,3], [4,5,6]]
526
+ matrix([[1, 2, 3, 4, 5, 6]])
527
+
528
+ """
529
+
530
+ def __init__(self):
531
+ AxisConcatenator.__init__(self, 0)
532
+
533
+
534
+ r_ = RClass()
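A minimal sketch (editor's addition, not in the source) of the slice translation `AxisConcatenator.__getitem__` performs for `r_`: a complex step becomes an inclusive linspace, a real step becomes arange, and a leading string directive picks the concatenation axis.

import numpy as np

assert np.allclose(np.r_[-1:1:5j], np.linspace(-1, 1, 5))  # complex step
assert np.allclose(np.r_[0:10:2], np.arange(0, 10, 2))     # real step
a = np.array([[0, 1], [2, 3]])
assert np.r_['-1', a, a].shape == (2, 4)                   # axis directive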
535
+
536
+
537
+ class CClass(AxisConcatenator):
538
+ """
539
+ Translates slice objects to concatenation along the second axis.
540
+
541
+ This is short-hand for ``np.r_['-1,2,0', index expression]``, which is
542
+ useful because of its common occurrence. In particular, arrays will be
543
+ stacked along their last axis after being upgraded to at least 2-D with
544
+ 1's post-pended to the shape (column vectors made out of 1-D arrays).
545
+
546
+ See Also
547
+ --------
548
+ column_stack : Stack 1-D arrays as columns into a 2-D array.
549
+ r_ : For more detailed documentation.
550
+
551
+ Examples
552
+ --------
553
+ >>> np.c_[np.array([1,2,3]), np.array([4,5,6])]
554
+ array([[1, 4],
555
+ [2, 5],
556
+ [3, 6]])
557
+ >>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
558
+ array([[1, 2, 3, ..., 4, 5, 6]])
559
+
560
+ """
561
+
562
+ def __init__(self):
563
+ AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
564
+
565
+
566
+ c_ = CClass()
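Sketch (editor's note): `c_` is constructed above as `AxisConcatenator(-1, ndmin=2, trans1d=0)`, so the three spellings below produce identical arrays.

import numpy as np

a, b = np.array([1, 2, 3]), np.array([4, 5, 6])
assert (np.c_[a, b] == np.r_['-1,2,0', a, b]).all()
assert (np.c_[a, b] == np.column_stack((a, b))).all()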
567
+
568
+
569
+ @set_module('numpy')
570
+ class ndenumerate:
571
+ """
572
+ Multidimensional index iterator.
573
+
574
+ Return an iterator yielding pairs of array coordinates and values.
575
+
576
+ Parameters
577
+ ----------
578
+ arr : ndarray
579
+ Input array.
580
+
581
+ See Also
582
+ --------
583
+ ndindex, flatiter
584
+
585
+ Examples
586
+ --------
587
+ >>> a = np.array([[1, 2], [3, 4]])
588
+ >>> for index, x in np.ndenumerate(a):
589
+ ... print(index, x)
590
+ (0, 0) 1
591
+ (0, 1) 2
592
+ (1, 0) 3
593
+ (1, 1) 4
594
+
595
+ """
596
+
597
+ def __init__(self, arr):
598
+ self.iter = np.asarray(arr).flat
599
+
600
+ def __next__(self):
601
+ """
602
+ Standard iterator method, returns the index tuple and array value.
603
+
604
+ Returns
605
+ -------
606
+ coords : tuple of ints
607
+ The indices of the current iteration.
608
+ val : scalar
609
+ The array element of the current iteration.
610
+
611
+ """
612
+ return self.iter.coords, next(self.iter)
613
+
614
+ def __iter__(self):
615
+ return self
616
+
617
+
618
+ @set_module('numpy')
619
+ class ndindex:
620
+ """
621
+ An N-dimensional iterator object to index arrays.
622
+
623
+ Given the shape of an array, an `ndindex` instance iterates over
624
+ the N-dimensional index of the array. At each iteration a tuple
625
+ of indices is returned, the last dimension is iterated over first.
626
+
627
+ Parameters
628
+ ----------
629
+ shape : ints, or a single tuple of ints
630
+ The size of each dimension of the array can be passed as
631
+ individual parameters or as the elements of a tuple.
632
+
633
+ See Also
634
+ --------
635
+ ndenumerate, flatiter
636
+
637
+ Examples
638
+ --------
639
+ Dimensions as individual arguments
640
+
641
+ >>> for index in np.ndindex(3, 2, 1):
642
+ ... print(index)
643
+ (0, 0, 0)
644
+ (0, 1, 0)
645
+ (1, 0, 0)
646
+ (1, 1, 0)
647
+ (2, 0, 0)
648
+ (2, 1, 0)
649
+
650
+ Same dimensions - but in a tuple ``(3, 2, 1)``
651
+
652
+ >>> for index in np.ndindex((3, 2, 1)):
653
+ ... print(index)
654
+ (0, 0, 0)
655
+ (0, 1, 0)
656
+ (1, 0, 0)
657
+ (1, 1, 0)
658
+ (2, 0, 0)
659
+ (2, 1, 0)
660
+
661
+ """
662
+
663
+ def __init__(self, *shape):
664
+ if len(shape) == 1 and isinstance(shape[0], tuple):
665
+ shape = shape[0]
666
+ x = as_strided(_nx.zeros(1), shape=shape,
667
+ strides=_nx.zeros_like(shape))
668
+ self._it = _nx.nditer(x, flags=['multi_index', 'zerosize_ok'],
669
+ order='C')
670
+
671
+ def __iter__(self):
672
+ return self
673
+
674
+ def ndincr(self):
675
+ """
676
+ Increment the multi-dimensional index by one.
677
+
678
+ This method is for backward compatibility only: do not use.
679
+
680
+ .. deprecated:: 1.20.0
681
+ This method has been advised against since numpy 1.8.0, but only
682
+ started emitting DeprecationWarning as of this version.
683
+ """
684
+ # NumPy 1.20.0, 2020-09-08
685
+ warnings.warn(
686
+ "`ndindex.ndincr()` is deprecated, use `next(ndindex)` instead",
687
+ DeprecationWarning, stacklevel=2)
688
+ next(self)
689
+
690
+ def __next__(self):
691
+ """
692
+ Standard iterator method, updates the index and returns the index
693
+ tuple.
694
+
695
+ Returns
696
+ -------
697
+ val : tuple of ints
698
+ Returns a tuple containing the indices of the current
699
+ iteration.
700
+
701
+ """
702
+ next(self._it)
703
+ return self._it.multi_index
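The `__init__` above leans on a zero-stride trick worth spelling out (editor's sketch): `as_strided` with all-zero strides builds a virtual array of any shape over a single stored element, so `nditer` can walk the multi-index without allocating the full grid.

import numpy as np
from numpy.lib.stride_tricks import as_strided

x = as_strided(np.zeros(1), shape=(1000, 1000), strides=(0, 0))
assert x.shape == (1000, 1000) and x.strides == (0, 0)  # one float of storage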
704
+
705
+
706
+ # You can do all this with slice() plus a few special objects,
707
+ # but there's a lot to remember. This version is simpler because
708
+ # it uses the standard array indexing syntax.
709
+ #
710
+ # Written by Konrad Hinsen <[email protected]>
711
+ # last revision: 1999-7-23
712
+ #
713
+ # Cosmetic changes by T. Oliphant 2001
714
+ #
715
+ #
716
+
717
+ class IndexExpression:
718
+ """
719
+ A nicer way to build up index tuples for arrays.
720
+
721
+ .. note::
722
+ Use one of the two predefined instances `index_exp` or `s_`
723
+ rather than directly using `IndexExpression`.
724
+
725
+ For any index combination, including slicing and axis insertion,
726
+ ``a[indices]`` is the same as ``a[np.index_exp[indices]]`` for any
727
+ array `a`. However, ``np.index_exp[indices]`` can be used anywhere
728
+ in Python code and returns a tuple of slice objects that can be
729
+ used in the construction of complex index expressions.
730
+
731
+ Parameters
732
+ ----------
733
+ maketuple : bool
734
+ If True, always returns a tuple.
735
+
736
+ See Also
737
+ --------
738
+ index_exp : Predefined instance that always returns a tuple:
739
+ `index_exp = IndexExpression(maketuple=True)`.
740
+ s_ : Predefined instance without tuple conversion:
741
+ `s_ = IndexExpression(maketuple=False)`.
742
+
743
+ Notes
744
+ -----
745
+ You can do all this with `slice()` plus a few special objects,
746
+ but there's a lot to remember and this version is simpler because
747
+ it uses the standard array indexing syntax.
748
+
749
+ Examples
750
+ --------
751
+ >>> np.s_[2::2]
752
+ slice(2, None, 2)
753
+ >>> np.index_exp[2::2]
754
+ (slice(2, None, 2),)
755
+
756
+ >>> np.array([0, 1, 2, 3, 4])[np.s_[2::2]]
757
+ array([2, 4])
758
+
759
+ """
760
+
761
+ def __init__(self, maketuple):
762
+ self.maketuple = maketuple
763
+
764
+ def __getitem__(self, item):
765
+ if self.maketuple and not isinstance(item, tuple):
766
+ return (item,)
767
+ else:
768
+ return item
769
+
770
+
771
+ index_exp = IndexExpression(maketuple=True)
772
+ s_ = IndexExpression(maketuple=False)
773
+
774
+ # End contribution from Konrad.
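Sketch (editor's addition): the point of `IndexExpression` is that a slice expression built once with `s_` or `index_exp` is a plain tuple that can be stored and reused across arrays.

import numpy as np

upper_left = np.s_[:2, :2]           # just a tuple of slice objects
a = np.arange(16).reshape(4, 4)
b = np.ones((6, 6))
assert a[upper_left].shape == b[upper_left].shape == (2, 2)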
775
+
776
+
777
+ # The following functions complement those in twodim_base, but are
778
+ # applicable to N-dimensions.
779
+
780
+
781
+ def _fill_diagonal_dispatcher(a, val, wrap=None):
782
+ return (a,)
783
+
784
+
785
+ @array_function_dispatch(_fill_diagonal_dispatcher)
786
+ def fill_diagonal(a, val, wrap=False):
787
+ """Fill the main diagonal of the given array of any dimensionality.
788
+
789
+ For an array `a` with ``a.ndim >= 2``, the diagonal is the list of
790
+ locations with indices ``a[i, ..., i]`` all identical. This function
791
+ modifies the input array in-place; it does not return a value.
792
+
793
+ Parameters
794
+ ----------
795
+ a : array, at least 2-D.
796
+ Array whose diagonal is to be filled; it gets modified in-place.
797
+
798
+ val : scalar or array_like
799
+ Value(s) to write on the diagonal. If `val` is scalar, the value is
800
+ written along the diagonal. If array-like, the flattened `val` is
801
+ written along the diagonal, repeating if necessary to fill all
802
+ diagonal entries.
803
+
804
+ wrap : bool
805
+ For tall matrices in NumPy versions up to 1.6.2, the
806
+ diagonal "wrapped" after N columns. You can have this behavior
807
+ with this option. This affects only tall matrices.
808
+
809
+ See also
810
+ --------
811
+ diag_indices, diag_indices_from
812
+
813
+ Notes
814
+ -----
815
+ .. versionadded:: 1.4.0
816
+
817
+ This functionality can be obtained via `diag_indices`, but internally
818
+ this version uses a much faster implementation that never constructs the
819
+ indices and uses simple slicing.
820
+
821
+ Examples
822
+ --------
823
+ >>> a = np.zeros((3, 3), int)
824
+ >>> np.fill_diagonal(a, 5)
825
+ >>> a
826
+ array([[5, 0, 0],
827
+ [0, 5, 0],
828
+ [0, 0, 5]])
829
+
830
+ The same function can operate on a 4-D array:
831
+
832
+ >>> a = np.zeros((3, 3, 3, 3), int)
833
+ >>> np.fill_diagonal(a, 4)
834
+
835
+ We only show a few blocks for clarity:
836
+
837
+ >>> a[0, 0]
838
+ array([[4, 0, 0],
839
+ [0, 0, 0],
840
+ [0, 0, 0]])
841
+ >>> a[1, 1]
842
+ array([[0, 0, 0],
843
+ [0, 4, 0],
844
+ [0, 0, 0]])
845
+ >>> a[2, 2]
846
+ array([[0, 0, 0],
847
+ [0, 0, 0],
848
+ [0, 0, 4]])
849
+
850
+ The wrap option affects only tall matrices:
851
+
852
+ >>> # tall matrices no wrap
853
+ >>> a = np.zeros((5, 3), int)
854
+ >>> np.fill_diagonal(a, 4)
855
+ >>> a
856
+ array([[4, 0, 0],
857
+ [0, 4, 0],
858
+ [0, 0, 4],
859
+ [0, 0, 0],
860
+ [0, 0, 0]])
861
+
862
+ >>> # tall matrices wrap
863
+ >>> a = np.zeros((5, 3), int)
864
+ >>> np.fill_diagonal(a, 4, wrap=True)
865
+ >>> a
866
+ array([[4, 0, 0],
867
+ [0, 4, 0],
868
+ [0, 0, 4],
869
+ [0, 0, 0],
870
+ [4, 0, 0]])
871
+
872
+ >>> # wide matrices
873
+ >>> a = np.zeros((3, 5), int)
874
+ >>> np.fill_diagonal(a, 4, wrap=True)
875
+ >>> a
876
+ array([[4, 0, 0, 0, 0],
877
+ [0, 4, 0, 0, 0],
878
+ [0, 0, 4, 0, 0]])
879
+
880
+ The anti-diagonal can be filled by reversing the order of elements
881
+ using either `numpy.flipud` or `numpy.fliplr`.
882
+
883
+ >>> a = np.zeros((3, 3), int);
884
+ >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip
885
+ >>> a
886
+ array([[0, 0, 1],
887
+ [0, 2, 0],
888
+ [3, 0, 0]])
889
+ >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip
890
+ >>> a
891
+ array([[0, 0, 3],
892
+ [0, 2, 0],
893
+ [1, 0, 0]])
894
+
895
+ Note that the order in which the diagonal is filled varies depending
896
+ on the flip function.
897
+ """
898
+ if a.ndim < 2:
899
+ raise ValueError("array must be at least 2-d")
900
+ end = None
901
+ if a.ndim == 2:
902
+ # Explicit, fast formula for the common case. For 2-d arrays, we
903
+ # accept rectangular ones.
904
+ step = a.shape[1] + 1
905
+ # This is needed so that tall matrices don't have the diagonal wrap.
906
+ if not wrap:
907
+ end = a.shape[1] * a.shape[1]
908
+ else:
909
+ # For more than d=2, the strided formula is only valid for arrays with
910
+ # all dimensions equal, so we check first.
911
+ if not np.all(diff(a.shape) == 0):
912
+ raise ValueError("All dimensions of input must be of equal length")
913
+ step = 1 + (np.cumprod(a.shape[:-1])).sum()
914
+
915
+ # Write the value out into the diagonal.
916
+ a.flat[:end:step] = val
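A small check of the stride arithmetic above (editor's sketch): for an n-dimensional cube the flat distance between consecutive diagonal entries is ``1 + sum(cumprod(shape[:-1]))``, which is what the slice assignment exploits.

import numpy as np

a = np.zeros((3, 3, 3), int)
step = 1 + np.cumprod(a.shape[:-1]).sum()     # 1 + (3 + 9) = 13
a.flat[::step] = 7
assert (a[np.diag_indices(3, 3)] == 7).all()  # same cells fill_diagonal hits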
917
+
918
+
919
+ @set_module('numpy')
920
+ def diag_indices(n, ndim=2):
921
+ """
922
+ Return the indices to access the main diagonal of an array.
923
+
924
+ This returns a tuple of indices that can be used to access the main
925
+ diagonal of an array `a` with ``a.ndim >= 2`` dimensions and shape
926
+ (n, n, ..., n). For ``a.ndim = 2`` this is the usual diagonal, for
927
+ ``a.ndim > 2`` this is the set of indices to access ``a[i, i, ..., i]``
928
+ for ``i = [0..n-1]``.
929
+
930
+ Parameters
931
+ ----------
932
+ n : int
933
+ The size, along each dimension, of the arrays for which the returned
934
+ indices can be used.
935
+
936
+ ndim : int, optional
937
+ The number of dimensions.
938
+
939
+ See Also
940
+ --------
941
+ diag_indices_from
942
+
943
+ Notes
944
+ -----
945
+ .. versionadded:: 1.4.0
946
+
947
+ Examples
948
+ --------
949
+ Create a set of indices to access the diagonal of a (4, 4) array:
950
+
951
+ >>> di = np.diag_indices(4)
952
+ >>> di
953
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
954
+ >>> a = np.arange(16).reshape(4, 4)
955
+ >>> a
956
+ array([[ 0, 1, 2, 3],
957
+ [ 4, 5, 6, 7],
958
+ [ 8, 9, 10, 11],
959
+ [12, 13, 14, 15]])
960
+ >>> a[di] = 100
961
+ >>> a
962
+ array([[100, 1, 2, 3],
963
+ [ 4, 100, 6, 7],
964
+ [ 8, 9, 100, 11],
965
+ [ 12, 13, 14, 100]])
966
+
967
+ Now, we create indices to manipulate a 3-D array:
968
+
969
+ >>> d3 = np.diag_indices(2, 3)
970
+ >>> d3
971
+ (array([0, 1]), array([0, 1]), array([0, 1]))
972
+
973
+ And use it to set the diagonal of an array of zeros to 1:
974
+
975
+ >>> a = np.zeros((2, 2, 2), dtype=int)
976
+ >>> a[d3] = 1
977
+ >>> a
978
+ array([[[1, 0],
979
+ [0, 0]],
980
+ [[0, 0],
981
+ [0, 1]]])
982
+
983
+ """
984
+ idx = np.arange(n)
985
+ return (idx,) * ndim
986
+
987
+
988
+ def _diag_indices_from(arr):
989
+ return (arr,)
990
+
991
+
992
+ @array_function_dispatch(_diag_indices_from)
993
+ def diag_indices_from(arr):
994
+ """
995
+ Return the indices to access the main diagonal of an n-dimensional array.
996
+
997
+ See `diag_indices` for full details.
998
+
999
+ Parameters
1000
+ ----------
1001
+ arr : array, at least 2-D
1002
+
1003
+ See Also
1004
+ --------
1005
+ diag_indices
1006
+
1007
+ Notes
1008
+ -----
1009
+ .. versionadded:: 1.4.0
1010
+
1011
+ Examples
1012
+ --------
1013
+
1014
+ Create a 4 by 4 array.
1015
+
1016
+ >>> a = np.arange(16).reshape(4, 4)
1017
+ >>> a
1018
+ array([[ 0, 1, 2, 3],
1019
+ [ 4, 5, 6, 7],
1020
+ [ 8, 9, 10, 11],
1021
+ [12, 13, 14, 15]])
1022
+
1023
+ Get the indices of the diagonal elements.
1024
+
1025
+ >>> di = np.diag_indices_from(a)
1026
+ >>> di
1027
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
1028
+
1029
+ >>> a[di]
1030
+ array([ 0, 5, 10, 15])
1031
+
1032
+ This is simply syntactic sugar for diag_indices.
1033
+
1034
+ >>> np.diag_indices(a.shape[0])
1035
+ (array([0, 1, 2, 3]), array([0, 1, 2, 3]))
1036
+
1037
+ """
1038
+
1039
+ if arr.ndim < 2:
1040
+ raise ValueError("input array must be at least 2-d")
1041
+ # For more than d=2, the strided formula is only valid for arrays with
1042
+ # all dimensions equal, so we check first.
1043
+ if not np.all(diff(arr.shape) == 0):
1044
+ raise ValueError("All dimensions of input must be of equal length")
1045
+
1046
+ return diag_indices(arr.shape[0], arr.ndim)
venv/lib/python3.10/site-packages/numpy/lib/mixins.pyi ADDED
@@ -0,0 +1,74 @@
1
+ from abc import ABCMeta, abstractmethod
2
+ from typing import Literal as L, Any
3
+
4
+ from numpy import ufunc
5
+
6
+ __all__: list[str]
7
+
8
+ # NOTE: `NDArrayOperatorsMixin` is not formally an abstract base class,
9
+ # even though it's reliant on subclasses implementing `__array_ufunc__`
10
+
11
+ # NOTE: The accepted input- and output-types of the various dunders are
12
+ # completely dependent on how `__array_ufunc__` is implemented.
13
+ # As such, only limited type safety can be provided here.
14
+
15
+ class NDArrayOperatorsMixin(metaclass=ABCMeta):
16
+ @abstractmethod
17
+ def __array_ufunc__(
18
+ self,
19
+ ufunc: ufunc,
20
+ method: L["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
21
+ *inputs: Any,
22
+ **kwargs: Any,
23
+ ) -> Any: ...
24
+ def __lt__(self, other: Any) -> Any: ...
25
+ def __le__(self, other: Any) -> Any: ...
26
+ def __eq__(self, other: Any) -> Any: ...
27
+ def __ne__(self, other: Any) -> Any: ...
28
+ def __gt__(self, other: Any) -> Any: ...
29
+ def __ge__(self, other: Any) -> Any: ...
30
+ def __add__(self, other: Any) -> Any: ...
31
+ def __radd__(self, other: Any) -> Any: ...
32
+ def __iadd__(self, other: Any) -> Any: ...
33
+ def __sub__(self, other: Any) -> Any: ...
34
+ def __rsub__(self, other: Any) -> Any: ...
35
+ def __isub__(self, other: Any) -> Any: ...
36
+ def __mul__(self, other: Any) -> Any: ...
37
+ def __rmul__(self, other: Any) -> Any: ...
38
+ def __imul__(self, other: Any) -> Any: ...
39
+ def __matmul__(self, other: Any) -> Any: ...
40
+ def __rmatmul__(self, other: Any) -> Any: ...
41
+ def __imatmul__(self, other: Any) -> Any: ...
42
+ def __truediv__(self, other: Any) -> Any: ...
43
+ def __rtruediv__(self, other: Any) -> Any: ...
44
+ def __itruediv__(self, other: Any) -> Any: ...
45
+ def __floordiv__(self, other: Any) -> Any: ...
46
+ def __rfloordiv__(self, other: Any) -> Any: ...
47
+ def __ifloordiv__(self, other: Any) -> Any: ...
48
+ def __mod__(self, other: Any) -> Any: ...
49
+ def __rmod__(self, other: Any) -> Any: ...
50
+ def __imod__(self, other: Any) -> Any: ...
51
+ def __divmod__(self, other: Any) -> Any: ...
52
+ def __rdivmod__(self, other: Any) -> Any: ...
53
+ def __pow__(self, other: Any) -> Any: ...
54
+ def __rpow__(self, other: Any) -> Any: ...
55
+ def __ipow__(self, other: Any) -> Any: ...
56
+ def __lshift__(self, other: Any) -> Any: ...
57
+ def __rlshift__(self, other: Any) -> Any: ...
58
+ def __ilshift__(self, other: Any) -> Any: ...
59
+ def __rshift__(self, other: Any) -> Any: ...
60
+ def __rrshift__(self, other: Any) -> Any: ...
61
+ def __irshift__(self, other: Any) -> Any: ...
62
+ def __and__(self, other: Any) -> Any: ...
63
+ def __rand__(self, other: Any) -> Any: ...
64
+ def __iand__(self, other: Any) -> Any: ...
65
+ def __xor__(self, other: Any) -> Any: ...
66
+ def __rxor__(self, other: Any) -> Any: ...
67
+ def __ixor__(self, other: Any) -> Any: ...
68
+ def __or__(self, other: Any) -> Any: ...
69
+ def __ror__(self, other: Any) -> Any: ...
70
+ def __ior__(self, other: Any) -> Any: ...
71
+ def __neg__(self) -> Any: ...
72
+ def __pos__(self) -> Any: ...
73
+ def __abs__(self) -> Any: ...
74
+ def __invert__(self) -> Any: ...
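Editor's sketch of the pattern this stub describes: a minimal class that supplies `__array_ufunc__` and inherits every arithmetic dunder from the mixin.

import numpy as np
from numpy.lib.mixins import NDArrayOperatorsMixin

class Wrapped(NDArrayOperatorsMixin):
    def __init__(self, value):
        self.value = np.asarray(value)

    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        # unwrap Wrapped operands, apply the ufunc, re-wrap the result
        inputs = tuple(x.value if isinstance(x, Wrapped) else x
                       for x in inputs)
        return Wrapped(getattr(ufunc, method)(*inputs, **kwargs))

assert (Wrapped([1, 2]) + 1).value.tolist() == [2, 3]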
venv/lib/python3.10/site-packages/numpy/lib/nanfunctions.py ADDED
@@ -0,0 +1,1887 @@
1
+ """
2
+ Functions that ignore NaN.
3
+
4
+ Functions
5
+ ---------
6
+
7
+ - `nanmin` -- minimum non-NaN value
8
+ - `nanmax` -- maximum non-NaN value
9
+ - `nanargmin` -- index of minimum non-NaN value
10
+ - `nanargmax` -- index of maximum non-NaN value
11
+ - `nansum` -- sum of non-NaN values
12
+ - `nanprod` -- product of non-NaN values
13
+ - `nancumsum` -- cumulative sum of non-NaN values
14
+ - `nancumprod` -- cumulative product of non-NaN values
15
+ - `nanmean` -- mean of non-NaN values
16
+ - `nanvar` -- variance of non-NaN values
17
+ - `nanstd` -- standard deviation of non-NaN values
18
+ - `nanmedian` -- median of non-NaN values
19
+ - `nanquantile` -- qth quantile of non-NaN values
20
+ - `nanpercentile` -- qth percentile of non-NaN values
21
+
22
+ """
23
+ import functools
24
+ import warnings
25
+ import numpy as np
26
+ from numpy.lib import function_base
27
+ from numpy.core import overrides
28
+
29
+
30
+ array_function_dispatch = functools.partial(
31
+ overrides.array_function_dispatch, module='numpy')
32
+
33
+
34
+ __all__ = [
35
+ 'nansum', 'nanmax', 'nanmin', 'nanargmax', 'nanargmin', 'nanmean',
36
+ 'nanmedian', 'nanpercentile', 'nanvar', 'nanstd', 'nanprod',
37
+ 'nancumsum', 'nancumprod', 'nanquantile'
38
+ ]
39
+
40
+
41
+ def _nan_mask(a, out=None):
42
+ """
43
+ Parameters
44
+ ----------
45
+ a : array-like
46
+ Input array with at least 1 dimension.
47
+ out : ndarray, optional
48
+ Alternate output array in which to place the result. The default
49
+ is ``None``; if provided, it must have the same shape as the
50
+ expected output and will prevent the allocation of a new array.
51
+
52
+ Returns
53
+ -------
54
+ y : bool ndarray or True
55
+ A bool array where ``np.nan`` positions are marked with ``False``
56
+ and other positions are marked with ``True``. If the type of ``a``
57
+ is such that it can't possibly contain ``np.nan``, returns ``True``.
58
+ """
59
+ # we assume that a is an array for this private function
60
+
61
+ if a.dtype.kind not in 'fc':
62
+ return True
63
+
64
+ y = np.isnan(a, out=out)
65
+ y = np.invert(y, out=y)
66
+ return y
67
+
68
+ def _replace_nan(a, val):
69
+ """
70
+ If `a` is of inexact type, make a copy of `a`, replace NaNs with
71
+ the `val` value, and return the copy together with a boolean mask
72
+ marking the locations where NaNs were present. If `a` is not of
73
+ inexact type, do nothing and return `a` together with a mask of None.
74
+
75
+ Note that scalars will end up as array scalars, which is important
76
+ for using the result as the value of the out argument in some
77
+ operations.
78
+
79
+ Parameters
80
+ ----------
81
+ a : array-like
82
+ Input array.
83
+ val : float
84
+ NaN values are set to val before doing the operation.
85
+
86
+ Returns
87
+ -------
88
+ y : ndarray
89
+ If `a` is of inexact type, return a copy of `a` with the NaNs
90
+ replaced by the fill value, otherwise return `a`.
91
+ mask: {bool, None}
92
+ If `a` is of inexact type, return a boolean mask marking locations of
93
+ NaNs, otherwise return None.
94
+
95
+ """
96
+ a = np.asanyarray(a)
97
+
98
+ if a.dtype == np.object_:
99
+ # object arrays do not support `isnan` (gh-9009), so make a guess
100
+ mask = np.not_equal(a, a, dtype=bool)
101
+ elif issubclass(a.dtype.type, np.inexact):
102
+ mask = np.isnan(a)
103
+ else:
104
+ mask = None
105
+
106
+ if mask is not None:
107
+ a = np.array(a, subok=True, copy=True)
108
+ np.copyto(a, val, where=mask)
109
+
110
+ return a, mask
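Illustrative sketch (editor's addition) of how the public nan-functions below use this helper: NaNs are swapped for a neutral value, and the returned mask records where they were.

import numpy as np

filled, mask = _replace_nan(np.array([1.0, np.nan, 3.0]), 0)
assert filled.tolist() == [1.0, 0.0, 3.0]
assert mask.tolist() == [False, True, False]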
111
+
112
+
113
+ def _copyto(a, val, mask):
114
+ """
115
+ Replace values in `a` with NaN where `mask` is True. This differs from
116
+ copyto in that it will deal with the case where `a` is a numpy scalar.
117
+
118
+ Parameters
119
+ ----------
120
+ a : ndarray or numpy scalar
121
+ Array or numpy scalar some of whose values are to be replaced
122
+ by val.
123
+ val : numpy scalar
124
+ Value used as a replacement.
125
+ mask : ndarray, scalar
126
+ Boolean array. Where True the corresponding element of `a` is
127
+ replaced by `val`. Broadcasts.
128
+
129
+ Returns
130
+ -------
131
+ res : ndarray, scalar
132
+ Array with elements replaced or scalar `val`.
133
+
134
+ """
135
+ if isinstance(a, np.ndarray):
136
+ np.copyto(a, val, where=mask, casting='unsafe')
137
+ else:
138
+ a = a.dtype.type(val)
139
+ return a
140
+
141
+
142
+ def _remove_nan_1d(arr1d, overwrite_input=False):
143
+ """
144
+ Equivalent to arr1d[~np.isnan(arr1d)], but in a different order
145
+
146
+ Presumably faster as it incurs fewer copies
147
+
148
+ Parameters
149
+ ----------
150
+ arr1d : ndarray
151
+ Array to remove nans from
152
+ overwrite_input : bool
153
+ True if `arr1d` can be modified in place
154
+
155
+ Returns
156
+ -------
157
+ res : ndarray
158
+ Array with nan elements removed
159
+ overwrite_input : bool
160
+ True if `res` can be modified in place, given the constraint on the
161
+ input
162
+ """
163
+ if arr1d.dtype == object:
164
+ # object arrays do not support `isnan` (gh-9009), so make a guess
165
+ c = np.not_equal(arr1d, arr1d, dtype=bool)
166
+ else:
167
+ c = np.isnan(arr1d)
168
+
169
+ s = np.nonzero(c)[0]
170
+ if s.size == arr1d.size:
171
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
172
+ stacklevel=6)
173
+ return arr1d[:0], True
174
+ elif s.size == 0:
175
+ return arr1d, overwrite_input
176
+ else:
177
+ if not overwrite_input:
178
+ arr1d = arr1d.copy()
179
+ # select non-nans at end of array
180
+ enonan = arr1d[-s.size:][~c[-s.size:]]
181
+ # fill nans in beginning of array with non-nans of end
182
+ arr1d[s[:enonan.size]] = enonan
183
+
184
+ return arr1d[:-s.size], True
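Sketch of the compaction performed above (editor's note): trailing non-NaNs are moved into the NaN slots and the tail is truncated, which avoids a boolean-mask copy but shuffles element order.

import numpy as np

res, can_overwrite = _remove_nan_1d(np.array([np.nan, 1.0, 2.0, np.nan]))
assert sorted(res.tolist()) == [1.0, 2.0]  # values kept, order shuffled
assert can_overwrite                       # a copy was made, safe to reuse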
185
+
186
+
187
+ def _divide_by_count(a, b, out=None):
188
+ """
189
+ Compute a/b ignoring invalid results. If `a` is an array the division
190
+ is done in place. If `a` is a scalar, then its type is preserved in the
191
+ output. If out is None, then a is used instead so that the division
192
+ is in place. Note that this is only called with `a` an inexact type.
193
+
194
+ Parameters
195
+ ----------
196
+ a : {ndarray, numpy scalar}
197
+ Numerator. Expected to be of inexact type but not checked.
198
+ b : {ndarray, numpy scalar}
199
+ Denominator.
200
+ out : ndarray, optional
201
+ Alternate output array in which to place the result. The default
202
+ is ``None``; if provided, it must have the same shape as the
203
+ expected output, but the type will be cast if necessary.
204
+
205
+ Returns
206
+ -------
207
+ ret : {ndarray, numpy scalar}
208
+ The return value is a/b. If `a` was an ndarray the division is done
209
+ in place. If `a` is a numpy scalar, the division preserves its type.
210
+
211
+ """
212
+ with np.errstate(invalid='ignore', divide='ignore'):
213
+ if isinstance(a, np.ndarray):
214
+ if out is None:
215
+ return np.divide(a, b, out=a, casting='unsafe')
216
+ else:
217
+ return np.divide(a, b, out=out, casting='unsafe')
218
+ else:
219
+ if out is None:
220
+ # Precaution against reduced object arrays
221
+ try:
222
+ return a.dtype.type(a / b)
223
+ except AttributeError:
224
+ return a / b
225
+ else:
226
+ # This is questionable, but currently a numpy scalar can
227
+ # be output to a zero dimensional array.
228
+ return np.divide(a, b, out=out, casting='unsafe')
229
+
230
+
231
+ def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None,
232
+ initial=None, where=None):
233
+ return (a, out)
234
+
235
+
236
+ @array_function_dispatch(_nanmin_dispatcher)
237
+ def nanmin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
238
+ where=np._NoValue):
239
+ """
240
+ Return the minimum of an array or minimum along an axis, ignoring any NaNs.
241
+ When all-NaN slices are encountered a ``RuntimeWarning`` is raised and
242
+ NaN is returned for that slice.
243
+
244
+ Parameters
245
+ ----------
246
+ a : array_like
247
+ Array containing numbers whose minimum is desired. If `a` is not an
248
+ array, a conversion is attempted.
249
+ axis : {int, tuple of int, None}, optional
250
+ Axis or axes along which the minimum is computed. The default is to compute
251
+ the minimum of the flattened array.
252
+ out : ndarray, optional
253
+ Alternate output array in which to place the result. The default
254
+ is ``None``; if provided, it must have the same shape as the
255
+ expected output, but the type will be cast if necessary. See
256
+ :ref:`ufuncs-output-type` for more details.
257
+
258
+ .. versionadded:: 1.8.0
259
+ keepdims : bool, optional
260
+ If this is set to True, the axes which are reduced are left
261
+ in the result as dimensions with size one. With this option,
262
+ the result will broadcast correctly against the original `a`.
263
+
264
+ If the value is anything but the default, then
265
+ `keepdims` will be passed through to the `min` method
266
+ of sub-classes of `ndarray`. If the sub-class' method
267
+ does not implement `keepdims`, any exceptions will be raised.
268
+
269
+ .. versionadded:: 1.8.0
270
+ initial : scalar, optional
271
+ The maximum value of an output element. Must be present to allow
272
+ computation on empty slice. See `~numpy.ufunc.reduce` for details.
273
+
274
+ .. versionadded:: 1.22.0
275
+ where : array_like of bool, optional
276
+ Elements to compare for the minimum. See `~numpy.ufunc.reduce`
277
+ for details.
278
+
279
+ .. versionadded:: 1.22.0
280
+
281
+ Returns
282
+ -------
283
+ nanmin : ndarray
284
+ An array with the same shape as `a`, with the specified axis
285
+ removed. If `a` is a 0-d array, or if axis is None, an ndarray
286
+ scalar is returned. The same dtype as `a` is returned.
287
+
288
+ See Also
289
+ --------
290
+ nanmax :
291
+ The maximum value of an array along a given axis, ignoring any NaNs.
292
+ amin :
293
+ The minimum value of an array along a given axis, propagating any NaNs.
294
+ fmin :
295
+ Element-wise minimum of two arrays, ignoring any NaNs.
296
+ minimum :
297
+ Element-wise minimum of two arrays, propagating any NaNs.
298
+ isnan :
299
+ Shows which elements are Not a Number (NaN).
300
+ isfinite:
301
+ Shows which elements are neither NaN nor infinity.
302
+
303
+ amax, fmax, maximum
304
+
305
+ Notes
306
+ -----
307
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
308
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
309
+ Positive infinity is treated as a very large number and negative
310
+ infinity is treated as a very small (i.e. negative) number.
311
+
312
+ If the input has an integer type the function is equivalent to np.min.
313
+
314
+ Examples
315
+ --------
316
+ >>> a = np.array([[1, 2], [3, np.nan]])
317
+ >>> np.nanmin(a)
318
+ 1.0
319
+ >>> np.nanmin(a, axis=0)
320
+ array([1., 2.])
321
+ >>> np.nanmin(a, axis=1)
322
+ array([1., 3.])
323
+
324
+ When positive infinity and negative infinity are present:
325
+
326
+ >>> np.nanmin([1, 2, np.nan, np.inf])
327
+ 1.0
328
+ >>> np.nanmin([1, 2, np.nan, np.NINF])
329
+ -inf
330
+
331
+ """
332
+ kwargs = {}
333
+ if keepdims is not np._NoValue:
334
+ kwargs['keepdims'] = keepdims
335
+ if initial is not np._NoValue:
336
+ kwargs['initial'] = initial
337
+ if where is not np._NoValue:
338
+ kwargs['where'] = where
339
+
340
+ if type(a) is np.ndarray and a.dtype != np.object_:
341
+ # Fast, but not safe for subclasses of ndarray, or object arrays,
342
+ # which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
343
+ res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
344
+ if np.isnan(res).any():
345
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
346
+ stacklevel=2)
347
+ else:
348
+ # Slow, but safe for subclasses of ndarray
349
+ a, mask = _replace_nan(a, +np.inf)
350
+ res = np.amin(a, axis=axis, out=out, **kwargs)
351
+ if mask is None:
352
+ return res
353
+
354
+ # Check for all-NaN axis
355
+ kwargs.pop("initial", None)
356
+ mask = np.all(mask, axis=axis, **kwargs)
357
+ if np.any(mask):
358
+ res = _copyto(res, np.nan, mask)
359
+ warnings.warn("All-NaN axis encountered", RuntimeWarning,
360
+ stacklevel=2)
361
+ return res
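The fast path above relies on a ufunc property worth noting (editor's sketch): `np.fmin.reduce` already skips NaNs on its own, so `nanmin` only has to warn when an entire slice reduced to NaN.

import numpy as np

assert np.fmin.reduce(np.array([3.0, np.nan, 1.0])) == 1.0   # NaN ignored
assert np.isnan(np.fmin.reduce(np.array([np.nan, np.nan])))  # all-NaN slice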
362
+
363
+
364
+ def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None,
365
+ initial=None, where=None):
366
+ return (a, out)
367
+
368
+
369
+ @array_function_dispatch(_nanmax_dispatcher)
370
+ def nanmax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
371
+ where=np._NoValue):
372
+ """
373
+ Return the maximum of an array or maximum along an axis, ignoring any
374
+ NaNs. When all-NaN slices are encountered a ``RuntimeWarning`` is
375
+ raised and NaN is returned for that slice.
376
+
377
+ Parameters
378
+ ----------
379
+ a : array_like
380
+ Array containing numbers whose maximum is desired. If `a` is not an
381
+ array, a conversion is attempted.
382
+ axis : {int, tuple of int, None}, optional
383
+ Axis or axes along which the maximum is computed. The default is to compute
384
+ the maximum of the flattened array.
385
+ out : ndarray, optional
386
+ Alternate output array in which to place the result. The default
387
+ is ``None``; if provided, it must have the same shape as the
388
+ expected output, but the type will be cast if necessary. See
389
+ :ref:`ufuncs-output-type` for more details.
390
+
391
+ .. versionadded:: 1.8.0
392
+ keepdims : bool, optional
393
+ If this is set to True, the axes which are reduced are left
394
+ in the result as dimensions with size one. With this option,
395
+ the result will broadcast correctly against the original `a`.
396
+
397
+ If the value is anything but the default, then
398
+ `keepdims` will be passed through to the `max` method
399
+ of sub-classes of `ndarray`. If the sub-class' method
400
+ does not implement `keepdims`, any exceptions will be raised.
401
+
402
+ .. versionadded:: 1.8.0
403
+ initial : scalar, optional
404
+ The minimum value of an output element. Must be present to allow
405
+ computation on empty slice. See `~numpy.ufunc.reduce` for details.
406
+
407
+ .. versionadded:: 1.22.0
408
+ where : array_like of bool, optional
409
+ Elements to compare for the maximum. See `~numpy.ufunc.reduce`
410
+ for details.
411
+
412
+ .. versionadded:: 1.22.0
413
+
414
+ Returns
415
+ -------
416
+ nanmax : ndarray
417
+ An array with the same shape as `a`, with the specified axis removed.
418
+ If `a` is a 0-d array, or if axis is None, an ndarray scalar is
419
+ returned. The same dtype as `a` is returned.
420
+
421
+ See Also
422
+ --------
423
+ nanmin :
424
+ The minimum value of an array along a given axis, ignoring any NaNs.
425
+ amax :
426
+ The maximum value of an array along a given axis, propagating any NaNs.
427
+ fmax :
428
+ Element-wise maximum of two arrays, ignoring any NaNs.
429
+ maximum :
430
+ Element-wise maximum of two arrays, propagating any NaNs.
431
+ isnan :
432
+ Shows which elements are Not a Number (NaN).
433
+ isfinite:
434
+ Shows which elements are neither NaN nor infinity.
435
+
436
+ amin, fmin, minimum
437
+
438
+ Notes
439
+ -----
440
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
441
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
442
+ Positive infinity is treated as a very large number and negative
443
+ infinity is treated as a very small (i.e. negative) number.
444
+
445
+ If the input has an integer type the function is equivalent to np.max.
446
+
447
+ Examples
448
+ --------
449
+ >>> a = np.array([[1, 2], [3, np.nan]])
450
+ >>> np.nanmax(a)
451
+ 3.0
452
+ >>> np.nanmax(a, axis=0)
453
+ array([3., 2.])
454
+ >>> np.nanmax(a, axis=1)
455
+ array([2., 3.])
456
+
457
+ When positive infinity and negative infinity are present:
458
+
459
+ >>> np.nanmax([1, 2, np.nan, np.NINF])
460
+ 2.0
461
+ >>> np.nanmax([1, 2, np.nan, np.inf])
462
+ inf
463
+
464
+ """
465
+ kwargs = {}
466
+ if keepdims is not np._NoValue:
467
+ kwargs['keepdims'] = keepdims
468
+ if initial is not np._NoValue:
469
+ kwargs['initial'] = initial
470
+ if where is not np._NoValue:
471
+ kwargs['where'] = where
472
+
473
+ if type(a) is np.ndarray and a.dtype != np.object_:
474
+ # Fast, but not safe for subclasses of ndarray, or object arrays,
475
+ # which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
476
+ res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
477
+ if np.isnan(res).any():
478
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
479
+ stacklevel=2)
480
+ else:
481
+ # Slow, but safe for subclasses of ndarray
482
+ a, mask = _replace_nan(a, -np.inf)
483
+ res = np.amax(a, axis=axis, out=out, **kwargs)
484
+ if mask is None:
485
+ return res
486
+
487
+ # Check for all-NaN axis
488
+ kwargs.pop("initial", None)
489
+ mask = np.all(mask, axis=axis, **kwargs)
490
+ if np.any(mask):
491
+ res = _copyto(res, np.nan, mask)
492
+ warnings.warn("All-NaN axis encountered", RuntimeWarning,
493
+ stacklevel=2)
494
+ return res
495
+
496
+
497
+ def _nanargmin_dispatcher(a, axis=None, out=None, *, keepdims=None):
498
+ return (a,)
499
+
500
+
501
+ @array_function_dispatch(_nanargmin_dispatcher)
502
+ def nanargmin(a, axis=None, out=None, *, keepdims=np._NoValue):
503
+ """
504
+ Return the indices of the minimum values in the specified axis ignoring
505
+ NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the results
506
+ cannot be trusted if a slice contains only NaNs and Infs.
507
+
508
+ Parameters
509
+ ----------
510
+ a : array_like
511
+ Input data.
512
+ axis : int, optional
513
+ Axis along which to operate. By default flattened input is used.
514
+ out : array, optional
515
+ If provided, the result will be inserted into this array. It should
516
+ be of the appropriate shape and dtype.
517
+
518
+ .. versionadded:: 1.22.0
519
+ keepdims : bool, optional
520
+ If this is set to True, the axes which are reduced are left
521
+ in the result as dimensions with size one. With this option,
522
+ the result will broadcast correctly against the array.
523
+
524
+ .. versionadded:: 1.22.0
525
+
526
+ Returns
527
+ -------
528
+ index_array : ndarray
529
+ An array of indices or a single index value.
530
+
531
+ See Also
532
+ --------
533
+ argmin, nanargmax
534
+
535
+ Examples
536
+ --------
537
+ >>> a = np.array([[np.nan, 4], [2, 3]])
538
+ >>> np.argmin(a)
539
+ 0
540
+ >>> np.nanargmin(a)
541
+ 2
542
+ >>> np.nanargmin(a, axis=0)
543
+ array([1, 1])
544
+ >>> np.nanargmin(a, axis=1)
545
+ array([1, 0])
546
+
547
+ """
548
+ a, mask = _replace_nan(a, np.inf)
549
+ if mask is not None:
550
+ mask = np.all(mask, axis=axis)
551
+ if np.any(mask):
552
+ raise ValueError("All-NaN slice encountered")
553
+ res = np.argmin(a, axis=axis, out=out, keepdims=keepdims)
554
+ return res
555
+
556
+
557
+ def _nanargmax_dispatcher(a, axis=None, out=None, *, keepdims=None):
558
+ return (a,)
559
+
560
+
561
+ @array_function_dispatch(_nanargmax_dispatcher)
562
+ def nanargmax(a, axis=None, out=None, *, keepdims=np._NoValue):
563
+ """
564
+ Return the indices of the maximum values in the specified axis ignoring
565
+ NaNs. For all-NaN slices ``ValueError`` is raised. Warning: the
566
+ results cannot be trusted if a slice contains only NaNs and -Infs.
567
+
568
+
569
+ Parameters
570
+ ----------
571
+ a : array_like
572
+ Input data.
573
+ axis : int, optional
574
+ Axis along which to operate. By default flattened input is used.
575
+ out : array, optional
576
+ If provided, the result will be inserted into this array. It should
577
+ be of the appropriate shape and dtype.
578
+
579
+ .. versionadded:: 1.22.0
580
+ keepdims : bool, optional
581
+ If this is set to True, the axes which are reduced are left
582
+ in the result as dimensions with size one. With this option,
583
+ the result will broadcast correctly against the array.
584
+
585
+ .. versionadded:: 1.22.0
586
+
587
+ Returns
588
+ -------
589
+ index_array : ndarray
590
+ An array of indices or a single index value.
591
+
592
+ See Also
593
+ --------
594
+ argmax, nanargmin
595
+
596
+ Examples
597
+ --------
598
+ >>> a = np.array([[np.nan, 4], [2, 3]])
599
+ >>> np.argmax(a)
600
+ 0
601
+ >>> np.nanargmax(a)
602
+ 1
603
+ >>> np.nanargmax(a, axis=0)
604
+ array([1, 0])
605
+ >>> np.nanargmax(a, axis=1)
606
+ array([1, 1])
607
+
608
+ """
609
+ a, mask = _replace_nan(a, -np.inf)
610
+ if mask is not None:
611
+ mask = np.all(mask, axis=axis)
612
+ if np.any(mask):
613
+ raise ValueError("All-NaN slice encountered")
614
+ res = np.argmax(a, axis=axis, out=out, keepdims=keepdims)
615
+ return res
616
+
617
+
618
+ def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
619
+ initial=None, where=None):
620
+ return (a, out)
621
+
622
+
623
+ @array_function_dispatch(_nansum_dispatcher)
624
+ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
625
+ initial=np._NoValue, where=np._NoValue):
626
+ """
627
+ Return the sum of array elements over a given axis treating Not a
628
+ Number values (NaNs) as zero.
629
+
630
+ In NumPy versions <= 1.9.0 NaN is returned for slices that are all-NaN or
631
+ empty. In later versions zero is returned.
632
+
633
+ Parameters
634
+ ----------
635
+ a : array_like
636
+ Array containing numbers whose sum is desired. If `a` is not an
637
+ array, a conversion is attempted.
638
+ axis : {int, tuple of int, None}, optional
639
+ Axis or axes along which the sum is computed. The default is to compute the
640
+ sum of the flattened array.
641
+ dtype : data-type, optional
642
+ The type of the returned array and of the accumulator in which the
643
+ elements are summed. By default, the dtype of `a` is used. An
644
+ exception is when `a` has an integer type with less precision than
645
+ the platform (u)intp. In that case, the default will be either
646
+ (u)int32 or (u)int64 depending on whether the platform is 32 or 64
647
+ bits. For inexact inputs, dtype must be inexact.
648
+
649
+ .. versionadded:: 1.8.0
650
+ out : ndarray, optional
651
+ Alternate output array in which to place the result. The default
652
+ is ``None``. If provided, it must have the same shape as the
653
+ expected output, but the type will be cast if necessary. See
654
+ :ref:`ufuncs-output-type` for more details. The casting of NaN to integer
655
+ can yield unexpected results.
656
+
657
+ .. versionadded:: 1.8.0
658
+ keepdims : bool, optional
659
+ If this is set to True, the axes which are reduced are left
660
+ in the result as dimensions with size one. With this option,
661
+ the result will broadcast correctly against the original `a`.
662
+
663
+
664
+ If the value is anything but the default, then
665
+ `keepdims` will be passed through to the `mean` or `sum` methods
666
+ of sub-classes of `ndarray`. If the sub-class' method
667
+ does not implement `keepdims`, any exceptions will be raised.
668
+
669
+ .. versionadded:: 1.8.0
670
+ initial : scalar, optional
671
+ Starting value for the sum. See `~numpy.ufunc.reduce` for details.
672
+
673
+ .. versionadded:: 1.22.0
674
+ where : array_like of bool, optional
675
+ Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
676
+
677
+ .. versionadded:: 1.22.0
678
+
679
+ Returns
680
+ -------
681
+ nansum : ndarray.
682
+ A new array holding the result is returned unless `out` is
683
+ specified, in which case it is returned. The result has the same
684
+ size as `a`, and the same shape as `a` if `axis` is not None
685
+ or `a` is a 1-d array.
686
+
687
+ See Also
688
+ --------
689
+ numpy.sum : Sum across array propagating NaNs.
690
+ isnan : Show which elements are NaN.
691
+ isfinite : Show which elements are not NaN or +/-inf.
692
+
693
+ Notes
694
+ -----
695
+ If both positive and negative infinity are present, the sum will be Not
696
+ A Number (NaN).
697
+
698
+ Examples
699
+ --------
700
+ >>> np.nansum(1)
701
+ 1
702
+ >>> np.nansum([1])
703
+ 1
704
+ >>> np.nansum([1, np.nan])
705
+ 1.0
706
+ >>> a = np.array([[1, 1], [1, np.nan]])
707
+ >>> np.nansum(a)
708
+ 3.0
709
+ >>> np.nansum(a, axis=0)
710
+ array([2., 1.])
711
+ >>> np.nansum([1, np.nan, np.inf])
712
+ inf
713
+ >>> np.nansum([1, np.nan, np.NINF])
714
+ -inf
715
+ >>> from numpy.testing import suppress_warnings
716
+ >>> with suppress_warnings() as sup:
717
+ ... sup.filter(RuntimeWarning)
718
+ ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
719
+ nan
720
+
721
+ """
722
+ a, mask = _replace_nan(a, 0)
723
+ return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
724
+ initial=initial, where=where)
725
+
726
+
727
+ def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
728
+ initial=None, where=None):
729
+ return (a, out)
730
+
731
+
732
+ @array_function_dispatch(_nanprod_dispatcher)
733
+ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
734
+ initial=np._NoValue, where=np._NoValue):
735
+ """
736
+ Return the product of array elements over a given axis treating Not a
737
+ Number values (NaNs) as ones.
738
+
739
+ One is returned for slices that are all-NaN or empty.
740
+
741
+ .. versionadded:: 1.10.0
742
+
743
+ Parameters
744
+ ----------
745
+ a : array_like
746
+ Array containing numbers whose product is desired. If `a` is not an
747
+ array, a conversion is attempted.
748
+ axis : {int, tuple of int, None}, optional
749
+ Axis or axes along which the product is computed. The default is to compute
750
+ the product of the flattened array.
751
+ dtype : data-type, optional
752
+ The type of the returned array and of the accumulator in which the
753
+ elements are summed. By default, the dtype of `a` is used. An
754
+ exception is when `a` has an integer type with less precision than
755
+ the platform (u)intp. In that case, the default will be either
756
+ (u)int32 or (u)int64 depending on whether the platform is 32 or 64
757
+ bits. For inexact inputs, dtype must be inexact.
758
+ out : ndarray, optional
759
+ Alternate output array in which to place the result. The default
760
+ is ``None``. If provided, it must have the same shape as the
761
+ expected output, but the type will be cast if necessary. See
762
+ :ref:`ufuncs-output-type` for more details. The casting of NaN to integer
763
+ can yield unexpected results.
764
+ keepdims : bool, optional
765
+ If True, the axes which are reduced are left in the result as
766
+ dimensions with size one. With this option, the result will
767
+ broadcast correctly against the original `a`.
768
+ initial : scalar, optional
769
+ The starting value for this product. See `~numpy.ufunc.reduce`
770
+ for details.
771
+
772
+ .. versionadded:: 1.22.0
773
+ where : array_like of bool, optional
774
+ Elements to include in the product. See `~numpy.ufunc.reduce`
775
+ for details.
776
+
777
+ .. versionadded:: 1.22.0
778
+
779
+ Returns
780
+ -------
781
+ nanprod : ndarray
782
+ A new array holding the result is returned unless `out` is
783
+ specified, in which case it is returned.
784
+
785
+ See Also
786
+ --------
787
+ numpy.prod : Product across array propagating NaNs.
788
+ isnan : Show which elements are NaN.
789
+
790
+ Examples
791
+ --------
792
+ >>> np.nanprod(1)
793
+ 1
794
+ >>> np.nanprod([1])
795
+ 1
796
+ >>> np.nanprod([1, np.nan])
797
+ 1.0
798
+ >>> a = np.array([[1, 2], [3, np.nan]])
799
+ >>> np.nanprod(a)
800
+ 6.0
801
+ >>> np.nanprod(a, axis=0)
802
+ array([3., 2.])
803
+
804
+ """
805
+ a, mask = _replace_nan(a, 1)
806
+ return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
807
+ initial=initial, where=where)
808
+
809
+
810
+ def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None):
811
+ return (a, out)
812
+
813
+
814
+ @array_function_dispatch(_nancumsum_dispatcher)
815
+ def nancumsum(a, axis=None, dtype=None, out=None):
816
+ """
817
+ Return the cumulative sum of array elements over a given axis treating Not a
818
+ Number values (NaNs) as zero. The cumulative sum does not change when NaNs are
819
+ encountered and leading NaNs are replaced by zeros.
820
+
821
+ Zeros are returned for slices that are all-NaN or empty.
822
+
823
+ .. versionadded:: 1.12.0
824
+
825
+ Parameters
826
+ ----------
827
+ a : array_like
828
+ Input array.
829
+ axis : int, optional
830
+ Axis along which the cumulative sum is computed. The default
831
+ (None) is to compute the cumsum over the flattened array.
832
+ dtype : dtype, optional
833
+ Type of the returned array and of the accumulator in which the
834
+ elements are summed. If `dtype` is not specified, it defaults
835
+ to the dtype of `a`, unless `a` has an integer dtype with a
836
+ precision less than that of the default platform integer. In
837
+ that case, the default platform integer is used.
838
+ out : ndarray, optional
839
+ Alternative output array in which to place the result. It must
840
+ have the same shape and buffer length as the expected output
841
+ but the type will be cast if necessary. See :ref:`ufuncs-output-type` for
842
+ more details.
843
+
844
+ Returns
845
+ -------
846
+ nancumsum : ndarray.
847
+ A new array holding the result is returned unless `out` is
848
+ specified, in which case it is returned. The result has the same
849
+ size as `a`, and the same shape as `a` if `axis` is not None
850
+ or `a` is a 1-d array.
851
+
852
+ See Also
853
+ --------
854
+ numpy.cumsum : Cumulative sum across array propagating NaNs.
855
+ isnan : Show which elements are NaN.
856
+
857
+ Examples
858
+ --------
859
+ >>> np.nancumsum(1)
860
+ array([1])
861
+ >>> np.nancumsum([1])
862
+ array([1])
863
+ >>> np.nancumsum([1, np.nan])
864
+ array([1., 1.])
865
+ >>> a = np.array([[1, 2], [3, np.nan]])
866
+ >>> np.nancumsum(a)
867
+ array([1., 3., 6., 6.])
868
+ >>> np.nancumsum(a, axis=0)
869
+ array([[1., 2.],
870
+ [4., 2.]])
871
+ >>> np.nancumsum(a, axis=1)
872
+ array([[1., 3.],
873
+ [3., 3.]])
874
+
875
+ """
876
+ a, mask = _replace_nan(a, 0)
877
+ return np.cumsum(a, axis=axis, dtype=dtype, out=out)
878
+
879
+
880
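Because the NaNs are zeroed before `np.cumsum` runs, the running sum simply holds its value across them, which is exactly the documented leading-NaNs-become-zeros behavior. A quick illustration of that equivalence (not from the source):

import numpy as np

a = np.array([np.nan, 1.0, np.nan, 2.0])
filled = np.where(np.isnan(a), 0.0, a)
print(np.cumsum(filled))   # [0. 1. 1. 3.]
print(np.nancumsum(a))     # same values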
+ def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None):
881
+ return (a, out)
882
+
883
+
884
+ @array_function_dispatch(_nancumprod_dispatcher)
885
+ def nancumprod(a, axis=None, dtype=None, out=None):
886
+ """
887
+ Return the cumulative product of array elements over a given axis, treating Not a
888
+ Number (NaN) values as one. The cumulative product does not change when NaNs are
889
+ encountered and leading NaNs are replaced by ones.
890
+
891
+ Ones are returned for slices that are all-NaN or empty.
892
+
893
+ .. versionadded:: 1.12.0
894
+
895
+ Parameters
896
+ ----------
897
+ a : array_like
898
+ Input array.
899
+ axis : int, optional
900
+ Axis along which the cumulative product is computed. By default
901
+ the input is flattened.
902
+ dtype : dtype, optional
903
+ Type of the returned array, as well as of the accumulator in which
904
+ the elements are multiplied. If *dtype* is not specified, it
905
+ defaults to the dtype of `a`, unless `a` has an integer dtype with
906
+ a precision less than that of the default platform integer. In
907
+ that case, the default platform integer is used instead.
908
+ out : ndarray, optional
909
+ Alternative output array in which to place the result. It must
910
+ have the same shape and buffer length as the expected output
911
+ but the type of the resulting values will be cast if necessary.
912
+
913
+ Returns
914
+ -------
915
+ nancumprod : ndarray
916
+ A new array holding the result is returned unless `out` is
917
+ specified, in which case it is returned.
918
+
919
+ See Also
920
+ --------
921
+ numpy.cumprod : Cumulative product across array propagating NaNs.
922
+ isnan : Show which elements are NaN.
923
+
924
+ Examples
925
+ --------
926
+ >>> np.nancumprod(1)
927
+ array([1])
928
+ >>> np.nancumprod([1])
929
+ array([1])
930
+ >>> np.nancumprod([1, np.nan])
931
+ array([1., 1.])
932
+ >>> a = np.array([[1, 2], [3, np.nan]])
933
+ >>> np.nancumprod(a)
934
+ array([1., 2., 6., 6.])
935
+ >>> np.nancumprod(a, axis=0)
936
+ array([[1., 2.],
937
+ [3., 2.]])
938
+ >>> np.nancumprod(a, axis=1)
939
+ array([[1., 2.],
940
+ [3., 3.]])
941
+
942
+ """
943
+ a, mask = _replace_nan(a, 1)
944
+ return np.cumprod(a, axis=axis, dtype=dtype, out=out)
945
+
946
+
947
+ def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
948
+ *, where=None):
949
+ return (a, out)
950
+
951
+
952
+ @array_function_dispatch(_nanmean_dispatcher)
953
+ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
954
+ *, where=np._NoValue):
955
+ """
956
+ Compute the arithmetic mean along the specified axis, ignoring NaNs.
957
+
958
+ Returns the average of the array elements. The average is taken over
959
+ the flattened array by default, otherwise over the specified axis.
960
+ `float64` intermediate and return values are used for integer inputs.
961
+
962
+ For all-NaN slices, NaN is returned and a `RuntimeWarning` is raised.
963
+
964
+ .. versionadded:: 1.8.0
965
+
966
+ Parameters
967
+ ----------
968
+ a : array_like
969
+ Array containing numbers whose mean is desired. If `a` is not an
970
+ array, a conversion is attempted.
971
+ axis : {int, tuple of int, None}, optional
972
+ Axis or axes along which the means are computed. The default is to compute
973
+ the mean of the flattened array.
974
+ dtype : data-type, optional
975
+ Type to use in computing the mean. For integer inputs, the default
976
+ is `float64`; for inexact inputs, it is the same as the input
977
+ dtype.
978
+ out : ndarray, optional
979
+ Alternate output array in which to place the result. The default
980
+ is ``None``; if provided, it must have the same shape as the
981
+ expected output, but the type will be cast if necessary. See
982
+ :ref:`ufuncs-output-type` for more details.
983
+ keepdims : bool, optional
984
+ If this is set to True, the axes which are reduced are left
985
+ in the result as dimensions with size one. With this option,
986
+ the result will broadcast correctly against the original `a`.
987
+
988
+ If the value is anything but the default, then
989
+ `keepdims` will be passed through to the `mean` or `sum` methods
990
+ of sub-classes of `ndarray`. If a sub-class's method
991
+ does not implement `keepdims`, any exceptions will be raised.
992
+ where : array_like of bool, optional
993
+ Elements to include in the mean. See `~numpy.ufunc.reduce` for details.
994
+
995
+ .. versionadded:: 1.22.0
996
+
997
+ Returns
998
+ -------
999
+ m : ndarray, see dtype parameter above
1000
+ If `out=None`, returns a new array containing the mean values,
1001
+ otherwise a reference to the output array is returned. NaN is
1002
+ returned for slices that contain only NaNs.
1003
+
1004
+ See Also
1005
+ --------
1006
+ average : Weighted average
1007
+ mean : Arithmetic mean taken while not ignoring NaNs
1008
+ var, nanvar
1009
+
1010
+ Notes
1011
+ -----
1012
+ The arithmetic mean is the sum of the non-NaN elements along the axis
1013
+ divided by the number of non-NaN elements.
1014
+
1015
+ Note that for floating-point input, the mean is computed using the same
1016
+ precision the input has. Depending on the input data, this can cause
1017
+ the results to be inaccurate, especially for `float32`. Specifying a
1018
+ higher-precision accumulator using the `dtype` keyword can alleviate
1019
+ this issue.
1020
+
1021
+ Examples
1022
+ --------
1023
+ >>> a = np.array([[1, np.nan], [3, 4]])
1024
+ >>> np.nanmean(a)
1025
+ 2.6666666666666665
1026
+ >>> np.nanmean(a, axis=0)
1027
+ array([2., 4.])
1028
+ >>> np.nanmean(a, axis=1)
1029
+ array([1., 3.5]) # may vary
1030
+
1031
+ """
1032
+ arr, mask = _replace_nan(a, 0)
1033
+ if mask is None:
1034
+ return np.mean(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
1035
+ where=where)
1036
+
1037
+ if dtype is not None:
1038
+ dtype = np.dtype(dtype)
1039
+ if dtype is not None and not issubclass(dtype.type, np.inexact):
1040
+ raise TypeError("If a is inexact, then dtype must be inexact")
1041
+ if out is not None and not issubclass(out.dtype.type, np.inexact):
1042
+ raise TypeError("If a is inexact, then out must be inexact")
1043
+
1044
+ cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=keepdims,
1045
+ where=where)
1046
+ tot = np.sum(arr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
1047
+ where=where)
1048
+ avg = _divide_by_count(tot, cnt, out=out)
1049
+
1050
+ isbad = (cnt == 0)
1051
+ if isbad.any():
1052
+ warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
1053
+ # NaN is the only possible bad value, so no further
1054
+ # action is needed to handle bad results.
1055
+ return avg
1056
+
1057
+
1058
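The `nanmean` body is a count-and-divide scheme: zero the NaNs, sum, and divide by the number of valid entries instead of the slice length. A rough sketch of the same arithmetic for float input, with the dtype/out validation and warning machinery omitted (`nanmean_sketch` is a hypothetical name):

import numpy as np

def nanmean_sketch(a, axis=None):
    a = np.asarray(a, dtype=float)
    mask = np.isnan(a)
    total = np.where(mask, 0.0, a).sum(axis=axis)
    count = (~mask).sum(axis=axis)  # non-NaN elements per slice
    return total / count            # all-NaN slices divide by 0 -> nan

print(nanmean_sketch(np.array([[1.0, np.nan], [3.0, 4.0]]), axis=0))  # [2. 4.]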
+ def _nanmedian1d(arr1d, overwrite_input=False):
1059
+ """
1060
+ Private function for rank 1 arrays. Compute the median ignoring NaNs.
1061
+ See nanmedian for parameter usage
1062
+ """
1063
+ arr1d_parsed, overwrite_input = _remove_nan_1d(
1064
+ arr1d, overwrite_input=overwrite_input,
1065
+ )
1066
+
1067
+ if arr1d_parsed.size == 0:
1068
+ # Ensure that a nan-esque scalar of the appropriate type (and unit)
1069
+ # is returned for `timedelta64` and `complexfloating`
1070
+ return arr1d[-1]
1071
+
1072
+ return np.median(arr1d_parsed, overwrite_input=overwrite_input)
1073
+
1074
+
1075
+ def _nanmedian(a, axis=None, out=None, overwrite_input=False):
1076
+ """
1077
+ Private function that doesn't support extended axis or keepdims.
1078
+ These methods are extended to this function using _ureduce
1079
+ See nanmedian for parameter usage
1080
+
1081
+ """
1082
+ if axis is None or a.ndim == 1:
1083
+ part = a.ravel()
1084
+ if out is None:
1085
+ return _nanmedian1d(part, overwrite_input)
1086
+ else:
1087
+ out[...] = _nanmedian1d(part, overwrite_input)
1088
+ return out
1089
+ else:
1090
+ # for small medians use sort + indexing which is still faster than
1091
+ # apply_along_axis
1092
+ # benchmarked with shuffled (50, 50, x) containing a few NaN
1093
+ if a.shape[axis] < 600:
1094
+ return _nanmedian_small(a, axis, out, overwrite_input)
1095
+ result = np.apply_along_axis(_nanmedian1d, axis, a, overwrite_input)
1096
+ if out is not None:
1097
+ out[...] = result
1098
+ return result
1099
+
1100
+
1101
+ def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
1102
+ """
1103
+ sort + indexing median, faster for small medians along multiple
1104
+ dimensions due to the high overhead of apply_along_axis
1105
+
1106
+ see nanmedian for parameter usage
1107
+ """
1108
+ a = np.ma.masked_array(a, np.isnan(a))
1109
+ m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
1110
+ for i in range(np.count_nonzero(m.mask.ravel())):
1111
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
1112
+ stacklevel=5)
1113
+
1114
+ fill_value = np.timedelta64("NaT") if m.dtype.kind == "m" else np.nan
1115
+ if out is not None:
1116
+ out[...] = m.filled(fill_value)
1117
+ return out
1118
+ return m.filled(fill_value)
1119
+
1120
+
1121
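The small-median fast path leans on `numpy.ma`: masking the NaNs lets `np.ma.median` skip them per slice, and fully masked slices come back masked, to be refilled with NaN (or NaT). The core of that idea in isolation:

import numpy as np

a = np.array([[1.0, np.nan], [3.0, np.nan]])
masked = np.ma.masked_array(a, np.isnan(a))
med = np.ma.median(masked, axis=0)
print(med.filled(np.nan))   # [2. nan] -- the all-NaN column stays NaN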
+ def _nanmedian_dispatcher(
1122
+ a, axis=None, out=None, overwrite_input=None, keepdims=None):
1123
+ return (a, out)
1124
+
1125
+
1126
+ @array_function_dispatch(_nanmedian_dispatcher)
1127
+ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
1128
+ """
1129
+ Compute the median along the specified axis, while ignoring NaNs.
1130
+
1131
+ Returns the median of the array elements.
1132
+
1133
+ .. versionadded:: 1.9.0
1134
+
1135
+ Parameters
1136
+ ----------
1137
+ a : array_like
1138
+ Input array or object that can be converted to an array.
1139
+ axis : {int, sequence of int, None}, optional
1140
+ Axis or axes along which the medians are computed. The default
1141
+ is to compute the median along a flattened version of the array.
1142
+ A sequence of axes is supported since version 1.9.0.
1143
+ out : ndarray, optional
1144
+ Alternative output array in which to place the result. It must
1145
+ have the same shape and buffer length as the expected output,
1146
+ but the type (of the output) will be cast if necessary.
1147
+ overwrite_input : bool, optional
1148
+ If True, then allow use of memory of input array `a` for
1149
+ calculations. The input array will be modified by the call to
1150
+ `median`. This will save memory when you do not need to preserve
1151
+ the contents of the input array. Treat the input as undefined,
1152
+ but it will probably be fully or partially sorted. Default is
1153
+ False. If `overwrite_input` is ``True`` and `a` is not already an
1154
+ `ndarray`, an error will be raised.
1155
+ keepdims : bool, optional
1156
+ If this is set to True, the axes which are reduced are left
1157
+ in the result as dimensions with size one. With this option,
1158
+ the result will broadcast correctly against the original `a`.
1159
+
1160
+ If this is anything but the default value it will be passed
1161
+ through (in the special case of an empty array) to the
1162
+ `mean` function of the underlying array. If the array is
1163
+ a sub-class and `mean` does not have the kwarg `keepdims` this
1164
+ will raise a RuntimeError.
1165
+
1166
+ Returns
1167
+ -------
1168
+ median : ndarray
1169
+ A new array holding the result. If the input contains integers
1170
+ or floats smaller than ``float64``, then the output data-type is
1171
+ ``np.float64``. Otherwise, the data-type of the output is the
1172
+ same as that of the input. If `out` is specified, that array is
1173
+ returned instead.
1174
+
1175
+ See Also
1176
+ --------
1177
+ mean, median, percentile
1178
+
1179
+ Notes
1180
+ -----
1181
+ Given a vector ``V`` of length ``N``, the median of ``V`` is the
1182
+ middle value of a sorted copy of ``V``, ``V_sorted`` - i.e.,
1183
+ ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the two
1184
+ middle values of ``V_sorted`` when ``N`` is even.
1185
+
1186
+ Examples
1187
+ --------
1188
+ >>> a = np.array([[10.0, 7, 4], [3, 2, 1]])
1189
+ >>> a[0, 1] = np.nan
1190
+ >>> a
1191
+ array([[10., nan, 4.],
1192
+ [ 3., 2., 1.]])
1193
+ >>> np.median(a)
1194
+ nan
1195
+ >>> np.nanmedian(a)
1196
+ 3.0
1197
+ >>> np.nanmedian(a, axis=0)
1198
+ array([6.5, 2. , 2.5])
1199
+ >>> np.median(a, axis=1)
1200
+ array([nan, 2.])
1201
+ >>> b = a.copy()
1202
+ >>> np.nanmedian(b, axis=1, overwrite_input=True)
1203
+ array([7., 2.])
1204
+ >>> assert not np.all(a==b)
1205
+ >>> b = a.copy()
1206
+ >>> np.nanmedian(b, axis=None, overwrite_input=True)
1207
+ 3.0
1208
+ >>> assert not np.all(a==b)
1209
+
1210
+ """
1211
+ a = np.asanyarray(a)
1212
+ # apply_along_axis in _nanmedian doesn't handle empty arrays well,
1213
+ # so deal with them upfront
1214
+ if a.size == 0:
1215
+ return np.nanmean(a, axis, out=out, keepdims=keepdims)
1216
+
1217
+ return function_base._ureduce(a, func=_nanmedian, keepdims=keepdims,
1218
+ axis=axis, out=out,
1219
+ overwrite_input=overwrite_input)
1220
+
1221
+
1222
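One detail worth noting from the body above: empty input is routed to `np.nanmean`, so an empty array yields NaN plus the usual empty-slice warning rather than an error. A small check of that behavior (illustrative, not from the source):

import numpy as np
import warnings

with warnings.catch_warnings():
    warnings.simplefilter("ignore", RuntimeWarning)  # "Mean of empty slice"
    print(np.nanmedian(np.array([])))  # nan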
+ def _nanpercentile_dispatcher(
1223
+ a, q, axis=None, out=None, overwrite_input=None,
1224
+ method=None, keepdims=None, *, interpolation=None):
1225
+ return (a, q, out)
1226
+
1227
+
1228
+ @array_function_dispatch(_nanpercentile_dispatcher)
1229
+ def nanpercentile(
1230
+ a,
1231
+ q,
1232
+ axis=None,
1233
+ out=None,
1234
+ overwrite_input=False,
1235
+ method="linear",
1236
+ keepdims=np._NoValue,
1237
+ *,
1238
+ interpolation=None,
1239
+ ):
1240
+ """
1241
+ Compute the qth percentile of the data along the specified axis,
1242
+ while ignoring nan values.
1243
+
1244
+ Returns the qth percentile(s) of the array elements.
1245
+
1246
+ .. versionadded:: 1.9.0
1247
+
1248
+ Parameters
1249
+ ----------
1250
+ a : array_like
1251
+ Input array or object that can be converted to an array, containing
1252
+ nan values to be ignored.
1253
+ q : array_like of float
1254
+ Percentile or sequence of percentiles to compute, which must be
1255
+ between 0 and 100 inclusive.
1256
+ axis : {int, tuple of int, None}, optional
1257
+ Axis or axes along which the percentiles are computed. The default
1258
+ is to compute the percentile(s) along a flattened version of the
1259
+ array.
1260
+ out : ndarray, optional
1261
+ Alternative output array in which to place the result. It must have
1262
+ the same shape and buffer length as the expected output, but the
1263
+ type (of the output) will be cast if necessary.
1264
+ overwrite_input : bool, optional
1265
+ If True, then allow the input array `a` to be modified by
1266
+ intermediate calculations, to save memory. In this case, the
1267
+ contents of the input `a` after this function completes is
1268
+ undefined.
1269
+ method : str, optional
1270
+ This parameter specifies the method to use for estimating the
1271
+ percentile. There are many different methods, some unique to NumPy.
1272
+ See the notes for explanation. The options sorted by their R type
1273
+ as summarized in the H&F paper [1]_ are:
1274
+
1275
+ 1. 'inverted_cdf'
1276
+ 2. 'averaged_inverted_cdf'
1277
+ 3. 'closest_observation'
1278
+ 4. 'interpolated_inverted_cdf'
1279
+ 5. 'hazen'
1280
+ 6. 'weibull'
1281
+ 7. 'linear' (default)
1282
+ 8. 'median_unbiased'
1283
+ 9. 'normal_unbiased'
1284
+
1285
+ The first three methods are discontinuous. NumPy further defines the
1286
+ following discontinuous variations of the default 'linear' (7.) option:
1287
+
1288
+ * 'lower'
1289
+ * 'higher'
1290
+ * 'midpoint'
1291
+ * 'nearest'
1292
+
1293
+ .. versionchanged:: 1.22.0
1294
+ This argument was previously called "interpolation" and only
1295
+ offered the "linear" default and last four options.
1296
+
1297
+ keepdims : bool, optional
1298
+ If this is set to True, the axes which are reduced are left in
1299
+ the result as dimensions with size one. With this option, the
1300
+ result will broadcast correctly against the original array `a`.
1301
+
1302
+ If this is anything but the default value it will be passed
1303
+ through (in the special case of an empty array) to the
1304
+ `mean` function of the underlying array. If the array is
1305
+ a sub-class and `mean` does not have the kwarg `keepdims` this
1306
+ will raise a RuntimeError.
1307
+
1308
+ interpolation : str, optional
1309
+ Deprecated name for the method keyword argument.
1310
+
1311
+ .. deprecated:: 1.22.0
1312
+
1313
+ Returns
1314
+ -------
1315
+ percentile : scalar or ndarray
1316
+ If `q` is a single percentile and `axis=None`, then the result
1317
+ is a scalar. If multiple percentiles are given, first axis of
1318
+ the result corresponds to the percentiles. The other axes are
1319
+ the axes that remain after the reduction of `a`. If the input
1320
+ contains integers or floats smaller than ``float64``, the output
1321
+ data-type is ``float64``. Otherwise, the output data-type is the
1322
+ same as that of the input. If `out` is specified, that array is
1323
+ returned instead.
1324
+
1325
+ See Also
1326
+ --------
1327
+ nanmean
1328
+ nanmedian : equivalent to ``nanpercentile(..., 50)``
1329
+ percentile, median, mean
1330
+ nanquantile : equivalent to nanpercentile, except q in range [0, 1].
1331
+
1332
+ Notes
1333
+ -----
1334
+ For more information please see `numpy.percentile`
1335
+
1336
+ Examples
1337
+ --------
1338
+ >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
1339
+ >>> a[0][1] = np.nan
1340
+ >>> a
1341
+ array([[10., nan, 4.],
1342
+ [ 3., 2., 1.]])
1343
+ >>> np.percentile(a, 50)
1344
+ nan
1345
+ >>> np.nanpercentile(a, 50)
1346
+ 3.0
1347
+ >>> np.nanpercentile(a, 50, axis=0)
1348
+ array([6.5, 2. , 2.5])
1349
+ >>> np.nanpercentile(a, 50, axis=1, keepdims=True)
1350
+ array([[7.],
1351
+ [2.]])
1352
+ >>> m = np.nanpercentile(a, 50, axis=0)
1353
+ >>> out = np.zeros_like(m)
1354
+ >>> np.nanpercentile(a, 50, axis=0, out=out)
1355
+ array([6.5, 2. , 2.5])
1356
+ >>> m
1357
+ array([6.5, 2. , 2.5])
1358
+
1359
+ >>> b = a.copy()
1360
+ >>> np.nanpercentile(b, 50, axis=1, overwrite_input=True)
1361
+ array([7., 2.])
1362
+ >>> assert not np.all(a==b)
1363
+
1364
+ References
1365
+ ----------
1366
+ .. [1] R. J. Hyndman and Y. Fan,
1367
+ "Sample quantiles in statistical packages,"
1368
+ The American Statistician, 50(4), pp. 361-365, 1996
1369
+
1370
+ """
1371
+ if interpolation is not None:
1372
+ method = function_base._check_interpolation_as_method(
1373
+ method, interpolation, "nanpercentile")
1374
+
1375
+ a = np.asanyarray(a)
1376
+ if a.dtype.kind == "c":
1377
+ raise TypeError("a must be an array of real numbers")
1378
+
1379
+ q = np.true_divide(q, 100.0)
1380
+ # undo any decay that the ufunc performed (see gh-13105)
1381
+ q = np.asanyarray(q)
1382
+ if not function_base._quantile_is_valid(q):
1383
+ raise ValueError("Percentiles must be in the range [0, 100]")
1384
+ return _nanquantile_unchecked(
1385
+ a, q, axis, out, overwrite_input, method, keepdims)
1386
+
1387
+
1388
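As the body shows, `nanpercentile` is a thin wrapper: `q` is divided by 100, validated, and then handed to the same `_nanquantile_unchecked` path as `nanquantile`. The two public functions should therefore agree exactly (a quick consistency check, not from the source):

import numpy as np

a = np.array([[10.0, np.nan, 4.0], [3.0, 2.0, 1.0]])
p = np.nanpercentile(a, 50, axis=0)
q = np.nanquantile(a, 0.5, axis=0)
print(np.array_equal(p, q))   # True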
+ def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
1389
+ method=None, keepdims=None, *, interpolation=None):
1390
+ return (a, q, out)
1391
+
1392
+
1393
+ @array_function_dispatch(_nanquantile_dispatcher)
1394
+ def nanquantile(
1395
+ a,
1396
+ q,
1397
+ axis=None,
1398
+ out=None,
1399
+ overwrite_input=False,
1400
+ method="linear",
1401
+ keepdims=np._NoValue,
1402
+ *,
1403
+ interpolation=None,
1404
+ ):
1405
+ """
1406
+ Compute the qth quantile of the data along the specified axis,
1407
+ while ignoring nan values.
1408
+ Returns the qth quantile(s) of the array elements.
1409
+
1410
+ .. versionadded:: 1.15.0
1411
+
1412
+ Parameters
1413
+ ----------
1414
+ a : array_like
1415
+ Input array or object that can be converted to an array, containing
1416
+ nan values to be ignored
1417
+ q : array_like of float
1418
+ Probability or sequence of probabilities for the quantiles to compute.
1419
+ Values must be between 0 and 1 inclusive.
1420
+ axis : {int, tuple of int, None}, optional
1421
+ Axis or axes along which the quantiles are computed. The
1422
+ default is to compute the quantile(s) along a flattened
1423
+ version of the array.
1424
+ out : ndarray, optional
1425
+ Alternative output array in which to place the result. It must
1426
+ have the same shape and buffer length as the expected output,
1427
+ but the type (of the output) will be cast if necessary.
1428
+ overwrite_input : bool, optional
1429
+ If True, then allow the input array `a` to be modified by intermediate
1430
+ calculations, to save memory. In this case, the contents of the input
1431
+ `a` after this function completes is undefined.
1432
+ method : str, optional
1433
+ This parameter specifies the method to use for estimating the
1434
+ quantile. There are many different methods, some unique to NumPy.
1435
+ See the notes for explanation. The options sorted by their R type
1436
+ as summarized in the H&F paper [1]_ are:
1437
+
1438
+ 1. 'inverted_cdf'
1439
+ 2. 'averaged_inverted_cdf'
1440
+ 3. 'closest_observation'
1441
+ 4. 'interpolated_inverted_cdf'
1442
+ 5. 'hazen'
1443
+ 6. 'weibull'
1444
+ 7. 'linear' (default)
1445
+ 8. 'median_unbiased'
1446
+ 9. 'normal_unbiased'
1447
+
1448
+ The first three methods are discontinuous. NumPy further defines the
1449
+ following discontinuous variations of the default 'linear' (7.) option:
1450
+
1451
+ * 'lower'
1452
+ * 'higher'
1453
+ * 'midpoint'
1454
+ * 'nearest'
1455
+
1456
+ .. versionchanged:: 1.22.0
1457
+ This argument was previously called "interpolation" and only
1458
+ offered the "linear" default and last four options.
1459
+
1460
+ keepdims : bool, optional
1461
+ If this is set to True, the axes which are reduced are left in
1462
+ the result as dimensions with size one. With this option, the
1463
+ result will broadcast correctly against the original array `a`.
1464
+
1465
+ If this is anything but the default value it will be passed
1466
+ through (in the special case of an empty array) to the
1467
+ `mean` function of the underlying array. If the array is
1468
+ a sub-class and `mean` does not have the kwarg `keepdims` this
1469
+ will raise a RuntimeError.
1470
+
1471
+ interpolation : str, optional
1472
+ Deprecated name for the method keyword argument.
1473
+
1474
+ .. deprecated:: 1.22.0
1475
+
1476
+ Returns
1477
+ -------
1478
+ quantile : scalar or ndarray
1479
+ If `q` is a single probability and `axis=None`, then the result
1480
+ is a scalar. If multiple probability levels are given, first axis of
1481
+ the result corresponds to the quantiles. The other axes are
1482
+ the axes that remain after the reduction of `a`. If the input
1483
+ contains integers or floats smaller than ``float64``, the output
1484
+ data-type is ``float64``. Otherwise, the output data-type is the
1485
+ same as that of the input. If `out` is specified, that array is
1486
+ returned instead.
1487
+
1488
+ See Also
1489
+ --------
1490
+ quantile
1491
+ nanmean, nanmedian
1492
+ nanmedian : equivalent to ``nanquantile(..., 0.5)``
1493
+ nanpercentile : same as nanquantile, but with q in the range [0, 100].
1494
+
1495
+ Notes
1496
+ -----
1497
+ For more information please see `numpy.quantile`
1498
+
1499
+ Examples
1500
+ --------
1501
+ >>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
1502
+ >>> a[0][1] = np.nan
1503
+ >>> a
1504
+ array([[10., nan, 4.],
1505
+ [ 3., 2., 1.]])
1506
+ >>> np.quantile(a, 0.5)
1507
+ nan
1508
+ >>> np.nanquantile(a, 0.5)
1509
+ 3.0
1510
+ >>> np.nanquantile(a, 0.5, axis=0)
1511
+ array([6.5, 2. , 2.5])
1512
+ >>> np.nanquantile(a, 0.5, axis=1, keepdims=True)
1513
+ array([[7.],
1514
+ [2.]])
1515
+ >>> m = np.nanquantile(a, 0.5, axis=0)
1516
+ >>> out = np.zeros_like(m)
1517
+ >>> np.nanquantile(a, 0.5, axis=0, out=out)
1518
+ array([6.5, 2. , 2.5])
1519
+ >>> m
1520
+ array([6.5, 2. , 2.5])
1521
+ >>> b = a.copy()
1522
+ >>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True)
1523
+ array([7., 2.])
1524
+ >>> assert not np.all(a==b)
1525
+
1526
+ References
1527
+ ----------
1528
+ .. [1] R. J. Hyndman and Y. Fan,
1529
+ "Sample quantiles in statistical packages,"
1530
+ The American Statistician, 50(4), pp. 361-365, 1996
1531
+
1532
+ """
1533
+
1534
+ if interpolation is not None:
1535
+ method = function_base._check_interpolation_as_method(
1536
+ method, interpolation, "nanquantile")
1537
+
1538
+ a = np.asanyarray(a)
1539
+ if a.dtype.kind == "c":
1540
+ raise TypeError("a must be an array of real numbers")
1541
+
1542
+ q = np.asanyarray(q)
1543
+ if not function_base._quantile_is_valid(q):
1544
+ raise ValueError("Quantiles must be in the range [0, 1]")
1545
+ return _nanquantile_unchecked(
1546
+ a, q, axis, out, overwrite_input, method, keepdims)
1547
+
1548
+
1549
+ def _nanquantile_unchecked(
1550
+ a,
1551
+ q,
1552
+ axis=None,
1553
+ out=None,
1554
+ overwrite_input=False,
1555
+ method="linear",
1556
+ keepdims=np._NoValue,
1557
+ ):
1558
+ """Assumes that q is in [0, 1], and is an ndarray"""
1559
+ # apply_along_axis in _nanpercentile doesn't handle empty arrays well,
1560
+ # so deal with them upfront
1561
+ if a.size == 0:
1562
+ return np.nanmean(a, axis, out=out, keepdims=keepdims)
1563
+ return function_base._ureduce(a,
1564
+ func=_nanquantile_ureduce_func,
1565
+ q=q,
1566
+ keepdims=keepdims,
1567
+ axis=axis,
1568
+ out=out,
1569
+ overwrite_input=overwrite_input,
1570
+ method=method)
1571
+
1572
+
1573
+ def _nanquantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
1574
+ method="linear"):
1575
+ """
1576
+ Private function that doesn't support extended axis or keepdims.
1577
+ These methods are extended to this function using _ureduce
1578
+ See nanpercentile for parameter usage
1579
+ """
1580
+ if axis is None or a.ndim == 1:
1581
+ part = a.ravel()
1582
+ result = _nanquantile_1d(part, q, overwrite_input, method)
1583
+ else:
1584
+ result = np.apply_along_axis(_nanquantile_1d, axis, a, q,
1585
+ overwrite_input, method)
1586
+ # apply_along_axis fills in collapsed axis with results.
1587
+ # Move that axis to the beginning to match percentile's
1588
+ # convention.
1589
+ if q.ndim != 0:
1590
+ result = np.moveaxis(result, axis, 0)
1591
+
1592
+ if out is not None:
1593
+ out[...] = result
1594
+ return result
1595
+
1596
+
1597
+ def _nanquantile_1d(arr1d, q, overwrite_input=False, method="linear"):
1598
+ """
1599
+ Private function for rank 1 arrays. Compute quantile ignoring NaNs.
1600
+ See nanpercentile for parameter usage
1601
+ """
1602
+ arr1d, overwrite_input = _remove_nan_1d(arr1d,
1603
+ overwrite_input=overwrite_input)
1604
+ if arr1d.size == 0:
1605
+ # convert to scalar
1606
+ return np.full(q.shape, np.nan, dtype=arr1d.dtype)[()]
1607
+
1608
+ return function_base._quantile_unchecked(
1609
+ arr1d, q, overwrite_input=overwrite_input, method=method)
1610
+
1611
+
1612
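For an all-NaN or empty 1-d slice, `_nanquantile_1d` builds a NaN-filled array shaped like `q` and applies `[()]`, which collapses a 0-d result to a scalar but leaves higher-dimensional results untouched. That indexing trick in isolation:

import numpy as np

print(np.full((), np.nan)[()])    # nan -- a scalar, not a 0-d array
print(np.full((2,), np.nan)[()])  # [nan nan] -- unchanged for ndim >= 1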
+ def _nanvar_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
1613
+ keepdims=None, *, where=None):
1614
+ return (a, out)
1615
+
1616
+
1617
+ @array_function_dispatch(_nanvar_dispatcher)
1618
+ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
1619
+ *, where=np._NoValue):
1620
+ """
1621
+ Compute the variance along the specified axis, while ignoring NaNs.
1622
+
1623
+ Returns the variance of the array elements, a measure of the spread of
1624
+ a distribution. The variance is computed for the flattened array by
1625
+ default, otherwise over the specified axis.
1626
+
1627
+ For all-NaN slices or slices with zero degrees of freedom, NaN is
1628
+ returned and a `RuntimeWarning` is raised.
1629
+
1630
+ .. versionadded:: 1.8.0
1631
+
1632
+ Parameters
1633
+ ----------
1634
+ a : array_like
1635
+ Array containing numbers whose variance is desired. If `a` is not an
1636
+ array, a conversion is attempted.
1637
+ axis : {int, tuple of int, None}, optional
1638
+ Axis or axes along which the variance is computed. The default is to compute
1639
+ the variance of the flattened array.
1640
+ dtype : data-type, optional
1641
+ Type to use in computing the variance. For arrays of integer type
1642
+ the default is `float64`; for arrays of float types it is the same as
1643
+ the array type.
1644
+ out : ndarray, optional
1645
+ Alternate output array in which to place the result. It must have
1646
+ the same shape as the expected output, but the type is cast if
1647
+ necessary.
1648
+ ddof : int, optional
1649
+ "Delta Degrees of Freedom": the divisor used in the calculation is
1650
+ ``N - ddof``, where ``N`` represents the number of non-NaN
1651
+ elements. By default `ddof` is zero.
1652
+ keepdims : bool, optional
1653
+ If this is set to True, the axes which are reduced are left
1654
+ in the result as dimensions with size one. With this option,
1655
+ the result will broadcast correctly against the original `a`.
1656
+ where : array_like of bool, optional
1657
+ Elements to include in the variance. See `~numpy.ufunc.reduce` for
1658
+ details.
1659
+
1660
+ .. versionadded:: 1.22.0
1661
+
1662
+ Returns
1663
+ -------
1664
+ variance : ndarray, see dtype parameter above
1665
+ If `out` is None, return a new array containing the variance,
1666
+ otherwise return a reference to the output array. If ddof is >= the
1667
+ number of non-NaN elements in a slice or the slice contains only
1668
+ NaNs, then the result for that slice is NaN.
1669
+
1670
+ See Also
1671
+ --------
1672
+ std : Standard deviation
1673
+ mean : Average
1674
+ var : Variance while not ignoring NaNs
1675
+ nanstd, nanmean
1676
+ :ref:`ufuncs-output-type`
1677
+
1678
+ Notes
1679
+ -----
1680
+ The variance is the average of the squared deviations from the mean,
1681
+ i.e., ``var = mean(abs(x - x.mean())**2)``.
1682
+
1683
+ The mean is normally calculated as ``x.sum() / N``, where ``N = len(x)``.
1684
+ If, however, `ddof` is specified, the divisor ``N - ddof`` is used
1685
+ instead. In standard statistical practice, ``ddof=1`` provides an
1686
+ unbiased estimator of the variance of a hypothetical infinite
1687
+ population. ``ddof=0`` provides a maximum likelihood estimate of the
1688
+ variance for normally distributed variables.
1689
+
1690
+ Note that for complex numbers, the absolute value is taken before
1691
+ squaring, so that the result is always real and nonnegative.
1692
+
1693
+ For floating-point input, the variance is computed using the same
1694
+ precision the input has. Depending on the input data, this can cause
1695
+ the results to be inaccurate, especially for `float32` (see example
1696
+ below). Specifying a higher-accuracy accumulator using the ``dtype``
1697
+ keyword can alleviate this issue.
1698
+
1699
+ For this function to work on sub-classes of ndarray, they must define
1700
+ `sum` with the kwarg `keepdims`.
1701
+
1702
+ Examples
1703
+ --------
1704
+ >>> a = np.array([[1, np.nan], [3, 4]])
1705
+ >>> np.nanvar(a)
1706
+ 1.5555555555555554
1707
+ >>> np.nanvar(a, axis=0)
1708
+ array([1., 0.])
1709
+ >>> np.nanvar(a, axis=1)
1710
+ array([0., 0.25]) # may vary
1711
+
1712
+ """
1713
+ arr, mask = _replace_nan(a, 0)
1714
+ if mask is None:
1715
+ return np.var(arr, axis=axis, dtype=dtype, out=out, ddof=ddof,
1716
+ keepdims=keepdims, where=where)
1717
+
1718
+ if dtype is not None:
1719
+ dtype = np.dtype(dtype)
1720
+ if dtype is not None and not issubclass(dtype.type, np.inexact):
1721
+ raise TypeError("If a is inexact, then dtype must be inexact")
1722
+ if out is not None and not issubclass(out.dtype.type, np.inexact):
1723
+ raise TypeError("If a is inexact, then out must be inexact")
1724
+
1725
+ # Compute mean
1726
+ if type(arr) is np.matrix:
1727
+ _keepdims = np._NoValue
1728
+ else:
1729
+ _keepdims = True
1730
+ # we need to special case matrix for reverse compatibility
1731
+ # in order for this to work, these sums need to be called with
1732
+ # keepdims=True, however matrix now raises an error in this case, but
1733
+ # the reason that it drops the keepdims kwarg is to force keepdims=True
1734
+ # so this used to work by serendipity.
1735
+ cnt = np.sum(~mask, axis=axis, dtype=np.intp, keepdims=_keepdims,
1736
+ where=where)
1737
+ avg = np.sum(arr, axis=axis, dtype=dtype, keepdims=_keepdims, where=where)
1738
+ avg = _divide_by_count(avg, cnt)
1739
+
1740
+ # Compute squared deviation from mean.
1741
+ np.subtract(arr, avg, out=arr, casting='unsafe', where=where)
1742
+ arr = _copyto(arr, 0, mask)
1743
+ if issubclass(arr.dtype.type, np.complexfloating):
1744
+ sqr = np.multiply(arr, arr.conj(), out=arr, where=where).real
1745
+ else:
1746
+ sqr = np.multiply(arr, arr, out=arr, where=where)
1747
+
1748
+ # Compute variance.
1749
+ var = np.sum(sqr, axis=axis, dtype=dtype, out=out, keepdims=keepdims,
1750
+ where=where)
1751
+
1752
+ # Precaution against reduced object arrays
1753
+ try:
1754
+ var_ndim = var.ndim
1755
+ except AttributeError:
1756
+ var_ndim = np.ndim(var)
1757
+ if var_ndim < cnt.ndim:
1758
+ # Subclasses of ndarray may ignore keepdims, so check here.
1759
+ cnt = cnt.squeeze(axis)
1760
+ dof = cnt - ddof
1761
+ var = _divide_by_count(var, dof)
1762
+
1763
+ isbad = (dof <= 0)
1764
+ if np.any(isbad):
1765
+ warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning,
1766
+ stacklevel=2)
1767
+ # NaN, inf, or negative numbers are all possible bad
1768
+ # values, so explicitly replace them with NaN.
1769
+ var = _copyto(var, np.nan, isbad)
1770
+ return var
1771
+
1772
+
1773
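`nanvar` above is a two-pass computation over the valid entries: a masked mean first, then the mean of squared deviations with an ``N - ddof`` divisor. A minimal 1-d sketch of the same arithmetic, ignoring the `where`/`out`/matrix special cases (`nanvar_sketch` is a hypothetical name):

import numpy as np

def nanvar_sketch(a, ddof=0):
    valid = np.asarray(a, dtype=float)
    valid = valid[~np.isnan(valid)]
    n = valid.size
    mean = valid.sum() / n
    # Sum of squared deviations over N - ddof.
    return ((valid - mean) ** 2).sum() / (n - ddof)

a = np.array([1.0, np.nan, 3.0, 4.0])
print(nanvar_sketch(a), np.nanvar(a))  # both ~1.5556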
+ def _nanstd_dispatcher(a, axis=None, dtype=None, out=None, ddof=None,
1774
+ keepdims=None, *, where=None):
1775
+ return (a, out)
1776
+
1777
+
1778
+ @array_function_dispatch(_nanstd_dispatcher)
1779
+ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue,
1780
+ *, where=np._NoValue):
1781
+ """
1782
+ Compute the standard deviation along the specified axis, while
1783
+ ignoring NaNs.
1784
+
1785
+ Returns the standard deviation, a measure of the spread of a
1786
+ distribution, of the non-NaN array elements. The standard deviation is
1787
+ computed for the flattened array by default, otherwise over the
1788
+ specified axis.
1789
+
1790
+ For all-NaN slices or slices with zero degrees of freedom, NaN is
1791
+ returned and a `RuntimeWarning` is raised.
1792
+
1793
+ .. versionadded:: 1.8.0
1794
+
1795
+ Parameters
1796
+ ----------
1797
+ a : array_like
1798
+ Calculate the standard deviation of the non-NaN values.
1799
+ axis : {int, tuple of int, None}, optional
1800
+ Axis or axes along which the standard deviation is computed. The default is
1801
+ to compute the standard deviation of the flattened array.
1802
+ dtype : dtype, optional
1803
+ Type to use in computing the standard deviation. For arrays of
1804
+ integer type the default is float64, for arrays of float types it
1805
+ is the same as the array type.
1806
+ out : ndarray, optional
1807
+ Alternative output array in which to place the result. It must have
1808
+ the same shape as the expected output but the type (of the
1809
+ calculated values) will be cast if necessary.
1810
+ ddof : int, optional
1811
+ Means Delta Degrees of Freedom. The divisor used in calculations
1812
+ is ``N - ddof``, where ``N`` represents the number of non-NaN
1813
+ elements. By default `ddof` is zero.
1814
+
1815
+ keepdims : bool, optional
1816
+ If this is set to True, the axes which are reduced are left
1817
+ in the result as dimensions with size one. With this option,
1818
+ the result will broadcast correctly against the original `a`.
1819
+
1820
+ If this value is anything but the default it is passed through
1821
+ as-is to the relevant functions of the sub-classes. If these
1822
+ functions do not have a `keepdims` kwarg, a RuntimeError will
1823
+ be raised.
1824
+ where : array_like of bool, optional
1825
+ Elements to include in the standard deviation.
1826
+ See `~numpy.ufunc.reduce` for details.
1827
+
1828
+ .. versionadded:: 1.22.0
1829
+
1830
+ Returns
1831
+ -------
1832
+ standard_deviation : ndarray, see dtype parameter above.
1833
+ If `out` is None, return a new array containing the standard
1834
+ deviation, otherwise return a reference to the output array. If
1835
+ ddof is >= the number of non-NaN elements in a slice or the slice
1836
+ contains only NaNs, then the result for that slice is NaN.
1837
+
1838
+ See Also
1839
+ --------
1840
+ var, mean, std
1841
+ nanvar, nanmean
1842
+ :ref:`ufuncs-output-type`
1843
+
1844
+ Notes
1845
+ -----
1846
+ The standard deviation is the square root of the average of the squared
1847
+ deviations from the mean: ``std = sqrt(mean(abs(x - x.mean())**2))``.
1848
+
1849
+ The average squared deviation is normally calculated as
1850
+ ``x.sum() / N``, where ``N = len(x)``. If, however, `ddof` is
1851
+ specified, the divisor ``N - ddof`` is used instead. In standard
1852
+ statistical practice, ``ddof=1`` provides an unbiased estimator of the
1853
+ variance of the infinite population. ``ddof=0`` provides a maximum
1854
+ likelihood estimate of the variance for normally distributed variables.
1855
+ The standard deviation computed in this function is the square root of
1856
+ the estimated variance, so even with ``ddof=1``, it will not be an
1857
+ unbiased estimate of the standard deviation per se.
1858
+
1859
+ Note that, for complex numbers, `std` takes the absolute value before
1860
+ squaring, so that the result is always real and nonnegative.
1861
+
1862
+ For floating-point input, the *std* is computed using the same
1863
+ precision the input has. Depending on the input data, this can cause
1864
+ the results to be inaccurate, especially for float32 (see example
1865
+ below). Specifying a higher-accuracy accumulator using the `dtype`
1866
+ keyword can alleviate this issue.
1867
+
1868
+ Examples
1869
+ --------
1870
+ >>> a = np.array([[1, np.nan], [3, 4]])
1871
+ >>> np.nanstd(a)
1872
+ 1.247219128924647
1873
+ >>> np.nanstd(a, axis=0)
1874
+ array([1., 0.])
1875
+ >>> np.nanstd(a, axis=1)
1876
+ array([0., 0.5]) # may vary
1877
+
1878
+ """
1879
+ var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
1880
+ keepdims=keepdims, where=where)
1881
+ if isinstance(var, np.ndarray):
1882
+ std = np.sqrt(var, out=var)
1883
+ elif hasattr(var, 'dtype'):
1884
+ std = var.dtype.type(np.sqrt(var))
1885
+ else:
1886
+ std = np.sqrt(var)
1887
+ return std
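`nanstd` is literally the square root of `nanvar`, taken in place when the result is an ndarray, so `ddof` and `where` flow through unchanged. A quick sanity check of that relationship (illustrative only):

import numpy as np

a = np.array([[1.0, np.nan], [3.0, 4.0]])
print(np.allclose(np.nanstd(a, ddof=1),
                  np.sqrt(np.nanvar(a, ddof=1))))  # True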
venv/lib/python3.10/site-packages/numpy/lib/npyio.py ADDED
@@ -0,0 +1,2547 @@
1
+ import os
2
+ import re
3
+ import functools
4
+ import itertools
5
+ import warnings
6
+ import weakref
7
+ import contextlib
8
+ import operator
9
+ from operator import itemgetter, index as opindex, methodcaller
10
+ from collections.abc import Mapping
11
+
12
+ import numpy as np
13
+ from . import format
14
+ from ._datasource import DataSource
15
+ from numpy.core import overrides
16
+ from numpy.core.multiarray import packbits, unpackbits
17
+ from numpy.core._multiarray_umath import _load_from_filelike
18
+ from numpy.core.overrides import set_array_function_like_doc, set_module
19
+ from ._iotools import (
20
+ LineSplitter, NameValidator, StringConverter, ConverterError,
21
+ ConverterLockError, ConversionWarning, _is_string_like,
22
+ has_nested_fields, flatten_dtype, easy_dtype, _decode_line
23
+ )
24
+
25
+ from numpy.compat import (
26
+ asbytes, asstr, asunicode, os_fspath, os_PathLike,
27
+ pickle
28
+ )
29
+
30
+
31
+ __all__ = [
32
+ 'savetxt', 'loadtxt', 'genfromtxt',
33
+ 'recfromtxt', 'recfromcsv', 'load', 'save', 'savez',
34
+ 'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
35
+ ]
36
+
37
+
38
+ array_function_dispatch = functools.partial(
39
+ overrides.array_function_dispatch, module='numpy')
40
+
41
+
42
+ class BagObj:
43
+ """
44
+ BagObj(obj)
45
+
46
+ Convert attribute look-ups to getitems on the object passed in.
47
+
48
+ Parameters
49
+ ----------
50
+ obj : class instance
51
+ Object on which attribute look-up is performed.
52
+
53
+ Examples
54
+ --------
55
+ >>> from numpy.lib.npyio import BagObj as BO
56
+ >>> class BagDemo:
57
+ ... def __getitem__(self, key): # An instance of BagObj(BagDemo)
58
+ ... # will call this method when any
59
+ ... # attribute look-up is required
60
+ ... result = "Doesn't matter what you want, "
61
+ ... return result + "you're gonna get this"
62
+ ...
63
+ >>> demo_obj = BagDemo()
64
+ >>> bagobj = BO(demo_obj)
65
+ >>> bagobj.hello_there
66
+ "Doesn't matter what you want, you're gonna get this"
67
+ >>> bagobj.I_can_be_anything
68
+ "Doesn't matter what you want, you're gonna get this"
69
+
70
+ """
71
+
72
+ def __init__(self, obj):
73
+ # Use weakref to make NpzFile objects collectable by refcount
74
+ self._obj = weakref.proxy(obj)
75
+
76
+ def __getattribute__(self, key):
77
+ try:
78
+ return object.__getattribute__(self, '_obj')[key]
79
+ except KeyError:
80
+ raise AttributeError(key) from None
81
+
82
+ def __dir__(self):
83
+ """
84
+ Enables dir(bagobj) to list the files in an NpzFile.
85
+
86
+ This also enables tab-completion in an interpreter or IPython.
87
+ """
88
+ return list(object.__getattribute__(self, '_obj').keys())
89
+
90
+
91
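`BagObj` reroutes every attribute access through a `__getitem__` call on the wrapped object and translates `KeyError` into `AttributeError`, which keeps `hasattr` and tab-completion honest. The same pattern in miniature, using `__getattr__` instead of the full `__getattribute__`/weakref setup (`AttrView` is a hypothetical name):

class AttrView:
    def __init__(self, mapping):
        self._mapping = mapping

    def __getattr__(self, key):
        # Only called when normal attribute lookup fails,
        # so `_mapping` itself remains reachable.
        try:
            return self._mapping[key]
        except KeyError:
            raise AttributeError(key) from None

v = AttrView({'x': 42})
print(v.x)  # 42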
+ def zipfile_factory(file, *args, **kwargs):
92
+ """
93
+ Create a ZipFile.
94
+
95
+ Allows for Zip64, and the `file` argument can accept file, str, or
96
+ pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
97
+ constructor.
98
+ """
99
+ if not hasattr(file, 'read'):
100
+ file = os_fspath(file)
101
+ import zipfile
102
+ kwargs['allowZip64'] = True
103
+ return zipfile.ZipFile(file, *args, **kwargs)
104
+
105
+
106
+ class NpzFile(Mapping):
107
+ """
108
+ NpzFile(fid)
109
+
110
+ A dictionary-like object with lazy-loading of files in the zipped
111
+ archive provided on construction.
112
+
113
+ `NpzFile` is used to load files in the NumPy ``.npz`` data archive
114
+ format. It assumes that files in the archive have a ``.npy`` extension;
115
+ other files are ignored.
116
+
117
+ The arrays and file strings are lazily loaded on either
118
+ getitem access using ``obj['key']`` or attribute lookup using
119
+ ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
120
+ be obtained with ``obj.files`` and the ZipFile object itself using
121
+ ``obj.zip``.
122
+
123
+ Attributes
124
+ ----------
125
+ files : list of str
126
+ List of all files in the archive with a ``.npy`` extension.
127
+ zip : ZipFile instance
128
+ The ZipFile object initialized with the zipped archive.
129
+ f : BagObj instance
130
+ An object on which attribute lookup can be performed as an alternative
131
+ to getitem access on the `NpzFile` instance itself.
132
+ allow_pickle : bool, optional
133
+ Allow loading pickled data. Default: False
134
+
135
+ .. versionchanged:: 1.16.3
136
+ Made default False in response to CVE-2019-6446.
137
+
138
+ pickle_kwargs : dict, optional
139
+ Additional keyword arguments to pass on to pickle.load.
140
+ These are only useful when loading object arrays saved on
141
+ Python 2 when using Python 3.
142
+ max_header_size : int, optional
143
+ Maximum allowed size of the header. Large headers may not be safe
144
+ to load securely and thus require explicitly passing a larger value.
145
+ See :py:func:`ast.literal_eval()` for details.
146
+ This option is ignored when `allow_pickle` is passed. In that case
147
+ the file is by definition trusted and the limit is unnecessary.
148
+
149
+ Parameters
150
+ ----------
151
+ fid : file or str
152
+ The zipped archive to open. This is either a file-like object
153
+ or a string containing the path to the archive.
154
+ own_fid : bool, optional
155
+ Whether NpzFile should close the file handle.
156
+ Requires that `fid` is a file-like object.
157
+
158
+ Examples
159
+ --------
160
+ >>> from tempfile import TemporaryFile
161
+ >>> outfile = TemporaryFile()
162
+ >>> x = np.arange(10)
163
+ >>> y = np.sin(x)
164
+ >>> np.savez(outfile, x=x, y=y)
165
+ >>> _ = outfile.seek(0)
166
+
167
+ >>> npz = np.load(outfile)
168
+ >>> isinstance(npz, np.lib.npyio.NpzFile)
169
+ True
170
+ >>> npz
171
+ NpzFile 'object' with keys x, y
172
+ >>> sorted(npz.files)
173
+ ['x', 'y']
174
+ >>> npz['x'] # getitem access
175
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
176
+ >>> npz.f.x # attribute lookup
177
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
178
+
179
+ """
180
+ # Make __exit__ safe if zipfile_factory raises an exception
181
+ zip = None
182
+ fid = None
183
+ _MAX_REPR_ARRAY_COUNT = 5
184
+
185
+ def __init__(self, fid, own_fid=False, allow_pickle=False,
186
+ pickle_kwargs=None, *,
187
+ max_header_size=format._MAX_HEADER_SIZE):
188
+ # Import is postponed to here since zipfile depends on gzip, an
189
+ # optional component of the so-called standard library.
190
+ _zip = zipfile_factory(fid)
191
+ self._files = _zip.namelist()
192
+ self.files = []
193
+ self.allow_pickle = allow_pickle
194
+ self.max_header_size = max_header_size
195
+ self.pickle_kwargs = pickle_kwargs
196
+ for x in self._files:
197
+ if x.endswith('.npy'):
198
+ self.files.append(x[:-4])
199
+ else:
200
+ self.files.append(x)
201
+ self.zip = _zip
202
+ self.f = BagObj(self)
203
+ if own_fid:
204
+ self.fid = fid
205
+
206
+ def __enter__(self):
207
+ return self
208
+
209
+ def __exit__(self, exc_type, exc_value, traceback):
210
+ self.close()
211
+
212
+ def close(self):
213
+ """
214
+ Close the file.
215
+
216
+ """
217
+ if self.zip is not None:
218
+ self.zip.close()
219
+ self.zip = None
220
+ if self.fid is not None:
221
+ self.fid.close()
222
+ self.fid = None
223
+ self.f = None # break reference cycle
224
+
225
+ def __del__(self):
226
+ self.close()
227
+
228
+ # Implement the Mapping ABC
229
+ def __iter__(self):
230
+ return iter(self.files)
231
+
232
+ def __len__(self):
233
+ return len(self.files)
234
+
235
+ def __getitem__(self, key):
236
+ # FIXME: This seems like it will copy strings around
237
+ # more than is strictly necessary. The zipfile
238
+ # will read the string and then
239
+ # the format.read_array will copy the string
240
+ # to another place in memory.
241
+ # It would be better if the zipfile could read
242
+ # (or at least uncompress) the data
243
+ # directly into the array memory.
244
+ member = False
245
+ if key in self._files:
246
+ member = True
247
+ elif key in self.files:
248
+ member = True
249
+ key += '.npy'
250
+ if member:
251
+ bytes = self.zip.open(key)
252
+ magic = bytes.read(len(format.MAGIC_PREFIX))
253
+ bytes.close()
254
+ if magic == format.MAGIC_PREFIX:
255
+ bytes = self.zip.open(key)
256
+ return format.read_array(bytes,
257
+ allow_pickle=self.allow_pickle,
258
+ pickle_kwargs=self.pickle_kwargs,
259
+ max_header_size=self.max_header_size)
260
+ else:
261
+ return self.zip.read(key)
262
+ else:
263
+ raise KeyError(f"{key} is not a file in the archive")
264
+
265
+ def __contains__(self, key):
266
+ return (key in self._files or key in self.files)
267
+
268
+ def __repr__(self):
269
+ # Get filename or default to `object`
270
+ if isinstance(self.fid, str):
271
+ filename = self.fid
272
+ else:
273
+ filename = getattr(self.fid, "name", "object")
274
+
275
+ # Get the name of arrays
276
+ array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT])
277
+ if len(self.files) > self._MAX_REPR_ARRAY_COUNT:
278
+ array_names += "..."
279
+ return f"NpzFile {filename!r} with keys: {array_names}"
280
+
281
+
282
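Since `NpzFile` implements the `Mapping` ABC and the context-manager protocol, the idiomatic way to consume an archive is a `with` block, which guarantees the underlying zip handle is closed. A short round-trip through an in-memory buffer (illustrative):

import io
import numpy as np

buf = io.BytesIO()
np.savez(buf, weights=np.ones(3))
buf.seek(0)
with np.load(buf) as npz:       # detected as a zip, returned as NpzFile
    print(list(npz))            # ['weights']
    print(npz['weights'])       # [1. 1. 1.]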
+ @set_module('numpy')
283
+ def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
284
+ encoding='ASCII', *, max_header_size=format._MAX_HEADER_SIZE):
285
+ """
286
+ Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
287
+
288
+     .. warning:: Loading files that contain object arrays uses the ``pickle``
+        module, which is not secure against erroneous or maliciously
+        constructed data. Consider passing ``allow_pickle=False`` to
+        load data that is known not to contain object arrays for the
+        safer handling of untrusted sources.
+
+     Parameters
+     ----------
+     file : file-like object, string, or pathlib.Path
+         The file to read. File-like objects must support the
+         ``seek()`` and ``read()`` methods and must always
+         be opened in binary mode. Pickled files require that the
+         file-like object support the ``readline()`` method as well.
+     mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
+         If not None, then memory-map the file, using the given mode (see
+         `numpy.memmap` for a detailed description of the modes). A
+         memory-mapped array is kept on disk. However, it can be accessed
+         and sliced like any ndarray. Memory mapping is especially useful
+         for accessing small fragments of large files without reading the
+         entire file into memory.
+     allow_pickle : bool, optional
+         Allow loading pickled object arrays stored in npy files. Reasons for
+         disallowing pickles include security, as loading pickled data can
+         execute arbitrary code. If pickles are disallowed, loading object
+         arrays will fail. Default: False
+
+         .. versionchanged:: 1.16.3
+             Made default False in response to CVE-2019-6446.
+
+     fix_imports : bool, optional
+         Only useful when loading Python 2 generated pickled files on Python 3,
+         which includes npy/npz files containing object arrays. If `fix_imports`
+         is True, pickle will try to map the old Python 2 names to the new names
+         used in Python 3.
+     encoding : str, optional
+         What encoding to use when reading Python 2 strings. Only useful when
+         loading Python 2 generated pickled files in Python 3, which includes
+         npy/npz files containing object arrays. Values other than 'latin1',
+         'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
+         data. Default: 'ASCII'
+     max_header_size : int, optional
+         Maximum allowed size of the header. Large headers may not be safe
+         to load securely and thus require explicitly passing a larger value.
+         See :py:func:`ast.literal_eval()` for details.
+         This option is ignored when `allow_pickle` is passed. In that case
+         the file is by definition trusted and the limit is unnecessary.
+
+     Returns
+     -------
+     result : array, tuple, dict, etc.
+         Data stored in the file. For ``.npz`` files, the returned instance
+         of NpzFile class must be closed to avoid leaking file descriptors.
+
+     Raises
+     ------
+     OSError
+         If the input file does not exist or cannot be read.
+     UnpicklingError
+         If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
+     ValueError
+         The file contains an object array, but ``allow_pickle=False`` given.
+     EOFError
+         When calling ``np.load`` multiple times on the same file handle,
+         if all data has already been read.
+
+     See Also
+     --------
+     save, savez, savez_compressed, loadtxt
+     memmap : Create a memory-map to an array stored in a file on disk.
+     lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
+
+     Notes
+     -----
+     - If the file contains pickle data, then whatever object is stored
+       in the pickle is returned.
+     - If the file is a ``.npy`` file, then a single array is returned.
+     - If the file is a ``.npz`` file, then a dictionary-like object is
+       returned, containing ``{filename: array}`` key-value pairs, one for
+       each file in the archive.
+     - If the file is a ``.npz`` file, the returned value supports the
+       context manager protocol in a similar fashion to the open function::
+
+         with load('foo.npz') as data:
+             a = data['a']
+
+       The underlying file descriptor is closed when exiting the 'with'
+       block.
+
+     Examples
+     --------
+     Store data to disk, and load it again:
+
+     >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
+     >>> np.load('/tmp/123.npy')
+     array([[1, 2, 3],
+            [4, 5, 6]])
+
+     Store multiple arrays to disk in a single archive, and load them again:
+
+     >>> a = np.array([[1, 2, 3], [4, 5, 6]])
+     >>> b = np.array([1, 2])
+     >>> np.savez('/tmp/123.npz', a=a, b=b)
+     >>> data = np.load('/tmp/123.npz')
+     >>> data['a']
+     array([[1, 2, 3],
+            [4, 5, 6]])
+     >>> data['b']
+     array([1, 2])
+     >>> data.close()
+
+     Mem-map the stored array, and then access the second row
+     directly from disk:
+
+     >>> X = np.load('/tmp/123.npy', mmap_mode='r')
+     >>> X[1, :]
+     memmap([4, 5, 6])
+
+     """
+     if encoding not in ('ASCII', 'latin1', 'bytes'):
+         # The 'encoding' value for pickle also affects what encoding
+         # the serialized binary data of NumPy arrays is loaded
+         # in. Pickle does not pass on the encoding information to
+         # NumPy. The unpickling code in numpy.core.multiarray is
+         # written to assume that unicode data appearing where binary
+         # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
+         #
+         # Other encoding values can corrupt binary data, and we
+         # purposefully disallow them. For the same reason, the errors=
+         # argument is not exposed, as values other than 'strict'
+         # can similarly silently corrupt numerical data.
+         raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
+
+     pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
+
+     with contextlib.ExitStack() as stack:
+         if hasattr(file, 'read'):
+             fid = file
+             own_fid = False
+         else:
+             fid = stack.enter_context(open(os_fspath(file), "rb"))
+             own_fid = True
+
+         # Code to distinguish NumPy binary files from pickles.
+         _ZIP_PREFIX = b'PK\x03\x04'
+         _ZIP_SUFFIX = b'PK\x05\x06'  # empty zip files start with this
+         N = len(format.MAGIC_PREFIX)
+         magic = fid.read(N)
+         if not magic:
+             raise EOFError("No data left in file")
+         # If the file size is less than N, we need to make sure not
+         # to seek past the beginning of the file
+         fid.seek(-min(N, len(magic)), 1)  # back-up
+         if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
+             # zip-file (assume .npz)
+             # Potentially transfer file ownership to NpzFile
+             stack.pop_all()
+             ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
+                           pickle_kwargs=pickle_kwargs,
+                           max_header_size=max_header_size)
+             return ret
+         elif magic == format.MAGIC_PREFIX:
+             # .npy file
+             if mmap_mode:
+                 if allow_pickle:
+                     max_header_size = 2**64
+                 return format.open_memmap(file, mode=mmap_mode,
+                                           max_header_size=max_header_size)
+             else:
+                 return format.read_array(fid, allow_pickle=allow_pickle,
+                                          pickle_kwargs=pickle_kwargs,
+                                          max_header_size=max_header_size)
+         else:
+             # Try a pickle
+             if not allow_pickle:
+                 raise ValueError("Cannot load file containing pickled data "
+                                  "when allow_pickle=False")
+             try:
+                 return pickle.load(fid, **pickle_kwargs)
+             except Exception as e:
+                 raise pickle.UnpicklingError(
+                     f"Failed to interpret file {file!r} as a pickle") from e
+
+
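+ # A minimal sketch (not part of this module) of the magic-byte dispatch
+ # implemented in ``load`` above: ``PK\x03\x04``/``PK\x05\x06`` marks a zip
+ # archive (.npz), the ``\x93NUMPY`` prefix marks a .npy array, and anything
+ # else falls through to pickle::
+ #
+ #     >>> np.save('/tmp/demo.npy', np.arange(3))
+ #     >>> with open('/tmp/demo.npy', 'rb') as f:
+ #     ...     f.read(6) == format.MAGIC_PREFIX   # b'\x93NUMPY'
+ #     True
+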
+ def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
+     return (arr,)
+
+
+ @array_function_dispatch(_save_dispatcher)
+ def save(file, arr, allow_pickle=True, fix_imports=True):
+     """
+     Save an array to a binary file in NumPy ``.npy`` format.
+
+     Parameters
+     ----------
+     file : file, str, or pathlib.Path
+         File or filename to which the data is saved. If file is a file-object,
+         then the filename is unchanged. If file is a string or Path, a ``.npy``
+         extension will be appended to the filename if it does not already
+         have one.
+     arr : array_like
+         Array data to be saved.
+     allow_pickle : bool, optional
+         Allow saving object arrays using Python pickles. Reasons for disallowing
+         pickles include security (loading pickled data can execute arbitrary
+         code) and portability (pickled objects may not be loadable on different
+         Python installations, for example if the stored objects require libraries
+         that are not available, and not all pickled data is compatible between
+         Python 2 and Python 3).
+         Default: True
+     fix_imports : bool, optional
+         Only useful in forcing objects in object arrays on Python 3 to be
+         pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
+         will try to map the new Python 3 names to the old module names used in
+         Python 2, so that the pickle data stream is readable with Python 2.
+
+     See Also
+     --------
+     savez : Save several arrays into a ``.npz`` archive
+     savetxt, load
+
+     Notes
+     -----
+     For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
+
+     Any data saved to the file is appended to the end of the file.
+
+     Examples
+     --------
+     >>> from tempfile import TemporaryFile
+     >>> outfile = TemporaryFile()
+
+     >>> x = np.arange(10)
+     >>> np.save(outfile, x)
+
+     >>> _ = outfile.seek(0)  # Only needed here to simulate closing & reopening file
+     >>> np.load(outfile)
+     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+
+     >>> with open('test.npy', 'wb') as f:
+     ...     np.save(f, np.array([1, 2]))
+     ...     np.save(f, np.array([1, 3]))
+     >>> with open('test.npy', 'rb') as f:
+     ...     a = np.load(f)
+     ...     b = np.load(f)
+     >>> print(a, b)
+     # [1 2] [1 3]
+     """
+     if hasattr(file, 'write'):
+         file_ctx = contextlib.nullcontext(file)
+     else:
+         file = os_fspath(file)
+         if not file.endswith('.npy'):
+             file = file + '.npy'
+         file_ctx = open(file, "wb")
+
+     with file_ctx as fid:
+         arr = np.asanyarray(arr)
+         format.write_array(fid, arr, allow_pickle=allow_pickle,
+                            pickle_kwargs=dict(fix_imports=fix_imports))
+
+
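+ # A small illustrative sketch (not part of this module): saving to a bare
+ # filename appends the ``.npy`` extension, as implemented above::
+ #
+ #     >>> np.save('/tmp/ext_demo', np.zeros(2))
+ #     >>> import os
+ #     >>> os.path.exists('/tmp/ext_demo.npy')
+ #     True
+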
+ def _savez_dispatcher(file, *args, **kwds):
+     yield from args
+     yield from kwds.values()
+
+
+ @array_function_dispatch(_savez_dispatcher)
+ def savez(file, *args, **kwds):
+     """Save several arrays into a single file in uncompressed ``.npz`` format.
+
+     Provide arrays as keyword arguments to store them under the
+     corresponding name in the output file: ``savez(fn, x=x, y=y)``.
+
+     If arrays are specified as positional arguments, i.e., ``savez(fn,
+     x, y)``, their names will be `arr_0`, `arr_1`, etc.
+
+     Parameters
+     ----------
+     file : str or file
+         Either the filename (string) or an open file (file-like object)
+         where the data will be saved. If file is a string or a Path, the
+         ``.npz`` extension will be appended to the filename if it is not
+         already there.
+     args : Arguments, optional
+         Arrays to save to the file. Please use keyword arguments (see
+         `kwds` below) to assign names to arrays. Arrays specified as
+         args will be named "arr_0", "arr_1", and so on.
+     kwds : Keyword arguments, optional
+         Arrays to save to the file. Each array will be saved to the
+         output file with its corresponding keyword name.
+
+     Returns
+     -------
+     None
+
+     See Also
+     --------
+     save : Save a single array to a binary file in NumPy format.
+     savetxt : Save an array to a file as plain text.
+     savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+     Notes
+     -----
+     The ``.npz`` file format is a zipped archive of files named after the
+     variables they contain. The archive is not compressed and each file
+     in the archive contains one variable in ``.npy`` format. For a
+     description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
+
+     When opening the saved ``.npz`` file with `load` a `NpzFile` object is
+     returned. This is a dictionary-like object which can be queried for
+     its list of arrays (with the ``.files`` attribute), and for the arrays
+     themselves.
+
+     Keys passed in `kwds` are used as filenames inside the ZIP archive.
+     Therefore, keys should be valid filenames; e.g., avoid keys that begin with
+     ``/`` or contain ``.``.
+
+     When naming variables with keyword arguments, it is not possible to name a
+     variable ``file``, as this would cause the ``file`` argument to be defined
+     twice in the call to ``savez``.
+
+     Examples
+     --------
+     >>> from tempfile import TemporaryFile
+     >>> outfile = TemporaryFile()
+     >>> x = np.arange(10)
+     >>> y = np.sin(x)
+
+     Using `savez` with \\*args, the arrays are saved with default names.
+
+     >>> np.savez(outfile, x, y)
+     >>> _ = outfile.seek(0)  # Only needed here to simulate closing & reopening file
+     >>> npzfile = np.load(outfile)
+     >>> npzfile.files
+     ['arr_0', 'arr_1']
+     >>> npzfile['arr_0']
+     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+     Using `savez` with \\**kwds, the arrays are saved with the keyword names.
+
+     >>> outfile = TemporaryFile()
+     >>> np.savez(outfile, x=x, y=y)
+     >>> _ = outfile.seek(0)
+     >>> npzfile = np.load(outfile)
+     >>> sorted(npzfile.files)
+     ['x', 'y']
+     >>> npzfile['x']
+     array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+     """
+     _savez(file, args, kwds, False)
+
+
+ def _savez_compressed_dispatcher(file, *args, **kwds):
+     yield from args
+     yield from kwds.values()
+
+
+ @array_function_dispatch(_savez_compressed_dispatcher)
+ def savez_compressed(file, *args, **kwds):
+     """
+     Save several arrays into a single file in compressed ``.npz`` format.
+
+     Provide arrays as keyword arguments to store them under the
+     corresponding name in the output file: ``savez_compressed(fn, x=x, y=y)``.
+
+     If arrays are specified as positional arguments, i.e.,
+     ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`, etc.
+
+     Parameters
+     ----------
+     file : str or file
+         Either the filename (string) or an open file (file-like object)
+         where the data will be saved. If file is a string or a Path, the
+         ``.npz`` extension will be appended to the filename if it is not
+         already there.
+     args : Arguments, optional
+         Arrays to save to the file. Please use keyword arguments (see
+         `kwds` below) to assign names to arrays. Arrays specified as
+         args will be named "arr_0", "arr_1", and so on.
+     kwds : Keyword arguments, optional
+         Arrays to save to the file. Each array will be saved to the
+         output file with its corresponding keyword name.
+
+     Returns
+     -------
+     None
+
+     See Also
+     --------
+     numpy.save : Save a single array to a binary file in NumPy format.
+     numpy.savetxt : Save an array to a file as plain text.
+     numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
+     numpy.load : Load the files created by savez_compressed.
+
+     Notes
+     -----
+     The ``.npz`` file format is a zipped archive of files named after the
+     variables they contain. The archive is compressed with
+     ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
+     in ``.npy`` format. For a description of the ``.npy`` format, see
+     :py:mod:`numpy.lib.format`.
+
+     When opening the saved ``.npz`` file with `load` a `NpzFile` object is
+     returned. This is a dictionary-like object which can be queried for
+     its list of arrays (with the ``.files`` attribute), and for the arrays
+     themselves.
+
+     Examples
+     --------
+     >>> test_array = np.random.rand(3, 2)
+     >>> test_vector = np.random.rand(4)
+     >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
+     >>> loaded = np.load('/tmp/123.npz')
+     >>> print(np.array_equal(test_array, loaded['a']))
+     True
+     >>> print(np.array_equal(test_vector, loaded['b']))
+     True
+
+     """
+     _savez(file, args, kwds, True)
+
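+ # A short illustrative sketch (not part of this module): the only difference
+ # from ``savez`` is the ``zipfile.ZIP_DEFLATED`` flag, so compressible data
+ # takes less space on disk::
+ #
+ #     >>> import os
+ #     >>> np.savez('/tmp/raw.npz', a=np.zeros(100000))
+ #     >>> np.savez_compressed('/tmp/comp.npz', a=np.zeros(100000))
+ #     >>> os.path.getsize('/tmp/comp.npz') < os.path.getsize('/tmp/raw.npz')
+ #     True
+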
+
+ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
+     # Import is postponed to here since zipfile depends on gzip, an optional
+     # component of the so-called standard library.
+     import zipfile
+
+     if not hasattr(file, 'write'):
+         file = os_fspath(file)
+         if not file.endswith('.npz'):
+             file = file + '.npz'
+
+     namedict = kwds
+     for i, val in enumerate(args):
+         key = 'arr_%d' % i
+         if key in namedict.keys():
+             raise ValueError(
+                 "Cannot use un-named variables and keyword %s" % key)
+         namedict[key] = val
+
+     if compress:
+         compression = zipfile.ZIP_DEFLATED
+     else:
+         compression = zipfile.ZIP_STORED
+
+     zipf = zipfile_factory(file, mode="w", compression=compression)
+
+     for key, val in namedict.items():
+         fname = key + '.npy'
+         val = np.asanyarray(val)
+         # always force zip64, gh-10776
+         with zipf.open(fname, 'w', force_zip64=True) as fid:
+             format.write_array(fid, val,
+                                allow_pickle=allow_pickle,
+                                pickle_kwargs=pickle_kwargs)
+
+     zipf.close()
+
+
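+ # An illustrative sketch (not part of this module) of the naming rule above:
+ # positional arrays become ``arr_0``, ``arr_1``, ..., and a keyword that
+ # collides with one of those generated names raises ValueError::
+ #
+ #     >>> np.savez('/tmp/collide.npz', np.ones(1), arr_0=np.zeros(1))
+ #     Traceback (most recent call last):
+ #         ...
+ #     ValueError: Cannot use un-named variables and keyword arr_0
+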
+ def _ensure_ndmin_ndarray_check_param(ndmin):
+     """Just checks whether the parameter ndmin is supported by
+     _ensure_ndmin_ndarray. It is intended to be used as
+     verification before running anything expensive.
+     e.g. loadtxt, genfromtxt
+     """
+     # Check correctness of the values of `ndmin`
+     if ndmin not in [0, 1, 2]:
+         raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")
+
+
+ def _ensure_ndmin_ndarray(a, *, ndmin: int):
+     """This is a helper function of loadtxt and genfromtxt to ensure
+     proper minimum dimension as requested
+
+     ndmin : int. Supported values 0, 1, 2
+     ^^ whenever this changes, keep in sync with
+        _ensure_ndmin_ndarray_check_param
+     """
+     # Verify that the array has at least dimensions `ndmin`.
+     # Tweak the size and shape of the arrays - remove extraneous dimensions
+     if a.ndim > ndmin:
+         a = np.squeeze(a)
+     # and ensure we have the minimum number of dimensions asked for
+     # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
+     if a.ndim < ndmin:
+         if ndmin == 1:
+             a = np.atleast_1d(a)
+         elif ndmin == 2:
+             a = np.atleast_2d(a).T
+
+     return a
+
+
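+ # A quick illustration (not part of this module) of the ndmin contract:
+ # extra singleton dimensions are squeezed first, then the array is padded
+ # back up to the requested minimum::
+ #
+ #     >>> from io import StringIO
+ #     >>> np.loadtxt(StringIO("7"), ndmin=2).shape
+ #     (1, 1)
+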
+ # amount of lines loadtxt reads in one chunk, can be overridden for testing
+ _loadtxt_chunksize = 50000
+
+
+ def _check_nonneg_int(value, name="argument"):
+     try:
+         operator.index(value)
+     except TypeError:
+         raise TypeError(f"{name} must be an integer") from None
+     if value < 0:
+         raise ValueError(f"{name} must be nonnegative")
+
+
+ def _preprocess_comments(iterable, comments, encoding):
+     """
+     Generator that consumes an iterable of lines and strips out the
+     multiple (or multi-character) comments from each line.
+     This is a pre-processing step to achieve feature parity with loadtxt
+     (we assume that this is a niche feature).
+     """
+     for line in iterable:
+         if isinstance(line, bytes):
+             # Need to handle conversion here, or the splitting would fail
+             line = line.decode(encoding)
+
+         for c in comments:
+             line = line.split(c, 1)[0]
+
+         yield line
+
+
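+ # A minimal illustration (not part of this module) of the stripping rule:
+ # each comment marker truncates the line at its first occurrence::
+ #
+ #     >>> list(_preprocess_comments(iter(["a,b // x", "c,d"]), ["//"], None))
+ #     ['a,b ', 'c,d']
+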
+ def _read(fname, *, delimiter=',', comment='#', quote='"',
+           imaginary_unit='j', usecols=None, skiplines=0,
+           max_rows=None, converters=None, ndmin=None, unpack=False,
+           dtype=np.float64, encoding="bytes"):
+     r"""
+     Read a NumPy array from a text file.
+
+     Parameters
+     ----------
+     fname : str or file object
+         The filename or the file to be read.
+     delimiter : str, optional
+         Field delimiter of the fields in line of the file.
+         Default is a comma, ','. If None any sequence of whitespace is
+         considered a delimiter.
+     comment : str or sequence of str or None, optional
+         Character that begins a comment. All text from the comment
+         character to the end of the line is ignored.
+         Multiple comments or multiple-character comment strings are supported,
+         but may be slower and `quote` must be empty if used.
+         Use None to disable all use of comments.
+     quote : str or None, optional
+         Character that is used to quote string fields. Default is '"'
+         (a double quote). Use None to disable quote support.
+     imaginary_unit : str, optional
+         Character that represents the imaginary unit `sqrt(-1)`.
+         Default is 'j'.
+     usecols : array_like, optional
+         A one-dimensional array of integer column numbers. These are the
+         columns from the file to be included in the array. If this value
+         is not given, all the columns are used.
+     skiplines : int, optional
+         Number of lines to skip before interpreting the data in the file.
+     max_rows : int, optional
+         Maximum number of rows of data to read. Default is to read the
+         entire file.
+     converters : dict or callable, optional
+         A function to parse all column strings into the desired value, or
+         a dictionary mapping column number to a parser function.
+         E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
+         Converters can also be used to provide a default value for missing
+         data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
+         convert empty fields to 0.
+         Default: None
+     ndmin : int, optional
+         Minimum dimension of the array returned.
+         Allowed values are 0, 1 or 2. Default is 0.
+     unpack : bool, optional
+         If True, the returned array is transposed, so that arguments may be
+         unpacked using ``x, y, z = read(...)``. When used with a structured
+         data-type, arrays are returned for each field. Default is False.
+     dtype : numpy data type
+         A NumPy dtype instance, can be a structured dtype to map to the
+         columns of the file.
+     encoding : str, optional
+         Encoding used to decode the inputfile. The special value 'bytes'
+         (the default) enables backwards-compatible behavior for `converters`,
+         ensuring that inputs to the converter functions are encoded
+         bytes objects. The special value 'bytes' has no additional effect if
+         ``converters=None``. If encoding is ``'bytes'`` or ``None``, the
+         default system encoding is used.
+
+     Returns
+     -------
+     ndarray
+         NumPy array.
+
+     Examples
+     --------
+     First we create a file for the example.
+
+     >>> s1 = '1.0,2.0,3.0\n4.0,5.0,6.0\n'
+     >>> with open('example1.csv', 'w') as f:
+     ...     f.write(s1)
+     >>> a1 = read_from_filename('example1.csv')
+     >>> a1
+     array([[1., 2., 3.],
+            [4., 5., 6.]])
+
+     The second example has columns with different data types, so a
+     one-dimensional array with a structured data type is returned.
+     The tab character is used as the field delimiter.
+
+     >>> s2 = '1.0\t10\talpha\n2.3\t25\tbeta\n4.5\t16\tgamma\n'
+     >>> with open('example2.tsv', 'w') as f:
+     ...     f.write(s2)
+     >>> a2 = read_from_filename('example2.tsv', delimiter='\t')
+     >>> a2
+     array([(1. , 10, b'alpha'), (2.3, 25, b'beta'), (4.5, 16, b'gamma')],
+           dtype=[('f0', '<f8'), ('f1', 'u1'), ('f2', 'S5')])
+     """
+     # Handle special 'bytes' keyword for encoding
+     byte_converters = False
+     if encoding == 'bytes':
+         encoding = None
+         byte_converters = True
+
+     if dtype is None:
+         raise TypeError("a dtype must be provided.")
+     dtype = np.dtype(dtype)
+
+     read_dtype_via_object_chunks = None
+     if dtype.kind in 'SUM' and (
+             dtype == "S0" or dtype == "U0" or dtype == "M8" or dtype == 'm8'):
+         # This is a legacy "flexible" dtype. We do not truly support
+         # parametric dtypes currently (no dtype discovery step in the core),
+         # but have to support these for backward compatibility.
+         read_dtype_via_object_chunks = dtype
+         dtype = np.dtype(object)
+
+     if usecols is not None:
+         # Allow usecols to be a single int or a sequence of ints, the C-code
+         # handles the rest
+         try:
+             usecols = list(usecols)
+         except TypeError:
+             usecols = [usecols]
+
+     _ensure_ndmin_ndarray_check_param(ndmin)
+
+     if comment is None:
+         comments = None
+     else:
+         # assume comments are a sequence of strings
+         if "" in comment:
+             raise ValueError(
+                 "comments cannot be an empty string. Use comments=None to "
+                 "disable comments."
+             )
+         comments = tuple(comment)
+         comment = None
+         if len(comments) == 0:
+             comments = None  # No comments at all
+         elif len(comments) == 1:
+             # If there is only one comment, and that comment has one character,
+             # the normal parsing can deal with it just fine.
+             if isinstance(comments[0], str) and len(comments[0]) == 1:
+                 comment = comments[0]
+                 comments = None
+         else:
+             # Input validation if there are multiple comment characters
+             if delimiter in comments:
+                 raise TypeError(
+                     f"Comment characters '{comments}' cannot include the "
+                     f"delimiter '{delimiter}'"
+                 )
+
+     # comment is now either a 1 or 0 character string or a tuple:
+     if comments is not None:
+         # Note: An earlier version supported two-character comments (and could
+         # have been extended to multiple characters); we assume this is
+         # rare enough not to optimize for.
+         if quote is not None:
+             raise ValueError(
+                 "when multiple comments or a multi-character comment is "
+                 "given, quotes are not supported. In this case quotechar "
+                 "must be set to None.")
+
+     if len(imaginary_unit) != 1:
+         raise ValueError('len(imaginary_unit) must be 1.')
+
+     _check_nonneg_int(skiplines)
+     if max_rows is not None:
+         _check_nonneg_int(max_rows)
+     else:
+         # Passing -1 to the C code means "read the entire file".
+         max_rows = -1
+
+     fh_closing_ctx = contextlib.nullcontext()
+     filelike = False
+     try:
+         if isinstance(fname, os.PathLike):
+             fname = os.fspath(fname)
+         if isinstance(fname, str):
+             fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+             if encoding is None:
+                 encoding = getattr(fh, 'encoding', 'latin1')
+
+             fh_closing_ctx = contextlib.closing(fh)
+             data = fh
+             filelike = True
+         else:
+             if encoding is None:
+                 encoding = getattr(fname, 'encoding', 'latin1')
+             data = iter(fname)
+     except TypeError as e:
+         raise ValueError(
+             f"fname must be a string, filehandle, list of strings,\n"
+             f"or generator. Got {type(fname)} instead.") from e
+
+     with fh_closing_ctx:
+         if comments is not None:
+             if filelike:
+                 data = iter(data)
+                 filelike = False
+             data = _preprocess_comments(data, comments, encoding)
+
+         if read_dtype_via_object_chunks is None:
+             arr = _load_from_filelike(
+                 data, delimiter=delimiter, comment=comment, quote=quote,
+                 imaginary_unit=imaginary_unit,
+                 usecols=usecols, skiplines=skiplines, max_rows=max_rows,
+                 converters=converters, dtype=dtype,
+                 encoding=encoding, filelike=filelike,
+                 byte_converters=byte_converters)
+
+         else:
+             # This branch reads the file into chunks of object arrays and then
+             # casts them to the desired actual dtype. This ensures correct
+             # string-length and datetime-unit discovery (like `arr.astype()`).
+             # Due to chunking, certain error reports are less clear, currently.
+             if filelike:
+                 data = iter(data)  # cannot chunk when reading from file
+
+             c_byte_converters = False
+             if read_dtype_via_object_chunks == "S":
+                 c_byte_converters = True  # Use latin1 rather than ascii
+
+             chunks = []
+             while max_rows != 0:
+                 if max_rows < 0:
+                     chunk_size = _loadtxt_chunksize
+                 else:
+                     chunk_size = min(_loadtxt_chunksize, max_rows)
+
+                 next_arr = _load_from_filelike(
+                     data, delimiter=delimiter, comment=comment, quote=quote,
+                     imaginary_unit=imaginary_unit,
+                     usecols=usecols, skiplines=skiplines, max_rows=chunk_size,
+                     converters=converters, dtype=dtype,
+                     encoding=encoding, filelike=filelike,
+                     byte_converters=byte_converters,
+                     c_byte_converters=c_byte_converters)
+                 # Cast here already. We hope that this is better even for
+                 # large files because the storage is more compact. It could
+                 # be adapted (in principle the concatenate could cast).
+                 chunks.append(next_arr.astype(read_dtype_via_object_chunks))
+
+                 skiplines = 0  # Only have to skip for first chunk
+                 if max_rows >= 0:
+                     max_rows -= chunk_size
+                 if len(next_arr) < chunk_size:
+                     # There was less data than requested, so we are done.
+                     break
+
+             # Need at least one chunk, but if empty, the last one may have
+             # the wrong shape.
+             if len(chunks) > 1 and len(chunks[-1]) == 0:
+                 del chunks[-1]
+             if len(chunks) == 1:
+                 arr = chunks[0]
+             else:
+                 arr = np.concatenate(chunks, axis=0)
+
+     # NOTE: ndmin works as advertised for structured dtypes, but normally
+     #       these would return a 1D result plus the structured dimension,
+     #       so ndmin=2 adds a third dimension even when no squeezing occurs.
+     #       A `squeeze=False` could be a better solution (pandas uses squeeze).
+     arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin)
+
+     if arr.shape:
+         if arr.shape[0] == 0:
+             warnings.warn(
+                 f'loadtxt: input contained no data: "{fname}"',
+                 category=UserWarning,
+                 stacklevel=3
+             )
+
+     if unpack:
+         # Unpack structured dtypes if requested:
+         dt = arr.dtype
+         if dt.names is not None:
+             # For structured arrays, return an array for each field.
+             return [arr[field] for field in dt.names]
+         else:
+             return arr.T
+     else:
+         return arr
+
+
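+ # A small sketch (not part of this module) of the chunking arithmetic above:
+ # with ``max_rows=120000`` and a chunk size of 50000, the parametric-dtype
+ # path requests 50000, 50000 and then 20000 rows before concatenating::
+ #
+ #     >>> sizes, remaining = [], 120000
+ #     >>> while remaining > 0:
+ #     ...     n = min(50000, remaining)
+ #     ...     sizes.append(n)
+ #     ...     remaining -= n
+ #     >>> sizes
+ #     [50000, 50000, 20000]
+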
+ @set_array_function_like_doc
+ @set_module('numpy')
+ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
+             converters=None, skiprows=0, usecols=None, unpack=False,
+             ndmin=0, encoding='bytes', max_rows=None, *, quotechar=None,
+             like=None):
+     r"""
+     Load data from a text file.
+
+     Parameters
+     ----------
+     fname : file, str, pathlib.Path, list of str, generator
+         File, filename, list, or generator to read. If the filename
+         extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
+         that generators must return bytes or strings. The strings
+         in a list or produced by a generator are treated as lines.
+     dtype : data-type, optional
+         Data-type of the resulting array; default: float. If this is a
+         structured data-type, the resulting array will be 1-dimensional, and
+         each row will be interpreted as an element of the array. In this
+         case, the number of columns used must match the number of fields in
+         the data-type.
+     comments : str or sequence of str or None, optional
+         The characters or list of characters used to indicate the start of a
+         comment. None implies no comments. For backwards compatibility, byte
+         strings will be decoded as 'latin1'. The default is '#'.
+     delimiter : str, optional
+         The character used to separate the values. For backwards compatibility,
+         byte strings will be decoded as 'latin1'. The default is whitespace.
+
+         .. versionchanged:: 1.23.0
+            Only single character delimiters are supported. Newline characters
+            cannot be used as the delimiter.
+
+     converters : dict or callable, optional
+         Converter functions to customize value parsing. If `converters` is
+         callable, the function is applied to all columns, else it must be a
+         dict that maps column number to a parser function.
+         See examples for further details.
+         Default: None.
+
+         .. versionchanged:: 1.23.0
+            The ability to pass a single callable to be applied to all columns
+            was added.
+
+     skiprows : int, optional
+         Skip the first `skiprows` lines, including comments; default: 0.
+     usecols : int or sequence, optional
+         Which columns to read, with 0 being the first. For example,
+         ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
+         The default, None, results in all columns being read.
+
+         .. versionchanged:: 1.11.0
+             When a single column has to be read it is possible to use
+             an integer instead of a tuple. E.g ``usecols = 3`` reads the
+             fourth column the same way as ``usecols = (3,)`` would.
+     unpack : bool, optional
+         If True, the returned array is transposed, so that arguments may be
+         unpacked using ``x, y, z = loadtxt(...)``. When used with a
+         structured data-type, arrays are returned for each field.
+         Default is False.
+     ndmin : int, optional
+         The returned array will have at least `ndmin` dimensions.
+         Otherwise mono-dimensional axes will be squeezed.
+         Legal values: 0 (default), 1 or 2.
+
+         .. versionadded:: 1.6.0
+     encoding : str, optional
+         Encoding used to decode the inputfile. Does not apply to input streams.
+         The special value 'bytes' enables backward compatibility workarounds
+         that ensure you receive byte arrays as results if possible and passes
+         'latin1' encoded strings to converters. Override this value to receive
+         unicode arrays and pass strings as input to converters. If set to None
+         the system default is used. The default value is 'bytes'.
+
+         .. versionadded:: 1.14.0
+     max_rows : int, optional
+         Read `max_rows` rows of content after `skiprows` lines. The default is
+         to read all the rows. Note that empty rows containing no data such as
+         empty lines and comment lines are not counted towards `max_rows`,
+         while such lines are counted in `skiprows`.
+
+         .. versionadded:: 1.16.0
+
+         .. versionchanged:: 1.23.0
+             Lines containing no data, including comment lines (e.g., lines
+             starting with '#' or as specified via `comments`) are not counted
+             towards `max_rows`.
+     quotechar : unicode character or None, optional
+         The character used to denote the start and end of a quoted item.
+         Occurrences of the delimiter or comment characters are ignored within
+         a quoted item. The default value is ``quotechar=None``, which means
+         quoting support is disabled.
+
+         If two consecutive instances of `quotechar` are found within a quoted
+         field, the first is treated as an escape character. See examples.
+
+         .. versionadded:: 1.23.0
+     ${ARRAY_FUNCTION_LIKE}
+
+         .. versionadded:: 1.20.0
+
+     Returns
+     -------
+     out : ndarray
+         Data read from the text file.
+
+     See Also
+     --------
+     load, fromstring, fromregex
+     genfromtxt : Load data with missing values handled as specified.
+     scipy.io.loadmat : reads MATLAB data files
+
+     Notes
+     -----
+     This function aims to be a fast reader for simply formatted files. The
+     `genfromtxt` function provides more sophisticated handling of, e.g.,
+     lines with missing values.
+
+     Each row in the input text file must have the same number of values to be
+     able to read all values. If all rows do not have the same number of
+     values, a subset of up to n columns (where n is the least number of values
+     present in all rows) can be read by specifying the columns via `usecols`.
+
+     .. versionadded:: 1.10.0
+
+     The strings produced by the Python float.hex method can be used as
+     input for floats.
+
+     Examples
+     --------
+     >>> from io import StringIO  # StringIO behaves like a file object
+     >>> c = StringIO("0 1\n2 3")
+     >>> np.loadtxt(c)
+     array([[0., 1.],
+            [2., 3.]])
+
+     >>> d = StringIO("M 21 72\nF 35 58")
+     >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
+     ...                      'formats': ('S1', 'i4', 'f4')})
+     array([(b'M', 21, 72.), (b'F', 35, 58.)],
+           dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
+
+     >>> c = StringIO("1,0,2\n3,0,4")
+     >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
+     >>> x
+     array([1., 3.])
+     >>> y
+     array([2., 4.])
+
+     The `converters` argument is used to specify functions to preprocess the
+     text prior to parsing. `converters` can be a dictionary that maps
+     preprocessing functions to each column:
+
+     >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
+     >>> conv = {
+     ...     0: lambda x: np.floor(float(x)),  # conversion fn for column 0
+     ...     1: lambda x: np.ceil(float(x)),   # conversion fn for column 1
+     ... }
+     >>> np.loadtxt(s, delimiter=",", converters=conv)
+     array([[1., 3.],
+            [3., 5.]])
+
+     `converters` can be a callable instead of a dictionary, in which case it
+     is applied to all columns:
+
+     >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
+     >>> import functools
+     >>> conv = functools.partial(int, base=16)
+     >>> np.loadtxt(s, converters=conv)
+     array([[222., 173.],
+            [192., 222.]])
+
+     This example shows how `converters` can be used to convert a field
+     with a trailing minus sign into a negative number.
+
+     >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
+     >>> def conv(fld):
+     ...     return -float(fld[:-1]) if fld.endswith(b'-') else float(fld)
+     ...
+     >>> np.loadtxt(s, converters=conv)
+     array([[ 10.01, -31.25],
+            [ 19.22,  64.31],
+            [-17.57,  63.94]])
+
+     Using a callable as the converter can be particularly useful for handling
+     values with different formatting, e.g. floats with underscores:
+
+     >>> s = StringIO("1 2.7 100_000")
+     >>> np.loadtxt(s, converters=float)
+     array([1.e+00, 2.7e+00, 1.e+05])
+
+     This idea can be extended to automatically handle values specified in
+     many different formats:
+
+     >>> def conv(val):
+     ...     try:
+     ...         return float(val)
+     ...     except ValueError:
+     ...         return float.fromhex(val)
+     >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
+     >>> np.loadtxt(s, delimiter=",", converters=conv, encoding=None)
+     array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])
+
+     Note that with the default ``encoding="bytes"``, the inputs to the
+     converter function are latin-1 encoded byte strings. To deactivate the
+     implicit encoding prior to conversion, use ``encoding=None``
+
+     >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
+     >>> conv = lambda x: -float(x[:-1]) if x.endswith('-') else float(x)
+     >>> np.loadtxt(s, converters=conv, encoding=None)
+     array([[ 10.01, -31.25],
+            [ 19.22,  64.31],
+            [-17.57,  63.94]])
+
+     Support for quoted fields is enabled with the `quotechar` parameter.
+     Comment and delimiter characters are ignored when they appear within a
+     quoted item delineated by `quotechar`:
+
+     >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
+     >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+     >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
+     array([('alpha, #42', 10.), ('beta, #64', 2.)],
+           dtype=[('label', '<U12'), ('value', '<f8')])
+
+     Quoted fields can be separated by multiple whitespace characters:
+
+     >>> s = StringIO('"alpha, #42"       10.0\n"beta, #64" 2.0\n')
+     >>> dtype = np.dtype([("label", "U12"), ("value", float)])
+     >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
+     array([('alpha, #42', 10.), ('beta, #64', 2.)],
+           dtype=[('label', '<U12'), ('value', '<f8')])
+
+     Two consecutive quote characters within a quoted field are treated as a
+     single escaped character:
+
+     >>> s = StringIO('"Hello, my name is ""Monty""!"')
+     >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
+     array('Hello, my name is "Monty"!', dtype='<U26')
+
+     Read subset of columns when all rows do not contain equal number of values:
+
+     >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
+     >>> np.loadtxt(d, usecols=(0, 1))
+     array([[ 1.,  2.],
+            [ 2.,  4.],
+            [ 3.,  9.],
+            [ 4., 16.]])
+
+     """
+
+     if like is not None:
+         return _loadtxt_with_like(
+             like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
+             converters=converters, skiprows=skiprows, usecols=usecols,
+             unpack=unpack, ndmin=ndmin, encoding=encoding,
+             max_rows=max_rows
+         )
+
+     if dtype is None:
+         dtype = np.float64
+
+     comment = comments
+     # Control character type conversions for Py3 convenience
+     if comment is not None:
+         if isinstance(comment, (str, bytes)):
+             comment = [comment]
+         comment = [
+             x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
+     if isinstance(delimiter, bytes):
+         delimiter = delimiter.decode('latin1')
+
+     arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
+                 converters=converters, skiplines=skiprows, usecols=usecols,
+                 unpack=unpack, ndmin=ndmin, encoding=encoding,
+                 max_rows=max_rows, quote=quotechar)
+
+     return arr
+
+
+ _loadtxt_with_like = array_function_dispatch()(loadtxt)
+
+
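+ # A brief illustration (not part of this module) of the latin1 control-
+ # character decoding above: byte-string markers behave like their str
+ # counterparts::
+ #
+ #     >>> from io import StringIO
+ #     >>> np.loadtxt(StringIO("# header\n1 2"), comments=b'#')
+ #     array([1., 2.])
+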
+ def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
+                         header=None, footer=None, comments=None,
+                         encoding=None):
+     return (X,)
+
+
+ @array_function_dispatch(_savetxt_dispatcher)
+ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
+             footer='', comments='# ', encoding=None):
+     """
+     Save an array to a text file.
+
+     Parameters
+     ----------
+     fname : filename or file handle
+         If the filename ends in ``.gz``, the file is automatically saved in
+         compressed gzip format. `loadtxt` understands gzipped files
+         transparently.
+     X : 1D or 2D array_like
+         Data to be saved to a text file.
+     fmt : str or sequence of strs, optional
+         A single format (%10.5f), a sequence of formats, or a
+         multi-format string, e.g. 'Iteration %d -- %10.5f', in which
+         case `delimiter` is ignored. For complex `X`, the legal options
+         for `fmt` are:
+
+         * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
+           like `' (%s+%sj)' % (fmt, fmt)`
+         * a full string specifying every real and imaginary part, e.g.
+           `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
+         * a list of specifiers, one per column - in this case, the real
+           and imaginary part must have separate specifiers,
+           e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
+     delimiter : str, optional
+         String or character separating columns.
+     newline : str, optional
+         String or character separating lines.
+
+         .. versionadded:: 1.5.0
+     header : str, optional
+         String that will be written at the beginning of the file.
+
+         .. versionadded:: 1.7.0
+     footer : str, optional
+         String that will be written at the end of the file.
+
+         .. versionadded:: 1.7.0
+     comments : str, optional
+         String that will be prepended to the ``header`` and ``footer`` strings,
+         to mark them as comments. Default: '# ', as expected by e.g.
+         ``numpy.loadtxt``.
+
+         .. versionadded:: 1.7.0
+     encoding : {None, str}, optional
+         Encoding used to encode the outputfile. Does not apply to output
+         streams. If the encoding is something other than 'bytes' or 'latin1'
+         you will not be able to load the file in NumPy versions < 1.14. Default
+         is 'latin1'.
+
+         .. versionadded:: 1.14.0
+
+     See Also
+     --------
+     save : Save an array to a binary file in NumPy ``.npy`` format
+     savez : Save several arrays into an uncompressed ``.npz`` archive
+     savez_compressed : Save several arrays into a compressed ``.npz`` archive
+
+     Notes
+     -----
+     Further explanation of the `fmt` parameter
+     (``%[flag]width[.precision]specifier``):
+
+     flags:
+         ``-`` : left justify
+
+         ``+`` : Forces to precede result with + or -.
+
+         ``0`` : Left pad the number with zeros instead of space (see width).
+
+     width:
+         Minimum number of characters to be printed. The value is not truncated
+         if it has more characters.
+
+     precision:
+         - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
+           digits.
+         - For ``e, E`` and ``f`` specifiers, the number of digits to print
+           after the decimal point.
+         - For ``g`` and ``G``, the maximum number of significant digits.
+         - For ``s``, the maximum number of characters.
+
+     specifiers:
+         ``c`` : character
+
+         ``d`` or ``i`` : signed decimal integer
+
+         ``e`` or ``E`` : scientific notation with ``e`` or ``E``.
+
+         ``f`` : decimal floating point
+
+         ``g,G`` : use the shorter of ``e,E`` or ``f``
+
+         ``o`` : signed octal
+
+         ``s`` : string of characters
+
+         ``u`` : unsigned decimal integer
+
+         ``x,X`` : unsigned hexadecimal integer
+
+     This explanation of ``fmt`` is not complete, for an exhaustive
+     specification see [1]_.
+
+     References
+     ----------
+     .. [1] `Format Specification Mini-Language
+            <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
+            Python Documentation.
+
+     Examples
+     --------
+     >>> x = y = z = np.arange(0.0, 5.0, 1.0)
+     >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
+     >>> np.savetxt('test.out', (x, y, z))   # x,y,z equal sized 1D arrays
+     >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation
+
+     """
+
+     # Py3 conversions first
+     if isinstance(fmt, bytes):
+         fmt = asstr(fmt)
+     delimiter = asstr(delimiter)
+
+     class WriteWrap:
+         """Convert to bytes on bytestream inputs.
+
+         """
+         def __init__(self, fh, encoding):
+             self.fh = fh
+             self.encoding = encoding
+             self.do_write = self.first_write
+
+         def close(self):
+             self.fh.close()
+
+         def write(self, v):
+             self.do_write(v)
+
+         def write_bytes(self, v):
+             if isinstance(v, bytes):
+                 self.fh.write(v)
+             else:
+                 self.fh.write(v.encode(self.encoding))
+
+         def write_normal(self, v):
+             self.fh.write(asunicode(v))
+
+         def first_write(self, v):
+             try:
+                 self.write_normal(v)
+                 self.write = self.write_normal
+             except TypeError:
+                 # input is probably a bytestream
+                 self.write_bytes(v)
+                 self.write = self.write_bytes
+
+     own_fh = False
+     if isinstance(fname, os_PathLike):
+         fname = os_fspath(fname)
+     if _is_string_like(fname):
+         # datasource doesn't support creating a new file ...
+         open(fname, 'wt').close()
+         fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
+         own_fh = True
+     elif hasattr(fname, 'write'):
+         # wrap to handle byte output streams
+         fh = WriteWrap(fname, encoding or 'latin1')
+     else:
+         raise ValueError('fname must be a string or file handle')
+
+     try:
+         X = np.asarray(X)
+
+         # Handle 1-dimensional arrays
+         if X.ndim == 0 or X.ndim > 2:
+             raise ValueError(
+                 "Expected 1D or 2D array, got %dD array instead" % X.ndim)
+         elif X.ndim == 1:
+             # Common case -- 1d array of numbers
+             if X.dtype.names is None:
+                 X = np.atleast_2d(X).T
+                 ncol = 1
+
+             # Complex dtype -- each field indicates a separate column
+             else:
+                 ncol = len(X.dtype.names)
+         else:
+             ncol = X.shape[1]
+
+         iscomplex_X = np.iscomplexobj(X)
+         # `fmt` can be a string with multiple insertion points or a
+         # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
+         if type(fmt) in (list, tuple):
+             if len(fmt) != ncol:
+                 raise AttributeError('fmt has wrong shape.  %s' % str(fmt))
+             format = asstr(delimiter).join(map(asstr, fmt))
+         elif isinstance(fmt, str):
+             n_fmt_chars = fmt.count('%')
+             error = ValueError('fmt has wrong number of %% formats:  %s' % fmt)
+             if n_fmt_chars == 1:
+                 if iscomplex_X:
+                     fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
+                 else:
+                     fmt = [fmt, ] * ncol
+                 format = delimiter.join(fmt)
+             elif iscomplex_X and n_fmt_chars != (2 * ncol):
+                 raise error
+             elif ((not iscomplex_X) and n_fmt_chars != ncol):
+                 raise error
+             else:
+                 format = fmt
+         else:
+             raise ValueError('invalid fmt: %r' % (fmt,))
+
+         if len(header) > 0:
+             header = header.replace('\n', '\n' + comments)
+             fh.write(comments + header + newline)
+         if iscomplex_X:
+             for row in X:
+                 row2 = []
+                 for number in row:
+                     row2.append(number.real)
+                     row2.append(number.imag)
+                 s = format % tuple(row2) + newline
+                 fh.write(s.replace('+-', '-'))
+         else:
+             for row in X:
+                 try:
+                     v = format % tuple(row) + newline
+                 except TypeError as e:
+                     raise TypeError("Mismatch between array dtype ('%s') and "
+                                     "format specifier ('%s')"
+                                     % (str(X.dtype), format)) from e
+                 fh.write(v)
+
+         if len(footer) > 0:
+             footer = footer.replace('\n', '\n' + comments)
+             fh.write(comments + footer + newline)
+     finally:
+         if own_fh:
+             fh.close()
+
+
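+ # A tiny sketch (not part of this module) of the fmt-expansion rule above:
+ # one '%'-specifier is replicated per column and joined with the delimiter::
+ #
+ #     >>> ' '.join(['%.4e'] * 3) % (1.0, 2.0, 3.0)
+ #     '1.0000e+00 2.0000e+00 3.0000e+00'
+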
+ @set_module('numpy')
+ def fromregex(file, regexp, dtype, encoding=None):
+     r"""
+     Construct an array from a text file, using regular expression parsing.
+
+     The returned array is always a structured array, and is constructed from
+     all matches of the regular expression in the file. Groups in the regular
+     expression are converted to fields of the structured array.
+
+     Parameters
+     ----------
+     file : path or file
+         Filename or file object to read.
+
+         .. versionchanged:: 1.22.0
+             Now accepts `os.PathLike` implementations.
+     regexp : str or regexp
+         Regular expression used to parse the file.
+         Groups in the regular expression correspond to fields in the dtype.
+     dtype : dtype or list of dtypes
+         Dtype for the structured array; must be a structured datatype.
+     encoding : str, optional
+         Encoding used to decode the inputfile. Does not apply to input streams.
+
+         .. versionadded:: 1.14.0
+
+     Returns
+     -------
+     output : ndarray
+         The output array, containing the part of the content of `file` that
+         was matched by `regexp`. `output` is always a structured array.
+
+     Raises
+     ------
+     TypeError
+         When `dtype` is not a valid dtype for a structured array.
+
+     See Also
+     --------
+     fromstring, loadtxt
+
+     Notes
+     -----
+     Dtypes for structured arrays can be specified in several forms, but all
+     forms specify at least the data type and field name. For details see
+     `basics.rec`.
+
+     Examples
+     --------
+     >>> from io import StringIO
+     >>> text = StringIO("1312 foo\n1534 bar\n444 qux")
+
+     >>> regexp = r"(\d+)\s+(...)"  # match [digits, whitespace, anything]
+     >>> output = np.fromregex(text, regexp,
+     ...                       [('num', np.int64), ('key', 'S3')])
+     >>> output
+     array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
+           dtype=[('num', '<i8'), ('key', 'S3')])
+     >>> output['num']
+     array([1312, 1534,  444])
+
+     """
+     own_fh = False
+     if not hasattr(file, "read"):
+         file = os.fspath(file)
+         file = np.lib._datasource.open(file, 'rt', encoding=encoding)
+         own_fh = True
+
+     try:
+         if not isinstance(dtype, np.dtype):
+             dtype = np.dtype(dtype)
+         if dtype.names is None:
+             raise TypeError('dtype must be a structured datatype.')
+
+         content = file.read()
+         if isinstance(content, bytes) and isinstance(regexp, str):
+             regexp = asbytes(regexp)
+         elif isinstance(content, str) and isinstance(regexp, bytes):
+             regexp = asstr(regexp)
+
+         if not hasattr(regexp, 'match'):
+             regexp = re.compile(regexp)
+         seq = regexp.findall(content)
+         if seq and not isinstance(seq[0], tuple):
+             # Only one group is in the regexp.
+             # Create the new array as a single data-type and then
+             # re-interpret it as a single-field structured array.
+             newdtype = np.dtype(dtype[dtype.names[0]])
+             output = np.array(seq, dtype=newdtype)
+             output.dtype = dtype
+         else:
+             output = np.array(seq, dtype=dtype)
+
+         return output
+     finally:
+         if own_fh:
+             file.close()
+
+
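+ # A compact sketch (not part of this module) of the single-group path above:
+ # matches form a plain array whose dtype is then reinterpreted in place::
+ #
+ #     >>> out = np.array(re.findall(r"(\d+)", "a 12 b 34"), dtype=np.int64)
+ #     >>> out.dtype = np.dtype([('num', np.int64)])
+ #     >>> out
+ #     array([(12,), (34,)], dtype=[('num', '<i8')])
+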
+
1737
+ #####--------------------------------------------------------------------------
1738
+ #---- --- ASCII functions ---
1739
+ #####--------------------------------------------------------------------------
1740
+
1741
+
1742
+ @set_array_function_like_doc
1743
+ @set_module('numpy')
1744
+ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
1745
+ skip_header=0, skip_footer=0, converters=None,
1746
+ missing_values=None, filling_values=None, usecols=None,
1747
+ names=None, excludelist=None,
1748
+ deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
1749
+ replace_space='_', autostrip=False, case_sensitive=True,
1750
+ defaultfmt="f%i", unpack=None, usemask=False, loose=True,
1751
+ invalid_raise=True, max_rows=None, encoding='bytes',
1752
+ *, ndmin=0, like=None):
1753
+ """
1754
+ Load data from a text file, with missing values handled as specified.
1755
+
1756
+ Each line past the first `skip_header` lines is split at the `delimiter`
1757
+ character, and characters following the `comments` character are discarded.
1758
+
1759
+ Parameters
1760
+ ----------
1761
+ fname : file, str, pathlib.Path, list of str, generator
1762
+ File, filename, list, or generator to read. If the filename
1763
+ extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
1764
+ that generators must return bytes or strings. The strings
1765
+ in a list or produced by a generator are treated as lines.
1766
+ dtype : dtype, optional
1767
+ Data type of the resulting array.
1768
+ If None, the dtypes will be determined by the contents of each
1769
+ column, individually.
1770
+ comments : str, optional
1771
+ The character used to indicate the start of a comment.
1772
+ All the characters occurring on a line after a comment are discarded.
1773
+ delimiter : str, int, or sequence, optional
1774
+ The string used to separate values. By default, any consecutive
1775
+ whitespaces act as delimiter. An integer or sequence of integers
1776
+ can also be provided as width(s) of each field.
1777
+ skiprows : int, optional
1778
+ `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
1779
+ skip_header : int, optional
1780
+ The number of lines to skip at the beginning of the file.
1781
+ skip_footer : int, optional
1782
+ The number of lines to skip at the end of the file.
1783
+ converters : variable, optional
1784
+ The set of functions that convert the data of a column to a value.
1785
+ The converters can also be used to provide a default value
1786
+ for missing data: ``converters = {3: lambda s: float(s or 0)}``.
1787
+ missing : variable, optional
1788
+ `missing` was removed in numpy 1.10. Please use `missing_values`
1789
+ instead.
1790
+ missing_values : variable, optional
1791
+ The set of strings corresponding to missing data.
1792
+ filling_values : variable, optional
1793
+ The set of values to be used as default when the data are missing.
1794
+ usecols : sequence, optional
1795
+ Which columns to read, with 0 being the first. For example,
1796
+ ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
1797
+ names : {None, True, str, sequence}, optional
1798
+ If `names` is True, the field names are read from the first line after
1799
+ the first `skip_header` lines. This line can optionally be preceded
1800
+ by a comment delimiter. If `names` is a sequence or a single-string of
1801
+ comma-separated names, the names will be used to define the field names
1802
+ in a structured dtype. If `names` is None, the names of the dtype
1803
+ fields will be used, if any.
1804
+ excludelist : sequence, optional
1805
+ A list of names to exclude. This list is appended to the default list
1806
+ ['return','file','print']. Excluded names are appended with an
1807
+ underscore: for example, `file` would become `file_`.
1808
+ deletechars : str, optional
1809
+ A string combining invalid characters that must be deleted from the
1810
+ names.
1811
+ defaultfmt : str, optional
1812
+ A format used to define default field names, such as "f%i" or "f_%02i".
1813
+ autostrip : bool, optional
1814
+ Whether to automatically strip white spaces from the variables.
1815
+ replace_space : char, optional
1816
+ Character(s) used in replacement of white spaces in the variable
1817
+ names. By default, use a '_'.
1818
+ case_sensitive : {True, False, 'upper', 'lower'}, optional
1819
+ If True, field names are case sensitive.
1820
+ If False or 'upper', field names are converted to upper case.
1821
+ If 'lower', field names are converted to lower case.
1822
+ unpack : bool, optional
1823
+ If True, the returned array is transposed, so that arguments may be
1824
+ unpacked using ``x, y, z = genfromtxt(...)``. When used with a
1825
+ structured data-type, arrays are returned for each field.
1826
+ Default is False.
1827
+ usemask : bool, optional
1828
+ If True, return a masked array.
1829
+ If False, return a regular array.
1830
+ loose : bool, optional
1831
+ If True, do not raise errors for invalid values.
1832
+ invalid_raise : bool, optional
1833
+ If True, an exception is raised if an inconsistency is detected in the
1834
+ number of columns.
1835
+ If False, a warning is emitted and the offending lines are skipped.
1836
+ max_rows : int, optional
1837
+ The maximum number of rows to read. Must not be used with skip_footer
1838
+ at the same time. If given, the value must be at least 1. Default is
1839
+ to read the entire file.
1840
+
1841
+ .. versionadded:: 1.10.0
1842
+ encoding : str, optional
1843
+ Encoding used to decode the inputfile. Does not apply when `fname` is
1844
+ a file object. The special value 'bytes' enables backward compatibility
1845
+ workarounds that ensure that you receive byte arrays when possible
1846
+ and passes latin1 encoded strings to converters. Override this value to
1847
+ receive unicode arrays and pass strings as input to converters. If set
1848
+ to None the system default is used. The default value is 'bytes'.
1849
+
1850
+ .. versionadded:: 1.14.0
1851
+ ndmin : int, optional
1852
+ Same parameter as `loadtxt`
1853
+
1854
+ .. versionadded:: 1.23.0
1855
+ ${ARRAY_FUNCTION_LIKE}
1856
+
1857
+ .. versionadded:: 1.20.0
1858
+
1859
+ Returns
1860
+ -------
1861
+ out : ndarray
1862
+ Data read from the text file. If `usemask` is True, this is a
1863
+ masked array.
1864
+
1865
+ See Also
1866
+ --------
1867
+ numpy.loadtxt : equivalent function when no data is missing.
1868
+
1869
+ Notes
1870
+ -----
1871
+ * When spaces are used as delimiters, or when no delimiter has been given
1872
+ as input, there should not be any missing data between two fields.
1873
+ * When the variables are named (either by a flexible dtype or with `names`),
1874
+ there must not be any header in the file (else a ValueError
1875
+ exception is raised).
1876
+ * Individual values are not stripped of spaces by default.
1877
+ When using a custom converter, make sure the function does remove spaces.
1878
+
1879
+ References
1880
+ ----------
1881
+ .. [1] NumPy User Guide, section `I/O with NumPy
1882
+ <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
1883
+
1884
+ Examples
1885
+ --------
1886
+ >>> from io import StringIO
1887
+ >>> import numpy as np
1888
+
1889
+ Comma delimited file with mixed dtype
1890
+
1891
+ >>> s = StringIO(u"1,1.3,abcde")
1892
+ >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
1893
+ ... ('mystring','S5')], delimiter=",")
1894
+ >>> data
1895
+ array((1, 1.3, b'abcde'),
1896
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
1897
+
1898
+ Using dtype = None
1899
+
1900
+ >>> _ = s.seek(0) # needed for StringIO example only
1901
+ >>> data = np.genfromtxt(s, dtype=None,
1902
+ ... names = ['myint','myfloat','mystring'], delimiter=",")
1903
+ >>> data
1904
+ array((1, 1.3, b'abcde'),
1905
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
1906
+
1907
+ Specifying dtype and names
1908
+
1909
+ >>> _ = s.seek(0)
1910
+ >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
1911
+ ... names=['myint','myfloat','mystring'], delimiter=",")
1912
+ >>> data
1913
+ array((1, 1.3, b'abcde'),
1914
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
1915
+
1916
+ An example with fixed-width columns
1917
+
1918
+ >>> s = StringIO(u"11.3abcde")
1919
+ >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
1920
+ ... delimiter=[1,3,5])
1921
+ >>> data
1922
+ array((1, 1.3, b'abcde'),
1923
+ dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
1924
+
1925
+ An example to show comments
1926
+
1927
+ >>> f = StringIO('''
1928
+ ... text,# of chars
1929
+ ... hello world,11
1930
+ ... numpy,5''')
1931
+ >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
1932
+ array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
1933
+ dtype=[('f0', 'S12'), ('f1', 'S12')])
1934
+
1935
+ """
1936
+
1937
+ if like is not None:
1938
+ return _genfromtxt_with_like(
1939
+ like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
1940
+ skip_header=skip_header, skip_footer=skip_footer,
1941
+ converters=converters, missing_values=missing_values,
1942
+ filling_values=filling_values, usecols=usecols, names=names,
1943
+ excludelist=excludelist, deletechars=deletechars,
1944
+ replace_space=replace_space, autostrip=autostrip,
1945
+ case_sensitive=case_sensitive, defaultfmt=defaultfmt,
1946
+ unpack=unpack, usemask=usemask, loose=loose,
1947
+ invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
1948
+ ndmin=ndmin,
1949
+ )
1950
+
1951
+ _ensure_ndmin_ndarray_check_param(ndmin)
1952
+
1953
+ if max_rows is not None:
1954
+ if skip_footer:
1955
+ raise ValueError(
1956
+ "The keywords 'skip_footer' and 'max_rows' can not be "
1957
+ "specified at the same time.")
1958
+ if max_rows < 1:
1959
+ raise ValueError("'max_rows' must be at least 1.")
1960
+
1961
+ if usemask:
1962
+ from numpy.ma import MaskedArray, make_mask_descr
1963
+ # Check the input dictionary of converters
1964
+ user_converters = converters or {}
1965
+ if not isinstance(user_converters, dict):
1966
+ raise TypeError(
1967
+ "The input argument 'converter' should be a valid dictionary "
1968
+ "(got '%s' instead)" % type(user_converters))
1969
+
1970
+ if encoding == 'bytes':
1971
+ encoding = None
1972
+ byte_converters = True
1973
+ else:
1974
+ byte_converters = False
1975
+
1976
+ # Initialize the filehandle, the LineSplitter and the NameValidator
1977
+ if isinstance(fname, os_PathLike):
1978
+ fname = os_fspath(fname)
1979
+ if isinstance(fname, str):
1980
+ fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
1981
+ fid_ctx = contextlib.closing(fid)
1982
+ else:
1983
+ fid = fname
1984
+ fid_ctx = contextlib.nullcontext(fid)
1985
+ try:
1986
+ fhd = iter(fid)
1987
+ except TypeError as e:
1988
+ raise TypeError(
1989
+ "fname must be a string, a filehandle, a sequence of strings,\n"
1990
+ f"or an iterator of strings. Got {type(fname)} instead."
1991
+ ) from e
1992
+ with fid_ctx:
1993
+ split_line = LineSplitter(delimiter=delimiter, comments=comments,
1994
+ autostrip=autostrip, encoding=encoding)
1995
+ validate_names = NameValidator(excludelist=excludelist,
1996
+ deletechars=deletechars,
1997
+ case_sensitive=case_sensitive,
1998
+ replace_space=replace_space)
1999
+
2000
+ # Skip the first `skip_header` rows
2001
+ try:
2002
+ for i in range(skip_header):
2003
+ next(fhd)
2004
+
2005
+ # Keep on until we find the first valid values
2006
+ first_values = None
2007
+
2008
+ while not first_values:
2009
+ first_line = _decode_line(next(fhd), encoding)
2010
+ if (names is True) and (comments is not None):
2011
+ if comments in first_line:
2012
+ first_line = (
2013
+ ''.join(first_line.split(comments)[1:]))
2014
+ first_values = split_line(first_line)
2015
+ except StopIteration:
2016
+ # return an empty array if the datafile is empty
2017
+ first_line = ''
2018
+ first_values = []
2019
+ warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
2020
+
2021
+ # Should we take the first values as names?
2022
+ if names is True:
2023
+ fval = first_values[0].strip()
2024
+ if comments is not None:
2025
+ if fval in comments:
2026
+ del first_values[0]
2027
+
2028
+ # Check the columns to use: make sure `usecols` is a list
2029
+ if usecols is not None:
2030
+ try:
2031
+ usecols = [_.strip() for _ in usecols.split(",")]
2032
+ except AttributeError:
2033
+ try:
2034
+ usecols = list(usecols)
2035
+ except TypeError:
2036
+ usecols = [usecols, ]
2037
+ nbcols = len(usecols or first_values)
2038
+
2039
+ # Check the names and overwrite the dtype.names if needed
2040
+ if names is True:
2041
+ names = validate_names([str(_.strip()) for _ in first_values])
2042
+ first_line = ''
2043
+ elif _is_string_like(names):
2044
+ names = validate_names([_.strip() for _ in names.split(',')])
2045
+ elif names:
2046
+ names = validate_names(names)
2047
+ # Get the dtype
2048
+ if dtype is not None:
2049
+ dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
2050
+ excludelist=excludelist,
2051
+ deletechars=deletechars,
2052
+ case_sensitive=case_sensitive,
2053
+ replace_space=replace_space)
2054
+ # Make sure names is a list (for Python 2.5 compatibility)
2055
+ if names is not None:
2056
+ names = list(names)
2057
+
2058
+ if usecols:
2059
+ for (i, current) in enumerate(usecols):
2060
+ # if usecols is a list of names, convert to a list of indices
2061
+ if _is_string_like(current):
2062
+ usecols[i] = names.index(current)
2063
+ elif current < 0:
2064
+ usecols[i] = current + len(first_values)
2065
+ # If the dtype is not None, make sure we update it
2066
+ if (dtype is not None) and (len(dtype) > nbcols):
2067
+ descr = dtype.descr
2068
+ dtype = np.dtype([descr[_] for _ in usecols])
2069
+ names = list(dtype.names)
2070
+ # If `names` is not None, update the names
2071
+ elif (names is not None) and (len(names) > nbcols):
2072
+ names = [names[_] for _ in usecols]
2073
+ elif (names is not None) and (dtype is not None):
2074
+ names = list(dtype.names)
2075
+
2076
+ # Process the missing values ...............................
2077
+ # Rename missing_values for convenience
2078
+ user_missing_values = missing_values or ()
2079
+ if isinstance(user_missing_values, bytes):
2080
+ user_missing_values = user_missing_values.decode('latin1')
2081
+
2082
+ # Define the list of missing_values (one column: one list)
2083
+ missing_values = [list(['']) for _ in range(nbcols)]
2084
+
2085
+ # We have a dictionary: process it field by field
2086
+ if isinstance(user_missing_values, dict):
2087
+ # Loop on the items
2088
+ for (key, val) in user_missing_values.items():
2089
+ # Is the key a string ?
2090
+ if _is_string_like(key):
2091
+ try:
2092
+ # Transform it into an integer
2093
+ key = names.index(key)
2094
+ except ValueError:
2095
+ # We couldn't find it: the name must have been dropped
2096
+ continue
2097
+ # Redefine the key as needed if it's a column number
2098
+ if usecols:
2099
+ try:
2100
+ key = usecols.index(key)
2101
+ except ValueError:
2102
+ pass
2103
+ # Transform the value into a list of strings
2104
+ if isinstance(val, (list, tuple)):
2105
+ val = [str(_) for _ in val]
2106
+ else:
2107
+ val = [str(val), ]
2108
+ # Add the value(s) to the current list of missing
2109
+ if key is None:
2110
+ # None acts as default
2111
+ for miss in missing_values:
2112
+ miss.extend(val)
2113
+ else:
2114
+ missing_values[key].extend(val)
2115
+ # We have a sequence: each item matches a column
2116
+ elif isinstance(user_missing_values, (list, tuple)):
2117
+ for (value, entry) in zip(user_missing_values, missing_values):
2118
+ value = str(value)
2119
+ if value not in entry:
2120
+ entry.append(value)
2121
+ # We have a string : apply it to all entries
2122
+ elif isinstance(user_missing_values, str):
2123
+ user_value = user_missing_values.split(",")
2124
+ for entry in missing_values:
2125
+ entry.extend(user_value)
2126
+ # We have something else: apply it to all entries
2127
+ else:
2128
+ for entry in missing_values:
2129
+ entry.extend([str(user_missing_values)])
2130
+
2131
+ # Process the filling_values ...............................
2132
+ # Rename the input for convenience
2133
+ user_filling_values = filling_values
2134
+ if user_filling_values is None:
2135
+ user_filling_values = []
2136
+ # Define the default
2137
+ filling_values = [None] * nbcols
2138
+ # We have a dictionary: update each entry individually
2139
+ if isinstance(user_filling_values, dict):
2140
+ for (key, val) in user_filling_values.items():
2141
+ if _is_string_like(key):
2142
+ try:
2143
+ # Transform it into an integer
2144
+ key = names.index(key)
2145
+ except ValueError:
2146
+ # We couldn't find it: the name must have been dropped,
2147
+ continue
2148
+ # Redefine the key if it's a column number and usecols is defined
2149
+ if usecols:
2150
+ try:
2151
+ key = usecols.index(key)
2152
+ except ValueError:
2153
+ pass
2154
+ # Add the value to the list
2155
+ filling_values[key] = val
2156
+ # We have a sequence: update on a one-to-one basis
2157
+ elif isinstance(user_filling_values, (list, tuple)):
2158
+ n = len(user_filling_values)
2159
+ if (n <= nbcols):
2160
+ filling_values[:n] = user_filling_values
2161
+ else:
2162
+ filling_values = user_filling_values[:nbcols]
2163
+ # We have something else: use it for all entries
2164
+ else:
2165
+ filling_values = [user_filling_values] * nbcols
2166
+
2167
+ # Initialize the converters ................................
2168
+ if dtype is None:
2169
+ # Note: we can't use a [...]*nbcols, as we would have 3 times the same
2170
+ # ... converter, instead of 3 different converters.
2171
+ converters = [StringConverter(None, missing_values=miss, default=fill)
2172
+ for (miss, fill) in zip(missing_values, filling_values)]
2173
+ else:
2174
+ dtype_flat = flatten_dtype(dtype, flatten_base=True)
2175
+ # Initialize the converters
2176
+ if len(dtype_flat) > 1:
2177
+ # Flexible type : get a converter from each dtype
2178
+ zipit = zip(dtype_flat, missing_values, filling_values)
2179
+ converters = [StringConverter(dt, locked=True,
2180
+ missing_values=miss, default=fill)
2181
+ for (dt, miss, fill) in zipit]
2182
+ else:
2183
+ # Set to a default converter (but w/ different missing values)
2184
+ zipit = zip(missing_values, filling_values)
2185
+ converters = [StringConverter(dtype, locked=True,
2186
+ missing_values=miss, default=fill)
2187
+ for (miss, fill) in zipit]
2188
+ # Update the converters to use the user-defined ones
2189
+ uc_update = []
2190
+ for (j, conv) in user_converters.items():
2191
+ # If the converter is specified by column names, use the index instead
2192
+ if _is_string_like(j):
2193
+ try:
2194
+ j = names.index(j)
2195
+ i = j
2196
+ except ValueError:
2197
+ continue
2198
+ elif usecols:
2199
+ try:
2200
+ i = usecols.index(j)
2201
+ except ValueError:
2202
+ # Unused converter specified
2203
+ continue
2204
+ else:
2205
+ i = j
2206
+ # Find the value to test - first_line is not filtered by usecols:
2207
+ if len(first_line):
2208
+ testing_value = first_values[j]
2209
+ else:
2210
+ testing_value = None
2211
+ if conv is bytes:
2212
+ user_conv = asbytes
2213
+ elif byte_converters:
2214
+ # converters may use decode to workaround numpy's old behaviour,
2215
+ # so encode the string again before passing to the user converter
2216
+ def tobytes_first(x, conv):
2217
+ if type(x) is bytes:
2218
+ return conv(x)
2219
+ return conv(x.encode("latin1"))
2220
+ user_conv = functools.partial(tobytes_first, conv=conv)
2221
+ else:
2222
+ user_conv = conv
2223
+ converters[i].update(user_conv, locked=True,
2224
+ testing_value=testing_value,
2225
+ default=filling_values[i],
2226
+ missing_values=missing_values[i],)
2227
+ uc_update.append((i, user_conv))
2228
+ # Make sure we have the corrected keys in user_converters...
2229
+ user_converters.update(uc_update)
2230
+
2231
+ # FIXME: possible error, as the following variable is never used.
2232
+ # miss_chars = [_.missing_values for _ in converters]
2233
+
2234
+ # Initialize the output lists ...
2235
+ # ... rows
2236
+ rows = []
2237
+ append_to_rows = rows.append
2238
+ # ... masks
2239
+ if usemask:
2240
+ masks = []
2241
+ append_to_masks = masks.append
2242
+ # ... invalid
2243
+ invalid = []
2244
+ append_to_invalid = invalid.append
2245
+
2246
+ # Parse each line
2247
+ for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
2248
+ values = split_line(line)
2249
+ nbvalues = len(values)
2250
+ # Skip an empty line
2251
+ if nbvalues == 0:
2252
+ continue
2253
+ if usecols:
2254
+ # Select only the columns we need
2255
+ try:
2256
+ values = [values[_] for _ in usecols]
2257
+ except IndexError:
2258
+ append_to_invalid((i + skip_header + 1, nbvalues))
2259
+ continue
2260
+ elif nbvalues != nbcols:
2261
+ append_to_invalid((i + skip_header + 1, nbvalues))
2262
+ continue
2263
+ # Store the values
2264
+ append_to_rows(tuple(values))
2265
+ if usemask:
2266
+ append_to_masks(tuple([v.strip() in m
2267
+ for (v, m) in zip(values,
2268
+ missing_values)]))
2269
+ if len(rows) == max_rows:
2270
+ break
2271
+
2272
+ # Upgrade the converters (if needed)
2273
+ if dtype is None:
2274
+ for (i, converter) in enumerate(converters):
2275
+ current_column = [itemgetter(i)(_m) for _m in rows]
2276
+ try:
2277
+ converter.iterupgrade(current_column)
2278
+ except ConverterLockError:
2279
+ errmsg = "Converter #%i is locked and cannot be upgraded: " % i
2280
+ current_column = map(itemgetter(i), rows)
2281
+ for (j, value) in enumerate(current_column):
2282
+ try:
2283
+ converter.upgrade(value)
2284
+ except (ConverterError, ValueError):
2285
+ errmsg += "(occurred line #%i for value '%s')"
2286
+ errmsg %= (j + 1 + skip_header, value)
2287
+ raise ConverterError(errmsg)
2288
+
2289
+ # Check that we don't have invalid values
2290
+ nbinvalid = len(invalid)
2291
+ if nbinvalid > 0:
2292
+ nbrows = len(rows) + nbinvalid - skip_footer
2293
+ # Construct the error message
2294
+ template = " Line #%%i (got %%i columns instead of %i)" % nbcols
2295
+ if skip_footer > 0:
2296
+ nbinvalid_skipped = len([_ for _ in invalid
2297
+ if _[0] > nbrows + skip_header])
2298
+ invalid = invalid[:nbinvalid - nbinvalid_skipped]
2299
+ skip_footer -= nbinvalid_skipped
2300
+ #
2301
+ # nbrows -= skip_footer
2302
+ # errmsg = [template % (i, nb)
2303
+ # for (i, nb) in invalid if i < nbrows]
2304
+ # else:
2305
+ errmsg = [template % (i, nb)
2306
+ for (i, nb) in invalid]
2307
+ if len(errmsg):
2308
+ errmsg.insert(0, "Some errors were detected!")
2309
+ errmsg = "\n".join(errmsg)
2310
+ # Raise an exception?
2311
+ if invalid_raise:
2312
+ raise ValueError(errmsg)
2313
+ # Issue a warning?
2314
+ else:
2315
+ warnings.warn(errmsg, ConversionWarning, stacklevel=2)
2316
+
2317
+ # Strip the last skip_footer data
2318
+ if skip_footer > 0:
2319
+ rows = rows[:-skip_footer]
2320
+ if usemask:
2321
+ masks = masks[:-skip_footer]
2322
+
2323
+ # Convert each value according to the converter:
2324
+ # We want to modify the list in place to avoid creating a new one...
2325
+ if loose:
2326
+ rows = list(
2327
+ zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
2328
+ for (i, conv) in enumerate(converters)]))
2329
+ else:
2330
+ rows = list(
2331
+ zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
2332
+ for (i, conv) in enumerate(converters)]))
2333
+
2334
+ # Reset the dtype
2335
+ data = rows
2336
+ if dtype is None:
2337
+ # Get the dtypes from the types of the converters
2338
+ column_types = [conv.type for conv in converters]
2339
+ # Find the columns with strings...
2340
+ strcolidx = [i for (i, v) in enumerate(column_types)
2341
+ if v == np.str_]
2342
+
2343
+ if byte_converters and strcolidx:
2344
+ # convert strings back to bytes for backward compatibility
2345
+ warnings.warn(
2346
+ "Reading unicode strings without specifying the encoding "
2347
+ "argument is deprecated. Set the encoding, use None for the "
2348
+ "system default.",
2349
+ np.VisibleDeprecationWarning, stacklevel=2)
2350
+ def encode_unicode_cols(row_tup):
2351
+ row = list(row_tup)
2352
+ for i in strcolidx:
2353
+ row[i] = row[i].encode('latin1')
2354
+ return tuple(row)
2355
+
2356
+ try:
2357
+ data = [encode_unicode_cols(r) for r in data]
2358
+ except UnicodeEncodeError:
2359
+ pass
2360
+ else:
2361
+ for i in strcolidx:
2362
+ column_types[i] = np.bytes_
2363
+
2364
+ # Update string types to be the right length
2365
+ sized_column_types = column_types[:]
2366
+ for i, col_type in enumerate(column_types):
2367
+ if np.issubdtype(col_type, np.character):
2368
+ n_chars = max(len(row[i]) for row in data)
2369
+ sized_column_types[i] = (col_type, n_chars)
2370
+
2371
+ if names is None:
2372
+ # If the dtype is uniform (before sizing strings)
2373
+ base = {
2374
+ c_type
2375
+ for c, c_type in zip(converters, column_types)
2376
+ if c._checked}
2377
+ if len(base) == 1:
2378
+ uniform_type, = base
2379
+ (ddtype, mdtype) = (uniform_type, bool)
2380
+ else:
2381
+ ddtype = [(defaultfmt % i, dt)
2382
+ for (i, dt) in enumerate(sized_column_types)]
2383
+ if usemask:
2384
+ mdtype = [(defaultfmt % i, bool)
2385
+ for (i, dt) in enumerate(sized_column_types)]
2386
+ else:
2387
+ ddtype = list(zip(names, sized_column_types))
2388
+ mdtype = list(zip(names, [bool] * len(sized_column_types)))
2389
+ output = np.array(data, dtype=ddtype)
2390
+ if usemask:
2391
+ outputmask = np.array(masks, dtype=mdtype)
2392
+ else:
2393
+ # Overwrite the initial dtype names if needed
2394
+ if names and dtype.names is not None:
2395
+ dtype.names = names
2396
+ # Case #1. We have a structured type
2397
+ if len(dtype_flat) > 1:
2398
+ # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
2399
+ # First, create the array using a flattened dtype:
2400
+ # [('a', int), ('b1', int), ('b2', float)]
2401
+ # Then, view the array using the specified dtype.
2402
+ if 'O' in (_.char for _ in dtype_flat):
2403
+ if has_nested_fields(dtype):
2404
+ raise NotImplementedError(
2405
+ "Nested fields involving objects are not supported...")
2406
+ else:
2407
+ output = np.array(data, dtype=dtype)
2408
+ else:
2409
+ rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
2410
+ output = rows.view(dtype)
2411
+ # Now, process the rowmasks the same way
2412
+ if usemask:
2413
+ rowmasks = np.array(
2414
+ masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
2415
+ # Construct the new dtype
2416
+ mdtype = make_mask_descr(dtype)
2417
+ outputmask = rowmasks.view(mdtype)
2418
+ # Case #2. We have a basic dtype
2419
+ else:
2420
+ # We used some user-defined converters
2421
+ if user_converters:
2422
+ ishomogeneous = True
2423
+ descr = []
2424
+ for i, ttype in enumerate([conv.type for conv in converters]):
2425
+ # Keep the dtype of the current converter
2426
+ if i in user_converters:
2427
+ ishomogeneous &= (ttype == dtype.type)
2428
+ if np.issubdtype(ttype, np.character):
2429
+ ttype = (ttype, max(len(row[i]) for row in data))
2430
+ descr.append(('', ttype))
2431
+ else:
2432
+ descr.append(('', dtype))
2433
+ # So we changed the dtype?
2434
+ if not ishomogeneous:
2435
+ # We have more than one field
2436
+ if len(descr) > 1:
2437
+ dtype = np.dtype(descr)
2438
+ # We have only one field: drop the name if not needed.
2439
+ else:
2440
+ dtype = np.dtype(ttype)
2441
+ #
2442
+ output = np.array(data, dtype)
2443
+ if usemask:
2444
+ if dtype.names is not None:
2445
+ mdtype = [(_, bool) for _ in dtype.names]
2446
+ else:
2447
+ mdtype = bool
2448
+ outputmask = np.array(masks, dtype=mdtype)
2449
+ # Try to take care of the missing data we missed
2450
+ names = output.dtype.names
2451
+ if usemask and names:
2452
+ for (name, conv) in zip(names, converters):
2453
+ missing_values = [conv(_) for _ in conv.missing_values
2454
+ if _ != '']
2455
+ for mval in missing_values:
2456
+ outputmask[name] |= (output[name] == mval)
2457
+ # Construct the final array
2458
+ if usemask:
2459
+ output = output.view(MaskedArray)
2460
+ output._mask = outputmask
2461
+
2462
+ output = _ensure_ndmin_ndarray(output, ndmin=ndmin)
2463
+
2464
+ if unpack:
2465
+ if names is None:
2466
+ return output.T
2467
+ elif len(names) == 1:
2468
+ # squeeze single-name dtypes too
2469
+ return output[names[0]]
2470
+ else:
2471
+ # For structured arrays with multiple fields,
2472
+ # return an array for each field.
2473
+ return [output[field] for field in names]
2474
+ return output
2475
+
2476
+
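A minimal illustration of the missing/filling machinery above (an editorial sketch, not part of the diff): a scalar `filling_values` is applied to every column, matching the "something else" fallback branch in the code.

from io import StringIO
import numpy as np

s = StringIO("1,,3\n4,5,")
# Empty fields count as missing and are replaced by the scalar default.
print(np.genfromtxt(s, delimiter=",", filling_values=-1))
# [[ 1. -1.  3.]
#  [ 4.  5. -1.]]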
2477
+ _genfromtxt_with_like = array_function_dispatch()(genfromtxt)
2478
+
2479
+
2480
+ def recfromtxt(fname, **kwargs):
2481
+ """
2482
+ Load ASCII data from a file and return it in a record array.
2483
+
2484
+ If ``usemask=False`` a standard `recarray` is returned;
2485
+ if ``usemask=True`` a MaskedRecords array is returned.
2486
+
2487
+ Parameters
2488
+ ----------
2489
+ fname, kwargs : For a description of input parameters, see `genfromtxt`.
2490
+
2491
+ See Also
2492
+ --------
2493
+ numpy.genfromtxt : generic function
2494
+
2495
+ Notes
2496
+ -----
2497
+ By default, `dtype` is None, which means that the data-type of the output
2498
+ array will be determined from the data.
2499
+
2500
+ """
2501
+ kwargs.setdefault("dtype", None)
2502
+ usemask = kwargs.get('usemask', False)
2503
+ output = genfromtxt(fname, **kwargs)
2504
+ if usemask:
2505
+ from numpy.ma.mrecords import MaskedRecords
2506
+ output = output.view(MaskedRecords)
2507
+ else:
2508
+ output = output.view(np.recarray)
2509
+ return output
2510
+
2511
+
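A short usage sketch for `recfromtxt` (illustrative): field names are taken from the header line and the resulting recarray supports attribute access.

from io import StringIO
import numpy as np

s = StringIO("name,value\nfoo,1\nbar,2")
# encoding=None avoids the legacy 'bytes' compatibility path for the string column.
r = np.recfromtxt(s, delimiter=",", names=True, encoding=None)
print(r.value)   # recarray attribute access -> [1 2]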
2512
+ def recfromcsv(fname, **kwargs):
2513
+ """
2514
+ Load ASCII data stored in a comma-separated file.
2515
+
2516
+ The returned array is a record array (if ``usemask=False``, see
2517
+ `recarray`) or a masked record array (if ``usemask=True``,
2518
+ see `ma.mrecords.MaskedRecords`).
2519
+
2520
+ Parameters
2521
+ ----------
2522
+ fname, kwargs : For a description of input parameters, see `genfromtxt`.
2523
+
2524
+ See Also
2525
+ --------
2526
+ numpy.genfromtxt : generic function to load ASCII data.
2527
+
2528
+ Notes
2529
+ -----
2530
+ By default, `dtype` is None, which means that the data-type of the output
2531
+ array will be determined from the data.
2532
+
2533
+ """
2534
+ # Set default kwargs for genfromtxt as relevant to csv import.
2535
+ kwargs.setdefault("case_sensitive", "lower")
2536
+ kwargs.setdefault("names", True)
2537
+ kwargs.setdefault("delimiter", ",")
2538
+ kwargs.setdefault("dtype", None)
2539
+ output = genfromtxt(fname, **kwargs)
2540
+
2541
+ usemask = kwargs.get("usemask", False)
2542
+ if usemask:
2543
+ from numpy.ma.mrecords import MaskedRecords
2544
+ output = output.view(MaskedRecords)
2545
+ else:
2546
+ output = output.view(np.recarray)
2547
+ return output
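And a matching sketch for `recfromcsv`, which presets ``delimiter=","``, ``names=True``, ``dtype=None`` and lower-cased field names (illustrative):

from io import StringIO
import numpy as np

s = StringIO("Name,Value\nfoo,1\nbar,2")
r = np.recfromcsv(s, encoding=None)
print(r.value)   # header 'Value' becomes field 'value' -> [1 2]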
venv/lib/python3.10/site-packages/numpy/lib/npyio.pyi ADDED
@@ -0,0 +1,330 @@
1
+ import os
2
+ import sys
3
+ import zipfile
4
+ import types
5
+ from re import Pattern
6
+ from collections.abc import Collection, Mapping, Iterator, Sequence, Callable, Iterable
7
+ from typing import (
8
+ Literal as L,
9
+ Any,
10
+ TypeVar,
11
+ Generic,
12
+ IO,
13
+ overload,
14
+ Protocol,
15
+ )
16
+
17
+ from numpy import (
18
+ DataSource as DataSource,
19
+ ndarray,
20
+ recarray,
21
+ dtype,
22
+ generic,
23
+ float64,
24
+ void,
25
+ record,
26
+ )
27
+
28
+ from numpy.ma.mrecords import MaskedRecords
29
+ from numpy._typing import (
30
+ ArrayLike,
31
+ DTypeLike,
32
+ NDArray,
33
+ _DTypeLike,
34
+ _SupportsArrayFunc,
35
+ )
36
+
37
+ from numpy.core.multiarray import (
38
+ packbits as packbits,
39
+ unpackbits as unpackbits,
40
+ )
41
+
42
+ _T = TypeVar("_T")
43
+ _T_contra = TypeVar("_T_contra", contravariant=True)
44
+ _T_co = TypeVar("_T_co", covariant=True)
45
+ _SCT = TypeVar("_SCT", bound=generic)
46
+ _CharType_co = TypeVar("_CharType_co", str, bytes, covariant=True)
47
+ _CharType_contra = TypeVar("_CharType_contra", str, bytes, contravariant=True)
48
+
49
+ class _SupportsGetItem(Protocol[_T_contra, _T_co]):
50
+ def __getitem__(self, key: _T_contra, /) -> _T_co: ...
51
+
52
+ class _SupportsRead(Protocol[_CharType_co]):
53
+ def read(self) -> _CharType_co: ...
54
+
55
+ class _SupportsReadSeek(Protocol[_CharType_co]):
56
+ def read(self, n: int, /) -> _CharType_co: ...
57
+ def seek(self, offset: int, whence: int, /) -> object: ...
58
+
59
+ class _SupportsWrite(Protocol[_CharType_contra]):
60
+ def write(self, s: _CharType_contra, /) -> object: ...
61
+
62
+ __all__: list[str]
63
+
64
+ class BagObj(Generic[_T_co]):
65
+ def __init__(self, obj: _SupportsGetItem[str, _T_co]) -> None: ...
66
+ def __getattribute__(self, key: str) -> _T_co: ...
67
+ def __dir__(self) -> list[str]: ...
68
+
69
+ class NpzFile(Mapping[str, NDArray[Any]]):
70
+ zip: zipfile.ZipFile
71
+ fid: None | IO[str]
72
+ files: list[str]
73
+ allow_pickle: bool
74
+ pickle_kwargs: None | Mapping[str, Any]
75
+ _MAX_REPR_ARRAY_COUNT: int
76
+ # Represent `f` as a mutable property so we can access the type of `self`
77
+ @property
78
+ def f(self: _T) -> BagObj[_T]: ...
79
+ @f.setter
80
+ def f(self: _T, value: BagObj[_T]) -> None: ...
81
+ def __init__(
82
+ self,
83
+ fid: IO[str],
84
+ own_fid: bool = ...,
85
+ allow_pickle: bool = ...,
86
+ pickle_kwargs: None | Mapping[str, Any] = ...,
87
+ ) -> None: ...
88
+ def __enter__(self: _T) -> _T: ...
89
+ def __exit__(
90
+ self,
91
+ exc_type: None | type[BaseException],
92
+ exc_value: None | BaseException,
93
+ traceback: None | types.TracebackType,
94
+ /,
95
+ ) -> None: ...
96
+ def close(self) -> None: ...
97
+ def __del__(self) -> None: ...
98
+ def __iter__(self) -> Iterator[str]: ...
99
+ def __len__(self) -> int: ...
100
+ def __getitem__(self, key: str) -> NDArray[Any]: ...
101
+ def __contains__(self, key: str) -> bool: ...
102
+ def __repr__(self) -> str: ...
103
+
104
+ # NOTE: Returns a `NpzFile` if file is a zip file;
105
+ # returns an `ndarray`/`memmap` otherwise
106
+ def load(
107
+ file: str | bytes | os.PathLike[Any] | _SupportsReadSeek[bytes],
108
+ mmap_mode: L[None, "r+", "r", "w+", "c"] = ...,
109
+ allow_pickle: bool = ...,
110
+ fix_imports: bool = ...,
111
+ encoding: L["ASCII", "latin1", "bytes"] = ...,
112
+ ) -> Any: ...
113
+
114
+ def save(
115
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
116
+ arr: ArrayLike,
117
+ allow_pickle: bool = ...,
118
+ fix_imports: bool = ...,
119
+ ) -> None: ...
120
+
121
+ def savez(
122
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
123
+ *args: ArrayLike,
124
+ **kwds: ArrayLike,
125
+ ) -> None: ...
126
+
127
+ def savez_compressed(
128
+ file: str | os.PathLike[str] | _SupportsWrite[bytes],
129
+ *args: ArrayLike,
130
+ **kwds: ArrayLike,
131
+ ) -> None: ...
132
+
133
+ # File-like objects only have to implement `__iter__` and,
134
+ # optionally, `encoding`
135
+ @overload
136
+ def loadtxt(
137
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
138
+ dtype: None = ...,
139
+ comments: None | str | Sequence[str] = ...,
140
+ delimiter: None | str = ...,
141
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
142
+ skiprows: int = ...,
143
+ usecols: int | Sequence[int] = ...,
144
+ unpack: bool = ...,
145
+ ndmin: L[0, 1, 2] = ...,
146
+ encoding: None | str = ...,
147
+ max_rows: None | int = ...,
148
+ *,
149
+ quotechar: None | str = ...,
150
+ like: None | _SupportsArrayFunc = ...
151
+ ) -> NDArray[float64]: ...
152
+ @overload
153
+ def loadtxt(
154
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
155
+ dtype: _DTypeLike[_SCT],
156
+ comments: None | str | Sequence[str] = ...,
157
+ delimiter: None | str = ...,
158
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
159
+ skiprows: int = ...,
160
+ usecols: int | Sequence[int] = ...,
161
+ unpack: bool = ...,
162
+ ndmin: L[0, 1, 2] = ...,
163
+ encoding: None | str = ...,
164
+ max_rows: None | int = ...,
165
+ *,
166
+ quotechar: None | str = ...,
167
+ like: None | _SupportsArrayFunc = ...
168
+ ) -> NDArray[_SCT]: ...
169
+ @overload
170
+ def loadtxt(
171
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
172
+ dtype: DTypeLike,
173
+ comments: None | str | Sequence[str] = ...,
174
+ delimiter: None | str = ...,
175
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
176
+ skiprows: int = ...,
177
+ usecols: int | Sequence[int] = ...,
178
+ unpack: bool = ...,
179
+ ndmin: L[0, 1, 2] = ...,
180
+ encoding: None | str = ...,
181
+ max_rows: None | int = ...,
182
+ *,
183
+ quotechar: None | str = ...,
184
+ like: None | _SupportsArrayFunc = ...
185
+ ) -> NDArray[Any]: ...
186
+
187
+ def savetxt(
188
+ fname: str | os.PathLike[str] | _SupportsWrite[str] | _SupportsWrite[bytes],
189
+ X: ArrayLike,
190
+ fmt: str | Sequence[str] = ...,
191
+ delimiter: str = ...,
192
+ newline: str = ...,
193
+ header: str = ...,
194
+ footer: str = ...,
195
+ comments: str = ...,
196
+ encoding: None | str = ...,
197
+ ) -> None: ...
198
+
199
+ @overload
200
+ def fromregex(
201
+ file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
202
+ regexp: str | bytes | Pattern[Any],
203
+ dtype: _DTypeLike[_SCT],
204
+ encoding: None | str = ...
205
+ ) -> NDArray[_SCT]: ...
206
+ @overload
207
+ def fromregex(
208
+ file: str | os.PathLike[str] | _SupportsRead[str] | _SupportsRead[bytes],
209
+ regexp: str | bytes | Pattern[Any],
210
+ dtype: DTypeLike,
211
+ encoding: None | str = ...
212
+ ) -> NDArray[Any]: ...
213
+
214
+ @overload
215
+ def genfromtxt(
216
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
217
+ dtype: None = ...,
218
+ comments: str = ...,
219
+ delimiter: None | str | int | Iterable[int] = ...,
220
+ skip_header: int = ...,
221
+ skip_footer: int = ...,
222
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
223
+ missing_values: Any = ...,
224
+ filling_values: Any = ...,
225
+ usecols: None | Sequence[int] = ...,
226
+ names: L[None, True] | str | Collection[str] = ...,
227
+ excludelist: None | Sequence[str] = ...,
228
+ deletechars: str = ...,
229
+ replace_space: str = ...,
230
+ autostrip: bool = ...,
231
+ case_sensitive: bool | L['upper', 'lower'] = ...,
232
+ defaultfmt: str = ...,
233
+ unpack: None | bool = ...,
234
+ usemask: bool = ...,
235
+ loose: bool = ...,
236
+ invalid_raise: bool = ...,
237
+ max_rows: None | int = ...,
238
+ encoding: str = ...,
239
+ *,
240
+ ndmin: L[0, 1, 2] = ...,
241
+ like: None | _SupportsArrayFunc = ...,
242
+ ) -> NDArray[Any]: ...
243
+ @overload
244
+ def genfromtxt(
245
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
246
+ dtype: _DTypeLike[_SCT],
247
+ comments: str = ...,
248
+ delimiter: None | str | int | Iterable[int] = ...,
249
+ skip_header: int = ...,
250
+ skip_footer: int = ...,
251
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
252
+ missing_values: Any = ...,
253
+ filling_values: Any = ...,
254
+ usecols: None | Sequence[int] = ...,
255
+ names: L[None, True] | str | Collection[str] = ...,
256
+ excludelist: None | Sequence[str] = ...,
257
+ deletechars: str = ...,
258
+ replace_space: str = ...,
259
+ autostrip: bool = ...,
260
+ case_sensitive: bool | L['upper', 'lower'] = ...,
261
+ defaultfmt: str = ...,
262
+ unpack: None | bool = ...,
263
+ usemask: bool = ...,
264
+ loose: bool = ...,
265
+ invalid_raise: bool = ...,
266
+ max_rows: None | int = ...,
267
+ encoding: str = ...,
268
+ *,
269
+ ndmin: L[0, 1, 2] = ...,
270
+ like: None | _SupportsArrayFunc = ...,
271
+ ) -> NDArray[_SCT]: ...
272
+ @overload
273
+ def genfromtxt(
274
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
275
+ dtype: DTypeLike,
276
+ comments: str = ...,
277
+ delimiter: None | str | int | Iterable[int] = ...,
278
+ skip_header: int = ...,
279
+ skip_footer: int = ...,
280
+ converters: None | Mapping[int | str, Callable[[str], Any]] = ...,
281
+ missing_values: Any = ...,
282
+ filling_values: Any = ...,
283
+ usecols: None | Sequence[int] = ...,
284
+ names: L[None, True] | str | Collection[str] = ...,
285
+ excludelist: None | Sequence[str] = ...,
286
+ deletechars: str = ...,
287
+ replace_space: str = ...,
288
+ autostrip: bool = ...,
289
+ case_sensitive: bool | L['upper', 'lower'] = ...,
290
+ defaultfmt: str = ...,
291
+ unpack: None | bool = ...,
292
+ usemask: bool = ...,
293
+ loose: bool = ...,
294
+ invalid_raise: bool = ...,
295
+ max_rows: None | int = ...,
296
+ encoding: str = ...,
297
+ *,
298
+ ndmin: L[0, 1, 2] = ...,
299
+ like: None | _SupportsArrayFunc = ...,
300
+ ) -> NDArray[Any]: ...
301
+
302
+ @overload
303
+ def recfromtxt(
304
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
305
+ *,
306
+ usemask: L[False] = ...,
307
+ **kwargs: Any,
308
+ ) -> recarray[Any, dtype[record]]: ...
309
+ @overload
310
+ def recfromtxt(
311
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
312
+ *,
313
+ usemask: L[True],
314
+ **kwargs: Any,
315
+ ) -> MaskedRecords[Any, dtype[void]]: ...
316
+
317
+ @overload
318
+ def recfromcsv(
319
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
320
+ *,
321
+ usemask: L[False] = ...,
322
+ **kwargs: Any,
323
+ ) -> recarray[Any, dtype[record]]: ...
324
+ @overload
325
+ def recfromcsv(
326
+ fname: str | os.PathLike[str] | Iterable[str] | Iterable[bytes],
327
+ *,
328
+ usemask: L[True],
329
+ **kwargs: Any,
330
+ ) -> MaskedRecords[Any, dtype[void]]: ...
venv/lib/python3.10/site-packages/numpy/lib/polynomial.py ADDED
@@ -0,0 +1,1453 @@
1
+ """
2
+ Functions to operate on polynomials.
3
+
4
+ """
5
+ __all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
6
+ 'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
7
+ 'polyfit', 'RankWarning']
8
+
9
+ import functools
10
+ import re
11
+ import warnings
12
+
13
+ from .._utils import set_module
14
+ import numpy.core.numeric as NX
15
+
16
+ from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
17
+ ones)
18
+ from numpy.core import overrides
19
+ from numpy.lib.twodim_base import diag, vander
20
+ from numpy.lib.function_base import trim_zeros
21
+ from numpy.lib.type_check import iscomplex, real, imag, mintypecode
22
+ from numpy.linalg import eigvals, lstsq, inv
23
+
24
+
25
+ array_function_dispatch = functools.partial(
26
+ overrides.array_function_dispatch, module='numpy')
27
+
28
+
29
+ @set_module('numpy')
30
+ class RankWarning(UserWarning):
31
+ """
32
+ Issued by `polyfit` when the Vandermonde matrix is rank deficient.
33
+
34
+ For more information, a way to suppress the warning, and an example of
35
+ `RankWarning` being issued, see `polyfit`.
36
+
37
+ """
38
+ pass
39
+
40
+
41
+ def _poly_dispatcher(seq_of_zeros):
42
+ return seq_of_zeros
43
+
44
+
45
+ @array_function_dispatch(_poly_dispatcher)
46
+ def poly(seq_of_zeros):
47
+ """
48
+ Find the coefficients of a polynomial with the given sequence of roots.
49
+
50
+ .. note::
51
+ This forms part of the old polynomial API. Since version 1.4, the
52
+ new polynomial API defined in `numpy.polynomial` is preferred.
53
+ A summary of the differences can be found in the
54
+ :doc:`transition guide </reference/routines.polynomials>`.
55
+
56
+ Returns the coefficients of the polynomial whose leading coefficient
57
+ is one for the given sequence of zeros (multiple roots must be included
58
+ in the sequence as many times as their multiplicity; see Examples).
59
+ A square matrix (or array, which will be treated as a matrix) can also
60
+ be given, in which case the coefficients of the characteristic polynomial
61
+ of the matrix are returned.
62
+
63
+ Parameters
64
+ ----------
65
+ seq_of_zeros : array_like, shape (N,) or (N, N)
66
+ A sequence of polynomial roots, or a square array or matrix object.
67
+
68
+ Returns
69
+ -------
70
+ c : ndarray
71
+ 1D array of polynomial coefficients from highest to lowest degree:
72
+
73
+ ``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
74
+ where c[0] always equals 1.
75
+
76
+ Raises
77
+ ------
78
+ ValueError
79
+ If input is the wrong shape (the input must be a 1-D or square
80
+ 2-D array).
81
+
82
+ See Also
83
+ --------
84
+ polyval : Compute polynomial values.
85
+ roots : Return the roots of a polynomial.
86
+ polyfit : Least squares polynomial fit.
87
+ poly1d : A one-dimensional polynomial class.
88
+
89
+ Notes
90
+ -----
91
+ Specifying the roots of a polynomial still leaves one degree of
92
+ freedom, typically represented by an undetermined leading
93
+ coefficient. [1]_ In the case of this function, that coefficient -
94
+ the first one in the returned array - is always taken as one. (If
95
+ for some reason you have one other point, the only automatic way
96
+ presently to leverage that information is to use ``polyfit``.)
97
+
98
+ The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
99
+ matrix **A** is given by
100
+
101
+ :math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
102
+
103
+ where **I** is the `n`-by-`n` identity matrix. [2]_
104
+
105
+ References
106
+ ----------
107
+ .. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trigonometry,
108
+ Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
109
+
110
+ .. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
111
+ Academic Press, pg. 182, 1980.
112
+
113
+ Examples
114
+ --------
115
+ Given a sequence of a polynomial's zeros:
116
+
117
+ >>> np.poly((0, 0, 0)) # Multiple root example
118
+ array([1., 0., 0., 0.])
119
+
120
+ The line above represents z**3 + 0*z**2 + 0*z + 0.
121
+
122
+ >>> np.poly((-1./2, 0, 1./2))
123
+ array([ 1. , 0. , -0.25, 0. ])
124
+
125
+ The line above represents z**3 - z/4
126
+
127
+ >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
128
+ array([ 1. , -0.77086955, 0.08618131, 0. ]) # random
129
+
130
+ Given a square array object:
131
+
132
+ >>> P = np.array([[0, 1./3], [-1./2, 0]])
133
+ >>> np.poly(P)
134
+ array([1. , 0. , 0.16666667])
135
+
136
+ Note how in all cases the leading coefficient is always 1.
137
+
138
+ """
139
+ seq_of_zeros = atleast_1d(seq_of_zeros)
140
+ sh = seq_of_zeros.shape
141
+
142
+ if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
143
+ seq_of_zeros = eigvals(seq_of_zeros)
144
+ elif len(sh) == 1:
145
+ dt = seq_of_zeros.dtype
146
+ # Let object arrays slip through, e.g. for arbitrary precision
147
+ if dt != object:
148
+ seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
149
+ else:
150
+ raise ValueError("input must be 1d or non-empty square 2d array.")
151
+
152
+ if len(seq_of_zeros) == 0:
153
+ return 1.0
154
+ dt = seq_of_zeros.dtype
155
+ a = ones((1,), dtype=dt)
156
+ for zero in seq_of_zeros:
157
+ a = NX.convolve(a, array([1, -zero], dtype=dt), mode='full')
158
+
159
+ if issubclass(a.dtype.type, NX.complexfloating):
160
+ # if complex roots are all complex conjugates, the roots are real.
161
+ roots = NX.asarray(seq_of_zeros, complex)
162
+ if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
163
+ a = a.real.copy()
164
+
165
+ return a
166
+
167
+
168
+ def _roots_dispatcher(p):
169
+ return p
170
+
171
+
172
+ @array_function_dispatch(_roots_dispatcher)
173
+ def roots(p):
174
+ """
175
+ Return the roots of a polynomial with coefficients given in p.
176
+
177
+ .. note::
178
+ This forms part of the old polynomial API. Since version 1.4, the
179
+ new polynomial API defined in `numpy.polynomial` is preferred.
180
+ A summary of the differences can be found in the
181
+ :doc:`transition guide </reference/routines.polynomials>`.
182
+
183
+ The values in the rank-1 array `p` are coefficients of a polynomial.
184
+ If the length of `p` is n+1 then the polynomial is described by::
185
+
186
+ p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
187
+
188
+ Parameters
189
+ ----------
190
+ p : array_like
191
+ Rank-1 array of polynomial coefficients.
192
+
193
+ Returns
194
+ -------
195
+ out : ndarray
196
+ An array containing the roots of the polynomial.
197
+
198
+ Raises
199
+ ------
200
+ ValueError
201
+ When `p` cannot be converted to a rank-1 array.
202
+
203
+ See also
204
+ --------
205
+ poly : Find the coefficients of a polynomial with a given sequence
206
+ of roots.
207
+ polyval : Compute polynomial values.
208
+ polyfit : Least squares polynomial fit.
209
+ poly1d : A one-dimensional polynomial class.
210
+
211
+ Notes
212
+ -----
213
+ The algorithm relies on computing the eigenvalues of the
214
+ companion matrix [1]_.
215
+
216
+ References
217
+ ----------
218
+ .. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
219
+ Cambridge University Press, 1999, pp. 146-7.
220
+
221
+ Examples
222
+ --------
223
+ >>> coeff = [3.2, 2, 1]
224
+ >>> np.roots(coeff)
225
+ array([-0.3125+0.46351241j, -0.3125-0.46351241j])
226
+
227
+ """
228
+ # If input is scalar, this makes it an array
229
+ p = atleast_1d(p)
230
+ if p.ndim != 1:
231
+ raise ValueError("Input must be a rank-1 array.")
232
+
233
+ # find non-zero array entries
234
+ non_zero = NX.nonzero(NX.ravel(p))[0]
235
+
236
+ # Return an empty array if polynomial is all zeros
237
+ if len(non_zero) == 0:
238
+ return NX.array([])
239
+
240
+ # find the number of trailing zeros -- this is the number of roots at 0.
241
+ trailing_zeros = len(p) - non_zero[-1] - 1
242
+
243
+ # strip leading and trailing zeros
244
+ p = p[int(non_zero[0]):int(non_zero[-1])+1]
245
+
246
+ # casting: if incoming array isn't floating point, make it floating point.
247
+ if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
248
+ p = p.astype(float)
249
+
250
+ N = len(p)
251
+ if N > 1:
252
+ # build companion matrix and find its eigenvalues (the roots)
253
+ A = diag(NX.ones((N-2,), p.dtype), -1)
254
+ A[0,:] = -p[1:] / p[0]
255
+ roots = eigvals(A)
256
+ else:
257
+ roots = NX.array([])
258
+
259
+ # tack any zeros onto the back of the array
260
+ roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
261
+ return roots
262
+
263
+
264
+ def _polyint_dispatcher(p, m=None, k=None):
265
+ return (p,)
266
+
267
+
268
+ @array_function_dispatch(_polyint_dispatcher)
269
+ def polyint(p, m=1, k=None):
270
+ """
271
+ Return an antiderivative (indefinite integral) of a polynomial.
272
+
273
+ .. note::
274
+ This forms part of the old polynomial API. Since version 1.4, the
275
+ new polynomial API defined in `numpy.polynomial` is preferred.
276
+ A summary of the differences can be found in the
277
+ :doc:`transition guide </reference/routines.polynomials>`.
278
+
279
+ The returned order `m` antiderivative `P` of polynomial `p` satisfies
280
+ :math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
281
+ integration constants `k`. The constants determine the low-order
282
+ polynomial part
283
+
284
+ .. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
285
+
286
+ of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
287
+
288
+ Parameters
289
+ ----------
290
+ p : array_like or poly1d
291
+ Polynomial to integrate.
292
+ A sequence is interpreted as polynomial coefficients, see `poly1d`.
293
+ m : int, optional
294
+ Order of the antiderivative. (Default: 1)
295
+ k : list of `m` scalars or scalar, optional
296
+ Integration constants. They are given in the order of integration:
297
+ those corresponding to highest-order terms come first.
298
+
299
+ If ``None`` (default), all constants are assumed to be zero.
300
+ If `m = 1`, a single scalar can be given instead of a list.
301
+
302
+ See Also
303
+ --------
304
+ polyder : derivative of a polynomial
305
+ poly1d.integ : equivalent method
306
+
307
+ Examples
308
+ --------
309
+ The defining property of the antiderivative:
310
+
311
+ >>> p = np.poly1d([1,1,1])
312
+ >>> P = np.polyint(p)
313
+ >>> P
314
+ poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary
315
+ >>> np.polyder(P) == p
316
+ True
317
+
318
+ The integration constants default to zero, but can be specified:
319
+
320
+ >>> P = np.polyint(p, 3)
321
+ >>> P(0)
322
+ 0.0
323
+ >>> np.polyder(P)(0)
324
+ 0.0
325
+ >>> np.polyder(P, 2)(0)
326
+ 0.0
327
+ >>> P = np.polyint(p, 3, k=[6,5,3])
328
+ >>> P
329
+ poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary
330
+
331
+ Note that 3 = 6 / 2!, and that the constants are given in the order of
332
+ integrations. Constant of the highest-order polynomial term comes first:
333
+
334
+ >>> np.polyder(P, 2)(0)
335
+ 6.0
336
+ >>> np.polyder(P, 1)(0)
337
+ 5.0
338
+ >>> P(0)
339
+ 3.0
340
+
341
+ """
342
+ m = int(m)
343
+ if m < 0:
344
+ raise ValueError("Order of integral must be positive (see polyder)")
345
+ if k is None:
346
+ k = NX.zeros(m, float)
347
+ k = atleast_1d(k)
348
+ if len(k) == 1 and m > 1:
349
+ k = k[0]*NX.ones(m, float)
350
+ if len(k) < m:
351
+ raise ValueError(
352
+ "k must be a scalar or a rank-1 array of length 1 or >m.")
353
+
354
+ truepoly = isinstance(p, poly1d)
355
+ p = NX.asarray(p)
356
+ if m == 0:
357
+ if truepoly:
358
+ return poly1d(p)
359
+ return p
360
+ else:
361
+ # Note: this must work also with object and integer arrays
362
+ y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
363
+ val = polyint(y, m - 1, k=k[1:])
364
+ if truepoly:
365
+ return poly1d(val)
366
+ return val
367
+
368
+
369
+ def _polyder_dispatcher(p, m=None):
370
+ return (p,)
371
+
372
+
373
+ @array_function_dispatch(_polyder_dispatcher)
374
+ def polyder(p, m=1):
375
+ """
376
+ Return the derivative of the specified order of a polynomial.
377
+
378
+ .. note::
379
+ This forms part of the old polynomial API. Since version 1.4, the
380
+ new polynomial API defined in `numpy.polynomial` is preferred.
381
+ A summary of the differences can be found in the
382
+ :doc:`transition guide </reference/routines.polynomials>`.
383
+
384
+ Parameters
385
+ ----------
386
+ p : poly1d or sequence
387
+ Polynomial to differentiate.
388
+ A sequence is interpreted as polynomial coefficients, see `poly1d`.
389
+ m : int, optional
390
+ Order of differentiation (default: 1)
391
+
392
+ Returns
393
+ -------
394
+ der : poly1d
395
+ A new polynomial representing the derivative.
396
+
397
+ See Also
398
+ --------
399
+ polyint : Anti-derivative of a polynomial.
400
+ poly1d : Class for one-dimensional polynomials.
401
+
402
+ Examples
403
+ --------
404
+ The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
405
+
406
+ >>> p = np.poly1d([1,1,1,1])
407
+ >>> p2 = np.polyder(p)
408
+ >>> p2
409
+ poly1d([3, 2, 1])
410
+
411
+ which evaluates to:
412
+
413
+ >>> p2(2.)
414
+ 17.0
415
+
416
+ We can verify this, approximating the derivative with
417
+ ``(f(x + h) - f(x))/h``:
418
+
419
+ >>> (p(2. + 0.001) - p(2.)) / 0.001
420
+ 17.007000999997857
421
+
422
+ The fourth-order derivative of a 3rd-order polynomial is zero:
423
+
424
+ >>> np.polyder(p, 2)
425
+ poly1d([6, 2])
426
+ >>> np.polyder(p, 3)
427
+ poly1d([6])
428
+ >>> np.polyder(p, 4)
429
+ poly1d([0])
430
+
431
+ """
432
+ m = int(m)
433
+ if m < 0:
434
+ raise ValueError("Order of derivative must be positive (see polyint)")
435
+
436
+ truepoly = isinstance(p, poly1d)
437
+ p = NX.asarray(p)
438
+ n = len(p) - 1
439
+ y = p[:-1] * NX.arange(n, 0, -1)
440
+ if m == 0:
441
+ val = p
442
+ else:
443
+ val = polyder(y, m - 1)
444
+ if truepoly:
445
+ val = poly1d(val)
446
+ return val
447
+
448
+
449
+ def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
450
+ return (x, y, w)
451
+
452
+
453
+ @array_function_dispatch(_polyfit_dispatcher)
454
+ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
455
+ """
456
+ Least squares polynomial fit.
457
+
458
+ .. note::
459
+ This forms part of the old polynomial API. Since version 1.4, the
460
+ new polynomial API defined in `numpy.polynomial` is preferred.
461
+ A summary of the differences can be found in the
462
+ :doc:`transition guide </reference/routines.polynomials>`.
463
+
464
+ Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
465
+ to points `(x, y)`. Returns a vector of coefficients `p` that minimises
466
+ the squared error in the order `deg`, `deg-1`, ... `0`.
467
+
468
+ The `Polynomial.fit <numpy.polynomial.polynomial.Polynomial.fit>` class
469
+ method is recommended for new code as it is more stable numerically. See
470
+ the documentation of the method for more information.
471
+
472
+ Parameters
473
+ ----------
474
+ x : array_like, shape (M,)
475
+ x-coordinates of the M sample points ``(x[i], y[i])``.
476
+ y : array_like, shape (M,) or (M, K)
477
+ y-coordinates of the sample points. Several data sets of sample
478
+ points sharing the same x-coordinates can be fitted at once by
479
+ passing in a 2D-array that contains one dataset per column.
480
+ deg : int
481
+ Degree of the fitting polynomial
482
+ rcond : float, optional
483
+ Relative condition number of the fit. Singular values smaller than
484
+ this relative to the largest singular value will be ignored. The
485
+ default value is len(x)*eps, where eps is the relative precision of
486
+ the float type, about 2e-16 in most cases.
487
+ full : bool, optional
488
+ Switch determining nature of return value. When it is False (the
489
+ default) just the coefficients are returned, when True diagnostic
490
+ information from the singular value decomposition is also returned.
491
+ w : array_like, shape (M,), optional
492
+ Weights. If not None, the weight ``w[i]`` applies to the unsquared
493
+ residual ``y[i] - y_hat[i]`` at ``x[i]``. Ideally the weights are
494
+ chosen so that the errors of the products ``w[i]*y[i]`` all have the
495
+ same variance. When using inverse-variance weighting, use
496
+ ``w[i] = 1/sigma(y[i])``. The default value is None.
497
+ cov : bool or str, optional
498
+ If given and not `False`, return not just the estimate but also its
499
+ covariance matrix. By default, the covariance are scaled by
500
+ chi2/dof, where dof = M - (deg + 1), i.e., the weights are presumed
501
+ to be unreliable except in a relative sense and everything is scaled
502
+ such that the reduced chi2 is unity. This scaling is omitted if
503
+ ``cov='unscaled'``, as is relevant for the case that the weights are
504
+ w = 1/sigma, with sigma known to be a reliable estimate of the
505
+ uncertainty.
506
+
507
+ Returns
508
+ -------
509
+ p : ndarray, shape (deg + 1,) or (deg + 1, K)
510
+ Polynomial coefficients, highest power first. If `y` was 2-D, the
511
+ coefficients for `k`-th data set are in ``p[:,k]``.
512
+
513
+ residuals, rank, singular_values, rcond
514
+ These values are only returned if ``full == True``
515
+
516
+ - residuals -- sum of squared residuals of the least squares fit
517
+ - rank -- the effective rank of the scaled Vandermonde
518
+ coefficient matrix
519
+ - singular_values -- singular values of the scaled Vandermonde
520
+ coefficient matrix
521
+ - rcond -- value of `rcond`.
522
+
523
+ For more details, see `numpy.linalg.lstsq`.
524
+
525
+ V : ndarray, shape (M,M) or (M,M,K)
526
+ Present only if ``full == False`` and ``cov == True``. The covariance
527
+ matrix of the polynomial coefficient estimates. The diagonal of
528
+ this matrix are the variance estimates for each coefficient. If y
529
+ is a 2-D array, then the covariance matrix for the `k`-th data set
530
+ are in ``V[:,:,k]``
531
+
532
+
533
+ Warns
534
+ -----
535
+ RankWarning
536
+ The rank of the coefficient matrix in the least-squares fit is
537
+ deficient. The warning is only raised if ``full == False``.
538
+
539
+ The warnings can be turned off by
540
+
541
+ >>> import warnings
542
+ >>> warnings.simplefilter('ignore', np.RankWarning)
543
+
544
+ See Also
545
+ --------
546
+ polyval : Compute polynomial values.
547
+ linalg.lstsq : Computes a least-squares fit.
548
+ scipy.interpolate.UnivariateSpline : Computes spline fits.
549
+
550
+ Notes
551
+ -----
552
+ The solution minimizes the squared error
553
+
554
+ .. math::
555
+ E = \\sum_{j=0}^k |p(x_j) - y_j|^2
556
+
557
+ in the equations::
558
+
559
+ x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
560
+ x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
561
+ ...
562
+ x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
563
+
564
+ The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
565
+
566
+ `polyfit` issues a `RankWarning` when the least-squares fit is badly
567
+ conditioned. This implies that the best fit is not well-defined due
568
+ to numerical error. The results may be improved by lowering the polynomial
569
+ degree or by replacing `x` with ``x - x.mean()``. The `rcond` parameter
570
+ can also be set to a value smaller than its default, but the resulting
571
+ fit may be spurious: including contributions from the small singular
572
+ values can add numerical noise to the result.
573
+
574
+ Note that fitting polynomial coefficients is inherently badly conditioned
575
+ when the degree of the polynomial is large or the interval of sample points
576
+ is badly centered. The quality of the fit should always be checked in these
577
+ cases. When polynomial fits are not satisfactory, splines may be a good
578
+ alternative.
579
+
580
+ References
581
+ ----------
582
+ .. [1] Wikipedia, "Curve fitting",
583
+ https://en.wikipedia.org/wiki/Curve_fitting
584
+ .. [2] Wikipedia, "Polynomial interpolation",
585
+ https://en.wikipedia.org/wiki/Polynomial_interpolation
586
+
587
+ Examples
588
+ --------
589
+ >>> import warnings
590
+ >>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
591
+ >>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
592
+ >>> z = np.polyfit(x, y, 3)
593
+ >>> z
594
+ array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary
595
+
596
+ It is convenient to use `poly1d` objects for dealing with polynomials:
597
+
598
+ >>> p = np.poly1d(z)
599
+ >>> p(0.5)
600
+ 0.6143849206349179 # may vary
601
+ >>> p(3.5)
602
+ -0.34732142857143039 # may vary
603
+ >>> p(10)
604
+ 22.579365079365115 # may vary
605
+
606
+ High-order polynomials may oscillate wildly:
607
+
608
+ >>> with warnings.catch_warnings():
609
+ ... warnings.simplefilter('ignore', np.RankWarning)
610
+ ... p30 = np.poly1d(np.polyfit(x, y, 30))
611
+ ...
612
+ >>> p30(4)
613
+ -0.80000000000000204 # may vary
614
+ >>> p30(5)
615
+ -0.99999999999999445 # may vary
616
+ >>> p30(4.5)
617
+ -0.10547061179440398 # may vary
618
+
619
+ Illustration:
620
+
621
+ >>> import matplotlib.pyplot as plt
622
+ >>> xp = np.linspace(-2, 6, 100)
623
+ >>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
624
+ >>> plt.ylim(-2,2)
625
+ (-2, 2)
626
+ >>> plt.show()
627
+
628
+ """
629
+ order = int(deg) + 1
630
+ x = NX.asarray(x) + 0.0
631
+ y = NX.asarray(y) + 0.0
632
+
633
+ # check arguments.
634
+ if deg < 0:
635
+ raise ValueError("expected deg >= 0")
636
+ if x.ndim != 1:
637
+ raise TypeError("expected 1D vector for x")
638
+ if x.size == 0:
639
+ raise TypeError("expected non-empty vector for x")
640
+ if y.ndim < 1 or y.ndim > 2:
641
+ raise TypeError("expected 1D or 2D array for y")
642
+ if x.shape[0] != y.shape[0]:
643
+ raise TypeError("expected x and y to have same length")
644
+
645
+ # set rcond
646
+ if rcond is None:
647
+ rcond = len(x)*finfo(x.dtype).eps
648
+
649
+ # set up least squares equation for powers of x
650
+ lhs = vander(x, order)
651
+ rhs = y
652
+
653
+ # apply weighting
654
+ if w is not None:
655
+ w = NX.asarray(w) + 0.0
656
+ if w.ndim != 1:
657
+ raise TypeError("expected a 1-d array for weights")
658
+ if w.shape[0] != y.shape[0]:
659
+ raise TypeError("expected w and y to have the same length")
660
+ lhs *= w[:, NX.newaxis]
661
+ if rhs.ndim == 2:
662
+ rhs *= w[:, NX.newaxis]
663
+ else:
664
+ rhs *= w
665
+
666
+ # scale lhs to improve condition number and solve
667
+ scale = NX.sqrt((lhs*lhs).sum(axis=0))
668
+ lhs /= scale
669
+ c, resids, rank, s = lstsq(lhs, rhs, rcond)
670
+ c = (c.T/scale).T # broadcast scale coefficients
671
+
672
+ # warn on rank reduction, which indicates an ill conditioned matrix
673
+ if rank != order and not full:
674
+ msg = "Polyfit may be poorly conditioned"
675
+ warnings.warn(msg, RankWarning, stacklevel=2)
676
+
677
+ if full:
678
+ return c, resids, rank, s, rcond
679
+ elif cov:
680
+ Vbase = inv(dot(lhs.T, lhs))
681
+ Vbase /= NX.outer(scale, scale)
682
+ if cov == "unscaled":
683
+ fac = 1
684
+ else:
685
+ if len(x) <= order:
686
+ raise ValueError("the number of data points must exceed order "
687
+ "to scale the covariance matrix")
688
+ # note, this used to be: fac = resids / (len(x) - order - 2.0)
689
+ # it was decided that the "- 2" (originally justified by "Bayesian
690
+ # uncertainty analysis") is not what the user expects
691
+ # (see gh-11196 and gh-11197)
692
+ fac = resids / (len(x) - order)
693
+ if y.ndim == 1:
694
+ return c, Vbase * fac
695
+ else:
696
+ return c, Vbase[:,:, NX.newaxis] * fac
697
+ else:
698
+ return c
699
+
700
+
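A minimal usage sketch for the fit above (made-up data, for illustration only): with ``cov=True`` the call returns the coefficients together with their covariance matrix.

    import numpy as np

    x = np.linspace(0.0, 1.0, 20)
    y = 2.0 * x + 1.0                        # exact line, so residuals ~ 0
    coef, V = np.polyfit(x, y, 1, cov=True)  # illustrative call
    assert coef.shape == (2,) and V.shape == (2, 2)
    assert np.allclose(coef, [2.0, 1.0])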
701
+ def _polyval_dispatcher(p, x):
702
+ return (p, x)
703
+
704
+
705
+ @array_function_dispatch(_polyval_dispatcher)
706
+ def polyval(p, x):
707
+ """
708
+ Evaluate a polynomial at specific values.
709
+
710
+ .. note::
711
+ This forms part of the old polynomial API. Since version 1.4, the
712
+ new polynomial API defined in `numpy.polynomial` is preferred.
713
+ A summary of the differences can be found in the
714
+ :doc:`transition guide </reference/routines.polynomials>`.
715
+
716
+ If `p` is of length N, this function returns the value:
717
+
718
+ ``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
719
+
720
+ If `x` is a sequence, then ``p(x)`` is returned for each element of ``x``.
721
+ If `x` is another polynomial then the composite polynomial ``p(x(t))``
722
+ is returned.
723
+
724
+ Parameters
725
+ ----------
726
+ p : array_like or poly1d object
727
+ 1D array of polynomial coefficients (including coefficients equal
728
+ to zero) from highest degree to the constant term, or an
729
+ instance of poly1d.
730
+ x : array_like or poly1d object
731
+ A number, an array of numbers, or an instance of poly1d, at
732
+ which to evaluate `p`.
733
+
734
+ Returns
735
+ -------
736
+ values : ndarray or poly1d
737
+ If `x` is a poly1d instance, the result is the composition of the two
738
+ polynomials, i.e., `x` is "substituted" in `p` and the simplified
739
+ result is returned. In addition, the type of `x` - array_like or
740
+ poly1d - governs the type of the output: `x` array_like => `values`
741
+ array_like, `x` a poly1d object => `values` is also.
742
+
743
+ See Also
744
+ --------
745
+ poly1d: A polynomial class.
746
+
747
+ Notes
748
+ -----
749
+ Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
750
+ for polynomials of high degree the values may be inaccurate due to
751
+ rounding errors. Use carefully.
752
+
753
+ If `x` is a subtype of `ndarray` the return value will be of the same type.
754
+
755
+ References
756
+ ----------
757
+ .. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
758
+ trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
759
+ Reinhold Co., 1985, pg. 720.
760
+
761
+ Examples
762
+ --------
763
+ >>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
764
+ 76
765
+ >>> np.polyval([3,0,1], np.poly1d(5))
766
+ poly1d([76])
767
+ >>> np.polyval(np.poly1d([3,0,1]), 5)
768
+ 76
769
+ >>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
770
+ poly1d([76])
771
+
772
+ """
773
+ p = NX.asarray(p)
774
+ if isinstance(x, poly1d):
775
+ y = 0
776
+ else:
777
+ x = NX.asanyarray(x)
778
+ y = NX.zeros_like(x)
779
+ for pv in p:
780
+ y = y * x + pv
781
+ return y
782
+
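The loop in ``polyval`` above is Horner's recurrence; a small sketch with illustrative values makes the equivalence explicit.

    import numpy as np

    p = [3, 0, 1]            # 3*x**2 + 1
    x, acc = 5, 0
    for c in p:              # same recurrence as ``y = y * x + pv``
        acc = acc * x + c
    assert acc == np.polyval(p, x) == 76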
783
+
784
+ def _binary_op_dispatcher(a1, a2):
785
+ return (a1, a2)
786
+
787
+
788
+ @array_function_dispatch(_binary_op_dispatcher)
789
+ def polyadd(a1, a2):
790
+ """
791
+ Find the sum of two polynomials.
792
+
793
+ .. note::
794
+ This forms part of the old polynomial API. Since version 1.4, the
795
+ new polynomial API defined in `numpy.polynomial` is preferred.
796
+ A summary of the differences can be found in the
797
+ :doc:`transition guide </reference/routines.polynomials>`.
798
+
799
+ Returns the polynomial resulting from the sum of two input polynomials.
800
+ Each input must be either a poly1d object or a 1D sequence of polynomial
801
+ coefficients, from highest to lowest degree.
802
+
803
+ Parameters
804
+ ----------
805
+ a1, a2 : array_like or poly1d object
806
+ Input polynomials.
807
+
808
+ Returns
809
+ -------
810
+ out : ndarray or poly1d object
811
+ The sum of the inputs. If either input is a poly1d object, then the
812
+ output is also a poly1d object. Otherwise, it is a 1D array of
813
+ polynomial coefficients from highest to lowest degree.
814
+
815
+ See Also
816
+ --------
817
+ poly1d : A one-dimensional polynomial class.
818
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
819
+
820
+ Examples
821
+ --------
822
+ >>> np.polyadd([1, 2], [9, 5, 4])
823
+ array([9, 6, 6])
824
+
825
+ Using poly1d objects:
826
+
827
+ >>> p1 = np.poly1d([1, 2])
828
+ >>> p2 = np.poly1d([9, 5, 4])
829
+ >>> print(p1)
830
+ 1 x + 2
831
+ >>> print(p2)
832
+ 2
833
+ 9 x + 5 x + 4
834
+ >>> print(np.polyadd(p1, p2))
835
+ 2
836
+ 9 x + 6 x + 6
837
+
838
+ """
839
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
840
+ a1 = atleast_1d(a1)
841
+ a2 = atleast_1d(a2)
842
+ diff = len(a2) - len(a1)
843
+ if diff == 0:
844
+ val = a1 + a2
845
+ elif diff > 0:
846
+ zr = NX.zeros(diff, a1.dtype)
847
+ val = NX.concatenate((zr, a1)) + a2
848
+ else:
849
+ zr = NX.zeros(abs(diff), a2.dtype)
850
+ val = a1 + NX.concatenate((zr, a2))
851
+ if truepoly:
852
+ val = poly1d(val)
853
+ return val
854
+
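The branches in ``polyadd`` above simply left-pad the shorter coefficient vector with zeros before adding; a sketch on illustrative inputs:

    import numpy as np

    a1 = np.array([1, 2])          # x + 2
    a2 = np.array([9, 5, 4])       # 9*x**2 + 5*x + 4
    padded = np.concatenate((np.zeros(1, a1.dtype), a1))
    assert np.array_equal(padded + a2, np.polyadd(a1, a2))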
855
+
856
+ @array_function_dispatch(_binary_op_dispatcher)
857
+ def polysub(a1, a2):
858
+ """
859
+ Difference (subtraction) of two polynomials.
860
+
861
+ .. note::
862
+ This forms part of the old polynomial API. Since version 1.4, the
863
+ new polynomial API defined in `numpy.polynomial` is preferred.
864
+ A summary of the differences can be found in the
865
+ :doc:`transition guide </reference/routines.polynomials>`.
866
+
867
+ Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
868
+ `a1` and `a2` can be either array_like sequences of the polynomials'
869
+ coefficients (including coefficients equal to zero), or `poly1d` objects.
870
+
871
+ Parameters
872
+ ----------
873
+ a1, a2 : array_like or poly1d
874
+ Minuend and subtrahend polynomials, respectively.
875
+
876
+ Returns
877
+ -------
878
+ out : ndarray or poly1d
879
+ Array or `poly1d` object of the difference polynomial's coefficients.
880
+
881
+ See Also
882
+ --------
883
+ polyval, polydiv, polymul, polyadd
884
+
885
+ Examples
886
+ --------
887
+ .. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
888
+
889
+ >>> np.polysub([2, 10, -2], [3, 10, -4])
890
+ array([-1, 0, 2])
891
+
892
+ """
893
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
894
+ a1 = atleast_1d(a1)
895
+ a2 = atleast_1d(a2)
896
+ diff = len(a2) - len(a1)
897
+ if diff == 0:
898
+ val = a1 - a2
899
+ elif diff > 0:
900
+ zr = NX.zeros(diff, a1.dtype)
901
+ val = NX.concatenate((zr, a1)) - a2
902
+ else:
903
+ zr = NX.zeros(abs(diff), a2.dtype)
904
+ val = a1 - NX.concatenate((zr, a2))
905
+ if truepoly:
906
+ val = poly1d(val)
907
+ return val
908
+
909
+
910
+ @array_function_dispatch(_binary_op_dispatcher)
911
+ def polymul(a1, a2):
912
+ """
913
+ Find the product of two polynomials.
914
+
915
+ .. note::
916
+ This forms part of the old polynomial API. Since version 1.4, the
917
+ new polynomial API defined in `numpy.polynomial` is preferred.
918
+ A summary of the differences can be found in the
919
+ :doc:`transition guide </reference/routines.polynomials>`.
920
+
921
+ Finds the polynomial resulting from the multiplication of the two input
922
+ polynomials. Each input must be either a poly1d object or a 1D sequence
923
+ of polynomial coefficients, from highest to lowest degree.
924
+
925
+ Parameters
926
+ ----------
927
+ a1, a2 : array_like or poly1d object
928
+ Input polynomials.
929
+
930
+ Returns
931
+ -------
932
+ out : ndarray or poly1d object
933
+ The polynomial resulting from the multiplication of the inputs. If
934
+ either input is a poly1d object, then the output is also a poly1d
935
+ object. Otherwise, it is a 1D array of polynomial coefficients from
936
+ highest to lowest degree.
937
+
938
+ See Also
939
+ --------
940
+ poly1d : A one-dimensional polynomial class.
941
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
942
+ convolve : Array convolution. Same output as polymul, but has parameter
943
+ for overlap mode.
944
+
945
+ Examples
946
+ --------
947
+ >>> np.polymul([1, 2, 3], [9, 5, 1])
948
+ array([ 9, 23, 38, 17, 3])
949
+
950
+ Using poly1d objects:
951
+
952
+ >>> p1 = np.poly1d([1, 2, 3])
953
+ >>> p2 = np.poly1d([9, 5, 1])
954
+ >>> print(p1)
955
+ 2
956
+ 1 x + 2 x + 3
957
+ >>> print(p2)
958
+ 2
959
+ 9 x + 5 x + 1
960
+ >>> print(np.polymul(p1, p2))
961
+ 4 3 2
962
+ 9 x + 23 x + 38 x + 17 x + 3
963
+
964
+ """
965
+ truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
966
+ a1, a2 = poly1d(a1), poly1d(a2)
967
+ val = NX.convolve(a1, a2)
968
+ if truepoly:
969
+ val = poly1d(val)
970
+ return val
971
+
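As the implementation shows, ``polymul`` is coefficient convolution; a quick check on illustrative inputs:

    import numpy as np

    assert np.array_equal(np.convolve([1, 2, 3], [9, 5, 1]),
                          np.polymul([1, 2, 3], [9, 5, 1]))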
972
+
973
+ def _polydiv_dispatcher(u, v):
974
+ return (u, v)
975
+
976
+
977
+ @array_function_dispatch(_polydiv_dispatcher)
978
+ def polydiv(u, v):
979
+ """
980
+ Returns the quotient and remainder of polynomial division.
981
+
982
+ .. note::
983
+ This forms part of the old polynomial API. Since version 1.4, the
984
+ new polynomial API defined in `numpy.polynomial` is preferred.
985
+ A summary of the differences can be found in the
986
+ :doc:`transition guide </reference/routines.polynomials>`.
987
+
988
+ The input arrays are the coefficients (including any coefficients
989
+ equal to zero) of the "numerator" (dividend) and "denominator"
990
+ (divisor) polynomials, respectively.
991
+
992
+ Parameters
993
+ ----------
994
+ u : array_like or poly1d
995
+ Dividend polynomial's coefficients.
996
+
997
+ v : array_like or poly1d
998
+ Divisor polynomial's coefficients.
999
+
1000
+ Returns
1001
+ -------
1002
+ q : ndarray
1003
+ Coefficients, including those equal to zero, of the quotient.
1004
+ r : ndarray
1005
+ Coefficients, including those equal to zero, of the remainder.
1006
+
1007
+ See Also
1008
+ --------
1009
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
1010
+ polyval
1011
+
1012
+ Notes
1013
+ -----
1014
+ Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
1015
+ not equal `v.ndim`. In other words, all four possible combinations -
1016
+ ``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
1017
+ ``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
1018
+
1019
+ Examples
1020
+ --------
1021
+ .. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
1022
+
1023
+ >>> x = np.array([3.0, 5.0, 2.0])
1024
+ >>> y = np.array([2.0, 1.0])
1025
+ >>> np.polydiv(x, y)
1026
+ (array([1.5 , 1.75]), array([0.25]))
1027
+
1028
+ """
1029
+ truepoly = (isinstance(u, poly1d) or isinstance(v, poly1d))
1030
+ u = atleast_1d(u) + 0.0
1031
+ v = atleast_1d(v) + 0.0
1032
+ # w has the common type
1033
+ w = u[0] + v[0]
1034
+ m = len(u) - 1
1035
+ n = len(v) - 1
1036
+ scale = 1. / v[0]
1037
+ q = NX.zeros((max(m - n + 1, 1),), w.dtype)
1038
+ r = u.astype(w.dtype)
1039
+ for k in range(0, m-n+1):
1040
+ d = scale * r[k]
1041
+ q[k] = d
1042
+ r[k:k+n+1] -= d*v
1043
+ while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
1044
+ r = r[1:]
1045
+ if truepoly:
1046
+ return poly1d(q), poly1d(r)
1047
+ return q, r
1048
+
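The long-division loop in ``polydiv`` above satisfies the identity ``u = q*v + r``; a sketch verifying it on the docstring's example values:

    import numpy as np

    u = np.array([3.0, 5.0, 2.0])   # 3*x**2 + 5*x + 2
    v = np.array([2.0, 1.0])        # 2*x + 1
    q, r = np.polydiv(u, v)
    assert np.allclose(np.polyadd(np.polymul(q, v), r), u)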
1049
+ _poly_mat = re.compile(r"\*\*([0-9]*)")
1050
+ def _raise_power(astr, wrap=70):
1051
+ n = 0
1052
+ line1 = ''
1053
+ line2 = ''
1054
+ output = ' '
1055
+ while True:
1056
+ mat = _poly_mat.search(astr, n)
1057
+ if mat is None:
1058
+ break
1059
+ span = mat.span()
1060
+ power = mat.groups()[0]
1061
+ partstr = astr[n:span[0]]
1062
+ n = span[1]
1063
+ toadd2 = partstr + ' '*(len(power)-1)
1064
+ toadd1 = ' '*(len(partstr)-1) + power
1065
+ if ((len(line2) + len(toadd2) > wrap) or
1066
+ (len(line1) + len(toadd1) > wrap)):
1067
+ output += line1 + "\n" + line2 + "\n "
1068
+ line1 = toadd1
1069
+ line2 = toadd2
1070
+ else:
1071
+ line2 += partstr + ' '*(len(power)-1)
1072
+ line1 += ' '*(len(partstr)-1) + power
1073
+ output += line1 + "\n" + line2
1074
+ return output + astr[n:]
1075
+
1076
+
1077
+ @set_module('numpy')
1078
+ class poly1d:
1079
+ """
1080
+ A one-dimensional polynomial class.
1081
+
1082
+ .. note::
1083
+ This forms part of the old polynomial API. Since version 1.4, the
1084
+ new polynomial API defined in `numpy.polynomial` is preferred.
1085
+ A summary of the differences can be found in the
1086
+ :doc:`transition guide </reference/routines.polynomials>`.
1087
+
1088
+ A convenience class, used to encapsulate "natural" operations on
1089
+ polynomials so that said operations may take on their customary
1090
+ form in code (see Examples).
1091
+
1092
+ Parameters
1093
+ ----------
1094
+ c_or_r : array_like
1095
+ The polynomial's coefficients, in decreasing powers, or if
1096
+ the value of the second parameter is True, the polynomial's
1097
+ roots (values where the polynomial evaluates to 0). For example,
1098
+ ``poly1d([1, 2, 3])`` returns an object that represents
1099
+ :math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
1100
+ one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
1101
+ r : bool, optional
1102
+ If True, `c_or_r` specifies the polynomial's roots; the default
1103
+ is False.
1104
+ variable : str, optional
1105
+ Changes the variable used when printing `p` from `x` to `variable`
1106
+ (see Examples).
1107
+
1108
+ Examples
1109
+ --------
1110
+ Construct the polynomial :math:`x^2 + 2x + 3`:
1111
+
1112
+ >>> p = np.poly1d([1, 2, 3])
1113
+ >>> print(np.poly1d(p))
1114
+ 2
1115
+ 1 x + 2 x + 3
1116
+
1117
+ Evaluate the polynomial at :math:`x = 0.5`:
1118
+
1119
+ >>> p(0.5)
1120
+ 4.25
1121
+
1122
+ Find the roots:
1123
+
1124
+ >>> p.r
1125
+ array([-1.+1.41421356j, -1.-1.41421356j])
1126
+ >>> p(p.r)
1127
+ array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary
1128
+
1129
+ These numbers in the previous line represent (0, 0) to machine precision
1130
+
1131
+ Show the coefficients:
1132
+
1133
+ >>> p.c
1134
+ array([1, 2, 3])
1135
+
1136
+ Display the order (the leading zero-coefficients are removed):
1137
+
1138
+ >>> p.order
1139
+ 2
1140
+
1141
+ Show the coefficient of the k-th power in the polynomial
1142
+ (which is equivalent to ``p.c[-(i+1)]``):
1143
+
1144
+ >>> p[1]
1145
+ 2
1146
+
1147
+ Polynomials can be added, subtracted, multiplied, and divided
1148
+ (returns quotient and remainder):
1149
+
1150
+ >>> p * p
1151
+ poly1d([ 1, 4, 10, 12, 9])
1152
+
1153
+ >>> (p**3 + 4) / p
1154
+ (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.]))
1155
+
1156
+ ``asarray(p)`` gives the coefficient array, so polynomials can be
1157
+ used in all functions that accept arrays:
1158
+
1159
+ >>> p**2 # square of polynomial
1160
+ poly1d([ 1, 4, 10, 12, 9])
1161
+
1162
+ >>> np.square(p) # square of individual coefficients
1163
+ array([1, 4, 9])
1164
+
1165
+ The variable used in the string representation of `p` can be modified,
1166
+ using the `variable` parameter:
1167
+
1168
+ >>> p = np.poly1d([1,2,3], variable='z')
1169
+ >>> print(p)
1170
+ 2
1171
+ 1 z + 2 z + 3
1172
+
1173
+ Construct a polynomial from its roots:
1174
+
1175
+ >>> np.poly1d([1, 2], True)
1176
+ poly1d([ 1., -3., 2.])
1177
+
1178
+ This is the same polynomial as obtained by:
1179
+
1180
+ >>> np.poly1d([1, -1]) * np.poly1d([1, -2])
1181
+ poly1d([ 1, -3, 2])
1182
+
1183
+ """
1184
+ __hash__ = None
1185
+
1186
+ @property
1187
+ def coeffs(self):
1188
+ """ The polynomial coefficients """
1189
+ return self._coeffs
1190
+
1191
+ @coeffs.setter
1192
+ def coeffs(self, value):
1193
+ # allowing this makes p.coeffs *= 2 legal
1194
+ if value is not self._coeffs:
1195
+ raise AttributeError("Cannot set attribute")
1196
+
1197
+ @property
1198
+ def variable(self):
1199
+ """ The name of the polynomial variable """
1200
+ return self._variable
1201
+
1202
+ # calculated attributes
1203
+ @property
1204
+ def order(self):
1205
+ """ The order or degree of the polynomial """
1206
+ return len(self._coeffs) - 1
1207
+
1208
+ @property
1209
+ def roots(self):
1210
+ """ The roots of the polynomial, where self(x) == 0 """
1211
+ return roots(self._coeffs)
1212
+
1213
+ # our internal _coeffs property need to be backed by __dict__['coeffs'] for
1214
+ # scipy to work correctly.
1215
+ @property
1216
+ def _coeffs(self):
1217
+ return self.__dict__['coeffs']
1218
+ @_coeffs.setter
1219
+ def _coeffs(self, coeffs):
1220
+ self.__dict__['coeffs'] = coeffs
1221
+
1222
+ # alias attributes
1223
+ r = roots
1224
+ c = coef = coefficients = coeffs
1225
+ o = order
1226
+
1227
+ def __init__(self, c_or_r, r=False, variable=None):
1228
+ if isinstance(c_or_r, poly1d):
1229
+ self._variable = c_or_r._variable
1230
+ self._coeffs = c_or_r._coeffs
1231
+
1232
+ if set(c_or_r.__dict__) - set(self.__dict__):
1233
+ msg = ("In the future extra properties will not be copied "
1234
+ "across when constructing one poly1d from another")
1235
+ warnings.warn(msg, FutureWarning, stacklevel=2)
1236
+ self.__dict__.update(c_or_r.__dict__)
1237
+
1238
+ if variable is not None:
1239
+ self._variable = variable
1240
+ return
1241
+ if r:
1242
+ c_or_r = poly(c_or_r)
1243
+ c_or_r = atleast_1d(c_or_r)
1244
+ if c_or_r.ndim > 1:
1245
+ raise ValueError("Polynomial must be 1d only.")
1246
+ c_or_r = trim_zeros(c_or_r, trim='f')
1247
+ if len(c_or_r) == 0:
1248
+ c_or_r = NX.array([0], dtype=c_or_r.dtype)
1249
+ self._coeffs = c_or_r
1250
+ if variable is None:
1251
+ variable = 'x'
1252
+ self._variable = variable
1253
+
1254
+ def __array__(self, t=None):
1255
+ if t:
1256
+ return NX.asarray(self.coeffs, t)
1257
+ else:
1258
+ return NX.asarray(self.coeffs)
1259
+
1260
+ def __repr__(self):
1261
+ vals = repr(self.coeffs)
1262
+ vals = vals[6:-1]
1263
+ return "poly1d(%s)" % vals
1264
+
1265
+ def __len__(self):
1266
+ return self.order
1267
+
1268
+ def __str__(self):
1269
+ thestr = "0"
1270
+ var = self.variable
1271
+
1272
+ # Remove leading zeros
1273
+ coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
1274
+ N = len(coeffs)-1
1275
+
1276
+ def fmt_float(q):
1277
+ s = '%.4g' % q
1278
+ if s.endswith('.0000'):
1279
+ s = s[:-5]
1280
+ return s
1281
+
1282
+ for k, coeff in enumerate(coeffs):
1283
+ if not iscomplex(coeff):
1284
+ coefstr = fmt_float(real(coeff))
1285
+ elif real(coeff) == 0:
1286
+ coefstr = '%sj' % fmt_float(imag(coeff))
1287
+ else:
1288
+ coefstr = '(%s + %sj)' % (fmt_float(real(coeff)),
1289
+ fmt_float(imag(coeff)))
1290
+
1291
+ power = (N-k)
1292
+ if power == 0:
1293
+ if coefstr != '0':
1294
+ newstr = '%s' % (coefstr,)
1295
+ else:
1296
+ if k == 0:
1297
+ newstr = '0'
1298
+ else:
1299
+ newstr = ''
1300
+ elif power == 1:
1301
+ if coefstr == '0':
1302
+ newstr = ''
1303
+ elif coefstr == 'b':
1304
+ newstr = var
1305
+ else:
1306
+ newstr = '%s %s' % (coefstr, var)
1307
+ else:
1308
+ if coefstr == '0':
1309
+ newstr = ''
1310
+ elif coefstr == 'b':
1311
+ newstr = '%s**%d' % (var, power,)
1312
+ else:
1313
+ newstr = '%s %s**%d' % (coefstr, var, power)
1314
+
1315
+ if k > 0:
1316
+ if newstr != '':
1317
+ if newstr.startswith('-'):
1318
+ thestr = "%s - %s" % (thestr, newstr[1:])
1319
+ else:
1320
+ thestr = "%s + %s" % (thestr, newstr)
1321
+ else:
1322
+ thestr = newstr
1323
+ return _raise_power(thestr)
1324
+
1325
+ def __call__(self, val):
1326
+ return polyval(self.coeffs, val)
1327
+
1328
+ def __neg__(self):
1329
+ return poly1d(-self.coeffs)
1330
+
1331
+ def __pos__(self):
1332
+ return self
1333
+
1334
+ def __mul__(self, other):
1335
+ if isscalar(other):
1336
+ return poly1d(self.coeffs * other)
1337
+ else:
1338
+ other = poly1d(other)
1339
+ return poly1d(polymul(self.coeffs, other.coeffs))
1340
+
1341
+ def __rmul__(self, other):
1342
+ if isscalar(other):
1343
+ return poly1d(other * self.coeffs)
1344
+ else:
1345
+ other = poly1d(other)
1346
+ return poly1d(polymul(self.coeffs, other.coeffs))
1347
+
1348
+ def __add__(self, other):
1349
+ other = poly1d(other)
1350
+ return poly1d(polyadd(self.coeffs, other.coeffs))
1351
+
1352
+ def __radd__(self, other):
1353
+ other = poly1d(other)
1354
+ return poly1d(polyadd(self.coeffs, other.coeffs))
1355
+
1356
+ def __pow__(self, val):
1357
+ if not isscalar(val) or int(val) != val or val < 0:
1358
+ raise ValueError("Power to non-negative integers only.")
1359
+ res = [1]
1360
+ for _ in range(val):
1361
+ res = polymul(self.coeffs, res)
1362
+ return poly1d(res)
1363
+
1364
+ def __sub__(self, other):
1365
+ other = poly1d(other)
1366
+ return poly1d(polysub(self.coeffs, other.coeffs))
1367
+
1368
+ def __rsub__(self, other):
1369
+ other = poly1d(other)
1370
+ return poly1d(polysub(other.coeffs, self.coeffs))
1371
+
1372
+ def __div__(self, other):
1373
+ if isscalar(other):
1374
+ return poly1d(self.coeffs/other)
1375
+ else:
1376
+ other = poly1d(other)
1377
+ return polydiv(self, other)
1378
+
1379
+ __truediv__ = __div__
1380
+
1381
+ def __rdiv__(self, other):
1382
+ if isscalar(other):
1383
+ return poly1d(other/self.coeffs)
1384
+ else:
1385
+ other = poly1d(other)
1386
+ return polydiv(other, self)
1387
+
1388
+ __rtruediv__ = __rdiv__
1389
+
1390
+ def __eq__(self, other):
1391
+ if not isinstance(other, poly1d):
1392
+ return NotImplemented
1393
+ if self.coeffs.shape != other.coeffs.shape:
1394
+ return False
1395
+ return (self.coeffs == other.coeffs).all()
1396
+
1397
+ def __ne__(self, other):
1398
+ if not isinstance(other, poly1d):
1399
+ return NotImplemented
1400
+ return not self.__eq__(other)
1401
+
1402
+
1403
+ def __getitem__(self, val):
1404
+ ind = self.order - val
1405
+ if val > self.order:
1406
+ return self.coeffs.dtype.type(0)
1407
+ if val < 0:
1408
+ return self.coeffs.dtype.type(0)
1409
+ return self.coeffs[ind]
1410
+
1411
+ def __setitem__(self, key, val):
1412
+ ind = self.order - key
1413
+ if key < 0:
1414
+ raise ValueError("Does not support negative powers.")
1415
+ if key > self.order:
1416
+ zr = NX.zeros(key-self.order, self.coeffs.dtype)
1417
+ self._coeffs = NX.concatenate((zr, self.coeffs))
1418
+ ind = 0
1419
+ self._coeffs[ind] = val
1420
+ return
1421
+
1422
+ def __iter__(self):
1423
+ return iter(self.coeffs)
1424
+
1425
+ def integ(self, m=1, k=0):
1426
+ """
1427
+ Return an antiderivative (indefinite integral) of this polynomial.
1428
+
1429
+ Refer to `polyint` for full documentation.
1430
+
1431
+ See Also
1432
+ --------
1433
+ polyint : equivalent function
1434
+
1435
+ """
1436
+ return poly1d(polyint(self.coeffs, m=m, k=k))
1437
+
1438
+ def deriv(self, m=1):
1439
+ """
1440
+ Return a derivative of this polynomial.
1441
+
1442
+ Refer to `polyder` for full documentation.
1443
+
1444
+ See Also
1445
+ --------
1446
+ polyder : equivalent function
1447
+
1448
+ """
1449
+ return poly1d(polyder(self.coeffs, m=m))
1450
+
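A small sketch of the two convenience methods above, with illustrative values (``integ`` uses its default constant ``k=0``):

    import numpy as np

    p = np.poly1d([1, 2, 3])            # x**2 + 2*x + 3
    assert p.deriv() == np.poly1d([2, 2])
    assert p.integ().deriv() == p       # antiderivative round-trips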
1451
+ # Stuff to do on module import
1452
+
1453
+ warnings.simplefilter('always', RankWarning)
venv/lib/python3.10/site-packages/numpy/lib/polynomial.pyi ADDED
@@ -0,0 +1,303 @@
1
+ from typing import (
2
+ Literal as L,
3
+ overload,
4
+ Any,
5
+ SupportsInt,
6
+ SupportsIndex,
7
+ TypeVar,
8
+ NoReturn,
9
+ )
10
+
11
+ from numpy import (
12
+ RankWarning as RankWarning,
13
+ poly1d as poly1d,
14
+ unsignedinteger,
15
+ signedinteger,
16
+ floating,
17
+ complexfloating,
18
+ bool_,
19
+ int32,
20
+ int64,
21
+ float64,
22
+ complex128,
23
+ object_,
24
+ )
25
+
26
+ from numpy._typing import (
27
+ NDArray,
28
+ ArrayLike,
29
+ _ArrayLikeBool_co,
30
+ _ArrayLikeUInt_co,
31
+ _ArrayLikeInt_co,
32
+ _ArrayLikeFloat_co,
33
+ _ArrayLikeComplex_co,
34
+ _ArrayLikeObject_co,
35
+ )
36
+
37
+ _T = TypeVar("_T")
38
+
39
+ _2Tup = tuple[_T, _T]
40
+ _5Tup = tuple[
41
+ _T,
42
+ NDArray[float64],
43
+ NDArray[int32],
44
+ NDArray[float64],
45
+ NDArray[float64],
46
+ ]
47
+
48
+ __all__: list[str]
49
+
50
+ def poly(seq_of_zeros: ArrayLike) -> NDArray[floating[Any]]: ...
51
+
52
+ # Returns either a float or complex array depending on the input values.
53
+ # See `np.linalg.eigvals`.
54
+ def roots(p: ArrayLike) -> NDArray[complexfloating[Any, Any]] | NDArray[floating[Any]]: ...
55
+
56
+ @overload
57
+ def polyint(
58
+ p: poly1d,
59
+ m: SupportsInt | SupportsIndex = ...,
60
+ k: None | _ArrayLikeComplex_co | _ArrayLikeObject_co = ...,
61
+ ) -> poly1d: ...
62
+ @overload
63
+ def polyint(
64
+ p: _ArrayLikeFloat_co,
65
+ m: SupportsInt | SupportsIndex = ...,
66
+ k: None | _ArrayLikeFloat_co = ...,
67
+ ) -> NDArray[floating[Any]]: ...
68
+ @overload
69
+ def polyint(
70
+ p: _ArrayLikeComplex_co,
71
+ m: SupportsInt | SupportsIndex = ...,
72
+ k: None | _ArrayLikeComplex_co = ...,
73
+ ) -> NDArray[complexfloating[Any, Any]]: ...
74
+ @overload
75
+ def polyint(
76
+ p: _ArrayLikeObject_co,
77
+ m: SupportsInt | SupportsIndex = ...,
78
+ k: None | _ArrayLikeObject_co = ...,
79
+ ) -> NDArray[object_]: ...
80
+
81
+ @overload
82
+ def polyder(
83
+ p: poly1d,
84
+ m: SupportsInt | SupportsIndex = ...,
85
+ ) -> poly1d: ...
86
+ @overload
87
+ def polyder(
88
+ p: _ArrayLikeFloat_co,
89
+ m: SupportsInt | SupportsIndex = ...,
90
+ ) -> NDArray[floating[Any]]: ...
91
+ @overload
92
+ def polyder(
93
+ p: _ArrayLikeComplex_co,
94
+ m: SupportsInt | SupportsIndex = ...,
95
+ ) -> NDArray[complexfloating[Any, Any]]: ...
96
+ @overload
97
+ def polyder(
98
+ p: _ArrayLikeObject_co,
99
+ m: SupportsInt | SupportsIndex = ...,
100
+ ) -> NDArray[object_]: ...
101
+
102
+ @overload
103
+ def polyfit(
104
+ x: _ArrayLikeFloat_co,
105
+ y: _ArrayLikeFloat_co,
106
+ deg: SupportsIndex | SupportsInt,
107
+ rcond: None | float = ...,
108
+ full: L[False] = ...,
109
+ w: None | _ArrayLikeFloat_co = ...,
110
+ cov: L[False] = ...,
111
+ ) -> NDArray[float64]: ...
112
+ @overload
113
+ def polyfit(
114
+ x: _ArrayLikeComplex_co,
115
+ y: _ArrayLikeComplex_co,
116
+ deg: SupportsIndex | SupportsInt,
117
+ rcond: None | float = ...,
118
+ full: L[False] = ...,
119
+ w: None | _ArrayLikeFloat_co = ...,
120
+ cov: L[False] = ...,
121
+ ) -> NDArray[complex128]: ...
122
+ @overload
123
+ def polyfit(
124
+ x: _ArrayLikeFloat_co,
125
+ y: _ArrayLikeFloat_co,
126
+ deg: SupportsIndex | SupportsInt,
127
+ rcond: None | float = ...,
128
+ full: L[False] = ...,
129
+ w: None | _ArrayLikeFloat_co = ...,
130
+ cov: L[True, "unscaled"] = ...,
131
+ ) -> _2Tup[NDArray[float64]]: ...
132
+ @overload
133
+ def polyfit(
134
+ x: _ArrayLikeComplex_co,
135
+ y: _ArrayLikeComplex_co,
136
+ deg: SupportsIndex | SupportsInt,
137
+ rcond: None | float = ...,
138
+ full: L[False] = ...,
139
+ w: None | _ArrayLikeFloat_co = ...,
140
+ cov: L[True, "unscaled"] = ...,
141
+ ) -> _2Tup[NDArray[complex128]]: ...
142
+ @overload
143
+ def polyfit(
144
+ x: _ArrayLikeFloat_co,
145
+ y: _ArrayLikeFloat_co,
146
+ deg: SupportsIndex | SupportsInt,
147
+ rcond: None | float = ...,
148
+ full: L[True] = ...,
149
+ w: None | _ArrayLikeFloat_co = ...,
150
+ cov: bool | L["unscaled"] = ...,
151
+ ) -> _5Tup[NDArray[float64]]: ...
152
+ @overload
153
+ def polyfit(
154
+ x: _ArrayLikeComplex_co,
155
+ y: _ArrayLikeComplex_co,
156
+ deg: SupportsIndex | SupportsInt,
157
+ rcond: None | float = ...,
158
+ full: L[True] = ...,
159
+ w: None | _ArrayLikeFloat_co = ...,
160
+ cov: bool | L["unscaled"] = ...,
161
+ ) -> _5Tup[NDArray[complex128]]: ...
162
+
163
+ @overload
164
+ def polyval(
165
+ p: _ArrayLikeBool_co,
166
+ x: _ArrayLikeBool_co,
167
+ ) -> NDArray[int64]: ...
168
+ @overload
169
+ def polyval(
170
+ p: _ArrayLikeUInt_co,
171
+ x: _ArrayLikeUInt_co,
172
+ ) -> NDArray[unsignedinteger[Any]]: ...
173
+ @overload
174
+ def polyval(
175
+ p: _ArrayLikeInt_co,
176
+ x: _ArrayLikeInt_co,
177
+ ) -> NDArray[signedinteger[Any]]: ...
178
+ @overload
179
+ def polyval(
180
+ p: _ArrayLikeFloat_co,
181
+ x: _ArrayLikeFloat_co,
182
+ ) -> NDArray[floating[Any]]: ...
183
+ @overload
184
+ def polyval(
185
+ p: _ArrayLikeComplex_co,
186
+ x: _ArrayLikeComplex_co,
187
+ ) -> NDArray[complexfloating[Any, Any]]: ...
188
+ @overload
189
+ def polyval(
190
+ p: _ArrayLikeObject_co,
191
+ x: _ArrayLikeObject_co,
192
+ ) -> NDArray[object_]: ...
193
+
194
+ @overload
195
+ def polyadd(
196
+ a1: poly1d,
197
+ a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
198
+ ) -> poly1d: ...
199
+ @overload
200
+ def polyadd(
201
+ a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
202
+ a2: poly1d,
203
+ ) -> poly1d: ...
204
+ @overload
205
+ def polyadd(
206
+ a1: _ArrayLikeBool_co,
207
+ a2: _ArrayLikeBool_co,
208
+ ) -> NDArray[bool_]: ...
209
+ @overload
210
+ def polyadd(
211
+ a1: _ArrayLikeUInt_co,
212
+ a2: _ArrayLikeUInt_co,
213
+ ) -> NDArray[unsignedinteger[Any]]: ...
214
+ @overload
215
+ def polyadd(
216
+ a1: _ArrayLikeInt_co,
217
+ a2: _ArrayLikeInt_co,
218
+ ) -> NDArray[signedinteger[Any]]: ...
219
+ @overload
220
+ def polyadd(
221
+ a1: _ArrayLikeFloat_co,
222
+ a2: _ArrayLikeFloat_co,
223
+ ) -> NDArray[floating[Any]]: ...
224
+ @overload
225
+ def polyadd(
226
+ a1: _ArrayLikeComplex_co,
227
+ a2: _ArrayLikeComplex_co,
228
+ ) -> NDArray[complexfloating[Any, Any]]: ...
229
+ @overload
230
+ def polyadd(
231
+ a1: _ArrayLikeObject_co,
232
+ a2: _ArrayLikeObject_co,
233
+ ) -> NDArray[object_]: ...
234
+
235
+ @overload
236
+ def polysub(
237
+ a1: poly1d,
238
+ a2: _ArrayLikeComplex_co | _ArrayLikeObject_co,
239
+ ) -> poly1d: ...
240
+ @overload
241
+ def polysub(
242
+ a1: _ArrayLikeComplex_co | _ArrayLikeObject_co,
243
+ a2: poly1d,
244
+ ) -> poly1d: ...
245
+ @overload
246
+ def polysub(
247
+ a1: _ArrayLikeBool_co,
248
+ a2: _ArrayLikeBool_co,
249
+ ) -> NoReturn: ...
250
+ @overload
251
+ def polysub(
252
+ a1: _ArrayLikeUInt_co,
253
+ a2: _ArrayLikeUInt_co,
254
+ ) -> NDArray[unsignedinteger[Any]]: ...
255
+ @overload
256
+ def polysub(
257
+ a1: _ArrayLikeInt_co,
258
+ a2: _ArrayLikeInt_co,
259
+ ) -> NDArray[signedinteger[Any]]: ...
260
+ @overload
261
+ def polysub(
262
+ a1: _ArrayLikeFloat_co,
263
+ a2: _ArrayLikeFloat_co,
264
+ ) -> NDArray[floating[Any]]: ...
265
+ @overload
266
+ def polysub(
267
+ a1: _ArrayLikeComplex_co,
268
+ a2: _ArrayLikeComplex_co,
269
+ ) -> NDArray[complexfloating[Any, Any]]: ...
270
+ @overload
271
+ def polysub(
272
+ a1: _ArrayLikeObject_co,
273
+ a2: _ArrayLikeObject_co,
274
+ ) -> NDArray[object_]: ...
275
+
276
+ # NOTE: Not an alias, but they do have the same signature (that we can reuse)
277
+ polymul = polyadd
278
+
279
+ @overload
280
+ def polydiv(
281
+ u: poly1d,
282
+ v: _ArrayLikeComplex_co | _ArrayLikeObject_co,
283
+ ) -> _2Tup[poly1d]: ...
284
+ @overload
285
+ def polydiv(
286
+ u: _ArrayLikeComplex_co | _ArrayLikeObject_co,
287
+ v: poly1d,
288
+ ) -> _2Tup[poly1d]: ...
289
+ @overload
290
+ def polydiv(
291
+ u: _ArrayLikeFloat_co,
292
+ v: _ArrayLikeFloat_co,
293
+ ) -> _2Tup[NDArray[floating[Any]]]: ...
294
+ @overload
295
+ def polydiv(
296
+ u: _ArrayLikeComplex_co,
297
+ v: _ArrayLikeComplex_co,
298
+ ) -> _2Tup[NDArray[complexfloating[Any, Any]]]: ...
299
+ @overload
300
+ def polydiv(
301
+ u: _ArrayLikeObject_co,
302
+ v: _ArrayLikeObject_co,
303
+ ) -> _2Tup[NDArray[Any]]: ...
venv/lib/python3.10/site-packages/numpy/lib/scimath.py ADDED
@@ -0,0 +1,625 @@
1
+ """
2
+ Wrapper functions for more user-friendly calling of certain math functions
3
+ whose output data-type differs from the input data-type in certain
4
+ domains of the input.
5
+
6
+ For example, for functions like `log` with branch cuts, the versions in this
7
+ module provide the mathematically valid answers in the complex plane::
8
+
9
+ >>> import math
10
+ >>> np.emath.log(-math.exp(1)) == (1+1j*math.pi)
11
+ True
12
+
13
+ Similarly, `sqrt`, other base logarithms, `power` and trig functions are
14
+ correctly handled. See their respective docstrings for specific examples.
15
+
16
+ Functions
17
+ ---------
18
+
19
+ .. autosummary::
20
+ :toctree: generated/
21
+
22
+ sqrt
23
+ log
24
+ log2
25
+ logn
26
+ log10
27
+ power
28
+ arccos
29
+ arcsin
30
+ arctanh
31
+
32
+ """
33
+ import numpy.core.numeric as nx
34
+ import numpy.core.numerictypes as nt
35
+ from numpy.core.numeric import asarray, any
36
+ from numpy.core.overrides import array_function_dispatch
37
+ from numpy.lib.type_check import isreal
38
+
39
+
40
+ __all__ = [
41
+ 'sqrt', 'log', 'log2', 'logn', 'log10', 'power', 'arccos', 'arcsin',
42
+ 'arctanh'
43
+ ]
44
+
45
+
46
+ _ln2 = nx.log(2.0)
47
+
48
+
49
+ def _tocomplex(arr):
50
+ """Convert its input `arr` to a complex array.
51
+
52
+ The input is returned as a complex array of the smallest type that will fit
53
+ the original data: types like single, byte, short, etc. become csingle,
54
+ while others become cdouble.
55
+
56
+ A copy of the input is always made.
57
+
58
+ Parameters
59
+ ----------
60
+ arr : array
61
+
62
+ Returns
63
+ -------
64
+ array
65
+ An array with the same input data as the input but in complex form.
66
+
67
+ Examples
68
+ --------
69
+
70
+ First, consider an input of type short:
71
+
72
+ >>> a = np.array([1,2,3],np.short)
73
+
74
+ >>> ac = np.lib.scimath._tocomplex(a); ac
75
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
76
+
77
+ >>> ac.dtype
78
+ dtype('complex64')
79
+
80
+ If the input is of type double, the output is correspondingly of the
81
+ complex double type as well:
82
+
83
+ >>> b = np.array([1,2,3],np.double)
84
+
85
+ >>> bc = np.lib.scimath._tocomplex(b); bc
86
+ array([1.+0.j, 2.+0.j, 3.+0.j])
87
+
88
+ >>> bc.dtype
89
+ dtype('complex128')
90
+
91
+ Note that even if the input was complex to begin with, a copy is still
92
+ made, since the astype() method always copies:
93
+
94
+ >>> c = np.array([1,2,3],np.csingle)
95
+
96
+ >>> cc = np.lib.scimath._tocomplex(c); cc
97
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
98
+
99
+ >>> c *= 2; c
100
+ array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)
101
+
102
+ >>> cc
103
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
104
+ """
105
+ if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,
106
+ nt.ushort, nt.csingle)):
107
+ return arr.astype(nt.csingle)
108
+ else:
109
+ return arr.astype(nt.cdouble)
110
+
111
+
112
+ def _fix_real_lt_zero(x):
113
+ """Convert `x` to complex if it has real, negative components.
114
+
115
+ Otherwise, output is just the array version of the input (via asarray).
116
+
117
+ Parameters
118
+ ----------
119
+ x : array_like
120
+
121
+ Returns
122
+ -------
123
+ array
124
+
125
+ Examples
126
+ --------
127
+ >>> np.lib.scimath._fix_real_lt_zero([1,2])
128
+ array([1, 2])
129
+
130
+ >>> np.lib.scimath._fix_real_lt_zero([-1,2])
131
+ array([-1.+0.j, 2.+0.j])
132
+
133
+ """
134
+ x = asarray(x)
135
+ if any(isreal(x) & (x < 0)):
136
+ x = _tocomplex(x)
137
+ return x
138
+
139
+
140
+ def _fix_int_lt_zero(x):
141
+ """Convert `x` to double if it has real, negative components.
142
+
143
+ Otherwise, output is just the array version of the input (via asarray).
144
+
145
+ Parameters
146
+ ----------
147
+ x : array_like
148
+
149
+ Returns
150
+ -------
151
+ array
152
+
153
+ Examples
154
+ --------
155
+ >>> np.lib.scimath._fix_int_lt_zero([1,2])
156
+ array([1, 2])
157
+
158
+ >>> np.lib.scimath._fix_int_lt_zero([-1,2])
159
+ array([-1., 2.])
160
+ """
161
+ x = asarray(x)
162
+ if any(isreal(x) & (x < 0)):
163
+ x = x * 1.0
164
+ return x
165
+
166
+
167
+ def _fix_real_abs_gt_1(x):
168
+ """Convert `x` to complex if it has real components x_i with abs(x_i)>1.
169
+
170
+ Otherwise, output is just the array version of the input (via asarray).
171
+
172
+ Parameters
173
+ ----------
174
+ x : array_like
175
+
176
+ Returns
177
+ -------
178
+ array
179
+
180
+ Examples
181
+ --------
182
+ >>> np.lib.scimath._fix_real_abs_gt_1([0,1])
183
+ array([0, 1])
184
+
185
+ >>> np.lib.scimath._fix_real_abs_gt_1([0,2])
186
+ array([0.+0.j, 2.+0.j])
187
+ """
188
+ x = asarray(x)
189
+ if any(isreal(x) & (abs(x) > 1)):
190
+ x = _tocomplex(x)
191
+ return x
192
+
193
+
194
+ def _unary_dispatcher(x):
195
+ return (x,)
196
+
197
+
198
+ @array_function_dispatch(_unary_dispatcher)
199
+ def sqrt(x):
200
+ """
201
+ Compute the square root of x.
202
+
203
+ For negative input elements, a complex value is returned
204
+ (unlike `numpy.sqrt` which returns NaN).
205
+
206
+ Parameters
207
+ ----------
208
+ x : array_like
209
+ The input value(s).
210
+
211
+ Returns
212
+ -------
213
+ out : ndarray or scalar
214
+ The square root of `x`. If `x` was a scalar, so is `out`,
215
+ otherwise an array is returned.
216
+
217
+ See Also
218
+ --------
219
+ numpy.sqrt
220
+
221
+ Examples
222
+ --------
223
+ For real, non-negative inputs this works just like `numpy.sqrt`:
224
+
225
+ >>> np.emath.sqrt(1)
226
+ 1.0
227
+ >>> np.emath.sqrt([1, 4])
228
+ array([1., 2.])
229
+
230
+ But it automatically handles negative inputs:
231
+
232
+ >>> np.emath.sqrt(-1)
233
+ 1j
234
+ >>> np.emath.sqrt([-1,4])
235
+ array([0.+1.j, 2.+0.j])
236
+
237
+ Different results are expected because floating point
238
+ 0.0 and -0.0 are distinct.
239
+
240
+ For more control, explicitly use complex() as follows:
241
+
242
+ >>> np.emath.sqrt(complex(-4.0, 0.0))
243
+ 2j
244
+ >>> np.emath.sqrt(complex(-4.0, -0.0))
245
+ -2j
246
+ """
247
+ x = _fix_real_lt_zero(x)
248
+ return nx.sqrt(x)
249
+
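A sketch contrasting ``np.sqrt`` and ``np.emath.sqrt`` on a negative input (illustrative value):

    import numpy as np

    with np.errstate(invalid='ignore'):
        assert np.isnan(np.sqrt(-1.0))   # plain sqrt: nan for negatives
    assert np.emath.sqrt(-1.0) == 1j     # emath: complex result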
250
+
251
+ @array_function_dispatch(_unary_dispatcher)
252
+ def log(x):
253
+ """
254
+ Compute the natural logarithm of `x`.
255
+
256
+ Return the "principal value" (for a description of this, see `numpy.log`)
257
+ of :math:`log_e(x)`. For real `x > 0`, this is a real number (``log(0)``
258
+ returns ``-inf`` and ``log(np.inf)`` returns ``inf``). Otherwise, the
259
+ complex principal value is returned.
260
+
261
+ Parameters
262
+ ----------
263
+ x : array_like
264
+ The value(s) whose log is (are) required.
265
+
266
+ Returns
267
+ -------
268
+ out : ndarray or scalar
269
+ The log of the `x` value(s). If `x` was a scalar, so is `out`,
270
+ otherwise an array is returned.
271
+
272
+ See Also
273
+ --------
274
+ numpy.log
275
+
276
+ Notes
277
+ -----
278
+ For a log() that returns ``NAN`` when real `x < 0`, use `numpy.log`
279
+ (note, however, that otherwise `numpy.log` and this `log` are identical,
280
+ i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`, and,
281
+ notably, the complex principal value if ``x.imag != 0``).
282
+
283
+ Examples
284
+ --------
285
+ >>> np.emath.log(np.exp(1))
286
+ 1.0
287
+
288
+ Negative arguments are handled "correctly" (recall that
289
+ ``exp(log(x)) == x`` does *not* hold for real ``x < 0``):
290
+
291
+ >>> np.emath.log(-np.exp(1)) == (1 + np.pi * 1j)
292
+ True
293
+
294
+ """
295
+ x = _fix_real_lt_zero(x)
296
+ return nx.log(x)
297
+
298
+
299
+ @array_function_dispatch(_unary_dispatcher)
300
+ def log10(x):
301
+ """
302
+ Compute the logarithm base 10 of `x`.
303
+
304
+ Return the "principal value" (for a description of this, see
305
+ `numpy.log10`) of :math:`log_{10}(x)`. For real `x > 0`, this
306
+ is a real number (``log10(0)`` returns ``-inf`` and ``log10(np.inf)``
307
+ returns ``inf``). Otherwise, the complex principal value is returned.
308
+
309
+ Parameters
310
+ ----------
311
+ x : array_like or scalar
312
+ The value(s) whose log base 10 is (are) required.
313
+
314
+ Returns
315
+ -------
316
+ out : ndarray or scalar
317
+ The log base 10 of the `x` value(s). If `x` was a scalar, so is `out`,
318
+ otherwise an array object is returned.
319
+
320
+ See Also
321
+ --------
322
+ numpy.log10
323
+
324
+ Notes
325
+ -----
326
+ For a log10() that returns ``NAN`` when real `x < 0`, use `numpy.log10`
327
+ (note, however, that otherwise `numpy.log10` and this `log10` are
328
+ identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
329
+ and, notably, the complex principal value if ``x.imag != 0``).
330
+
331
+ Examples
332
+ --------
333
+
334
+ (We set the printing precision so the example can be auto-tested)
335
+
336
+ >>> np.set_printoptions(precision=4)
337
+
338
+ >>> np.emath.log10(10**1)
339
+ 1.0
340
+
341
+ >>> np.emath.log10([-10**1, -10**2, 10**2])
342
+ array([1.+1.3644j, 2.+1.3644j, 2.+0.j ])
343
+
344
+ """
345
+ x = _fix_real_lt_zero(x)
346
+ return nx.log10(x)
347
+
348
+
349
+ def _logn_dispatcher(n, x):
350
+ return (n, x,)
351
+
352
+
353
+ @array_function_dispatch(_logn_dispatcher)
354
+ def logn(n, x):
355
+ """
356
+ Take log base n of x.
357
+
358
+ If `x` contains negative inputs, the answer is computed and returned in the
359
+ complex domain.
360
+
361
+ Parameters
362
+ ----------
363
+ n : array_like
364
+ The integer base(s) in which the log is taken.
365
+ x : array_like
366
+ The value(s) whose log base `n` is (are) required.
367
+
368
+ Returns
369
+ -------
370
+ out : ndarray or scalar
371
+ The log base `n` of the `x` value(s). If `x` was a scalar, so is
372
+ `out`, otherwise an array is returned.
373
+
374
+ Examples
375
+ --------
376
+ >>> np.set_printoptions(precision=4)
377
+
378
+ >>> np.emath.logn(2, [4, 8])
379
+ array([2., 3.])
380
+ >>> np.emath.logn(2, [-4, -8, 8])
381
+ array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
382
+
383
+ """
384
+ x = _fix_real_lt_zero(x)
385
+ n = _fix_real_lt_zero(n)
386
+ return nx.log(x)/nx.log(n)
387
+
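``logn`` is just the change-of-base formula in the return statement above; an illustrative check:

    import numpy as np

    assert np.isclose(np.emath.logn(3, 27), 3.0)   # log base 3 of 27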
388
+
389
+ @array_function_dispatch(_unary_dispatcher)
390
+ def log2(x):
391
+ """
392
+ Compute the logarithm base 2 of `x`.
393
+
394
+ Return the "principal value" (for a description of this, see
395
+ `numpy.log2`) of :math:`log_2(x)`. For real `x > 0`, this is
396
+ a real number (``log2(0)`` returns ``-inf`` and ``log2(np.inf)`` returns
397
+ ``inf``). Otherwise, the complex principal value is returned.
398
+
399
+ Parameters
400
+ ----------
401
+ x : array_like
402
+ The value(s) whose log base 2 is (are) required.
403
+
404
+ Returns
405
+ -------
406
+ out : ndarray or scalar
407
+ The log base 2 of the `x` value(s). If `x` was a scalar, so is `out`,
408
+ otherwise an array is returned.
409
+
410
+ See Also
411
+ --------
412
+ numpy.log2
413
+
414
+ Notes
415
+ -----
416
+ For a log2() that returns ``NAN`` when real `x < 0`, use `numpy.log2`
417
+ (note, however, that otherwise `numpy.log2` and this `log2` are
418
+ identical, i.e., both return ``-inf`` for `x = 0`, ``inf`` for `x = inf`,
419
+ and, notably, the complex principal value if ``x.imag != 0``).
420
+
421
+ Examples
422
+ --------
423
+ We set the printing precision so the example can be auto-tested:
424
+
425
+ >>> np.set_printoptions(precision=4)
426
+
427
+ >>> np.emath.log2(8)
428
+ 3.0
429
+ >>> np.emath.log2([-4, -8, 8])
430
+ array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
431
+
432
+ """
433
+ x = _fix_real_lt_zero(x)
434
+ return nx.log2(x)
435
+
436
+
437
+ def _power_dispatcher(x, p):
438
+ return (x, p)
439
+
440
+
441
+ @array_function_dispatch(_power_dispatcher)
442
+ def power(x, p):
443
+ """
444
+ Return x to the power p, (x**p).
445
+
446
+ If `x` contains negative values, the output is converted to the
447
+ complex domain.
448
+
449
+ Parameters
450
+ ----------
451
+ x : array_like
452
+ The input value(s).
453
+ p : array_like of ints
454
+ The power(s) to which `x` is raised. If `x` contains multiple values,
455
+ `p` has to either be a scalar, or contain the same number of values
456
+ as `x`. In the latter case, the result is
457
+ ``x[0]**p[0], x[1]**p[1], ...``.
458
+
459
+ Returns
460
+ -------
461
+ out : ndarray or scalar
462
+ The result of ``x**p``. If `x` and `p` are scalars, so is `out`,
463
+ otherwise an array is returned.
464
+
465
+ See Also
466
+ --------
467
+ numpy.power
468
+
469
+ Examples
470
+ --------
471
+ >>> np.set_printoptions(precision=4)
472
+
473
+ >>> np.emath.power([2, 4], 2)
474
+ array([ 4, 16])
475
+ >>> np.emath.power([2, 4], -2)
476
+ array([0.25 , 0.0625])
477
+ >>> np.emath.power([-2, 4], 2)
478
+ array([ 4.-0.j, 16.+0.j])
479
+
480
+ """
481
+ x = _fix_real_lt_zero(x)
482
+ p = _fix_int_lt_zero(p)
483
+ return nx.power(x, p)
484
+
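A sketch of the negative-base behaviour described above (illustrative values): the result moves to the complex plane rather than becoming ``nan``:

    import numpy as np

    res = np.emath.power(-2.0, 0.5)
    assert np.iscomplexobj(res)
    assert np.isclose(res, np.sqrt(2.0) * 1j)   # principal branch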
485
+
486
+ @array_function_dispatch(_unary_dispatcher)
487
+ def arccos(x):
488
+ """
489
+ Compute the inverse cosine of x.
490
+
491
+ Return the "principal value" (for a description of this, see
492
+ `numpy.arccos`) of the inverse cosine of `x`. For real `x` such that
493
+ `abs(x) <= 1`, this is a real number in the closed interval
494
+ :math:`[0, \\pi]`. Otherwise, the complex principal value is returned.
495
+
496
+ Parameters
497
+ ----------
498
+ x : array_like or scalar
499
+ The value(s) whose arccos is (are) required.
500
+
501
+ Returns
502
+ -------
503
+ out : ndarray or scalar
504
+ The inverse cosine(s) of the `x` value(s). If `x` was a scalar, so
505
+ is `out`, otherwise an array object is returned.
506
+
507
+ See Also
508
+ --------
509
+ numpy.arccos
510
+
511
+ Notes
512
+ -----
513
+ For an arccos() that returns ``NAN`` when real `x` is not in the
514
+ interval ``[-1,1]``, use `numpy.arccos`.
515
+
516
+ Examples
517
+ --------
518
+ >>> np.set_printoptions(precision=4)
519
+
520
+ >>> np.emath.arccos(1) # a scalar is returned
521
+ 0.0
522
+
523
+ >>> np.emath.arccos([1,2])
524
+ array([0.-0.j , 0.-1.317j])
525
+
526
+ """
527
+ x = _fix_real_abs_gt_1(x)
528
+ return nx.arccos(x)
529
+
530
+
531
+ @array_function_dispatch(_unary_dispatcher)
532
+ def arcsin(x):
533
+ """
534
+ Compute the inverse sine of x.
535
+
536
+ Return the "principal value" (for a description of this, see
537
+ `numpy.arcsin`) of the inverse sine of `x`. For real `x` such that
538
+ `abs(x) <= 1`, this is a real number in the closed interval
539
+ :math:`[-\\pi/2, \\pi/2]`. Otherwise, the complex principal value is
540
+ returned.
541
+
542
+ Parameters
543
+ ----------
544
+ x : array_like or scalar
545
+ The value(s) whose arcsin is (are) required.
546
+
547
+ Returns
548
+ -------
549
+ out : ndarray or scalar
550
+ The inverse sine(s) of the `x` value(s). If `x` was a scalar, so
551
+ is `out`, otherwise an array object is returned.
552
+
553
+ See Also
554
+ --------
555
+ numpy.arcsin
556
+
557
+ Notes
558
+ -----
559
+ For an arcsin() that returns ``NAN`` when real `x` is not in the
560
+ interval ``[-1,1]``, use `numpy.arcsin`.
561
+
562
+ Examples
563
+ --------
564
+ >>> np.set_printoptions(precision=4)
565
+
566
+ >>> np.emath.arcsin(0)
567
+ 0.0
568
+
569
+ >>> np.emath.arcsin([0,1])
570
+ array([0. , 1.5708])
571
+
572
+ """
573
+ x = _fix_real_abs_gt_1(x)
574
+ return nx.arcsin(x)
575
+
576
+
577
+ @array_function_dispatch(_unary_dispatcher)
578
+ def arctanh(x):
579
+ """
580
+ Compute the inverse hyperbolic tangent of `x`.
581
+
582
+ Return the "principal value" (for a description of this, see
583
+ `numpy.arctanh`) of ``arctanh(x)``. For real `x` such that
584
+ ``abs(x) < 1``, this is a real number. If `abs(x) > 1`, or if `x` is
585
+ complex, the result is complex. Finally, `x = 1` returns ``inf`` and
586
+ ``x=-1`` returns ``-inf``.
587
+
588
+ Parameters
589
+ ----------
590
+ x : array_like
591
+ The value(s) whose arctanh is (are) required.
592
+
593
+ Returns
594
+ -------
595
+ out : ndarray or scalar
596
+ The inverse hyperbolic tangent(s) of the `x` value(s). If `x` was
597
+ a scalar so is `out`, otherwise an array is returned.
598
+
599
+
600
+ See Also
601
+ --------
602
+ numpy.arctanh
603
+
604
+ Notes
605
+ -----
606
+ For an arctanh() that returns ``NAN`` when real `x` is not in the
607
+ interval ``(-1,1)``, use `numpy.arctanh` (the latter, however, does
608
+ return +/-inf for ``x = +/-1``).
609
+
610
+ Examples
611
+ --------
612
+ >>> np.set_printoptions(precision=4)
613
+
614
+ >>> from numpy.testing import suppress_warnings
615
+ >>> with suppress_warnings() as sup:
616
+ ... sup.filter(RuntimeWarning)
617
+ ... np.emath.arctanh(np.eye(2))
618
+ array([[inf, 0.],
619
+ [ 0., inf]])
620
+ >>> np.emath.arctanh([1j])
621
+ array([0.+0.7854j])
622
+
623
+ """
624
+ x = _fix_real_abs_gt_1(x)
625
+ return nx.arctanh(x)
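A sketch of the domain behaviour documented above (illustrative values): real inside ``(-1, 1)``, complex outside:

    import numpy as np

    assert np.isreal(np.emath.arctanh(0.5))
    assert np.iscomplexobj(np.emath.arctanh(2.0))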
venv/lib/python3.10/site-packages/numpy/lib/scimath.pyi ADDED
@@ -0,0 +1,94 @@
+ from typing import overload, Any
+
+ from numpy import complexfloating
+
+ from numpy._typing import (
+     NDArray,
+     _ArrayLikeFloat_co,
+     _ArrayLikeComplex_co,
+     _ComplexLike_co,
+     _FloatLike_co,
+ )
+
+ __all__: list[str]
+
+ @overload
+ def sqrt(x: _FloatLike_co) -> Any: ...
+ @overload
+ def sqrt(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+ @overload
+ def sqrt(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+ @overload
+ def sqrt(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+ @overload
+ def log(x: _FloatLike_co) -> Any: ...
+ @overload
+ def log(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+ @overload
+ def log(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+ @overload
+ def log(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+ @overload
+ def log10(x: _FloatLike_co) -> Any: ...
+ @overload
+ def log10(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+ @overload
+ def log10(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+ @overload
+ def log10(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+ @overload
+ def log2(x: _FloatLike_co) -> Any: ...
+ @overload
+ def log2(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+ @overload
+ def log2(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+ @overload
+ def log2(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+ @overload
+ def logn(n: _FloatLike_co, x: _FloatLike_co) -> Any: ...
+ @overload
+ def logn(n: _ComplexLike_co, x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+ @overload
+ def logn(n: _ArrayLikeFloat_co, x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+ @overload
+ def logn(n: _ArrayLikeComplex_co, x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+ @overload
+ def power(x: _FloatLike_co, p: _FloatLike_co) -> Any: ...
+ @overload
+ def power(x: _ComplexLike_co, p: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+ @overload
+ def power(x: _ArrayLikeFloat_co, p: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+ @overload
+ def power(x: _ArrayLikeComplex_co, p: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+ @overload
+ def arccos(x: _FloatLike_co) -> Any: ...
+ @overload
+ def arccos(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+ @overload
+ def arccos(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+ @overload
+ def arccos(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+ @overload
+ def arcsin(x: _FloatLike_co) -> Any: ...
+ @overload
+ def arcsin(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+ @overload
+ def arcsin(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+ @overload
+ def arcsin(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+
+ @overload
+ def arctanh(x: _FloatLike_co) -> Any: ...
+ @overload
+ def arctanh(x: _ComplexLike_co) -> complexfloating[Any, Any]: ...
+ @overload
+ def arctanh(x: _ArrayLikeFloat_co) -> NDArray[Any]: ...
+ @overload
+ def arctanh(x: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
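These overloads encode the wrappers' dispatch: a real scalar may come back real or complex (hence the Any return on the _FloatLike_co overloads), while complex input always yields a complex result and array-like input yields an array. A small illustrative sketch of my own, showing the runtime behavior the stub annotates:

    import numpy as np

    # Real scalar input: the result may be real or complex, so the stub returns Any.
    print(np.emath.sqrt(4.0))         # 2.0
    print(np.emath.sqrt(-4.0))        # 2j (np.sqrt(-4.0) would give nan)
    # Complex input always yields a complex scalar.
    print(np.emath.sqrt(4.0 + 0j))    # (2+0j)
    # Array-like input hits the NDArray overloads.
    print(np.emath.log([1.0, -1.0]))  # array([0.+0.j, 0.+3.1416j]) (approximately)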
venv/lib/python3.10/site-packages/numpy/lib/setup.py ADDED
@@ -0,0 +1,12 @@
+ def configuration(parent_package='', top_path=None):
+     from numpy.distutils.misc_util import Configuration
+
+     config = Configuration('lib', parent_package, top_path)
+     config.add_subpackage('tests')
+     config.add_data_dir('tests/data')
+     config.add_data_files('*.pyi')
+     return config
+
+ if __name__ == '__main__':
+     from numpy.distutils.core import setup
+     setup(configuration=configuration)
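For context, numpy.distutils consumes this configuration() hook when building the parent package. A minimal sketch of inspecting it standalone; this assumes numpy.distutils is still importable (it was removed after NumPy 1.26):

    # Hypothetical standalone inspection of the Configuration this file builds.
    from numpy.distutils.misc_util import Configuration

    config = Configuration('lib', parent_package='numpy', top_path=None)
    config.add_data_files('*.pyi')
    kwargs = config.todict()  # the keyword arguments that would be passed to setup()
    print(kwargs['name'])     # 'numpy.lib'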
venv/lib/python3.10/site-packages/numpy/lib/shape_base.pyi ADDED
@@ -0,0 +1,220 @@
+ import sys
+ from collections.abc import Callable, Sequence
+ from typing import TypeVar, Any, overload, SupportsIndex, Protocol
+
+ if sys.version_info >= (3, 10):
+     from typing import ParamSpec, Concatenate
+ else:
+     from typing_extensions import ParamSpec, Concatenate
+
+ from numpy import (
+     generic,
+     integer,
+     ufunc,
+     bool_,
+     unsignedinteger,
+     signedinteger,
+     floating,
+     complexfloating,
+     object_,
+ )
+
+ from numpy._typing import (
+     ArrayLike,
+     NDArray,
+     _ShapeLike,
+     _ArrayLike,
+     _ArrayLikeBool_co,
+     _ArrayLikeUInt_co,
+     _ArrayLikeInt_co,
+     _ArrayLikeFloat_co,
+     _ArrayLikeComplex_co,
+     _ArrayLikeObject_co,
+ )
+
+ from numpy.core.shape_base import vstack
+
+ _P = ParamSpec("_P")
+ _SCT = TypeVar("_SCT", bound=generic)
+
+ # The signatures of `__array_wrap__` and `__array_prepare__` are the same;
+ # give them unique names for the sake of clarity
+ class _ArrayWrap(Protocol):
+     def __call__(
+         self,
+         array: NDArray[Any],
+         context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
+         /,
+     ) -> Any: ...
+
+ class _ArrayPrepare(Protocol):
+     def __call__(
+         self,
+         array: NDArray[Any],
+         context: None | tuple[ufunc, tuple[Any, ...], int] = ...,
+         /,
+     ) -> Any: ...
+
+ class _SupportsArrayWrap(Protocol):
+     @property
+     def __array_wrap__(self) -> _ArrayWrap: ...
+
+ class _SupportsArrayPrepare(Protocol):
+     @property
+     def __array_prepare__(self) -> _ArrayPrepare: ...
+
+ __all__: list[str]
+
+ row_stack = vstack
+
+ def take_along_axis(
+     arr: _SCT | NDArray[_SCT],
+     indices: NDArray[integer[Any]],
+     axis: None | int,
+ ) -> NDArray[_SCT]: ...
+
+ def put_along_axis(
+     arr: NDArray[_SCT],
+     indices: NDArray[integer[Any]],
+     values: ArrayLike,
+     axis: None | int,
+ ) -> None: ...
+
+ @overload
+ def apply_along_axis(
+     func1d: Callable[Concatenate[NDArray[Any], _P], _ArrayLike[_SCT]],
+     axis: SupportsIndex,
+     arr: ArrayLike,
+     *args: _P.args,
+     **kwargs: _P.kwargs,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def apply_along_axis(
+     func1d: Callable[Concatenate[NDArray[Any], _P], ArrayLike],
+     axis: SupportsIndex,
+     arr: ArrayLike,
+     *args: _P.args,
+     **kwargs: _P.kwargs,
+ ) -> NDArray[Any]: ...
+
+ def apply_over_axes(
+     func: Callable[[NDArray[Any], int], NDArray[_SCT]],
+     a: ArrayLike,
+     axes: int | Sequence[int],
+ ) -> NDArray[_SCT]: ...
+
+ @overload
+ def expand_dims(
+     a: _ArrayLike[_SCT],
+     axis: _ShapeLike,
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def expand_dims(
+     a: ArrayLike,
+     axis: _ShapeLike,
+ ) -> NDArray[Any]: ...
+
+ @overload
+ def column_stack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
+ @overload
+ def column_stack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
+
+ @overload
+ def dstack(tup: Sequence[_ArrayLike[_SCT]]) -> NDArray[_SCT]: ...
+ @overload
+ def dstack(tup: Sequence[ArrayLike]) -> NDArray[Any]: ...
+
+ @overload
+ def array_split(
+     ary: _ArrayLike[_SCT],
+     indices_or_sections: _ShapeLike,
+     axis: SupportsIndex = ...,
+ ) -> list[NDArray[_SCT]]: ...
+ @overload
+ def array_split(
+     ary: ArrayLike,
+     indices_or_sections: _ShapeLike,
+     axis: SupportsIndex = ...,
+ ) -> list[NDArray[Any]]: ...
+
+ @overload
+ def split(
+     ary: _ArrayLike[_SCT],
+     indices_or_sections: _ShapeLike,
+     axis: SupportsIndex = ...,
+ ) -> list[NDArray[_SCT]]: ...
+ @overload
+ def split(
+     ary: ArrayLike,
+     indices_or_sections: _ShapeLike,
+     axis: SupportsIndex = ...,
+ ) -> list[NDArray[Any]]: ...
+
+ @overload
+ def hsplit(
+     ary: _ArrayLike[_SCT],
+     indices_or_sections: _ShapeLike,
+ ) -> list[NDArray[_SCT]]: ...
+ @overload
+ def hsplit(
+     ary: ArrayLike,
+     indices_or_sections: _ShapeLike,
+ ) -> list[NDArray[Any]]: ...
+
+ @overload
+ def vsplit(
+     ary: _ArrayLike[_SCT],
+     indices_or_sections: _ShapeLike,
+ ) -> list[NDArray[_SCT]]: ...
+ @overload
+ def vsplit(
+     ary: ArrayLike,
+     indices_or_sections: _ShapeLike,
+ ) -> list[NDArray[Any]]: ...
+
+ @overload
+ def dsplit(
+     ary: _ArrayLike[_SCT],
+     indices_or_sections: _ShapeLike,
+ ) -> list[NDArray[_SCT]]: ...
+ @overload
+ def dsplit(
+     ary: ArrayLike,
+     indices_or_sections: _ShapeLike,
+ ) -> list[NDArray[Any]]: ...
+
+ @overload
+ def get_array_prepare(*args: _SupportsArrayPrepare) -> _ArrayPrepare: ...
+ @overload
+ def get_array_prepare(*args: object) -> None | _ArrayPrepare: ...
+
+ @overload
+ def get_array_wrap(*args: _SupportsArrayWrap) -> _ArrayWrap: ...
+ @overload
+ def get_array_wrap(*args: object) -> None | _ArrayWrap: ...
+
+ @overload
+ def kron(a: _ArrayLikeBool_co, b: _ArrayLikeBool_co) -> NDArray[bool_]: ...  # type: ignore[misc]
+ @overload
+ def kron(a: _ArrayLikeUInt_co, b: _ArrayLikeUInt_co) -> NDArray[unsignedinteger[Any]]: ...  # type: ignore[misc]
+ @overload
+ def kron(a: _ArrayLikeInt_co, b: _ArrayLikeInt_co) -> NDArray[signedinteger[Any]]: ...  # type: ignore[misc]
+ @overload
+ def kron(a: _ArrayLikeFloat_co, b: _ArrayLikeFloat_co) -> NDArray[floating[Any]]: ...  # type: ignore[misc]
+ @overload
+ def kron(a: _ArrayLikeComplex_co, b: _ArrayLikeComplex_co) -> NDArray[complexfloating[Any, Any]]: ...
+ @overload
+ def kron(a: _ArrayLikeObject_co, b: Any) -> NDArray[object_]: ...
+ @overload
+ def kron(a: Any, b: _ArrayLikeObject_co) -> NDArray[object_]: ...
+
+ @overload
+ def tile(
+     A: _ArrayLike[_SCT],
+     reps: int | Sequence[int],
+ ) -> NDArray[_SCT]: ...
+ @overload
+ def tile(
+     A: ArrayLike,
+     reps: int | Sequence[int],
+ ) -> NDArray[Any]: ...
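The _SCT-parameterized signatures above promise dtype preservation. A short runnable sketch of my own showing what that means at runtime, using take_along_axis:

    import numpy as np

    a = np.array([[10, 30], [40, 20]])
    idx = np.argsort(a, axis=1)               # integer index array, one row of indices per row of a
    out = np.take_along_axis(a, idx, axis=1)  # sorts each row via the indices
    print(out)                                # [[10 30]
                                              #  [20 40]]
    print(out.dtype == a.dtype)               # True: the input dtype is preserved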