Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- env-llmeval/lib/python3.10/site-packages/numpy/core/_add_newdocs.py +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/_add_newdocs_scalars.py +372 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/_asarray.py +134 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/_exceptions.py +172 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/_machar.py +356 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/_operand_flag_tests.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/_rational_tests.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/_string_helpers.py +100 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/_ufunc_config.pyi +37 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/arrayprint.py +1725 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/defchararray.pyi +421 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/einsumfunc.py +1443 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/getlimits.py +735 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/getlimits.pyi +6 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/multiarray.py +1715 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/numeric.py +2530 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/numeric.pyi +660 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/shape_base.pyi +123 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/umath.py +36 -0
- env-llmeval/lib/python3.10/site-packages/numpy/core/umath_tests.py +13 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimd.c +27 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdhp.c +15 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx.c +20 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx2.c +20 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_clx.c +22 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c +24 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_icl.c +26 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knl.c +25 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knm.c +30 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_skx.c +26 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512cd.c +20 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512f.c +20 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_f16c.c +22 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma3.c +22 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon.c +19 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_fp16.c +11 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c +21 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_popcnt.c +32 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse.c +20 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse2.c +20 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse3.c +20 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse41.c +20 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_ssse3.c +20 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx.c +21 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx3.c +13 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx4.c +14 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vx.c +16 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vxe2.c +21 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_xop.c +12 -0
- env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c +18 -0
env-llmeval/lib/python3.10/site-packages/numpy/core/_add_newdocs.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/numpy/core/_add_newdocs_scalars.py
ADDED
@@ -0,0 +1,372 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This file is separate from ``_add_newdocs.py`` so that it can be mocked out by
|
3 |
+
our sphinx ``conf.py`` during doc builds, where we want to avoid showing
|
4 |
+
platform-dependent information.
|
5 |
+
"""
|
6 |
+
import sys
|
7 |
+
import os
|
8 |
+
from numpy.core import dtype
|
9 |
+
from numpy.core import numerictypes as _numerictypes
|
10 |
+
from numpy.core.function_base import add_newdoc
|
11 |
+
|
12 |
+
##############################################################################
|
13 |
+
#
|
14 |
+
# Documentation for concrete scalar classes
|
15 |
+
#
|
16 |
+
##############################################################################
|
17 |
+
|
18 |
+
def numeric_type_aliases(aliases):
|
19 |
+
def type_aliases_gen():
|
20 |
+
for alias, doc in aliases:
|
21 |
+
try:
|
22 |
+
alias_type = getattr(_numerictypes, alias)
|
23 |
+
except AttributeError:
|
24 |
+
# The set of aliases that actually exist varies between platforms
|
25 |
+
pass
|
26 |
+
else:
|
27 |
+
yield (alias_type, alias, doc)
|
28 |
+
return list(type_aliases_gen())
|
29 |
+
|
30 |
+
|
31 |
+
possible_aliases = numeric_type_aliases([
|
32 |
+
('int8', '8-bit signed integer (``-128`` to ``127``)'),
|
33 |
+
('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'),
|
34 |
+
('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'),
|
35 |
+
('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'),
|
36 |
+
('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
|
37 |
+
('uint8', '8-bit unsigned integer (``0`` to ``255``)'),
|
38 |
+
('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'),
|
39 |
+
('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'),
|
40 |
+
('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'),
|
41 |
+
('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
|
42 |
+
('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
|
43 |
+
('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
|
44 |
+
('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
|
45 |
+
('float96', '96-bit extended-precision floating-point number type'),
|
46 |
+
('float128', '128-bit extended-precision floating-point number type'),
|
47 |
+
('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
|
48 |
+
('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
|
49 |
+
('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
|
50 |
+
('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
|
51 |
+
])
|
52 |
+
|
53 |
+
|
54 |
+
def _get_platform_and_machine():
|
55 |
+
try:
|
56 |
+
system, _, _, _, machine = os.uname()
|
57 |
+
except AttributeError:
|
58 |
+
system = sys.platform
|
59 |
+
if system == 'win32':
|
60 |
+
machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \
|
61 |
+
or os.environ.get('PROCESSOR_ARCHITECTURE', '')
|
62 |
+
else:
|
63 |
+
machine = 'unknown'
|
64 |
+
return system, machine
|
65 |
+
|
66 |
+
|
67 |
+
_system, _machine = _get_platform_and_machine()
|
68 |
+
_doc_alias_string = f":Alias on this platform ({_system} {_machine}):"
|
69 |
+
|
70 |
+
|
71 |
+
def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
|
72 |
+
# note: `:field: value` is rST syntax which renders as field lists.
|
73 |
+
o = getattr(_numerictypes, obj)
|
74 |
+
|
75 |
+
character_code = dtype(o).char
|
76 |
+
canonical_name_doc = "" if obj == o.__name__ else \
|
77 |
+
f":Canonical name: `numpy.{obj}`\n "
|
78 |
+
if fixed_aliases:
|
79 |
+
alias_doc = ''.join(f":Alias: `numpy.{alias}`\n "
|
80 |
+
for alias in fixed_aliases)
|
81 |
+
else:
|
82 |
+
alias_doc = ''
|
83 |
+
alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n "
|
84 |
+
for (alias_type, alias, doc) in possible_aliases if alias_type is o)
|
85 |
+
|
86 |
+
docstring = f"""
|
87 |
+
{doc.strip()}
|
88 |
+
|
89 |
+
:Character code: ``'{character_code}'``
|
90 |
+
{canonical_name_doc}{alias_doc}
|
91 |
+
"""
|
92 |
+
|
93 |
+
add_newdoc('numpy.core.numerictypes', obj, docstring)
|
94 |
+
|
95 |
+
|
96 |
+
add_newdoc_for_scalar_type('bool_', [],
|
97 |
+
"""
|
98 |
+
Boolean type (True or False), stored as a byte.
|
99 |
+
|
100 |
+
.. warning::
|
101 |
+
|
102 |
+
The :class:`bool_` type is not a subclass of the :class:`int_` type
|
103 |
+
(the :class:`bool_` is not even a number type). This is different
|
104 |
+
than Python's default implementation of :class:`bool` as a
|
105 |
+
sub-class of :class:`int`.
|
106 |
+
""")
|
107 |
+
|
108 |
+
add_newdoc_for_scalar_type('byte', [],
|
109 |
+
"""
|
110 |
+
Signed integer type, compatible with C ``char``.
|
111 |
+
""")
|
112 |
+
|
113 |
+
add_newdoc_for_scalar_type('short', [],
|
114 |
+
"""
|
115 |
+
Signed integer type, compatible with C ``short``.
|
116 |
+
""")
|
117 |
+
|
118 |
+
add_newdoc_for_scalar_type('intc', [],
|
119 |
+
"""
|
120 |
+
Signed integer type, compatible with C ``int``.
|
121 |
+
""")
|
122 |
+
|
123 |
+
add_newdoc_for_scalar_type('int_', [],
|
124 |
+
"""
|
125 |
+
Signed integer type, compatible with Python `int` and C ``long``.
|
126 |
+
""")
|
127 |
+
|
128 |
+
add_newdoc_for_scalar_type('longlong', [],
|
129 |
+
"""
|
130 |
+
Signed integer type, compatible with C ``long long``.
|
131 |
+
""")
|
132 |
+
|
133 |
+
add_newdoc_for_scalar_type('ubyte', [],
|
134 |
+
"""
|
135 |
+
Unsigned integer type, compatible with C ``unsigned char``.
|
136 |
+
""")
|
137 |
+
|
138 |
+
add_newdoc_for_scalar_type('ushort', [],
|
139 |
+
"""
|
140 |
+
Unsigned integer type, compatible with C ``unsigned short``.
|
141 |
+
""")
|
142 |
+
|
143 |
+
add_newdoc_for_scalar_type('uintc', [],
|
144 |
+
"""
|
145 |
+
Unsigned integer type, compatible with C ``unsigned int``.
|
146 |
+
""")
|
147 |
+
|
148 |
+
add_newdoc_for_scalar_type('uint', [],
|
149 |
+
"""
|
150 |
+
Unsigned integer type, compatible with C ``unsigned long``.
|
151 |
+
""")
|
152 |
+
|
153 |
+
add_newdoc_for_scalar_type('ulonglong', [],
|
154 |
+
"""
|
155 |
+
Signed integer type, compatible with C ``unsigned long long``.
|
156 |
+
""")
|
157 |
+
|
158 |
+
add_newdoc_for_scalar_type('half', [],
|
159 |
+
"""
|
160 |
+
Half-precision floating-point number type.
|
161 |
+
""")
|
162 |
+
|
163 |
+
add_newdoc_for_scalar_type('single', [],
|
164 |
+
"""
|
165 |
+
Single-precision floating-point number type, compatible with C ``float``.
|
166 |
+
""")
|
167 |
+
|
168 |
+
add_newdoc_for_scalar_type('double', ['float_'],
|
169 |
+
"""
|
170 |
+
Double-precision floating-point number type, compatible with Python `float`
|
171 |
+
and C ``double``.
|
172 |
+
""")
|
173 |
+
|
174 |
+
add_newdoc_for_scalar_type('longdouble', ['longfloat'],
|
175 |
+
"""
|
176 |
+
Extended-precision floating-point number type, compatible with C
|
177 |
+
``long double`` but not necessarily with IEEE 754 quadruple-precision.
|
178 |
+
""")
|
179 |
+
|
180 |
+
add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
|
181 |
+
"""
|
182 |
+
Complex number type composed of two single-precision floating-point
|
183 |
+
numbers.
|
184 |
+
""")
|
185 |
+
|
186 |
+
add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
|
187 |
+
"""
|
188 |
+
Complex number type composed of two double-precision floating-point
|
189 |
+
numbers, compatible with Python `complex`.
|
190 |
+
""")
|
191 |
+
|
192 |
+
add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
|
193 |
+
"""
|
194 |
+
Complex number type composed of two extended-precision floating-point
|
195 |
+
numbers.
|
196 |
+
""")
|
197 |
+
|
198 |
+
add_newdoc_for_scalar_type('object_', [],
|
199 |
+
"""
|
200 |
+
Any Python object.
|
201 |
+
""")
|
202 |
+
|
203 |
+
add_newdoc_for_scalar_type('str_', ['unicode_'],
|
204 |
+
r"""
|
205 |
+
A unicode string.
|
206 |
+
|
207 |
+
This type strips trailing null codepoints.
|
208 |
+
|
209 |
+
>>> s = np.str_("abc\x00")
|
210 |
+
>>> s
|
211 |
+
'abc'
|
212 |
+
|
213 |
+
Unlike the builtin `str`, this supports the :ref:`python:bufferobjects`, exposing its
|
214 |
+
contents as UCS4:
|
215 |
+
|
216 |
+
>>> m = memoryview(np.str_("abc"))
|
217 |
+
>>> m.format
|
218 |
+
'3w'
|
219 |
+
>>> m.tobytes()
|
220 |
+
b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
|
221 |
+
""")
|
222 |
+
|
223 |
+
add_newdoc_for_scalar_type('bytes_', ['string_'],
|
224 |
+
r"""
|
225 |
+
A byte string.
|
226 |
+
|
227 |
+
When used in arrays, this type strips trailing null bytes.
|
228 |
+
""")
|
229 |
+
|
230 |
+
add_newdoc_for_scalar_type('void', [],
|
231 |
+
r"""
|
232 |
+
np.void(length_or_data, /, dtype=None)
|
233 |
+
|
234 |
+
Create a new structured or unstructured void scalar.
|
235 |
+
|
236 |
+
Parameters
|
237 |
+
----------
|
238 |
+
length_or_data : int, array-like, bytes-like, object
|
239 |
+
One of multiple meanings (see notes). The length or
|
240 |
+
bytes data of an unstructured void. Or alternatively,
|
241 |
+
the data to be stored in the new scalar when `dtype`
|
242 |
+
is provided.
|
243 |
+
This can be an array-like, in which case an array may
|
244 |
+
be returned.
|
245 |
+
dtype : dtype, optional
|
246 |
+
If provided the dtype of the new scalar. This dtype must
|
247 |
+
be "void" dtype (i.e. a structured or unstructured void,
|
248 |
+
see also :ref:`defining-structured-types`).
|
249 |
+
|
250 |
+
..versionadded:: 1.24
|
251 |
+
|
252 |
+
Notes
|
253 |
+
-----
|
254 |
+
For historical reasons and because void scalars can represent both
|
255 |
+
arbitrary byte data and structured dtypes, the void constructor
|
256 |
+
has three calling conventions:
|
257 |
+
|
258 |
+
1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
|
259 |
+
``\0`` bytes. The 5 can be a Python or NumPy integer.
|
260 |
+
2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
|
261 |
+
The dtype itemsize will match the byte string length, here ``"V10"``.
|
262 |
+
3. When a ``dtype=`` is passed the call is roughly the same as an
|
263 |
+
array creation. However, a void scalar rather than array is returned.
|
264 |
+
|
265 |
+
Please see the examples which show all three different conventions.
|
266 |
+
|
267 |
+
Examples
|
268 |
+
--------
|
269 |
+
>>> np.void(5)
|
270 |
+
void(b'\x00\x00\x00\x00\x00')
|
271 |
+
>>> np.void(b'abcd')
|
272 |
+
void(b'\x61\x62\x63\x64')
|
273 |
+
>>> np.void((5, 3.2, "eggs"), dtype="i,d,S5")
|
274 |
+
(5, 3.2, b'eggs') # looks like a tuple, but is `np.void`
|
275 |
+
>>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
|
276 |
+
(3, 3) # looks like a tuple, but is `np.void`
|
277 |
+
|
278 |
+
""")
|
279 |
+
|
280 |
+
add_newdoc_for_scalar_type('datetime64', [],
|
281 |
+
"""
|
282 |
+
If created from a 64-bit integer, it represents an offset from
|
283 |
+
``1970-01-01T00:00:00``.
|
284 |
+
If created from string, the string can be in ISO 8601 date
|
285 |
+
or datetime format.
|
286 |
+
|
287 |
+
>>> np.datetime64(10, 'Y')
|
288 |
+
numpy.datetime64('1980')
|
289 |
+
>>> np.datetime64('1980', 'Y')
|
290 |
+
numpy.datetime64('1980')
|
291 |
+
>>> np.datetime64(10, 'D')
|
292 |
+
numpy.datetime64('1970-01-11')
|
293 |
+
|
294 |
+
See :ref:`arrays.datetime` for more information.
|
295 |
+
""")
|
296 |
+
|
297 |
+
add_newdoc_for_scalar_type('timedelta64', [],
|
298 |
+
"""
|
299 |
+
A timedelta stored as a 64-bit integer.
|
300 |
+
|
301 |
+
See :ref:`arrays.datetime` for more information.
|
302 |
+
""")
|
303 |
+
|
304 |
+
add_newdoc('numpy.core.numerictypes', "integer", ('is_integer',
|
305 |
+
"""
|
306 |
+
integer.is_integer() -> bool
|
307 |
+
|
308 |
+
Return ``True`` if the number is finite with integral value.
|
309 |
+
|
310 |
+
.. versionadded:: 1.22
|
311 |
+
|
312 |
+
Examples
|
313 |
+
--------
|
314 |
+
>>> np.int64(-2).is_integer()
|
315 |
+
True
|
316 |
+
>>> np.uint32(5).is_integer()
|
317 |
+
True
|
318 |
+
"""))
|
319 |
+
|
320 |
+
# TODO: work out how to put this on the base class, np.floating
|
321 |
+
for float_name in ('half', 'single', 'double', 'longdouble'):
|
322 |
+
add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
|
323 |
+
"""
|
324 |
+
{ftype}.as_integer_ratio() -> (int, int)
|
325 |
+
|
326 |
+
Return a pair of integers, whose ratio is exactly equal to the original
|
327 |
+
floating point number, and with a positive denominator.
|
328 |
+
Raise `OverflowError` on infinities and a `ValueError` on NaNs.
|
329 |
+
|
330 |
+
>>> np.{ftype}(10.0).as_integer_ratio()
|
331 |
+
(10, 1)
|
332 |
+
>>> np.{ftype}(0.0).as_integer_ratio()
|
333 |
+
(0, 1)
|
334 |
+
>>> np.{ftype}(-.25).as_integer_ratio()
|
335 |
+
(-1, 4)
|
336 |
+
""".format(ftype=float_name)))
|
337 |
+
|
338 |
+
add_newdoc('numpy.core.numerictypes', float_name, ('is_integer',
|
339 |
+
f"""
|
340 |
+
{float_name}.is_integer() -> bool
|
341 |
+
|
342 |
+
Return ``True`` if the floating point number is finite with integral
|
343 |
+
value, and ``False`` otherwise.
|
344 |
+
|
345 |
+
.. versionadded:: 1.22
|
346 |
+
|
347 |
+
Examples
|
348 |
+
--------
|
349 |
+
>>> np.{float_name}(-2.0).is_integer()
|
350 |
+
True
|
351 |
+
>>> np.{float_name}(3.2).is_integer()
|
352 |
+
False
|
353 |
+
"""))
|
354 |
+
|
355 |
+
for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
|
356 |
+
'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'):
|
357 |
+
# Add negative examples for signed cases by checking typecode
|
358 |
+
add_newdoc('numpy.core.numerictypes', int_name, ('bit_count',
|
359 |
+
f"""
|
360 |
+
{int_name}.bit_count() -> int
|
361 |
+
|
362 |
+
Computes the number of 1-bits in the absolute value of the input.
|
363 |
+
Analogous to the builtin `int.bit_count` or ``popcount`` in C++.
|
364 |
+
|
365 |
+
Examples
|
366 |
+
--------
|
367 |
+
>>> np.{int_name}(127).bit_count()
|
368 |
+
7""" +
|
369 |
+
(f"""
|
370 |
+
>>> np.{int_name}(-127).bit_count()
|
371 |
+
7
|
372 |
+
""" if dtype(int_name).char.islower() else "")))
|
env-llmeval/lib/python3.10/site-packages/numpy/core/_asarray.py
ADDED
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Functions in the ``as*array`` family that promote array-likes into arrays.
|
3 |
+
|
4 |
+
`require` fits this category despite its name not matching this pattern.
|
5 |
+
"""
|
6 |
+
from .overrides import (
|
7 |
+
array_function_dispatch,
|
8 |
+
set_array_function_like_doc,
|
9 |
+
set_module,
|
10 |
+
)
|
11 |
+
from .multiarray import array, asanyarray
|
12 |
+
|
13 |
+
|
14 |
+
__all__ = ["require"]
|
15 |
+
|
16 |
+
|
17 |
+
POSSIBLE_FLAGS = {
|
18 |
+
'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
|
19 |
+
'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
|
20 |
+
'A': 'A', 'ALIGNED': 'A',
|
21 |
+
'W': 'W', 'WRITEABLE': 'W',
|
22 |
+
'O': 'O', 'OWNDATA': 'O',
|
23 |
+
'E': 'E', 'ENSUREARRAY': 'E'
|
24 |
+
}
|
25 |
+
|
26 |
+
|
27 |
+
@set_array_function_like_doc
|
28 |
+
@set_module('numpy')
|
29 |
+
def require(a, dtype=None, requirements=None, *, like=None):
|
30 |
+
"""
|
31 |
+
Return an ndarray of the provided type that satisfies requirements.
|
32 |
+
|
33 |
+
This function is useful to be sure that an array with the correct flags
|
34 |
+
is returned for passing to compiled code (perhaps through ctypes).
|
35 |
+
|
36 |
+
Parameters
|
37 |
+
----------
|
38 |
+
a : array_like
|
39 |
+
The object to be converted to a type-and-requirement-satisfying array.
|
40 |
+
dtype : data-type
|
41 |
+
The required data-type. If None preserve the current dtype. If your
|
42 |
+
application requires the data to be in native byteorder, include
|
43 |
+
a byteorder specification as a part of the dtype specification.
|
44 |
+
requirements : str or sequence of str
|
45 |
+
The requirements list can be any of the following
|
46 |
+
|
47 |
+
* 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
|
48 |
+
* 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
|
49 |
+
* 'ALIGNED' ('A') - ensure a data-type aligned array
|
50 |
+
* 'WRITEABLE' ('W') - ensure a writable array
|
51 |
+
* 'OWNDATA' ('O') - ensure an array that owns its own data
|
52 |
+
* 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
|
53 |
+
${ARRAY_FUNCTION_LIKE}
|
54 |
+
|
55 |
+
.. versionadded:: 1.20.0
|
56 |
+
|
57 |
+
Returns
|
58 |
+
-------
|
59 |
+
out : ndarray
|
60 |
+
Array with specified requirements and type if given.
|
61 |
+
|
62 |
+
See Also
|
63 |
+
--------
|
64 |
+
asarray : Convert input to an ndarray.
|
65 |
+
asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
|
66 |
+
ascontiguousarray : Convert input to a contiguous array.
|
67 |
+
asfortranarray : Convert input to an ndarray with column-major
|
68 |
+
memory order.
|
69 |
+
ndarray.flags : Information about the memory layout of the array.
|
70 |
+
|
71 |
+
Notes
|
72 |
+
-----
|
73 |
+
The returned array will be guaranteed to have the listed requirements
|
74 |
+
by making a copy if needed.
|
75 |
+
|
76 |
+
Examples
|
77 |
+
--------
|
78 |
+
>>> x = np.arange(6).reshape(2,3)
|
79 |
+
>>> x.flags
|
80 |
+
C_CONTIGUOUS : True
|
81 |
+
F_CONTIGUOUS : False
|
82 |
+
OWNDATA : False
|
83 |
+
WRITEABLE : True
|
84 |
+
ALIGNED : True
|
85 |
+
WRITEBACKIFCOPY : False
|
86 |
+
|
87 |
+
>>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
|
88 |
+
>>> y.flags
|
89 |
+
C_CONTIGUOUS : False
|
90 |
+
F_CONTIGUOUS : True
|
91 |
+
OWNDATA : True
|
92 |
+
WRITEABLE : True
|
93 |
+
ALIGNED : True
|
94 |
+
WRITEBACKIFCOPY : False
|
95 |
+
|
96 |
+
"""
|
97 |
+
if like is not None:
|
98 |
+
return _require_with_like(
|
99 |
+
like,
|
100 |
+
a,
|
101 |
+
dtype=dtype,
|
102 |
+
requirements=requirements,
|
103 |
+
)
|
104 |
+
|
105 |
+
if not requirements:
|
106 |
+
return asanyarray(a, dtype=dtype)
|
107 |
+
|
108 |
+
requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements}
|
109 |
+
|
110 |
+
if 'E' in requirements:
|
111 |
+
requirements.remove('E')
|
112 |
+
subok = False
|
113 |
+
else:
|
114 |
+
subok = True
|
115 |
+
|
116 |
+
order = 'A'
|
117 |
+
if requirements >= {'C', 'F'}:
|
118 |
+
raise ValueError('Cannot specify both "C" and "F" order')
|
119 |
+
elif 'F' in requirements:
|
120 |
+
order = 'F'
|
121 |
+
requirements.remove('F')
|
122 |
+
elif 'C' in requirements:
|
123 |
+
order = 'C'
|
124 |
+
requirements.remove('C')
|
125 |
+
|
126 |
+
arr = array(a, dtype=dtype, order=order, copy=False, subok=subok)
|
127 |
+
|
128 |
+
for prop in requirements:
|
129 |
+
if not arr.flags[prop]:
|
130 |
+
return arr.copy(order)
|
131 |
+
return arr
|
132 |
+
|
133 |
+
|
134 |
+
_require_with_like = array_function_dispatch()(require)
|
env-llmeval/lib/python3.10/site-packages/numpy/core/_exceptions.py
ADDED
@@ -0,0 +1,172 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Various richly-typed exceptions, that also help us deal with string formatting
|
3 |
+
in python where it's easier.
|
4 |
+
|
5 |
+
By putting the formatting in `__str__`, we also avoid paying the cost for
|
6 |
+
users who silence the exceptions.
|
7 |
+
"""
|
8 |
+
from .._utils import set_module
|
9 |
+
|
10 |
+
def _unpack_tuple(tup):
|
11 |
+
if len(tup) == 1:
|
12 |
+
return tup[0]
|
13 |
+
else:
|
14 |
+
return tup
|
15 |
+
|
16 |
+
|
17 |
+
def _display_as_base(cls):
|
18 |
+
"""
|
19 |
+
A decorator that makes an exception class look like its base.
|
20 |
+
|
21 |
+
We use this to hide subclasses that are implementation details - the user
|
22 |
+
should catch the base type, which is what the traceback will show them.
|
23 |
+
|
24 |
+
Classes decorated with this decorator are subject to removal without a
|
25 |
+
deprecation warning.
|
26 |
+
"""
|
27 |
+
assert issubclass(cls, Exception)
|
28 |
+
cls.__name__ = cls.__base__.__name__
|
29 |
+
return cls
|
30 |
+
|
31 |
+
|
32 |
+
class UFuncTypeError(TypeError):
|
33 |
+
""" Base class for all ufunc exceptions """
|
34 |
+
def __init__(self, ufunc):
|
35 |
+
self.ufunc = ufunc
|
36 |
+
|
37 |
+
|
38 |
+
@_display_as_base
|
39 |
+
class _UFuncNoLoopError(UFuncTypeError):
|
40 |
+
""" Thrown when a ufunc loop cannot be found """
|
41 |
+
def __init__(self, ufunc, dtypes):
|
42 |
+
super().__init__(ufunc)
|
43 |
+
self.dtypes = tuple(dtypes)
|
44 |
+
|
45 |
+
def __str__(self):
|
46 |
+
return (
|
47 |
+
"ufunc {!r} did not contain a loop with signature matching types "
|
48 |
+
"{!r} -> {!r}"
|
49 |
+
).format(
|
50 |
+
self.ufunc.__name__,
|
51 |
+
_unpack_tuple(self.dtypes[:self.ufunc.nin]),
|
52 |
+
_unpack_tuple(self.dtypes[self.ufunc.nin:])
|
53 |
+
)
|
54 |
+
|
55 |
+
|
56 |
+
@_display_as_base
|
57 |
+
class _UFuncBinaryResolutionError(_UFuncNoLoopError):
|
58 |
+
""" Thrown when a binary resolution fails """
|
59 |
+
def __init__(self, ufunc, dtypes):
|
60 |
+
super().__init__(ufunc, dtypes)
|
61 |
+
assert len(self.dtypes) == 2
|
62 |
+
|
63 |
+
def __str__(self):
|
64 |
+
return (
|
65 |
+
"ufunc {!r} cannot use operands with types {!r} and {!r}"
|
66 |
+
).format(
|
67 |
+
self.ufunc.__name__, *self.dtypes
|
68 |
+
)
|
69 |
+
|
70 |
+
|
71 |
+
@_display_as_base
|
72 |
+
class _UFuncCastingError(UFuncTypeError):
|
73 |
+
def __init__(self, ufunc, casting, from_, to):
|
74 |
+
super().__init__(ufunc)
|
75 |
+
self.casting = casting
|
76 |
+
self.from_ = from_
|
77 |
+
self.to = to
|
78 |
+
|
79 |
+
|
80 |
+
@_display_as_base
|
81 |
+
class _UFuncInputCastingError(_UFuncCastingError):
|
82 |
+
""" Thrown when a ufunc input cannot be casted """
|
83 |
+
def __init__(self, ufunc, casting, from_, to, i):
|
84 |
+
super().__init__(ufunc, casting, from_, to)
|
85 |
+
self.in_i = i
|
86 |
+
|
87 |
+
def __str__(self):
|
88 |
+
# only show the number if more than one input exists
|
89 |
+
i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else ""
|
90 |
+
return (
|
91 |
+
"Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting "
|
92 |
+
"rule {!r}"
|
93 |
+
).format(
|
94 |
+
self.ufunc.__name__, i_str, self.from_, self.to, self.casting
|
95 |
+
)
|
96 |
+
|
97 |
+
|
98 |
+
@_display_as_base
|
99 |
+
class _UFuncOutputCastingError(_UFuncCastingError):
|
100 |
+
""" Thrown when a ufunc output cannot be casted """
|
101 |
+
def __init__(self, ufunc, casting, from_, to, i):
|
102 |
+
super().__init__(ufunc, casting, from_, to)
|
103 |
+
self.out_i = i
|
104 |
+
|
105 |
+
def __str__(self):
|
106 |
+
# only show the number if more than one output exists
|
107 |
+
i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else ""
|
108 |
+
return (
|
109 |
+
"Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting "
|
110 |
+
"rule {!r}"
|
111 |
+
).format(
|
112 |
+
self.ufunc.__name__, i_str, self.from_, self.to, self.casting
|
113 |
+
)
|
114 |
+
|
115 |
+
|
116 |
+
@_display_as_base
|
117 |
+
class _ArrayMemoryError(MemoryError):
|
118 |
+
""" Thrown when an array cannot be allocated"""
|
119 |
+
def __init__(self, shape, dtype):
|
120 |
+
self.shape = shape
|
121 |
+
self.dtype = dtype
|
122 |
+
|
123 |
+
@property
|
124 |
+
def _total_size(self):
|
125 |
+
num_bytes = self.dtype.itemsize
|
126 |
+
for dim in self.shape:
|
127 |
+
num_bytes *= dim
|
128 |
+
return num_bytes
|
129 |
+
|
130 |
+
@staticmethod
|
131 |
+
def _size_to_string(num_bytes):
|
132 |
+
""" Convert a number of bytes into a binary size string """
|
133 |
+
|
134 |
+
# https://en.wikipedia.org/wiki/Binary_prefix
|
135 |
+
LOG2_STEP = 10
|
136 |
+
STEP = 1024
|
137 |
+
units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']
|
138 |
+
|
139 |
+
unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
|
140 |
+
unit_val = 1 << (unit_i * LOG2_STEP)
|
141 |
+
n_units = num_bytes / unit_val
|
142 |
+
del unit_val
|
143 |
+
|
144 |
+
# ensure we pick a unit that is correct after rounding
|
145 |
+
if round(n_units) == STEP:
|
146 |
+
unit_i += 1
|
147 |
+
n_units /= STEP
|
148 |
+
|
149 |
+
# deal with sizes so large that we don't have units for them
|
150 |
+
if unit_i >= len(units):
|
151 |
+
new_unit_i = len(units) - 1
|
152 |
+
n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
|
153 |
+
unit_i = new_unit_i
|
154 |
+
|
155 |
+
unit_name = units[unit_i]
|
156 |
+
# format with a sensible number of digits
|
157 |
+
if unit_i == 0:
|
158 |
+
# no decimal point on bytes
|
159 |
+
return '{:.0f} {}'.format(n_units, unit_name)
|
160 |
+
elif round(n_units) < 1000:
|
161 |
+
# 3 significant figures, if none are dropped to the left of the .
|
162 |
+
return '{:#.3g} {}'.format(n_units, unit_name)
|
163 |
+
else:
|
164 |
+
# just give all the digits otherwise
|
165 |
+
return '{:#.0f} {}'.format(n_units, unit_name)
|
166 |
+
|
167 |
+
def __str__(self):
|
168 |
+
size_str = self._size_to_string(self._total_size)
|
169 |
+
return (
|
170 |
+
"Unable to allocate {} for an array with shape {} and data type {}"
|
171 |
+
.format(size_str, self.shape, self.dtype)
|
172 |
+
)
|
env-llmeval/lib/python3.10/site-packages/numpy/core/_machar.py
ADDED
@@ -0,0 +1,356 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Machine arithmetic - determine the parameters of the
|
3 |
+
floating-point arithmetic system
|
4 |
+
|
5 |
+
Author: Pearu Peterson, September 2003
|
6 |
+
|
7 |
+
"""
|
8 |
+
__all__ = ['MachAr']
|
9 |
+
|
10 |
+
from .fromnumeric import any
|
11 |
+
from ._ufunc_config import errstate
|
12 |
+
from .._utils import set_module
|
13 |
+
|
14 |
+
# Need to speed this up...especially for longfloat
|
15 |
+
|
16 |
+
# Deprecated 2021-10-20, NumPy 1.22
|
17 |
+
class MachAr:
|
18 |
+
"""
|
19 |
+
Diagnosing machine parameters.
|
20 |
+
|
21 |
+
Attributes
|
22 |
+
----------
|
23 |
+
ibeta : int
|
24 |
+
Radix in which numbers are represented.
|
25 |
+
it : int
|
26 |
+
Number of base-`ibeta` digits in the floating point mantissa M.
|
27 |
+
machep : int
|
28 |
+
Exponent of the smallest (most negative) power of `ibeta` that,
|
29 |
+
added to 1.0, gives something different from 1.0
|
30 |
+
eps : float
|
31 |
+
Floating-point number ``beta**machep`` (floating point precision)
|
32 |
+
negep : int
|
33 |
+
Exponent of the smallest power of `ibeta` that, subtracted
|
34 |
+
from 1.0, gives something different from 1.0.
|
35 |
+
epsneg : float
|
36 |
+
Floating-point number ``beta**negep``.
|
37 |
+
iexp : int
|
38 |
+
Number of bits in the exponent (including its sign and bias).
|
39 |
+
minexp : int
|
40 |
+
Smallest (most negative) power of `ibeta` consistent with there
|
41 |
+
being no leading zeros in the mantissa.
|
42 |
+
xmin : float
|
43 |
+
Floating-point number ``beta**minexp`` (the smallest [in
|
44 |
+
magnitude] positive floating point number with full precision).
|
45 |
+
maxexp : int
|
46 |
+
Smallest (positive) power of `ibeta` that causes overflow.
|
47 |
+
xmax : float
|
48 |
+
``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
|
49 |
+
usable floating value).
|
50 |
+
irnd : int
|
51 |
+
In ``range(6)``, information on what kind of rounding is done
|
52 |
+
in addition, and on how underflow is handled.
|
53 |
+
ngrd : int
|
54 |
+
Number of 'guard digits' used when truncating the product
|
55 |
+
of two mantissas to fit the representation.
|
56 |
+
epsilon : float
|
57 |
+
Same as `eps`.
|
58 |
+
tiny : float
|
59 |
+
An alias for `smallest_normal`, kept for backwards compatibility.
|
60 |
+
huge : float
|
61 |
+
Same as `xmax`.
|
62 |
+
precision : float
|
63 |
+
``- int(-log10(eps))``
|
64 |
+
resolution : float
|
65 |
+
``- 10**(-precision)``
|
66 |
+
smallest_normal : float
|
67 |
+
The smallest positive floating point number with 1 as leading bit in
|
68 |
+
the mantissa following IEEE-754. Same as `xmin`.
|
69 |
+
smallest_subnormal : float
|
70 |
+
The smallest positive floating point number with 0 as leading bit in
|
71 |
+
the mantissa following IEEE-754.
|
72 |
+
|
73 |
+
Parameters
|
74 |
+
----------
|
75 |
+
float_conv : function, optional
|
76 |
+
Function that converts an integer or integer array to a float
|
77 |
+
or float array. Default is `float`.
|
78 |
+
int_conv : function, optional
|
79 |
+
Function that converts a float or float array to an integer or
|
80 |
+
integer array. Default is `int`.
|
81 |
+
float_to_float : function, optional
|
82 |
+
Function that converts a float array to float. Default is `float`.
|
83 |
+
Note that this does not seem to do anything useful in the current
|
84 |
+
implementation.
|
85 |
+
float_to_str : function, optional
|
86 |
+
Function that converts a single float to a string. Default is
|
87 |
+
``lambda v:'%24.16e' %v``.
|
88 |
+
title : str, optional
|
89 |
+
Title that is printed in the string representation of `MachAr`.
|
90 |
+
|
91 |
+
See Also
|
92 |
+
--------
|
93 |
+
finfo : Machine limits for floating point types.
|
94 |
+
iinfo : Machine limits for integer types.
|
95 |
+
|
96 |
+
References
|
97 |
+
----------
|
98 |
+
.. [1] Press, Teukolsky, Vetterling and Flannery,
|
99 |
+
"Numerical Recipes in C++," 2nd ed,
|
100 |
+
Cambridge University Press, 2002, p. 31.
|
101 |
+
|
102 |
+
"""
|
103 |
+
|
104 |
+
def __init__(self, float_conv=float,int_conv=int,
|
105 |
+
float_to_float=float,
|
106 |
+
float_to_str=lambda v:'%24.16e' % v,
|
107 |
+
title='Python floating point number'):
|
108 |
+
"""
|
109 |
+
|
110 |
+
float_conv - convert integer to float (array)
|
111 |
+
int_conv - convert float (array) to integer
|
112 |
+
float_to_float - convert float array to float
|
113 |
+
float_to_str - convert array float to str
|
114 |
+
title - description of used floating point numbers
|
115 |
+
|
116 |
+
"""
|
117 |
+
# We ignore all errors here because we are purposely triggering
|
118 |
+
# underflow to detect the properties of the runninng arch.
|
119 |
+
with errstate(under='ignore'):
|
120 |
+
self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)
|
121 |
+
|
122 |
+
def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
|
123 |
+
max_iterN = 10000
|
124 |
+
msg = "Did not converge after %d tries with %s"
|
125 |
+
one = float_conv(1)
|
126 |
+
two = one + one
|
127 |
+
zero = one - one
|
128 |
+
|
129 |
+
# Do we really need to do this? Aren't they 2 and 2.0?
|
130 |
+
# Determine ibeta and beta
|
131 |
+
a = one
|
132 |
+
for _ in range(max_iterN):
|
133 |
+
a = a + a
|
134 |
+
temp = a + one
|
135 |
+
temp1 = temp - a
|
136 |
+
if any(temp1 - one != zero):
|
137 |
+
break
|
138 |
+
else:
|
139 |
+
raise RuntimeError(msg % (_, one.dtype))
|
140 |
+
b = one
|
141 |
+
for _ in range(max_iterN):
|
142 |
+
b = b + b
|
143 |
+
temp = a + b
|
144 |
+
itemp = int_conv(temp-a)
|
145 |
+
if any(itemp != 0):
|
146 |
+
break
|
147 |
+
else:
|
148 |
+
raise RuntimeError(msg % (_, one.dtype))
|
149 |
+
ibeta = itemp
|
150 |
+
beta = float_conv(ibeta)
|
151 |
+
|
152 |
+
# Determine it and irnd
|
153 |
+
it = -1
|
154 |
+
b = one
|
155 |
+
for _ in range(max_iterN):
|
156 |
+
it = it + 1
|
157 |
+
b = b * beta
|
158 |
+
temp = b + one
|
159 |
+
temp1 = temp - b
|
160 |
+
if any(temp1 - one != zero):
|
161 |
+
break
|
162 |
+
else:
|
163 |
+
raise RuntimeError(msg % (_, one.dtype))
|
164 |
+
|
165 |
+
betah = beta / two
|
166 |
+
a = one
|
167 |
+
for _ in range(max_iterN):
|
168 |
+
a = a + a
|
169 |
+
temp = a + one
|
170 |
+
temp1 = temp - a
|
171 |
+
if any(temp1 - one != zero):
|
172 |
+
break
|
173 |
+
else:
|
174 |
+
raise RuntimeError(msg % (_, one.dtype))
|
175 |
+
temp = a + betah
|
176 |
+
irnd = 0
|
177 |
+
if any(temp-a != zero):
|
178 |
+
irnd = 1
|
179 |
+
tempa = a + beta
|
180 |
+
temp = tempa + betah
|
181 |
+
if irnd == 0 and any(temp-tempa != zero):
|
182 |
+
irnd = 2
|
183 |
+
|
184 |
+
# Determine negep and epsneg
|
185 |
+
negep = it + 3
|
186 |
+
betain = one / beta
|
187 |
+
a = one
|
188 |
+
for i in range(negep):
|
189 |
+
a = a * betain
|
190 |
+
b = a
|
191 |
+
for _ in range(max_iterN):
|
192 |
+
temp = one - a
|
193 |
+
if any(temp-one != zero):
|
194 |
+
break
|
195 |
+
a = a * beta
|
196 |
+
negep = negep - 1
|
197 |
+
# Prevent infinite loop on PPC with gcc 4.0:
|
198 |
+
if negep < 0:
|
199 |
+
raise RuntimeError("could not determine machine tolerance "
|
200 |
+
"for 'negep', locals() -> %s" % (locals()))
|
201 |
+
else:
|
202 |
+
raise RuntimeError(msg % (_, one.dtype))
|
203 |
+
negep = -negep
|
204 |
+
epsneg = a
|
205 |
+
|
206 |
+
# Determine machep and eps
|
207 |
+
machep = - it - 3
|
208 |
+
a = b
|
209 |
+
|
210 |
+
for _ in range(max_iterN):
|
211 |
+
temp = one + a
|
212 |
+
if any(temp-one != zero):
|
213 |
+
break
|
214 |
+
a = a * beta
|
215 |
+
machep = machep + 1
|
216 |
+
else:
|
217 |
+
raise RuntimeError(msg % (_, one.dtype))
|
218 |
+
eps = a
|
219 |
+
|
220 |
+
# Determine ngrd
|
221 |
+
ngrd = 0
|
222 |
+
temp = one + eps
|
223 |
+
if irnd == 0 and any(temp*one - one != zero):
|
224 |
+
ngrd = 1
|
225 |
+
|
226 |
+
# Determine iexp
|
227 |
+
i = 0
|
228 |
+
k = 1
|
229 |
+
z = betain
|
230 |
+
t = one + eps
|
231 |
+
nxres = 0
|
232 |
+
for _ in range(max_iterN):
|
233 |
+
y = z
|
234 |
+
z = y*y
|
235 |
+
a = z*one # Check here for underflow
|
236 |
+
temp = z*t
|
237 |
+
if any(a+a == zero) or any(abs(z) >= y):
|
238 |
+
break
|
239 |
+
temp1 = temp * betain
|
240 |
+
if any(temp1*beta == z):
|
241 |
+
break
|
242 |
+
i = i + 1
|
243 |
+
k = k + k
|
244 |
+
else:
|
245 |
+
raise RuntimeError(msg % (_, one.dtype))
|
246 |
+
if ibeta != 10:
|
247 |
+
iexp = i + 1
|
248 |
+
mx = k + k
|
249 |
+
else:
|
250 |
+
iexp = 2
|
251 |
+
iz = ibeta
|
252 |
+
while k >= iz:
|
253 |
+
iz = iz * ibeta
|
254 |
+
iexp = iexp + 1
|
255 |
+
mx = iz + iz - 1
|
256 |
+
|
257 |
+
# Determine minexp and xmin
|
258 |
+
for _ in range(max_iterN):
|
259 |
+
xmin = y
|
260 |
+
y = y * betain
|
261 |
+
a = y * one
|
262 |
+
temp = y * t
|
263 |
+
if any((a + a) != zero) and any(abs(y) < xmin):
|
264 |
+
k = k + 1
|
265 |
+
temp1 = temp * betain
|
266 |
+
if any(temp1*beta == y) and any(temp != y):
|
267 |
+
nxres = 3
|
268 |
+
xmin = y
|
269 |
+
break
|
270 |
+
else:
|
271 |
+
break
|
272 |
+
else:
|
273 |
+
raise RuntimeError(msg % (_, one.dtype))
|
274 |
+
minexp = -k
|
275 |
+
|
276 |
+
# Determine maxexp, xmax
|
277 |
+
if mx <= k + k - 3 and ibeta != 10:
|
278 |
+
mx = mx + mx
|
279 |
+
iexp = iexp + 1
|
280 |
+
maxexp = mx + minexp
|
281 |
+
irnd = irnd + nxres
|
282 |
+
if irnd >= 2:
|
283 |
+
maxexp = maxexp - 2
|
284 |
+
i = maxexp + minexp
|
285 |
+
if ibeta == 2 and not i:
|
286 |
+
maxexp = maxexp - 1
|
287 |
+
if i > 20:
|
288 |
+
maxexp = maxexp - 1
|
289 |
+
if any(a != y):
|
290 |
+
maxexp = maxexp - 2
|
291 |
+
xmax = one - epsneg
|
292 |
+
if any(xmax*one != xmax):
|
293 |
+
xmax = one - beta*epsneg
|
294 |
+
xmax = xmax / (xmin*beta*beta*beta)
|
295 |
+
i = maxexp + minexp + 3
|
296 |
+
for j in range(i):
|
297 |
+
if ibeta == 2:
|
298 |
+
xmax = xmax + xmax
|
299 |
+
else:
|
300 |
+
xmax = xmax * beta
|
301 |
+
|
302 |
+
smallest_subnormal = abs(xmin / beta ** (it))
|
303 |
+
|
304 |
+
self.ibeta = ibeta
|
305 |
+
self.it = it
|
306 |
+
self.negep = negep
|
307 |
+
self.epsneg = float_to_float(epsneg)
|
308 |
+
self._str_epsneg = float_to_str(epsneg)
|
309 |
+
self.machep = machep
|
310 |
+
self.eps = float_to_float(eps)
|
311 |
+
self._str_eps = float_to_str(eps)
|
312 |
+
self.ngrd = ngrd
|
313 |
+
self.iexp = iexp
|
314 |
+
self.minexp = minexp
|
315 |
+
self.xmin = float_to_float(xmin)
|
316 |
+
self._str_xmin = float_to_str(xmin)
|
317 |
+
self.maxexp = maxexp
|
318 |
+
self.xmax = float_to_float(xmax)
|
319 |
+
self._str_xmax = float_to_str(xmax)
|
320 |
+
self.irnd = irnd
|
321 |
+
|
322 |
+
self.title = title
|
323 |
+
# Commonly used parameters
|
324 |
+
self.epsilon = self.eps
|
325 |
+
self.tiny = self.xmin
|
326 |
+
self.huge = self.xmax
|
327 |
+
self.smallest_normal = self.xmin
|
328 |
+
self._str_smallest_normal = float_to_str(self.xmin)
|
329 |
+
self.smallest_subnormal = float_to_float(smallest_subnormal)
|
330 |
+
self._str_smallest_subnormal = float_to_str(smallest_subnormal)
|
331 |
+
|
332 |
+
import math
|
333 |
+
self.precision = int(-math.log10(float_to_float(self.eps)))
|
334 |
+
ten = two + two + two + two + two
|
335 |
+
resolution = ten ** (-self.precision)
|
336 |
+
self.resolution = float_to_float(resolution)
|
337 |
+
self._str_resolution = float_to_str(resolution)
|
338 |
+
|
339 |
+
def __str__(self):
|
340 |
+
fmt = (
|
341 |
+
'Machine parameters for %(title)s\n'
|
342 |
+
'---------------------------------------------------------------------\n'
|
343 |
+
'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
|
344 |
+
'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
|
345 |
+
'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
|
346 |
+
'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
|
347 |
+
'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
|
348 |
+
'smallest_normal=%(smallest_normal)s '
|
349 |
+
'smallest_subnormal=%(smallest_subnormal)s\n'
|
350 |
+
'---------------------------------------------------------------------\n'
|
351 |
+
)
|
352 |
+
return fmt % self.__dict__
|
353 |
+
|
354 |
+
|
355 |
+
if __name__ == '__main__':
|
356 |
+
print(MachAr())
|
env-llmeval/lib/python3.10/site-packages/numpy/core/_operand_flag_tests.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (16.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/core/_rational_tests.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (59.8 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/numpy/core/_string_helpers.py
ADDED
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
String-handling utilities to avoid locale-dependence.
|
3 |
+
|
4 |
+
Used primarily to generate type name aliases.
|
5 |
+
"""
|
6 |
+
# "import string" is costly to import!
|
7 |
+
# Construct the translation tables directly
|
8 |
+
# "A" = chr(65), "a" = chr(97)
|
9 |
+
_all_chars = tuple(map(chr, range(256)))
|
10 |
+
_ascii_upper = _all_chars[65:65+26]
|
11 |
+
_ascii_lower = _all_chars[97:97+26]
|
12 |
+
LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
|
13 |
+
UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
|
14 |
+
|
15 |
+
|
16 |
+
def english_lower(s):
|
17 |
+
""" Apply English case rules to convert ASCII strings to all lower case.
|
18 |
+
|
19 |
+
This is an internal utility function to replace calls to str.lower() such
|
20 |
+
that we can avoid changing behavior with changing locales. In particular,
|
21 |
+
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
|
22 |
+
both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.
|
23 |
+
|
24 |
+
Parameters
|
25 |
+
----------
|
26 |
+
s : str
|
27 |
+
|
28 |
+
Returns
|
29 |
+
-------
|
30 |
+
lowered : str
|
31 |
+
|
32 |
+
Examples
|
33 |
+
--------
|
34 |
+
>>> from numpy.core.numerictypes import english_lower
|
35 |
+
>>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
|
36 |
+
'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
|
37 |
+
>>> english_lower('')
|
38 |
+
''
|
39 |
+
"""
|
40 |
+
lowered = s.translate(LOWER_TABLE)
|
41 |
+
return lowered
|
42 |
+
|
43 |
+
|
44 |
+
def english_upper(s):
|
45 |
+
""" Apply English case rules to convert ASCII strings to all upper case.
|
46 |
+
|
47 |
+
This is an internal utility function to replace calls to str.upper() such
|
48 |
+
that we can avoid changing behavior with changing locales. In particular,
|
49 |
+
Turkish has distinct dotted and dotless variants of the Latin letter "I" in
|
50 |
+
both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.
|
51 |
+
|
52 |
+
Parameters
|
53 |
+
----------
|
54 |
+
s : str
|
55 |
+
|
56 |
+
Returns
|
57 |
+
-------
|
58 |
+
uppered : str
|
59 |
+
|
60 |
+
Examples
|
61 |
+
--------
|
62 |
+
>>> from numpy.core.numerictypes import english_upper
|
63 |
+
>>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
|
64 |
+
'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
|
65 |
+
>>> english_upper('')
|
66 |
+
''
|
67 |
+
"""
|
68 |
+
uppered = s.translate(UPPER_TABLE)
|
69 |
+
return uppered
|
70 |
+
|
71 |
+
|
72 |
+
def english_capitalize(s):
|
73 |
+
""" Apply English case rules to convert the first character of an ASCII
|
74 |
+
string to upper case.
|
75 |
+
|
76 |
+
This is an internal utility function to replace calls to str.capitalize()
|
77 |
+
such that we can avoid changing behavior with changing locales.
|
78 |
+
|
79 |
+
Parameters
|
80 |
+
----------
|
81 |
+
s : str
|
82 |
+
|
83 |
+
Returns
|
84 |
+
-------
|
85 |
+
capitalized : str
|
86 |
+
|
87 |
+
Examples
|
88 |
+
--------
|
89 |
+
>>> from numpy.core.numerictypes import english_capitalize
|
90 |
+
>>> english_capitalize('int8')
|
91 |
+
'Int8'
|
92 |
+
>>> english_capitalize('Int8')
|
93 |
+
'Int8'
|
94 |
+
>>> english_capitalize('')
|
95 |
+
''
|
96 |
+
"""
|
97 |
+
if s:
|
98 |
+
return english_upper(s[0]) + s[1:]
|
99 |
+
else:
|
100 |
+
return s
|
env-llmeval/lib/python3.10/site-packages/numpy/core/_ufunc_config.pyi
ADDED
@@ -0,0 +1,37 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from collections.abc import Callable
|
2 |
+
from typing import Any, Literal, TypedDict
|
3 |
+
|
4 |
+
from numpy import _SupportsWrite
|
5 |
+
|
6 |
+
_ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"]
|
7 |
+
_ErrFunc = Callable[[str, int], Any]
|
8 |
+
|
9 |
+
class _ErrDict(TypedDict):
|
10 |
+
divide: _ErrKind
|
11 |
+
over: _ErrKind
|
12 |
+
under: _ErrKind
|
13 |
+
invalid: _ErrKind
|
14 |
+
|
15 |
+
class _ErrDictOptional(TypedDict, total=False):
|
16 |
+
all: None | _ErrKind
|
17 |
+
divide: None | _ErrKind
|
18 |
+
over: None | _ErrKind
|
19 |
+
under: None | _ErrKind
|
20 |
+
invalid: None | _ErrKind
|
21 |
+
|
22 |
+
def seterr(
|
23 |
+
all: None | _ErrKind = ...,
|
24 |
+
divide: None | _ErrKind = ...,
|
25 |
+
over: None | _ErrKind = ...,
|
26 |
+
under: None | _ErrKind = ...,
|
27 |
+
invalid: None | _ErrKind = ...,
|
28 |
+
) -> _ErrDict: ...
|
29 |
+
def geterr() -> _ErrDict: ...
|
30 |
+
def setbufsize(size: int) -> int: ...
|
31 |
+
def getbufsize() -> int: ...
|
32 |
+
def seterrcall(
|
33 |
+
func: None | _ErrFunc | _SupportsWrite[str]
|
34 |
+
) -> None | _ErrFunc | _SupportsWrite[str]: ...
|
35 |
+
def geterrcall() -> None | _ErrFunc | _SupportsWrite[str]: ...
|
36 |
+
|
37 |
+
# See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings`
|
env-llmeval/lib/python3.10/site-packages/numpy/core/arrayprint.py
ADDED
@@ -0,0 +1,1725 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Array printing function
|
2 |
+
|
3 |
+
$Id: arrayprint.py,v 1.9 2005/09/13 13:58:44 teoliphant Exp $
|
4 |
+
|
5 |
+
"""
|
6 |
+
__all__ = ["array2string", "array_str", "array_repr", "set_string_function",
|
7 |
+
"set_printoptions", "get_printoptions", "printoptions",
|
8 |
+
"format_float_positional", "format_float_scientific"]
|
9 |
+
__docformat__ = 'restructuredtext'
|
10 |
+
|
11 |
+
#
|
12 |
+
# Written by Konrad Hinsen <[email protected]>
|
13 |
+
# last revision: 1996-3-13
|
14 |
+
# modified by Jim Hugunin 1997-3-3 for repr's and str's (and other details)
|
15 |
+
# and by Perry Greenfield 2000-4-1 for numarray
|
16 |
+
# and by Travis Oliphant 2005-8-22 for numpy
|
17 |
+
|
18 |
+
|
19 |
+
# Note: Both scalartypes.c.src and arrayprint.py implement strs for numpy
|
20 |
+
# scalars but for different purposes. scalartypes.c.src has str/reprs for when
|
21 |
+
# the scalar is printed on its own, while arrayprint.py has strs for when
|
22 |
+
# scalars are printed inside an ndarray. Only the latter strs are currently
|
23 |
+
# user-customizable.
|
24 |
+
|
25 |
+
import functools
|
26 |
+
import numbers
|
27 |
+
import sys
|
28 |
+
try:
|
29 |
+
from _thread import get_ident
|
30 |
+
except ImportError:
|
31 |
+
from _dummy_thread import get_ident
|
32 |
+
|
33 |
+
import numpy as np
|
34 |
+
from . import numerictypes as _nt
|
35 |
+
from .umath import absolute, isinf, isfinite, isnat
|
36 |
+
from . import multiarray
|
37 |
+
from .multiarray import (array, dragon4_positional, dragon4_scientific,
|
38 |
+
datetime_as_string, datetime_data, ndarray,
|
39 |
+
set_legacy_print_mode)
|
40 |
+
from .fromnumeric import any
|
41 |
+
from .numeric import concatenate, asarray, errstate
|
42 |
+
from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
|
43 |
+
flexible)
|
44 |
+
from .overrides import array_function_dispatch, set_module
|
45 |
+
import operator
|
46 |
+
import warnings
|
47 |
+
import contextlib
|
48 |
+
|
49 |
+
_format_options = {
|
50 |
+
'edgeitems': 3, # repr N leading and trailing items of each dimension
|
51 |
+
'threshold': 1000, # total items > triggers array summarization
|
52 |
+
'floatmode': 'maxprec',
|
53 |
+
'precision': 8, # precision of floating point representations
|
54 |
+
'suppress': False, # suppress printing small floating values in exp format
|
55 |
+
'linewidth': 75,
|
56 |
+
'nanstr': 'nan',
|
57 |
+
'infstr': 'inf',
|
58 |
+
'sign': '-',
|
59 |
+
'formatter': None,
|
60 |
+
# Internally stored as an int to simplify comparisons; converted from/to
|
61 |
+
# str/False on the way in/out.
|
62 |
+
'legacy': sys.maxsize}
|
63 |
+
|
64 |
+
def _make_options_dict(precision=None, threshold=None, edgeitems=None,
|
65 |
+
linewidth=None, suppress=None, nanstr=None, infstr=None,
|
66 |
+
sign=None, formatter=None, floatmode=None, legacy=None):
|
67 |
+
"""
|
68 |
+
Make a dictionary out of the non-None arguments, plus conversion of
|
69 |
+
*legacy* and sanity checks.
|
70 |
+
"""
|
71 |
+
|
72 |
+
options = {k: v for k, v in locals().items() if v is not None}
|
73 |
+
|
74 |
+
if suppress is not None:
|
75 |
+
options['suppress'] = bool(suppress)
|
76 |
+
|
77 |
+
modes = ['fixed', 'unique', 'maxprec', 'maxprec_equal']
|
78 |
+
if floatmode not in modes + [None]:
|
79 |
+
raise ValueError("floatmode option must be one of " +
|
80 |
+
", ".join('"{}"'.format(m) for m in modes))
|
81 |
+
|
82 |
+
if sign not in [None, '-', '+', ' ']:
|
83 |
+
raise ValueError("sign option must be one of ' ', '+', or '-'")
|
84 |
+
|
85 |
+
if legacy == False:
|
86 |
+
options['legacy'] = sys.maxsize
|
87 |
+
elif legacy == '1.13':
|
88 |
+
options['legacy'] = 113
|
89 |
+
elif legacy == '1.21':
|
90 |
+
options['legacy'] = 121
|
91 |
+
elif legacy is None:
|
92 |
+
pass # OK, do nothing.
|
93 |
+
else:
|
94 |
+
warnings.warn(
|
95 |
+
"legacy printing option can currently only be '1.13', '1.21', or "
|
96 |
+
"`False`", stacklevel=3)
|
97 |
+
|
98 |
+
if threshold is not None:
|
99 |
+
# forbid the bad threshold arg suggested by stack overflow, gh-12351
|
100 |
+
if not isinstance(threshold, numbers.Number):
|
101 |
+
raise TypeError("threshold must be numeric")
|
102 |
+
if np.isnan(threshold):
|
103 |
+
raise ValueError("threshold must be non-NAN, try "
|
104 |
+
"sys.maxsize for untruncated representation")
|
105 |
+
|
106 |
+
if precision is not None:
|
107 |
+
# forbid the bad precision arg as suggested by issue #18254
|
108 |
+
try:
|
109 |
+
options['precision'] = operator.index(precision)
|
110 |
+
except TypeError as e:
|
111 |
+
raise TypeError('precision must be an integer') from e
|
112 |
+
|
113 |
+
return options
|
114 |
+
|
115 |
+
|
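# --- Editor's illustrative sketch; not part of the original arrayprint.py ---
# _make_options_dict above drops None-valued arguments and converts the
# user-facing `legacy` value into the internal integer encoding
# ('1.13' -> 113, '1.21' -> 121, False -> sys.maxsize). The helper below is
# hypothetical (defined only for illustration, never called in the module)
# and simply exercises that conversion.
def _demo_make_options_dict():
    opts = _make_options_dict(precision=3, legacy='1.13')
    assert opts == {'precision': 3, 'legacy': 113}
    opts = _make_options_dict(legacy=False)
    assert opts == {'legacy': sys.maxsize}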
116 |
+
@set_module('numpy')
|
117 |
+
def set_printoptions(precision=None, threshold=None, edgeitems=None,
|
118 |
+
linewidth=None, suppress=None, nanstr=None, infstr=None,
|
119 |
+
formatter=None, sign=None, floatmode=None, *, legacy=None):
|
120 |
+
"""
|
121 |
+
Set printing options.
|
122 |
+
|
123 |
+
These options determine the way floating point numbers, arrays and
|
124 |
+
other NumPy objects are displayed.
|
125 |
+
|
126 |
+
Parameters
|
127 |
+
----------
|
128 |
+
precision : int or None, optional
|
129 |
+
Number of digits of precision for floating point output (default 8).
|
130 |
+
May be None if `floatmode` is not `fixed`, to print as many digits as
|
131 |
+
necessary to uniquely specify the value.
|
132 |
+
threshold : int, optional
|
133 |
+
Total number of array elements which trigger summarization
|
134 |
+
rather than full repr (default 1000).
|
135 |
+
To always use the full repr without summarization, pass `sys.maxsize`.
|
136 |
+
edgeitems : int, optional
|
137 |
+
Number of array items in summary at beginning and end of
|
138 |
+
each dimension (default 3).
|
139 |
+
linewidth : int, optional
|
140 |
+
The number of characters per line for the purpose of inserting
|
141 |
+
line breaks (default 75).
|
142 |
+
suppress : bool, optional
|
143 |
+
If True, always print floating point numbers using fixed point
|
144 |
+
notation, in which case numbers equal to zero in the current precision
|
145 |
+
will print as zero. If False, then scientific notation is used when
|
146 |
+
absolute value of the smallest number is < 1e-4 or the ratio of the
|
147 |
+
maximum absolute value to the minimum is > 1e3. The default is False.
|
148 |
+
nanstr : str, optional
|
149 |
+
String representation of floating point not-a-number (default nan).
|
150 |
+
infstr : str, optional
|
151 |
+
String representation of floating point infinity (default inf).
|
152 |
+
sign : string, either '-', '+', or ' ', optional
|
153 |
+
Controls printing of the sign of floating-point types. If '+', always
|
154 |
+
print the sign of positive values. If ' ', always prints a space
|
155 |
+
(whitespace character) in the sign position of positive values. If
|
156 |
+
'-', omit the sign character of positive values. (default '-')
|
157 |
+
formatter : dict of callables, optional
|
158 |
+
If not None, the keys should indicate the type(s) that the respective
|
159 |
+
formatting function applies to. Callables should return a string.
|
160 |
+
Types that are not specified (by their corresponding keys) are handled
|
161 |
+
by the default formatters. Individual types for which a formatter
|
162 |
+
can be set are:
|
163 |
+
|
164 |
+
- 'bool'
|
165 |
+
- 'int'
|
166 |
+
- 'timedelta' : a `numpy.timedelta64`
|
167 |
+
- 'datetime' : a `numpy.datetime64`
|
168 |
+
- 'float'
|
169 |
+
- 'longfloat' : 128-bit floats
|
170 |
+
- 'complexfloat'
|
171 |
+
- 'longcomplexfloat' : composed of two 128-bit floats
|
172 |
+
- 'numpystr' : types `numpy.bytes_` and `numpy.str_`
|
173 |
+
- 'object' : `np.object_` arrays
|
174 |
+
|
175 |
+
Other keys that can be used to set a group of types at once are:
|
176 |
+
|
177 |
+
- 'all' : sets all types
|
178 |
+
- 'int_kind' : sets 'int'
|
179 |
+
- 'float_kind' : sets 'float' and 'longfloat'
|
180 |
+
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
|
181 |
+
- 'str_kind' : sets 'numpystr'
|
182 |
+
floatmode : str, optional
|
183 |
+
Controls the interpretation of the `precision` option for
|
184 |
+
floating-point types. Can take the following values
|
185 |
+
(default maxprec_equal):
|
186 |
+
|
187 |
+
* 'fixed': Always print exactly `precision` fractional digits,
|
188 |
+
even if this would print more or fewer digits than
|
189 |
+
necessary to specify the value uniquely.
|
190 |
+
* 'unique': Print the minimum number of fractional digits necessary
|
191 |
+
to represent each value uniquely. Different elements may
|
192 |
+
have a different number of digits. The value of the
|
193 |
+
`precision` option is ignored.
|
194 |
+
* 'maxprec': Print at most `precision` fractional digits, but if
|
195 |
+
an element can be uniquely represented with fewer digits
|
196 |
+
only print it with that many.
|
197 |
+
* 'maxprec_equal': Print at most `precision` fractional digits,
|
198 |
+
but if every element in the array can be uniquely
|
199 |
+
represented with an equal number of fewer digits, use that
|
200 |
+
many digits for all elements.
|
201 |
+
legacy : string or `False`, optional
|
202 |
+
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
|
203 |
+
approximates numpy 1.13 print output by including a space in the sign
|
204 |
+
position of floats and different behavior for 0d arrays. This also
|
205 |
+
enables 1.21 legacy printing mode (described below).
|
206 |
+
|
207 |
+
If set to the string `'1.21'` enables 1.21 legacy printing mode. This
|
208 |
+
approximates numpy 1.21 print output of complex structured dtypes
|
209 |
+
by not inserting spaces after commas that separate fields and after
|
210 |
+
colons.
|
211 |
+
|
212 |
+
If set to `False`, disables legacy mode.
|
213 |
+
|
214 |
+
Unrecognized strings will be ignored with a warning for forward
|
215 |
+
compatibility.
|
216 |
+
|
217 |
+
.. versionadded:: 1.14.0
|
218 |
+
.. versionchanged:: 1.22.0
|
219 |
+
|
220 |
+
See Also
|
221 |
+
--------
|
222 |
+
get_printoptions, printoptions, set_string_function, array2string
|
223 |
+
|
224 |
+
Notes
|
225 |
+
-----
|
226 |
+
`formatter` is always reset with a call to `set_printoptions`.
|
227 |
+
|
228 |
+
Use `printoptions` as a context manager to set the values temporarily.
|
229 |
+
|
230 |
+
Examples
|
231 |
+
--------
|
232 |
+
Floating point precision can be set:
|
233 |
+
|
234 |
+
>>> np.set_printoptions(precision=4)
|
235 |
+
>>> np.array([1.123456789])
|
236 |
+
array([1.1235])
|
237 |
+
|
238 |
+
Long arrays can be summarised:
|
239 |
+
|
240 |
+
>>> np.set_printoptions(threshold=5)
|
241 |
+
>>> np.arange(10)
|
242 |
+
array([0, 1, 2, ..., 7, 8, 9])
|
243 |
+
|
244 |
+
Small results can be suppressed:
|
245 |
+
|
246 |
+
>>> eps = np.finfo(float).eps
|
247 |
+
>>> x = np.arange(4.)
|
248 |
+
>>> x**2 - (x + eps)**2
|
249 |
+
array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
|
250 |
+
>>> np.set_printoptions(suppress=True)
|
251 |
+
>>> x**2 - (x + eps)**2
|
252 |
+
array([-0., -0., 0., 0.])
|
253 |
+
|
254 |
+
A custom formatter can be used to display array elements as desired:
|
255 |
+
|
256 |
+
>>> np.set_printoptions(formatter={'all':lambda x: 'int: '+str(-x)})
|
257 |
+
>>> x = np.arange(3)
|
258 |
+
>>> x
|
259 |
+
array([int: 0, int: -1, int: -2])
|
260 |
+
>>> np.set_printoptions() # formatter gets reset
|
261 |
+
>>> x
|
262 |
+
array([0, 1, 2])
|
263 |
+
|
264 |
+
To put back the default options, you can use:
|
265 |
+
|
266 |
+
>>> np.set_printoptions(edgeitems=3, infstr='inf',
|
267 |
+
... linewidth=75, nanstr='nan', precision=8,
|
268 |
+
... suppress=False, threshold=1000, formatter=None)
|
269 |
+
|
270 |
+
Also to temporarily override options, use `printoptions` as a context manager:
|
271 |
+
|
272 |
+
>>> with np.printoptions(precision=2, suppress=True, threshold=5):
|
273 |
+
... np.linspace(0, 10, 10)
|
274 |
+
array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ])
|
275 |
+
|
276 |
+
"""
|
277 |
+
opt = _make_options_dict(precision, threshold, edgeitems, linewidth,
|
278 |
+
suppress, nanstr, infstr, sign, formatter,
|
279 |
+
floatmode, legacy)
|
280 |
+
# formatter is always reset
|
281 |
+
opt['formatter'] = formatter
|
282 |
+
_format_options.update(opt)
|
283 |
+
|
284 |
+
# set the C variable for legacy mode
|
285 |
+
if _format_options['legacy'] == 113:
|
286 |
+
set_legacy_print_mode(113)
|
287 |
+
# reset the sign option in legacy mode to avoid confusion
|
288 |
+
_format_options['sign'] = '-'
|
289 |
+
elif _format_options['legacy'] == 121:
|
290 |
+
set_legacy_print_mode(121)
|
291 |
+
elif _format_options['legacy'] == sys.maxsize:
|
292 |
+
set_legacy_print_mode(0)
|
293 |
+
|
294 |
+
|
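# --- Editor's illustrative sketch; not part of the original arrayprint.py ---
# The integer stored in _format_options['legacy'] above is mapped back to the
# public '1.13' / '1.21' / False values by get_printoptions() below, so the
# setting round-trips for callers. This hypothetical demo function is never
# called in the module; note it mutates the global print options when run.
def _demo_legacy_roundtrip():
    np.set_printoptions(legacy='1.13')
    assert np.get_printoptions()['legacy'] == '1.13'
    np.set_printoptions(legacy=False)
    assert np.get_printoptions()['legacy'] is False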
295 |
+
@set_module('numpy')
|
296 |
+
def get_printoptions():
|
297 |
+
"""
|
298 |
+
Return the current print options.
|
299 |
+
|
300 |
+
Returns
|
301 |
+
-------
|
302 |
+
print_opts : dict
|
303 |
+
Dictionary of current print options with keys
|
304 |
+
|
305 |
+
- precision : int
|
306 |
+
- threshold : int
|
307 |
+
- edgeitems : int
|
308 |
+
- linewidth : int
|
309 |
+
- suppress : bool
|
310 |
+
- nanstr : str
|
311 |
+
- infstr : str
|
312 |
+
- formatter : dict of callables
|
313 |
+
- sign : str
|
314 |
+
|
315 |
+
For a full description of these options, see `set_printoptions`.
|
316 |
+
|
317 |
+
See Also
|
318 |
+
--------
|
319 |
+
set_printoptions, printoptions, set_string_function
|
320 |
+
|
321 |
+
"""
|
322 |
+
opts = _format_options.copy()
|
323 |
+
opts['legacy'] = {
|
324 |
+
113: '1.13', 121: '1.21', sys.maxsize: False,
|
325 |
+
}[opts['legacy']]
|
326 |
+
return opts
|
327 |
+
|
328 |
+
|
329 |
+
def _get_legacy_print_mode():
|
330 |
+
"""Return the legacy print mode as an int."""
|
331 |
+
return _format_options['legacy']
|
332 |
+
|
333 |
+
|
334 |
+
@set_module('numpy')
|
335 |
+
@contextlib.contextmanager
|
336 |
+
def printoptions(*args, **kwargs):
|
337 |
+
"""Context manager for setting print options.
|
338 |
+
|
339 |
+
Set print options for the scope of the `with` block, and restore the old
|
340 |
+
options at the end. See `set_printoptions` for the full description of
|
341 |
+
available options.
|
342 |
+
|
343 |
+
Examples
|
344 |
+
--------
|
345 |
+
|
346 |
+
>>> from numpy.testing import assert_equal
|
347 |
+
>>> with np.printoptions(precision=2):
|
348 |
+
... np.array([2.0]) / 3
|
349 |
+
array([0.67])
|
350 |
+
|
351 |
+
The `as`-clause of the `with`-statement gives the current print options:
|
352 |
+
|
353 |
+
>>> with np.printoptions(precision=2) as opts:
|
354 |
+
... assert_equal(opts, np.get_printoptions())
|
355 |
+
|
356 |
+
See Also
|
357 |
+
--------
|
358 |
+
set_printoptions, get_printoptions
|
359 |
+
|
360 |
+
"""
|
361 |
+
opts = np.get_printoptions()
|
362 |
+
try:
|
363 |
+
np.set_printoptions(*args, **kwargs)
|
364 |
+
yield np.get_printoptions()
|
365 |
+
finally:
|
366 |
+
np.set_printoptions(**opts)
|
367 |
+
|
368 |
+
|
369 |
+
def _leading_trailing(a, edgeitems, index=()):
|
370 |
+
"""
|
371 |
+
Keep only the N-D corners (leading and trailing edges) of an array.
|
372 |
+
|
373 |
+
Should be passed a base-class ndarray, since it makes no guarantees about
|
374 |
+
preserving subclasses.
|
375 |
+
"""
|
376 |
+
axis = len(index)
|
377 |
+
if axis == a.ndim:
|
378 |
+
return a[index]
|
379 |
+
|
380 |
+
if a.shape[axis] > 2*edgeitems:
|
381 |
+
return concatenate((
|
382 |
+
_leading_trailing(a, edgeitems, index + np.index_exp[ :edgeitems]),
|
383 |
+
_leading_trailing(a, edgeitems, index + np.index_exp[-edgeitems:])
|
384 |
+
), axis=axis)
|
385 |
+
else:
|
386 |
+
return _leading_trailing(a, edgeitems, index + np.index_exp[:])
|
387 |
+
|
388 |
+
|
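# --- Editor's illustrative sketch; not part of the original arrayprint.py ---
# _leading_trailing keeps only `edgeitems` entries from each end of every
# axis; the summarised output later prints "..." between those corners. The
# hypothetical helper below (never called) shows the effect on a 1-D array.
def _demo_leading_trailing():
    a = np.arange(10)
    corners = _leading_trailing(a, edgeitems=2)
    assert corners.tolist() == [0, 1, 8, 9]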
389 |
+
def _object_format(o):
|
390 |
+
""" Object arrays containing lists should be printed unambiguously """
|
391 |
+
if type(o) is list:
|
392 |
+
fmt = 'list({!r})'
|
393 |
+
else:
|
394 |
+
fmt = '{!r}'
|
395 |
+
return fmt.format(o)
|
396 |
+
|
397 |
+
def repr_format(x):
|
398 |
+
return repr(x)
|
399 |
+
|
400 |
+
def str_format(x):
|
401 |
+
return str(x)
|
402 |
+
|
403 |
+
def _get_formatdict(data, *, precision, floatmode, suppress, sign, legacy,
|
404 |
+
formatter, **kwargs):
|
405 |
+
# note: extra arguments in kwargs are ignored
|
406 |
+
|
407 |
+
# wrapped in lambdas to avoid taking a code path with the wrong type of data
|
408 |
+
formatdict = {
|
409 |
+
'bool': lambda: BoolFormat(data),
|
410 |
+
'int': lambda: IntegerFormat(data),
|
411 |
+
'float': lambda: FloatingFormat(
|
412 |
+
data, precision, floatmode, suppress, sign, legacy=legacy),
|
413 |
+
'longfloat': lambda: FloatingFormat(
|
414 |
+
data, precision, floatmode, suppress, sign, legacy=legacy),
|
415 |
+
'complexfloat': lambda: ComplexFloatingFormat(
|
416 |
+
data, precision, floatmode, suppress, sign, legacy=legacy),
|
417 |
+
'longcomplexfloat': lambda: ComplexFloatingFormat(
|
418 |
+
data, precision, floatmode, suppress, sign, legacy=legacy),
|
419 |
+
'datetime': lambda: DatetimeFormat(data, legacy=legacy),
|
420 |
+
'timedelta': lambda: TimedeltaFormat(data),
|
421 |
+
'object': lambda: _object_format,
|
422 |
+
'void': lambda: str_format,
|
423 |
+
'numpystr': lambda: repr_format}
|
424 |
+
|
425 |
+
# we need to wrap values in `formatter` in a lambda, so that the interface
|
426 |
+
# is the same as the above values.
|
427 |
+
def indirect(x):
|
428 |
+
return lambda: x
|
429 |
+
|
430 |
+
if formatter is not None:
|
431 |
+
fkeys = [k for k in formatter.keys() if formatter[k] is not None]
|
432 |
+
if 'all' in fkeys:
|
433 |
+
for key in formatdict.keys():
|
434 |
+
formatdict[key] = indirect(formatter['all'])
|
435 |
+
if 'int_kind' in fkeys:
|
436 |
+
for key in ['int']:
|
437 |
+
formatdict[key] = indirect(formatter['int_kind'])
|
438 |
+
if 'float_kind' in fkeys:
|
439 |
+
for key in ['float', 'longfloat']:
|
440 |
+
formatdict[key] = indirect(formatter['float_kind'])
|
441 |
+
if 'complex_kind' in fkeys:
|
442 |
+
for key in ['complexfloat', 'longcomplexfloat']:
|
443 |
+
formatdict[key] = indirect(formatter['complex_kind'])
|
444 |
+
if 'str_kind' in fkeys:
|
445 |
+
formatdict['numpystr'] = indirect(formatter['str_kind'])
|
446 |
+
for key in formatdict.keys():
|
447 |
+
if key in fkeys:
|
448 |
+
formatdict[key] = indirect(formatter[key])
|
449 |
+
|
450 |
+
return formatdict
|
451 |
+
|
452 |
+
def _get_format_function(data, **options):
|
453 |
+
"""
|
454 |
+
find the right formatting function for the dtype_
|
455 |
+
"""
|
456 |
+
dtype_ = data.dtype
|
457 |
+
dtypeobj = dtype_.type
|
458 |
+
formatdict = _get_formatdict(data, **options)
|
459 |
+
if dtypeobj is None:
|
460 |
+
return formatdict["numpystr"]()
|
461 |
+
elif issubclass(dtypeobj, _nt.bool_):
|
462 |
+
return formatdict['bool']()
|
463 |
+
elif issubclass(dtypeobj, _nt.integer):
|
464 |
+
if issubclass(dtypeobj, _nt.timedelta64):
|
465 |
+
return formatdict['timedelta']()
|
466 |
+
else:
|
467 |
+
return formatdict['int']()
|
468 |
+
elif issubclass(dtypeobj, _nt.floating):
|
469 |
+
if issubclass(dtypeobj, _nt.longfloat):
|
470 |
+
return formatdict['longfloat']()
|
471 |
+
else:
|
472 |
+
return formatdict['float']()
|
473 |
+
elif issubclass(dtypeobj, _nt.complexfloating):
|
474 |
+
if issubclass(dtypeobj, _nt.clongfloat):
|
475 |
+
return formatdict['longcomplexfloat']()
|
476 |
+
else:
|
477 |
+
return formatdict['complexfloat']()
|
478 |
+
elif issubclass(dtypeobj, (_nt.str_, _nt.bytes_)):
|
479 |
+
return formatdict['numpystr']()
|
480 |
+
elif issubclass(dtypeobj, _nt.datetime64):
|
481 |
+
return formatdict['datetime']()
|
482 |
+
elif issubclass(dtypeobj, _nt.object_):
|
483 |
+
return formatdict['object']()
|
484 |
+
elif issubclass(dtypeobj, _nt.void):
|
485 |
+
if dtype_.names is not None:
|
486 |
+
return StructuredVoidFormat.from_data(data, **options)
|
487 |
+
else:
|
488 |
+
return formatdict['void']()
|
489 |
+
else:
|
490 |
+
return formatdict['numpystr']()
|
491 |
+
|
492 |
+
|
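# --- Editor's illustrative sketch; not part of the original arrayprint.py ---
# _get_format_function dispatches on the array's dtype: floating data gets a
# FloatingFormat, integer data an IntegerFormat, and so on. The hypothetical
# helper below (never called) checks that dispatch using the module-level
# default options; FloatingFormat and IntegerFormat are defined further down
# in this file, so the names resolve at call time.
def _demo_format_dispatch():
    ff = _get_format_function(np.array([1.5, 2.5]), **_format_options)
    assert isinstance(ff, FloatingFormat)
    intf = _get_format_function(np.array([1, 2]), **_format_options)
    assert isinstance(intf, IntegerFormat)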
493 |
+
def _recursive_guard(fillvalue='...'):
|
494 |
+
"""
|
495 |
+
Like the python 3.2 reprlib.recursive_repr, but forwards *args and **kwargs
|
496 |
+
|
497 |
+
Decorates a function such that if it calls itself with the same first
|
498 |
+
argument, it returns `fillvalue` instead of recursing.
|
499 |
+
|
500 |
+
Largely copied from reprlib.recursive_repr
|
501 |
+
"""
|
502 |
+
|
503 |
+
def decorating_function(f):
|
504 |
+
repr_running = set()
|
505 |
+
|
506 |
+
@functools.wraps(f)
|
507 |
+
def wrapper(self, *args, **kwargs):
|
508 |
+
key = id(self), get_ident()
|
509 |
+
if key in repr_running:
|
510 |
+
return fillvalue
|
511 |
+
repr_running.add(key)
|
512 |
+
try:
|
513 |
+
return f(self, *args, **kwargs)
|
514 |
+
finally:
|
515 |
+
repr_running.discard(key)
|
516 |
+
|
517 |
+
return wrapper
|
518 |
+
|
519 |
+
return decorating_function
|
520 |
+
|
521 |
+
|
522 |
+
# gracefully handle recursive calls, when object arrays contain themselves
|
523 |
+
@_recursive_guard()
|
524 |
+
def _array2string(a, options, separator=' ', prefix=""):
|
525 |
+
# The formatter __init__s in _get_format_function cannot deal with
|
526 |
+
# subclasses yet, and we also need to avoid recursion issues in
|
527 |
+
# _formatArray with subclasses which return 0d arrays in place of scalars
|
528 |
+
data = asarray(a)
|
529 |
+
if a.shape == ():
|
530 |
+
a = data
|
531 |
+
|
532 |
+
if a.size > options['threshold']:
|
533 |
+
summary_insert = "..."
|
534 |
+
data = _leading_trailing(data, options['edgeitems'])
|
535 |
+
else:
|
536 |
+
summary_insert = ""
|
537 |
+
|
538 |
+
# find the right formatting function for the array
|
539 |
+
format_function = _get_format_function(data, **options)
|
540 |
+
|
541 |
+
# skip over "["
|
542 |
+
next_line_prefix = " "
|
543 |
+
# skip over array(
|
544 |
+
next_line_prefix += " "*len(prefix)
|
545 |
+
|
546 |
+
lst = _formatArray(a, format_function, options['linewidth'],
|
547 |
+
next_line_prefix, separator, options['edgeitems'],
|
548 |
+
summary_insert, options['legacy'])
|
549 |
+
return lst
|
550 |
+
|
551 |
+
|
552 |
+
def _array2string_dispatcher(
|
553 |
+
a, max_line_width=None, precision=None,
|
554 |
+
suppress_small=None, separator=None, prefix=None,
|
555 |
+
style=None, formatter=None, threshold=None,
|
556 |
+
edgeitems=None, sign=None, floatmode=None, suffix=None,
|
557 |
+
*, legacy=None):
|
558 |
+
return (a,)
|
559 |
+
|
560 |
+
|
561 |
+
@array_function_dispatch(_array2string_dispatcher, module='numpy')
|
562 |
+
def array2string(a, max_line_width=None, precision=None,
|
563 |
+
suppress_small=None, separator=' ', prefix="",
|
564 |
+
style=np._NoValue, formatter=None, threshold=None,
|
565 |
+
edgeitems=None, sign=None, floatmode=None, suffix="",
|
566 |
+
*, legacy=None):
|
567 |
+
"""
|
568 |
+
Return a string representation of an array.
|
569 |
+
|
570 |
+
Parameters
|
571 |
+
----------
|
572 |
+
a : ndarray
|
573 |
+
Input array.
|
574 |
+
max_line_width : int, optional
|
575 |
+
Inserts newlines if text is longer than `max_line_width`.
|
576 |
+
Defaults to ``numpy.get_printoptions()['linewidth']``.
|
577 |
+
precision : int or None, optional
|
578 |
+
Floating point precision.
|
579 |
+
Defaults to ``numpy.get_printoptions()['precision']``.
|
580 |
+
suppress_small : bool, optional
|
581 |
+
Represent numbers "very close" to zero as zero; default is False.
|
582 |
+
Very close is defined by precision: if the precision is 8, e.g.,
|
583 |
+
numbers smaller (in absolute value) than 5e-9 are represented as
|
584 |
+
zero.
|
585 |
+
Defaults to ``numpy.get_printoptions()['suppress']``.
|
586 |
+
separator : str, optional
|
587 |
+
Inserted between elements.
|
588 |
+
prefix : str, optional
|
589 |
+
suffix : str, optional
|
590 |
+
The length of the prefix and suffix strings are used to respectively
|
591 |
+
align and wrap the output. An array is typically printed as::
|
592 |
+
|
593 |
+
prefix + array2string(a) + suffix
|
594 |
+
|
595 |
+
The output is left-padded by the length of the prefix string, and
|
596 |
+
wrapping is forced at the column ``max_line_width - len(suffix)``.
|
597 |
+
It should be noted that the content of prefix and suffix strings are
|
598 |
+
not included in the output.
|
599 |
+
style : _NoValue, optional
|
600 |
+
Has no effect, do not use.
|
601 |
+
|
602 |
+
.. deprecated:: 1.14.0
|
603 |
+
formatter : dict of callables, optional
|
604 |
+
If not None, the keys should indicate the type(s) that the respective
|
605 |
+
formatting function applies to. Callables should return a string.
|
606 |
+
Types that are not specified (by their corresponding keys) are handled
|
607 |
+
by the default formatters. Individual types for which a formatter
|
608 |
+
can be set are:
|
609 |
+
|
610 |
+
- 'bool'
|
611 |
+
- 'int'
|
612 |
+
- 'timedelta' : a `numpy.timedelta64`
|
613 |
+
- 'datetime' : a `numpy.datetime64`
|
614 |
+
- 'float'
|
615 |
+
- 'longfloat' : 128-bit floats
|
616 |
+
- 'complexfloat'
|
617 |
+
- 'longcomplexfloat' : composed of two 128-bit floats
|
618 |
+
- 'void' : type `numpy.void`
|
619 |
+
- 'numpystr' : types `numpy.bytes_` and `numpy.str_`
|
620 |
+
|
621 |
+
Other keys that can be used to set a group of types at once are:
|
622 |
+
|
623 |
+
- 'all' : sets all types
|
624 |
+
- 'int_kind' : sets 'int'
|
625 |
+
- 'float_kind' : sets 'float' and 'longfloat'
|
626 |
+
- 'complex_kind' : sets 'complexfloat' and 'longcomplexfloat'
|
627 |
+
- 'str_kind' : sets 'numpystr'
|
628 |
+
threshold : int, optional
|
629 |
+
Total number of array elements which trigger summarization
|
630 |
+
rather than full repr.
|
631 |
+
Defaults to ``numpy.get_printoptions()['threshold']``.
|
632 |
+
edgeitems : int, optional
|
633 |
+
Number of array items in summary at beginning and end of
|
634 |
+
each dimension.
|
635 |
+
Defaults to ``numpy.get_printoptions()['edgeitems']``.
|
636 |
+
sign : string, either '-', '+', or ' ', optional
|
637 |
+
Controls printing of the sign of floating-point types. If '+', always
|
638 |
+
print the sign of positive values. If ' ', always prints a space
|
639 |
+
(whitespace character) in the sign position of positive values. If
|
640 |
+
'-', omit the sign character of positive values.
|
641 |
+
Defaults to ``numpy.get_printoptions()['sign']``.
|
642 |
+
floatmode : str, optional
|
643 |
+
Controls the interpretation of the `precision` option for
|
644 |
+
floating-point types.
|
645 |
+
Defaults to ``numpy.get_printoptions()['floatmode']``.
|
646 |
+
Can take the following values:
|
647 |
+
|
648 |
+
- 'fixed': Always print exactly `precision` fractional digits,
|
649 |
+
even if this would print more or fewer digits than
|
650 |
+
necessary to specify the value uniquely.
|
651 |
+
- 'unique': Print the minimum number of fractional digits necessary
|
652 |
+
to represent each value uniquely. Different elements may
|
653 |
+
have a different number of digits. The value of the
|
654 |
+
`precision` option is ignored.
|
655 |
+
- 'maxprec': Print at most `precision` fractional digits, but if
|
656 |
+
an element can be uniquely represented with fewer digits
|
657 |
+
only print it with that many.
|
658 |
+
- 'maxprec_equal': Print at most `precision` fractional digits,
|
659 |
+
but if every element in the array can be uniquely
|
660 |
+
represented with an equal number of fewer digits, use that
|
661 |
+
many digits for all elements.
|
662 |
+
legacy : string or `False`, optional
|
663 |
+
If set to the string `'1.13'` enables 1.13 legacy printing mode. This
|
664 |
+
approximates numpy 1.13 print output by including a space in the sign
|
665 |
+
position of floats and different behavior for 0d arrays. If set to
|
666 |
+
`False`, disables legacy mode. Unrecognized strings will be ignored
|
667 |
+
with a warning for forward compatibility.
|
668 |
+
|
669 |
+
.. versionadded:: 1.14.0
|
670 |
+
|
671 |
+
Returns
|
672 |
+
-------
|
673 |
+
array_str : str
|
674 |
+
String representation of the array.
|
675 |
+
|
676 |
+
Raises
|
677 |
+
------
|
678 |
+
TypeError
|
679 |
+
if a callable in `formatter` does not return a string.
|
680 |
+
|
681 |
+
See Also
|
682 |
+
--------
|
683 |
+
array_str, array_repr, set_printoptions, get_printoptions
|
684 |
+
|
685 |
+
Notes
|
686 |
+
-----
|
687 |
+
If a formatter is specified for a certain type, the `precision` keyword is
|
688 |
+
ignored for that type.
|
689 |
+
|
690 |
+
This is a very flexible function; `array_repr` and `array_str` are using
|
691 |
+
`array2string` internally so keywords with the same name should work
|
692 |
+
identically in all three functions.
|
693 |
+
|
694 |
+
Examples
|
695 |
+
--------
|
696 |
+
>>> x = np.array([1e-16,1,2,3])
|
697 |
+
>>> np.array2string(x, precision=2, separator=',',
|
698 |
+
... suppress_small=True)
|
699 |
+
'[0.,1.,2.,3.]'
|
700 |
+
|
701 |
+
>>> x = np.arange(3.)
|
702 |
+
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
|
703 |
+
'[0.00 1.00 2.00]'
|
704 |
+
|
705 |
+
>>> x = np.arange(3)
|
706 |
+
>>> np.array2string(x, formatter={'int':lambda x: hex(x)})
|
707 |
+
'[0x0 0x1 0x2]'
|
708 |
+
|
709 |
+
"""
|
710 |
+
|
711 |
+
overrides = _make_options_dict(precision, threshold, edgeitems,
|
712 |
+
max_line_width, suppress_small, None, None,
|
713 |
+
sign, formatter, floatmode, legacy)
|
714 |
+
options = _format_options.copy()
|
715 |
+
options.update(overrides)
|
716 |
+
|
717 |
+
if options['legacy'] <= 113:
|
718 |
+
if style is np._NoValue:
|
719 |
+
style = repr
|
720 |
+
|
721 |
+
if a.shape == () and a.dtype.names is None:
|
722 |
+
return style(a.item())
|
723 |
+
elif style is not np._NoValue:
|
724 |
+
# Deprecation 11-9-2017 v1.14
|
725 |
+
warnings.warn("'style' argument is deprecated and no longer functional"
|
726 |
+
" except in 1.13 'legacy' mode",
|
727 |
+
DeprecationWarning, stacklevel=2)
|
728 |
+
|
729 |
+
if options['legacy'] > 113:
|
730 |
+
options['linewidth'] -= len(suffix)
|
731 |
+
|
732 |
+
# treat as a null array if any of shape elements == 0
|
733 |
+
if a.size == 0:
|
734 |
+
return "[]"
|
735 |
+
|
736 |
+
return _array2string(a, options, separator, prefix)
|
737 |
+
|
738 |
+
|
739 |
+
def _extendLine(s, line, word, line_width, next_line_prefix, legacy):
|
740 |
+
needs_wrap = len(line) + len(word) > line_width
|
741 |
+
if legacy > 113:
|
742 |
+
# don't wrap lines if it won't help
|
743 |
+
if len(line) <= len(next_line_prefix):
|
744 |
+
needs_wrap = False
|
745 |
+
|
746 |
+
if needs_wrap:
|
747 |
+
s += line.rstrip() + "\n"
|
748 |
+
line = next_line_prefix
|
749 |
+
line += word
|
750 |
+
return s, line
|
751 |
+
|
752 |
+
|
753 |
+
def _extendLine_pretty(s, line, word, line_width, next_line_prefix, legacy):
|
754 |
+
"""
|
755 |
+
Extends line with nicely formatted (possibly multi-line) string ``word``.
|
756 |
+
"""
|
757 |
+
words = word.splitlines()
|
758 |
+
if len(words) == 1 or legacy <= 113:
|
759 |
+
return _extendLine(s, line, word, line_width, next_line_prefix, legacy)
|
760 |
+
|
761 |
+
max_word_length = max(len(word) for word in words)
|
762 |
+
if (len(line) + max_word_length > line_width and
|
763 |
+
len(line) > len(next_line_prefix)):
|
764 |
+
s += line.rstrip() + '\n'
|
765 |
+
line = next_line_prefix + words[0]
|
766 |
+
indent = next_line_prefix
|
767 |
+
else:
|
768 |
+
indent = len(line)*' '
|
769 |
+
line += words[0]
|
770 |
+
|
771 |
+
for word in words[1::]:
|
772 |
+
s += line.rstrip() + '\n'
|
773 |
+
line = indent + word
|
774 |
+
|
775 |
+
suffix_length = max_word_length - len(words[-1])
|
776 |
+
line += suffix_length*' '
|
777 |
+
|
778 |
+
return s, line
|
779 |
+
|
780 |
+
def _formatArray(a, format_function, line_width, next_line_prefix,
|
781 |
+
separator, edge_items, summary_insert, legacy):
|
782 |
+
"""formatArray is designed for two modes of operation:
|
783 |
+
|
784 |
+
1. Full output
|
785 |
+
|
786 |
+
2. Summarized output
|
787 |
+
|
788 |
+
"""
|
789 |
+
def recurser(index, hanging_indent, curr_width):
|
790 |
+
"""
|
791 |
+
By using this local function, we don't need to recurse with all the
|
792 |
+
arguments. Since this function is not created recursively, the cost is
|
793 |
+
not significant
|
794 |
+
"""
|
795 |
+
axis = len(index)
|
796 |
+
axes_left = a.ndim - axis
|
797 |
+
|
798 |
+
if axes_left == 0:
|
799 |
+
return format_function(a[index])
|
800 |
+
|
801 |
+
# when recursing, add a space to align with the [ added, and reduce the
|
802 |
+
# length of the line by 1
|
803 |
+
next_hanging_indent = hanging_indent + ' '
|
804 |
+
if legacy <= 113:
|
805 |
+
next_width = curr_width
|
806 |
+
else:
|
807 |
+
next_width = curr_width - len(']')
|
808 |
+
|
809 |
+
a_len = a.shape[axis]
|
810 |
+
show_summary = summary_insert and 2*edge_items < a_len
|
811 |
+
if show_summary:
|
812 |
+
leading_items = edge_items
|
813 |
+
trailing_items = edge_items
|
814 |
+
else:
|
815 |
+
leading_items = 0
|
816 |
+
trailing_items = a_len
|
817 |
+
|
818 |
+
# stringify the array with the hanging indent on the first line too
|
819 |
+
s = ''
|
820 |
+
|
821 |
+
# last axis (rows) - wrap elements if they would not fit on one line
|
822 |
+
if axes_left == 1:
|
823 |
+
# the length up until the beginning of the separator / bracket
|
824 |
+
if legacy <= 113:
|
825 |
+
elem_width = curr_width - len(separator.rstrip())
|
826 |
+
else:
|
827 |
+
elem_width = curr_width - max(len(separator.rstrip()), len(']'))
|
828 |
+
|
829 |
+
line = hanging_indent
|
830 |
+
for i in range(leading_items):
|
831 |
+
word = recurser(index + (i,), next_hanging_indent, next_width)
|
832 |
+
s, line = _extendLine_pretty(
|
833 |
+
s, line, word, elem_width, hanging_indent, legacy)
|
834 |
+
line += separator
|
835 |
+
|
836 |
+
if show_summary:
|
837 |
+
s, line = _extendLine(
|
838 |
+
s, line, summary_insert, elem_width, hanging_indent, legacy)
|
839 |
+
if legacy <= 113:
|
840 |
+
line += ", "
|
841 |
+
else:
|
842 |
+
line += separator
|
843 |
+
|
844 |
+
for i in range(trailing_items, 1, -1):
|
845 |
+
word = recurser(index + (-i,), next_hanging_indent, next_width)
|
846 |
+
s, line = _extendLine_pretty(
|
847 |
+
s, line, word, elem_width, hanging_indent, legacy)
|
848 |
+
line += separator
|
849 |
+
|
850 |
+
if legacy <= 113:
|
851 |
+
# width of the separator is not considered on 1.13
|
852 |
+
elem_width = curr_width
|
853 |
+
word = recurser(index + (-1,), next_hanging_indent, next_width)
|
854 |
+
s, line = _extendLine_pretty(
|
855 |
+
s, line, word, elem_width, hanging_indent, legacy)
|
856 |
+
|
857 |
+
s += line
|
858 |
+
|
859 |
+
# other axes - insert newlines between rows
|
860 |
+
else:
|
861 |
+
s = ''
|
862 |
+
line_sep = separator.rstrip() + '\n'*(axes_left - 1)
|
863 |
+
|
864 |
+
for i in range(leading_items):
|
865 |
+
nested = recurser(index + (i,), next_hanging_indent, next_width)
|
866 |
+
s += hanging_indent + nested + line_sep
|
867 |
+
|
868 |
+
if show_summary:
|
869 |
+
if legacy <= 113:
|
870 |
+
# trailing space, fixed nbr of newlines, and fixed separator
|
871 |
+
s += hanging_indent + summary_insert + ", \n"
|
872 |
+
else:
|
873 |
+
s += hanging_indent + summary_insert + line_sep
|
874 |
+
|
875 |
+
for i in range(trailing_items, 1, -1):
|
876 |
+
nested = recurser(index + (-i,), next_hanging_indent,
|
877 |
+
next_width)
|
878 |
+
s += hanging_indent + nested + line_sep
|
879 |
+
|
880 |
+
nested = recurser(index + (-1,), next_hanging_indent, next_width)
|
881 |
+
s += hanging_indent + nested
|
882 |
+
|
883 |
+
# remove the hanging indent, and wrap in []
|
884 |
+
s = '[' + s[len(hanging_indent):] + ']'
|
885 |
+
return s
|
886 |
+
|
887 |
+
try:
|
888 |
+
# invoke the recursive part with an initial index and prefix
|
889 |
+
return recurser(index=(),
|
890 |
+
hanging_indent=next_line_prefix,
|
891 |
+
curr_width=line_width)
|
892 |
+
finally:
|
893 |
+
# recursive closures have a cyclic reference to themselves, which
|
894 |
+
# requires gc to collect (gh-10620). To avoid this problem, for
|
895 |
+
# performance and PyPy friendliness, we break the cycle:
|
896 |
+
recurser = None
|
897 |
+
|
898 |
+
def _none_or_positive_arg(x, name):
|
899 |
+
if x is None:
|
900 |
+
return -1
|
901 |
+
if x < 0:
|
902 |
+
raise ValueError("{} must be >= 0".format(name))
|
903 |
+
return x
|
904 |
+
|
905 |
+
class FloatingFormat:
|
906 |
+
""" Formatter for subtypes of np.floating """
|
907 |
+
def __init__(self, data, precision, floatmode, suppress_small, sign=False,
|
908 |
+
*, legacy=None):
|
909 |
+
# for backcompatibility, accept bools
|
910 |
+
if isinstance(sign, bool):
|
911 |
+
sign = '+' if sign else '-'
|
912 |
+
|
913 |
+
self._legacy = legacy
|
914 |
+
if self._legacy <= 113:
|
915 |
+
# when not 0d, legacy does not support '-'
|
916 |
+
if data.shape != () and sign == '-':
|
917 |
+
sign = ' '
|
918 |
+
|
919 |
+
self.floatmode = floatmode
|
920 |
+
if floatmode == 'unique':
|
921 |
+
self.precision = None
|
922 |
+
else:
|
923 |
+
self.precision = precision
|
924 |
+
|
925 |
+
self.precision = _none_or_positive_arg(self.precision, 'precision')
|
926 |
+
|
927 |
+
self.suppress_small = suppress_small
|
928 |
+
self.sign = sign
|
929 |
+
self.exp_format = False
|
930 |
+
self.large_exponent = False
|
931 |
+
|
932 |
+
self.fillFormat(data)
|
933 |
+
|
934 |
+
def fillFormat(self, data):
|
935 |
+
# only the finite values are used to compute the number of digits
|
936 |
+
finite_vals = data[isfinite(data)]
|
937 |
+
|
938 |
+
# choose exponential mode based on the non-zero finite values:
|
939 |
+
abs_non_zero = absolute(finite_vals[finite_vals != 0])
|
940 |
+
if len(abs_non_zero) != 0:
|
941 |
+
max_val = np.max(abs_non_zero)
|
942 |
+
min_val = np.min(abs_non_zero)
|
943 |
+
with errstate(over='ignore'): # division can overflow
|
944 |
+
if max_val >= 1.e8 or (not self.suppress_small and
|
945 |
+
(min_val < 0.0001 or max_val/min_val > 1000.)):
|
946 |
+
self.exp_format = True
|
947 |
+
|
948 |
+
# do a first pass of printing all the numbers, to determine sizes
|
949 |
+
if len(finite_vals) == 0:
|
950 |
+
self.pad_left = 0
|
951 |
+
self.pad_right = 0
|
952 |
+
self.trim = '.'
|
953 |
+
self.exp_size = -1
|
954 |
+
self.unique = True
|
955 |
+
self.min_digits = None
|
956 |
+
elif self.exp_format:
|
957 |
+
trim, unique = '.', True
|
958 |
+
if self.floatmode == 'fixed' or self._legacy <= 113:
|
959 |
+
trim, unique = 'k', False
|
960 |
+
strs = (dragon4_scientific(x, precision=self.precision,
|
961 |
+
unique=unique, trim=trim, sign=self.sign == '+')
|
962 |
+
for x in finite_vals)
|
963 |
+
frac_strs, _, exp_strs = zip(*(s.partition('e') for s in strs))
|
964 |
+
int_part, frac_part = zip(*(s.split('.') for s in frac_strs))
|
965 |
+
self.exp_size = max(len(s) for s in exp_strs) - 1
|
966 |
+
|
967 |
+
self.trim = 'k'
|
968 |
+
self.precision = max(len(s) for s in frac_part)
|
969 |
+
self.min_digits = self.precision
|
970 |
+
self.unique = unique
|
971 |
+
|
972 |
+
# for back-compat with np 1.13, use 2 spaces & sign and full prec
|
973 |
+
if self._legacy <= 113:
|
974 |
+
self.pad_left = 3
|
975 |
+
else:
|
976 |
+
# this should be only 1 or 2. Can be calculated from sign.
|
977 |
+
self.pad_left = max(len(s) for s in int_part)
|
978 |
+
# pad_right is only needed for nan length calculation
|
979 |
+
self.pad_right = self.exp_size + 2 + self.precision
|
980 |
+
else:
|
981 |
+
trim, unique = '.', True
|
982 |
+
if self.floatmode == 'fixed':
|
983 |
+
trim, unique = 'k', False
|
984 |
+
strs = (dragon4_positional(x, precision=self.precision,
|
985 |
+
fractional=True,
|
986 |
+
unique=unique, trim=trim,
|
987 |
+
sign=self.sign == '+')
|
988 |
+
for x in finite_vals)
|
989 |
+
int_part, frac_part = zip(*(s.split('.') for s in strs))
|
990 |
+
if self._legacy <= 113:
|
991 |
+
self.pad_left = 1 + max(len(s.lstrip('-+')) for s in int_part)
|
992 |
+
else:
|
993 |
+
self.pad_left = max(len(s) for s in int_part)
|
994 |
+
self.pad_right = max(len(s) for s in frac_part)
|
995 |
+
self.exp_size = -1
|
996 |
+
self.unique = unique
|
997 |
+
|
998 |
+
if self.floatmode in ['fixed', 'maxprec_equal']:
|
999 |
+
self.precision = self.min_digits = self.pad_right
|
1000 |
+
self.trim = 'k'
|
1001 |
+
else:
|
1002 |
+
self.trim = '.'
|
1003 |
+
self.min_digits = 0
|
1004 |
+
|
1005 |
+
if self._legacy > 113:
|
1006 |
+
# account for sign = ' ' by adding one to pad_left
|
1007 |
+
if self.sign == ' ' and not any(np.signbit(finite_vals)):
|
1008 |
+
self.pad_left += 1
|
1009 |
+
|
1010 |
+
# if there are non-finite values, may need to increase pad_left
|
1011 |
+
if data.size != finite_vals.size:
|
1012 |
+
neginf = self.sign != '-' or any(data[isinf(data)] < 0)
|
1013 |
+
nanlen = len(_format_options['nanstr'])
|
1014 |
+
inflen = len(_format_options['infstr']) + neginf
|
1015 |
+
offset = self.pad_right + 1 # +1 for decimal pt
|
1016 |
+
self.pad_left = max(self.pad_left, nanlen - offset, inflen - offset)
|
1017 |
+
|
1018 |
+
def __call__(self, x):
|
1019 |
+
if not np.isfinite(x):
|
1020 |
+
with errstate(invalid='ignore'):
|
1021 |
+
if np.isnan(x):
|
1022 |
+
sign = '+' if self.sign == '+' else ''
|
1023 |
+
ret = sign + _format_options['nanstr']
|
1024 |
+
else: # isinf
|
1025 |
+
sign = '-' if x < 0 else '+' if self.sign == '+' else ''
|
1026 |
+
ret = sign + _format_options['infstr']
|
1027 |
+
return ' '*(self.pad_left + self.pad_right + 1 - len(ret)) + ret
|
1028 |
+
|
1029 |
+
if self.exp_format:
|
1030 |
+
return dragon4_scientific(x,
|
1031 |
+
precision=self.precision,
|
1032 |
+
min_digits=self.min_digits,
|
1033 |
+
unique=self.unique,
|
1034 |
+
trim=self.trim,
|
1035 |
+
sign=self.sign == '+',
|
1036 |
+
pad_left=self.pad_left,
|
1037 |
+
exp_digits=self.exp_size)
|
1038 |
+
else:
|
1039 |
+
return dragon4_positional(x,
|
1040 |
+
precision=self.precision,
|
1041 |
+
min_digits=self.min_digits,
|
1042 |
+
unique=self.unique,
|
1043 |
+
fractional=True,
|
1044 |
+
trim=self.trim,
|
1045 |
+
sign=self.sign == '+',
|
1046 |
+
pad_left=self.pad_left,
|
1047 |
+
pad_right=self.pad_right)
|
1048 |
+
|
1049 |
+
|
1050 |
+
@set_module('numpy')
|
1051 |
+
def format_float_scientific(x, precision=None, unique=True, trim='k',
|
1052 |
+
sign=False, pad_left=None, exp_digits=None,
|
1053 |
+
min_digits=None):
|
1054 |
+
"""
|
1055 |
+
Format a floating-point scalar as a decimal string in scientific notation.
|
1056 |
+
|
1057 |
+
Provides control over rounding, trimming and padding. Uses and assumes
|
1058 |
+
IEEE unbiased rounding. Uses the "Dragon4" algorithm.
|
1059 |
+
|
1060 |
+
Parameters
|
1061 |
+
----------
|
1062 |
+
x : python float or numpy floating scalar
|
1063 |
+
Value to format.
|
1064 |
+
precision : non-negative integer or None, optional
|
1065 |
+
Maximum number of digits to print. May be None if `unique` is
|
1066 |
+
`True`, but must be an integer if unique is `False`.
|
1067 |
+
unique : boolean, optional
|
1068 |
+
If `True`, use a digit-generation strategy which gives the shortest
|
1069 |
+
representation which uniquely identifies the floating-point number from
|
1070 |
+
other values of the same type, by judicious rounding. If `precision`
|
1071 |
+
is given fewer digits than necessary can be printed. If `min_digits`
|
1072 |
+
is given more can be printed, in which cases the last digit is rounded
|
1073 |
+
with unbiased rounding.
|
1074 |
+
If `False`, digits are generated as if printing an infinite-precision
|
1075 |
+
value and stopping after `precision` digits, rounding the remaining
|
1076 |
+
value with unbiased rounding
|
1077 |
+
trim : one of 'k', '.', '0', '-', optional
|
1078 |
+
Controls post-processing trimming of trailing digits, as follows:
|
1079 |
+
|
1080 |
+
* 'k' : keep trailing zeros, keep decimal point (no trimming)
|
1081 |
+
* '.' : trim all trailing zeros, leave decimal point
|
1082 |
+
* '0' : trim all but the zero before the decimal point. Insert the
|
1083 |
+
zero if it is missing.
|
1084 |
+
* '-' : trim trailing zeros and any trailing decimal point
|
1085 |
+
sign : boolean, optional
|
1086 |
+
Whether to show the sign for positive values.
|
1087 |
+
pad_left : non-negative integer, optional
|
1088 |
+
Pad the left side of the string with whitespace until at least that
|
1089 |
+
many characters are to the left of the decimal point.
|
1090 |
+
exp_digits : non-negative integer, optional
|
1091 |
+
Pad the exponent with zeros until it contains at least this many digits.
|
1092 |
+
If omitted, the exponent will be at least 2 digits.
|
1093 |
+
min_digits : non-negative integer or None, optional
|
1094 |
+
Minimum number of digits to print. This only has an effect for
|
1095 |
+
`unique=True`. In that case more digits than necessary to uniquely
|
1096 |
+
identify the value may be printed and rounded unbiased.
|
1097 |
+
|
1098 |
+
.. versionadded:: 1.21.0
|
1099 |
+
|
1100 |
+
Returns
|
1101 |
+
-------
|
1102 |
+
rep : string
|
1103 |
+
The string representation of the floating point value
|
1104 |
+
|
1105 |
+
See Also
|
1106 |
+
--------
|
1107 |
+
format_float_positional
|
1108 |
+
|
1109 |
+
Examples
|
1110 |
+
--------
|
1111 |
+
>>> np.format_float_scientific(np.float32(np.pi))
|
1112 |
+
'3.1415927e+00'
|
1113 |
+
>>> s = np.float32(1.23e24)
|
1114 |
+
>>> np.format_float_scientific(s, unique=False, precision=15)
|
1115 |
+
'1.230000071797338e+24'
|
1116 |
+
>>> np.format_float_scientific(s, exp_digits=4)
|
1117 |
+
'1.23e+0024'
|
1118 |
+
"""
|
1119 |
+
precision = _none_or_positive_arg(precision, 'precision')
|
1120 |
+
pad_left = _none_or_positive_arg(pad_left, 'pad_left')
|
1121 |
+
exp_digits = _none_or_positive_arg(exp_digits, 'exp_digits')
|
1122 |
+
min_digits = _none_or_positive_arg(min_digits, 'min_digits')
|
1123 |
+
if min_digits > 0 and precision > 0 and min_digits > precision:
|
1124 |
+
raise ValueError("min_digits must be less than or equal to precision")
|
1125 |
+
return dragon4_scientific(x, precision=precision, unique=unique,
|
1126 |
+
trim=trim, sign=sign, pad_left=pad_left,
|
1127 |
+
exp_digits=exp_digits, min_digits=min_digits)
|
1128 |
+
|
1129 |
+
|
1130 |
+
@set_module('numpy')
|
1131 |
+
def format_float_positional(x, precision=None, unique=True,
|
1132 |
+
fractional=True, trim='k', sign=False,
|
1133 |
+
pad_left=None, pad_right=None, min_digits=None):
|
1134 |
+
"""
|
1135 |
+
Format a floating-point scalar as a decimal string in positional notation.
|
1136 |
+
|
1137 |
+
Provides control over rounding, trimming and padding. Uses and assumes
|
1138 |
+
IEEE unbiased rounding. Uses the "Dragon4" algorithm.
|
1139 |
+
|
1140 |
+
Parameters
|
1141 |
+
----------
|
1142 |
+
x : python float or numpy floating scalar
|
1143 |
+
Value to format.
|
1144 |
+
precision : non-negative integer or None, optional
|
1145 |
+
Maximum number of digits to print. May be None if `unique` is
|
1146 |
+
`True`, but must be an integer if unique is `False`.
|
1147 |
+
unique : boolean, optional
|
1148 |
+
If `True`, use a digit-generation strategy which gives the shortest
|
1149 |
+
representation which uniquely identifies the floating-point number from
|
1150 |
+
other values of the same type, by judicious rounding. If `precision`
|
1151 |
+
is given fewer digits than necessary can be printed, or if `min_digits`
|
1152 |
+
is given more can be printed, in which cases the last digit is rounded
|
1153 |
+
with unbiased rounding.
|
1154 |
+
If `False`, digits are generated as if printing an infinite-precision
|
1155 |
+
value and stopping after `precision` digits, rounding the remaining
|
1156 |
+
value with unbiased rounding
|
1157 |
+
fractional : boolean, optional
|
1158 |
+
If `True`, the cutoffs of `precision` and `min_digits` refer to the
|
1159 |
+
total number of digits after the decimal point, including leading
|
1160 |
+
zeros.
|
1161 |
+
If `False`, `precision` and `min_digits` refer to the total number of
|
1162 |
+
significant digits, before or after the decimal point, ignoring leading
|
1163 |
+
zeros.
|
1164 |
+
trim : one of 'k', '.', '0', '-', optional
|
1165 |
+
Controls post-processing trimming of trailing digits, as follows:
|
1166 |
+
|
1167 |
+
* 'k' : keep trailing zeros, keep decimal point (no trimming)
|
1168 |
+
* '.' : trim all trailing zeros, leave decimal point
|
1169 |
+
* '0' : trim all but the zero before the decimal point. Insert the
|
1170 |
+
zero if it is missing.
|
1171 |
+
* '-' : trim trailing zeros and any trailing decimal point
|
1172 |
+
sign : boolean, optional
|
1173 |
+
Whether to show the sign for positive values.
|
1174 |
+
pad_left : non-negative integer, optional
|
1175 |
+
Pad the left side of the string with whitespace until at least that
|
1176 |
+
many characters are to the left of the decimal point.
|
1177 |
+
pad_right : non-negative integer, optional
|
1178 |
+
Pad the right side of the string with whitespace until at least that
|
1179 |
+
many characters are to the right of the decimal point.
|
1180 |
+
min_digits : non-negative integer or None, optional
|
1181 |
+
Minimum number of digits to print. Only has an effect if `unique=True`
|
1182 |
+
in which case additional digits past those necessary to uniquely
|
1183 |
+
identify the value may be printed, rounding the last additional digit.
|
1184 |
+
|
1185 |
+
.. versionadded:: 1.21.0
|
1186 |
+
|
1187 |
+
Returns
|
1188 |
+
-------
|
1189 |
+
rep : string
|
1190 |
+
The string representation of the floating point value
|
1191 |
+
|
1192 |
+
See Also
|
1193 |
+
--------
|
1194 |
+
format_float_scientific
|
1195 |
+
|
1196 |
+
Examples
|
1197 |
+
--------
|
1198 |
+
>>> np.format_float_positional(np.float32(np.pi))
|
1199 |
+
'3.1415927'
|
1200 |
+
>>> np.format_float_positional(np.float16(np.pi))
|
1201 |
+
'3.14'
|
1202 |
+
>>> np.format_float_positional(np.float16(0.3))
|
1203 |
+
'0.3'
|
1204 |
+
>>> np.format_float_positional(np.float16(0.3), unique=False, precision=10)
|
1205 |
+
'0.3000488281'
|
1206 |
+
"""
|
1207 |
+
precision = _none_or_positive_arg(precision, 'precision')
|
1208 |
+
pad_left = _none_or_positive_arg(pad_left, 'pad_left')
|
1209 |
+
pad_right = _none_or_positive_arg(pad_right, 'pad_right')
|
1210 |
+
min_digits = _none_or_positive_arg(min_digits, 'min_digits')
|
1211 |
+
if not fractional and precision == 0:
|
1212 |
+
raise ValueError("precision must be greater than 0 if "
|
1213 |
+
"fractional=False")
|
1214 |
+
if min_digits > 0 and precision > 0 and min_digits > precision:
|
1215 |
+
raise ValueError("min_digits must be less than or equal to precision")
|
1216 |
+
return dragon4_positional(x, precision=precision, unique=unique,
|
1217 |
+
fractional=fractional, trim=trim,
|
1218 |
+
sign=sign, pad_left=pad_left,
|
1219 |
+
pad_right=pad_right, min_digits=min_digits)
|
1220 |
+
|
1221 |
+
|
1222 |
+
class IntegerFormat:
|
1223 |
+
def __init__(self, data):
|
1224 |
+
if data.size > 0:
|
1225 |
+
max_str_len = max(len(str(np.max(data))),
|
1226 |
+
len(str(np.min(data))))
|
1227 |
+
else:
|
1228 |
+
max_str_len = 0
|
1229 |
+
self.format = '%{}d'.format(max_str_len)
|
1230 |
+
|
1231 |
+
def __call__(self, x):
|
1232 |
+
return self.format % x
|
1233 |
+
|
1234 |
+
|
1235 |
+
class BoolFormat:
|
1236 |
+
def __init__(self, data, **kwargs):
|
1237 |
+
# add an extra space so " True" and "False" have the same length and
|
1238 |
+
# array elements align nicely when printed, except in 0d arrays
|
1239 |
+
self.truestr = ' True' if data.shape != () else 'True'
|
1240 |
+
|
1241 |
+
def __call__(self, x):
|
1242 |
+
return self.truestr if x else "False"
|
1243 |
+
|
1244 |
+
|
1245 |
+
class ComplexFloatingFormat:
|
1246 |
+
""" Formatter for subtypes of np.complexfloating """
|
1247 |
+
def __init__(self, x, precision, floatmode, suppress_small,
|
1248 |
+
sign=False, *, legacy=None):
|
1249 |
+
# for backcompatibility, accept bools
|
1250 |
+
if isinstance(sign, bool):
|
1251 |
+
sign = '+' if sign else '-'
|
1252 |
+
|
1253 |
+
floatmode_real = floatmode_imag = floatmode
|
1254 |
+
if legacy <= 113:
|
1255 |
+
floatmode_real = 'maxprec_equal'
|
1256 |
+
floatmode_imag = 'maxprec'
|
1257 |
+
|
1258 |
+
self.real_format = FloatingFormat(
|
1259 |
+
x.real, precision, floatmode_real, suppress_small,
|
1260 |
+
sign=sign, legacy=legacy
|
1261 |
+
)
|
1262 |
+
self.imag_format = FloatingFormat(
|
1263 |
+
x.imag, precision, floatmode_imag, suppress_small,
|
1264 |
+
sign='+', legacy=legacy
|
1265 |
+
)
|
1266 |
+
|
1267 |
+
def __call__(self, x):
|
1268 |
+
r = self.real_format(x.real)
|
1269 |
+
i = self.imag_format(x.imag)
|
1270 |
+
|
1271 |
+
# add the 'j' before the terminal whitespace in i
|
1272 |
+
sp = len(i.rstrip())
|
1273 |
+
i = i[:sp] + 'j' + i[sp:]
|
1274 |
+
|
1275 |
+
return r + i
|
1276 |
+
|
1277 |
+
|
1278 |
+
class _TimelikeFormat:
|
1279 |
+
def __init__(self, data):
|
1280 |
+
non_nat = data[~isnat(data)]
|
1281 |
+
if len(non_nat) > 0:
|
1282 |
+
# Max str length of non-NaT elements
|
1283 |
+
max_str_len = max(len(self._format_non_nat(np.max(non_nat))),
|
1284 |
+
len(self._format_non_nat(np.min(non_nat))))
|
1285 |
+
else:
|
1286 |
+
max_str_len = 0
|
1287 |
+
if len(non_nat) < data.size:
|
1288 |
+
# data contains a NaT
|
1289 |
+
max_str_len = max(max_str_len, 5)
|
1290 |
+
self._format = '%{}s'.format(max_str_len)
|
1291 |
+
self._nat = "'NaT'".rjust(max_str_len)
|
1292 |
+
|
1293 |
+
def _format_non_nat(self, x):
|
1294 |
+
# override in subclass
|
1295 |
+
raise NotImplementedError
|
1296 |
+
|
1297 |
+
def __call__(self, x):
|
1298 |
+
if isnat(x):
|
1299 |
+
return self._nat
|
1300 |
+
else:
|
1301 |
+
return self._format % self._format_non_nat(x)
|
1302 |
+
|
1303 |
+
|
1304 |
+
class DatetimeFormat(_TimelikeFormat):
|
1305 |
+
def __init__(self, x, unit=None, timezone=None, casting='same_kind',
|
1306 |
+
legacy=False):
|
1307 |
+
# Get the unit from the dtype
|
1308 |
+
if unit is None:
|
1309 |
+
if x.dtype.kind == 'M':
|
1310 |
+
unit = datetime_data(x.dtype)[0]
|
1311 |
+
else:
|
1312 |
+
unit = 's'
|
1313 |
+
|
1314 |
+
if timezone is None:
|
1315 |
+
timezone = 'naive'
|
1316 |
+
self.timezone = timezone
|
1317 |
+
self.unit = unit
|
1318 |
+
self.casting = casting
|
1319 |
+
self.legacy = legacy
|
1320 |
+
|
1321 |
+
# must be called after the above are configured
|
1322 |
+
super().__init__(x)
|
1323 |
+
|
1324 |
+
def __call__(self, x):
|
1325 |
+
if self.legacy <= 113:
|
1326 |
+
return self._format_non_nat(x)
|
1327 |
+
return super().__call__(x)
|
1328 |
+
|
1329 |
+
def _format_non_nat(self, x):
|
1330 |
+
return "'%s'" % datetime_as_string(x,
|
1331 |
+
unit=self.unit,
|
1332 |
+
timezone=self.timezone,
|
1333 |
+
casting=self.casting)
|
1334 |
+
|
1335 |
+
|
1336 |
+
class TimedeltaFormat(_TimelikeFormat):
|
1337 |
+
def _format_non_nat(self, x):
|
1338 |
+
return str(x.astype('i8'))
|
1339 |
+
|
1340 |
+
|
1341 |
+
class SubArrayFormat:
|
1342 |
+
def __init__(self, format_function, **options):
|
1343 |
+
self.format_function = format_function
|
1344 |
+
self.threshold = options['threshold']
|
1345 |
+
self.edge_items = options['edgeitems']
|
1346 |
+
|
1347 |
+
def __call__(self, a):
|
1348 |
+
self.summary_insert = "..." if a.size > self.threshold else ""
|
1349 |
+
return self.format_array(a)
|
1350 |
+
|
1351 |
+
def format_array(self, a):
|
1352 |
+
if np.ndim(a) == 0:
|
1353 |
+
return self.format_function(a)
|
1354 |
+
|
1355 |
+
if self.summary_insert and a.shape[0] > 2*self.edge_items:
|
1356 |
+
formatted = (
|
1357 |
+
[self.format_array(a_) for a_ in a[:self.edge_items]]
|
1358 |
+
+ [self.summary_insert]
|
1359 |
+
+ [self.format_array(a_) for a_ in a[-self.edge_items:]]
|
1360 |
+
)
|
1361 |
+
else:
|
1362 |
+
formatted = [self.format_array(a_) for a_ in a]
|
1363 |
+
|
1364 |
+
return "[" + ", ".join(formatted) + "]"
|
1365 |
+
|
1366 |
+
|
1367 |
+
class StructuredVoidFormat:
|
1368 |
+
"""
|
1369 |
+
Formatter for structured np.void objects.
|
1370 |
+
|
1371 |
+
This does not work on structured alias types like np.dtype(('i4', 'i2,i2')),
|
1372 |
+
as alias scalars lose their field information, and the implementation
|
1373 |
+
relies upon np.void.__getitem__.
|
1374 |
+
"""
|
1375 |
+
def __init__(self, format_functions):
|
1376 |
+
self.format_functions = format_functions
|
1377 |
+
|
1378 |
+
@classmethod
|
1379 |
+
def from_data(cls, data, **options):
|
1380 |
+
"""
|
1381 |
+
This is a second way to initialize StructuredVoidFormat, using the raw data
|
1382 |
+
as input. Added to avoid changing the signature of __init__.
|
1383 |
+
"""
|
1384 |
+
format_functions = []
|
1385 |
+
for field_name in data.dtype.names:
|
1386 |
+
format_function = _get_format_function(data[field_name], **options)
|
1387 |
+
if data.dtype[field_name].shape != ():
|
1388 |
+
format_function = SubArrayFormat(format_function, **options)
|
1389 |
+
format_functions.append(format_function)
|
1390 |
+
return cls(format_functions)
|
1391 |
+
|
1392 |
+
def __call__(self, x):
|
1393 |
+
str_fields = [
|
1394 |
+
format_function(field)
|
1395 |
+
for field, format_function in zip(x, self.format_functions)
|
1396 |
+
]
|
1397 |
+
if len(str_fields) == 1:
|
1398 |
+
return "({},)".format(str_fields[0])
|
1399 |
+
else:
|
1400 |
+
return "({})".format(", ".join(str_fields))
|
1401 |
+
|
1402 |
+
|
1403 |
+
def _void_scalar_repr(x):
|
1404 |
+
"""
|
1405 |
+
Implements the repr for structured-void scalars. It is called from the
|
1406 |
+
scalartypes.c.src code, and is placed here because it uses the elementwise
|
1407 |
+
formatters defined above.
|
1408 |
+
"""
|
1409 |
+
return StructuredVoidFormat.from_data(array(x), **_format_options)(x)
|
1410 |
+
|
1411 |
+
|
1412 |
+
_typelessdata = [int_, float_, complex_, bool_]
|
1413 |
+
|
1414 |
+
|
1415 |
+
def dtype_is_implied(dtype):
|
1416 |
+
"""
|
1417 |
+
Determine if the given dtype is implied by the representation of its values.
|
1418 |
+
|
1419 |
+
Parameters
|
1420 |
+
----------
|
1421 |
+
dtype : dtype
|
1422 |
+
Data type
|
1423 |
+
|
1424 |
+
Returns
|
1425 |
+
-------
|
1426 |
+
implied : bool
|
1427 |
+
True if the dtype is implied by the representation of its values.
|
1428 |
+
|
1429 |
+
Examples
|
1430 |
+
--------
|
1431 |
+
>>> np.core.arrayprint.dtype_is_implied(int)
|
1432 |
+
True
|
1433 |
+
>>> np.array([1, 2, 3], int)
|
1434 |
+
array([1, 2, 3])
|
1435 |
+
>>> np.core.arrayprint.dtype_is_implied(np.int8)
|
1436 |
+
False
|
1437 |
+
>>> np.array([1, 2, 3], np.int8)
|
1438 |
+
array([1, 2, 3], dtype=int8)
|
1439 |
+
"""
|
1440 |
+
dtype = np.dtype(dtype)
|
1441 |
+
if _format_options['legacy'] <= 113 and dtype.type == bool_:
|
1442 |
+
return False
|
1443 |
+
|
1444 |
+
# not just void types can be structured, and names are not part of the repr
|
1445 |
+
if dtype.names is not None:
|
1446 |
+
return False
|
1447 |
+
|
1448 |
+
# should care about endianness *unless size is 1* (e.g., int8, bool)
|
1449 |
+
if not dtype.isnative:
|
1450 |
+
return False
|
1451 |
+
|
1452 |
+
return dtype.type in _typelessdata
|
1453 |
+
|
1454 |
+
|
1455 |
+
def dtype_short_repr(dtype):
|
1456 |
+
"""
|
1457 |
+
Convert a dtype to a short form which evaluates to the same dtype.
|
1458 |
+
|
1459 |
+
The intent is roughly that the following holds
|
1460 |
+
|
1461 |
+
>>> from numpy import *
|
1462 |
+
>>> dt = np.int64([1, 2]).dtype
|
1463 |
+
>>> assert eval(dtype_short_repr(dt)) == dt
|
1464 |
+
"""
|
1465 |
+
if type(dtype).__repr__ != np.dtype.__repr__:
|
1466 |
+
# TODO: Custom repr for user DTypes, logic should likely move.
|
1467 |
+
return repr(dtype)
|
1468 |
+
if dtype.names is not None:
|
1469 |
+
# structured dtypes give a list or tuple repr
|
1470 |
+
return str(dtype)
|
1471 |
+
elif issubclass(dtype.type, flexible):
|
1472 |
+
# handle these separately so they don't give garbage like str256
|
1473 |
+
return "'%s'" % str(dtype)
|
1474 |
+
|
1475 |
+
typename = dtype.name
|
1476 |
+
if not dtype.isnative:
|
1477 |
+
# deal with cases like dtype('<u2') that are identical to an
|
1478 |
+
# established dtype (in this case uint16)
|
1479 |
+
# except that they have a different endianness.
|
1480 |
+
return "'%s'" % str(dtype)
|
1481 |
+
# quote typenames which can't be represented as python variable names
|
1482 |
+
if typename and not (typename[0].isalpha() and typename.isalnum()):
|
1483 |
+
typename = repr(typename)
|
1484 |
+
return typename
|
1485 |
+
|
1486 |
+
|
1487 |
+
def _array_repr_implementation(
|
1488 |
+
arr, max_line_width=None, precision=None, suppress_small=None,
|
1489 |
+
array2string=array2string):
|
1490 |
+
"""Internal version of array_repr() that allows overriding array2string."""
|
1491 |
+
if max_line_width is None:
|
1492 |
+
max_line_width = _format_options['linewidth']
|
1493 |
+
|
1494 |
+
if type(arr) is not ndarray:
|
1495 |
+
class_name = type(arr).__name__
|
1496 |
+
else:
|
1497 |
+
class_name = "array"
|
1498 |
+
|
1499 |
+
skipdtype = dtype_is_implied(arr.dtype) and arr.size > 0
|
1500 |
+
|
1501 |
+
prefix = class_name + "("
|
1502 |
+
suffix = ")" if skipdtype else ","
|
1503 |
+
|
1504 |
+
if (_format_options['legacy'] <= 113 and
|
1505 |
+
arr.shape == () and not arr.dtype.names):
|
1506 |
+
lst = repr(arr.item())
|
1507 |
+
elif arr.size > 0 or arr.shape == (0,):
|
1508 |
+
lst = array2string(arr, max_line_width, precision, suppress_small,
|
1509 |
+
', ', prefix, suffix=suffix)
|
1510 |
+
else: # show zero-length shape unless it is (0,)
|
1511 |
+
lst = "[], shape=%s" % (repr(arr.shape),)
|
1512 |
+
|
1513 |
+
arr_str = prefix + lst + suffix
|
1514 |
+
|
1515 |
+
if skipdtype:
|
1516 |
+
return arr_str
|
1517 |
+
|
1518 |
+
dtype_str = "dtype={})".format(dtype_short_repr(arr.dtype))
|
1519 |
+
|
1520 |
+
# compute whether we should put dtype on a new line: Do so if adding the
|
1521 |
+
# dtype would extend the last line past max_line_width.
|
1522 |
+
# Note: This line gives the correct result even when rfind returns -1.
|
1523 |
+
last_line_len = len(arr_str) - (arr_str.rfind('\n') + 1)
|
1524 |
+
spacer = " "
|
1525 |
+
if _format_options['legacy'] <= 113:
|
1526 |
+
if issubclass(arr.dtype.type, flexible):
|
1527 |
+
spacer = '\n' + ' '*len(class_name + "(")
|
1528 |
+
elif last_line_len + len(dtype_str) + 1 > max_line_width:
|
1529 |
+
spacer = '\n' + ' '*len(class_name + "(")
|
1530 |
+
|
1531 |
+
return arr_str + spacer + dtype_str
|
1532 |
+
|
1533 |
+
|
1534 |
+
def _array_repr_dispatcher(
|
1535 |
+
arr, max_line_width=None, precision=None, suppress_small=None):
|
1536 |
+
return (arr,)
|
1537 |
+
|
1538 |
+
|
1539 |
+
@array_function_dispatch(_array_repr_dispatcher, module='numpy')
|
1540 |
+
def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
|
1541 |
+
"""
|
1542 |
+
Return the string representation of an array.
|
1543 |
+
|
1544 |
+
Parameters
|
1545 |
+
----------
|
1546 |
+
arr : ndarray
|
1547 |
+
Input array.
|
1548 |
+
max_line_width : int, optional
|
1549 |
+
Inserts newlines if text is longer than `max_line_width`.
|
1550 |
+
Defaults to ``numpy.get_printoptions()['linewidth']``.
|
1551 |
+
precision : int, optional
|
1552 |
+
Floating point precision.
|
1553 |
+
Defaults to ``numpy.get_printoptions()['precision']``.
|
1554 |
+
suppress_small : bool, optional
|
1555 |
+
Represent numbers "very close" to zero as zero; default is False.
|
1556 |
+
Very close is defined by precision: if the precision is 8, e.g.,
|
1557 |
+
numbers smaller (in absolute value) than 5e-9 are represented as
|
1558 |
+
zero.
|
1559 |
+
Defaults to ``numpy.get_printoptions()['suppress']``.
|
1560 |
+
|
1561 |
+
Returns
|
1562 |
+
-------
|
1563 |
+
string : str
|
1564 |
+
The string representation of an array.
|
1565 |
+
|
1566 |
+
See Also
|
1567 |
+
--------
|
1568 |
+
array_str, array2string, set_printoptions
|
1569 |
+
|
1570 |
+
Examples
|
1571 |
+
--------
|
1572 |
+
>>> np.array_repr(np.array([1,2]))
|
1573 |
+
'array([1, 2])'
|
1574 |
+
>>> np.array_repr(np.ma.array([0.]))
|
1575 |
+
'MaskedArray([0.])'
|
1576 |
+
>>> np.array_repr(np.array([], np.int32))
|
1577 |
+
'array([], dtype=int32)'
|
1578 |
+
|
1579 |
+
>>> x = np.array([1e-6, 4e-7, 2, 3])
|
1580 |
+
>>> np.array_repr(x, precision=6, suppress_small=True)
|
1581 |
+
'array([0.000001, 0. , 2. , 3. ])'
|
1582 |
+
|
1583 |
+
"""
|
1584 |
+
return _array_repr_implementation(
|
1585 |
+
arr, max_line_width, precision, suppress_small)
|
1586 |
+
|
1587 |
+
|
1588 |
+
@_recursive_guard()
|
1589 |
+
def _guarded_repr_or_str(v):
|
1590 |
+
if isinstance(v, bytes):
|
1591 |
+
return repr(v)
|
1592 |
+
return str(v)
|
1593 |
+
|
1594 |
+
|
1595 |
+
def _array_str_implementation(
|
1596 |
+
a, max_line_width=None, precision=None, suppress_small=None,
|
1597 |
+
array2string=array2string):
|
1598 |
+
"""Internal version of array_str() that allows overriding array2string."""
|
1599 |
+
if (_format_options['legacy'] <= 113 and
|
1600 |
+
a.shape == () and not a.dtype.names):
|
1601 |
+
return str(a.item())
|
1602 |
+
|
1603 |
+
# the str of 0d arrays is a special case: It should appear like a scalar,
|
1604 |
+
# so floats are not truncated by `precision`, and strings are not wrapped
|
1605 |
+
# in quotes. So we return the str of the scalar value.
|
1606 |
+
if a.shape == ():
|
1607 |
+
# obtain a scalar and call str on it, avoiding problems for subclasses
|
1608 |
+
# for which indexing with () returns a 0d instead of a scalar by using
|
1609 |
+
# ndarray's getindex. Also guard against recursive 0d object arrays.
|
1610 |
+
return _guarded_repr_or_str(np.ndarray.__getitem__(a, ()))
|
1611 |
+
|
1612 |
+
return array2string(a, max_line_width, precision, suppress_small, ' ', "")
|
1613 |
+
|
1614 |
+
|
1615 |
+
def _array_str_dispatcher(
|
1616 |
+
a, max_line_width=None, precision=None, suppress_small=None):
|
1617 |
+
return (a,)
|
1618 |
+
|
1619 |
+
|
1620 |
+
@array_function_dispatch(_array_str_dispatcher, module='numpy')
|
1621 |
+
def array_str(a, max_line_width=None, precision=None, suppress_small=None):
|
1622 |
+
"""
|
1623 |
+
Return a string representation of the data in an array.
|
1624 |
+
|
1625 |
+
The data in the array is returned as a single string. This function is
|
1626 |
+
similar to `array_repr`, the difference being that `array_repr` also
|
1627 |
+
returns information on the kind of array and its data type.
|
1628 |
+
|
1629 |
+
Parameters
|
1630 |
+
----------
|
1631 |
+
a : ndarray
|
1632 |
+
Input array.
|
1633 |
+
max_line_width : int, optional
|
1634 |
+
Inserts newlines if text is longer than `max_line_width`.
|
1635 |
+
Defaults to ``numpy.get_printoptions()['linewidth']``.
|
1636 |
+
precision : int, optional
|
1637 |
+
Floating point precision.
|
1638 |
+
Defaults to ``numpy.get_printoptions()['precision']``.
|
1639 |
+
suppress_small : bool, optional
|
1640 |
+
Represent numbers "very close" to zero as zero; default is False.
|
1641 |
+
Very close is defined by precision: if the precision is 8, e.g.,
|
1642 |
+
numbers smaller (in absolute value) than 5e-9 are represented as
|
1643 |
+
zero.
|
1644 |
+
Defaults to ``numpy.get_printoptions()['suppress']``.
|
1645 |
+
|
1646 |
+
See Also
|
1647 |
+
--------
|
1648 |
+
array2string, array_repr, set_printoptions
|
1649 |
+
|
1650 |
+
Examples
|
1651 |
+
--------
|
1652 |
+
>>> np.array_str(np.arange(3))
|
1653 |
+
'[0 1 2]'
|
1654 |
+
|
1655 |
+
"""
|
1656 |
+
return _array_str_implementation(
|
1657 |
+
a, max_line_width, precision, suppress_small)
|
1658 |
+
|
1659 |
+
|
1660 |
+
# needed if __array_function__ is disabled
|
1661 |
+
_array2string_impl = getattr(array2string, '__wrapped__', array2string)
|
1662 |
+
_default_array_str = functools.partial(_array_str_implementation,
|
1663 |
+
array2string=_array2string_impl)
|
1664 |
+
_default_array_repr = functools.partial(_array_repr_implementation,
|
1665 |
+
array2string=_array2string_impl)
|
1666 |
+
|
1667 |
+
|
1668 |
+
def set_string_function(f, repr=True):
|
1669 |
+
"""
|
1670 |
+
Set a Python function to be used when pretty printing arrays.
|
1671 |
+
|
1672 |
+
Parameters
|
1673 |
+
----------
|
1674 |
+
f : function or None
|
1675 |
+
Function to be used to pretty print arrays. The function should expect
|
1676 |
+
a single array argument and return a string of the representation of
|
1677 |
+
the array. If None, the function is reset to the default NumPy function
|
1678 |
+
to print arrays.
|
1679 |
+
repr : bool, optional
|
1680 |
+
If True (default), the function for pretty printing (``__repr__``)
|
1681 |
+
is set, if False the function that returns the default string
|
1682 |
+
representation (``__str__``) is set.
|
1683 |
+
|
1684 |
+
See Also
|
1685 |
+
--------
|
1686 |
+
set_printoptions, get_printoptions
|
1687 |
+
|
1688 |
+
Examples
|
1689 |
+
--------
|
1690 |
+
>>> def pprint(arr):
|
1691 |
+
... return 'HA! - What are you going to do now?'
|
1692 |
+
...
|
1693 |
+
>>> np.set_string_function(pprint)
|
1694 |
+
>>> a = np.arange(10)
|
1695 |
+
>>> a
|
1696 |
+
HA! - What are you going to do now?
|
1697 |
+
>>> _ = a
|
1698 |
+
>>> # [0 1 2 3 4 5 6 7 8 9]
|
1699 |
+
|
1700 |
+
We can reset the function to the default:
|
1701 |
+
|
1702 |
+
>>> np.set_string_function(None)
|
1703 |
+
>>> a
|
1704 |
+
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
|
1705 |
+
|
1706 |
+
`repr` affects either pretty printing or normal string representation.
|
1707 |
+
Note that ``__repr__`` is still affected by setting ``__str__``
|
1708 |
+
because the width of each array element in the returned string becomes
|
1709 |
+
equal to the length of the result of ``__str__()``.
|
1710 |
+
|
1711 |
+
>>> x = np.arange(4)
|
1712 |
+
>>> np.set_string_function(lambda x:'random', repr=False)
|
1713 |
+
>>> x.__str__()
|
1714 |
+
'random'
|
1715 |
+
>>> x.__repr__()
|
1716 |
+
'array([0, 1, 2, 3])'
|
1717 |
+
|
1718 |
+
"""
|
1719 |
+
if f is None:
|
1720 |
+
if repr:
|
1721 |
+
return multiarray.set_string_function(_default_array_repr, 1)
|
1722 |
+
else:
|
1723 |
+
return multiarray.set_string_function(_default_array_str, 0)
|
1724 |
+
else:
|
1725 |
+
return multiarray.set_string_function(f, repr)
|
env-llmeval/lib/python3.10/site-packages/numpy/core/defchararray.pyi
ADDED
@@ -0,0 +1,421 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from typing import (
|
2 |
+
Literal as L,
|
3 |
+
overload,
|
4 |
+
TypeVar,
|
5 |
+
Any,
|
6 |
+
)
|
7 |
+
|
8 |
+
from numpy import (
|
9 |
+
chararray as chararray,
|
10 |
+
dtype,
|
11 |
+
str_,
|
12 |
+
bytes_,
|
13 |
+
int_,
|
14 |
+
bool_,
|
15 |
+
object_,
|
16 |
+
_OrderKACF,
|
17 |
+
)
|
18 |
+
|
19 |
+
from numpy._typing import (
|
20 |
+
NDArray,
|
21 |
+
_ArrayLikeStr_co as U_co,
|
22 |
+
_ArrayLikeBytes_co as S_co,
|
23 |
+
_ArrayLikeInt_co as i_co,
|
24 |
+
_ArrayLikeBool_co as b_co,
|
25 |
+
)
|
26 |
+
|
27 |
+
from numpy.core.multiarray import compare_chararrays as compare_chararrays
|
28 |
+
|
29 |
+
_SCT = TypeVar("_SCT", str_, bytes_)
|
30 |
+
_CharArray = chararray[Any, dtype[_SCT]]
|
31 |
+
|
32 |
+
__all__: list[str]
|
33 |
+
|
34 |
+
# Comparison
|
35 |
+
@overload
|
36 |
+
def equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
|
37 |
+
@overload
|
38 |
+
def equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
|
39 |
+
|
40 |
+
@overload
|
41 |
+
def not_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
|
42 |
+
@overload
|
43 |
+
def not_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
|
44 |
+
|
45 |
+
@overload
|
46 |
+
def greater_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
|
47 |
+
@overload
|
48 |
+
def greater_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
|
49 |
+
|
50 |
+
@overload
|
51 |
+
def less_equal(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
|
52 |
+
@overload
|
53 |
+
def less_equal(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
|
54 |
+
|
55 |
+
@overload
|
56 |
+
def greater(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
|
57 |
+
@overload
|
58 |
+
def greater(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
|
59 |
+
|
60 |
+
@overload
|
61 |
+
def less(x1: U_co, x2: U_co) -> NDArray[bool_]: ...
|
62 |
+
@overload
|
63 |
+
def less(x1: S_co, x2: S_co) -> NDArray[bool_]: ...
|
64 |
+
|
65 |
+
# String operations
|
66 |
+
@overload
|
67 |
+
def add(x1: U_co, x2: U_co) -> NDArray[str_]: ...
|
68 |
+
@overload
|
69 |
+
def add(x1: S_co, x2: S_co) -> NDArray[bytes_]: ...
|
70 |
+
|
71 |
+
@overload
|
72 |
+
def multiply(a: U_co, i: i_co) -> NDArray[str_]: ...
|
73 |
+
@overload
|
74 |
+
def multiply(a: S_co, i: i_co) -> NDArray[bytes_]: ...
|
75 |
+
|
76 |
+
@overload
|
77 |
+
def mod(a: U_co, value: Any) -> NDArray[str_]: ...
|
78 |
+
@overload
|
79 |
+
def mod(a: S_co, value: Any) -> NDArray[bytes_]: ...
|
80 |
+
|
81 |
+
@overload
|
82 |
+
def capitalize(a: U_co) -> NDArray[str_]: ...
|
83 |
+
@overload
|
84 |
+
def capitalize(a: S_co) -> NDArray[bytes_]: ...
|
85 |
+
|
86 |
+
@overload
|
87 |
+
def center(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
|
88 |
+
@overload
|
89 |
+
def center(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
|
90 |
+
|
91 |
+
def decode(
|
92 |
+
a: S_co,
|
93 |
+
encoding: None | str = ...,
|
94 |
+
errors: None | str = ...,
|
95 |
+
) -> NDArray[str_]: ...
|
96 |
+
|
97 |
+
def encode(
|
98 |
+
a: U_co,
|
99 |
+
encoding: None | str = ...,
|
100 |
+
errors: None | str = ...,
|
101 |
+
) -> NDArray[bytes_]: ...
|
102 |
+
|
103 |
+
@overload
|
104 |
+
def expandtabs(a: U_co, tabsize: i_co = ...) -> NDArray[str_]: ...
|
105 |
+
@overload
|
106 |
+
def expandtabs(a: S_co, tabsize: i_co = ...) -> NDArray[bytes_]: ...
|
107 |
+
|
108 |
+
@overload
|
109 |
+
def join(sep: U_co, seq: U_co) -> NDArray[str_]: ...
|
110 |
+
@overload
|
111 |
+
def join(sep: S_co, seq: S_co) -> NDArray[bytes_]: ...
|
112 |
+
|
113 |
+
@overload
|
114 |
+
def ljust(a: U_co, width: i_co, fillchar: U_co = ...) -> NDArray[str_]: ...
|
115 |
+
@overload
|
116 |
+
def ljust(a: S_co, width: i_co, fillchar: S_co = ...) -> NDArray[bytes_]: ...
|
117 |
+
|
118 |
+
@overload
|
119 |
+
def lower(a: U_co) -> NDArray[str_]: ...
|
120 |
+
@overload
|
121 |
+
def lower(a: S_co) -> NDArray[bytes_]: ...
|
122 |
+
|
123 |
+
@overload
|
124 |
+
def lstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ...
|
125 |
+
@overload
|
126 |
+
def lstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ...
|
127 |
+
|
128 |
+
@overload
|
129 |
+
def partition(a: U_co, sep: U_co) -> NDArray[str_]: ...
|
130 |
+
@overload
|
131 |
+
def partition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...
|
132 |
+
|
133 |
+
@overload
|
134 |
+
def replace(
|
135 |
+
a: U_co,
|
136 |
+
old: U_co,
|
137 |
+
new: U_co,
|
138 |
+
count: None | i_co = ...,
|
139 |
+
) -> NDArray[str_]: ...
|
140 |
+
@overload
|
141 |
+
def replace(
|
142 |
+
a: S_co,
|
143 |
+
old: S_co,
|
144 |
+
new: S_co,
|
145 |
+
count: None | i_co = ...,
|
146 |
+
) -> NDArray[bytes_]: ...
|
147 |
+
|
148 |
+
@overload
|
149 |
+
def rjust(
|
150 |
+
a: U_co,
|
151 |
+
width: i_co,
|
152 |
+
fillchar: U_co = ...,
|
153 |
+
) -> NDArray[str_]: ...
|
154 |
+
@overload
|
155 |
+
def rjust(
|
156 |
+
a: S_co,
|
157 |
+
width: i_co,
|
158 |
+
fillchar: S_co = ...,
|
159 |
+
) -> NDArray[bytes_]: ...
|
160 |
+
|
161 |
+
@overload
|
162 |
+
def rpartition(a: U_co, sep: U_co) -> NDArray[str_]: ...
|
163 |
+
@overload
|
164 |
+
def rpartition(a: S_co, sep: S_co) -> NDArray[bytes_]: ...
|
165 |
+
|
166 |
+
@overload
|
167 |
+
def rsplit(
|
168 |
+
a: U_co,
|
169 |
+
sep: None | U_co = ...,
|
170 |
+
maxsplit: None | i_co = ...,
|
171 |
+
) -> NDArray[object_]: ...
|
172 |
+
@overload
|
173 |
+
def rsplit(
|
174 |
+
a: S_co,
|
175 |
+
sep: None | S_co = ...,
|
176 |
+
maxsplit: None | i_co = ...,
|
177 |
+
) -> NDArray[object_]: ...
|
178 |
+
|
179 |
+
@overload
|
180 |
+
def rstrip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ...
|
181 |
+
@overload
|
182 |
+
def rstrip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ...
|
183 |
+
|
184 |
+
@overload
|
185 |
+
def split(
|
186 |
+
a: U_co,
|
187 |
+
sep: None | U_co = ...,
|
188 |
+
maxsplit: None | i_co = ...,
|
189 |
+
) -> NDArray[object_]: ...
|
190 |
+
@overload
|
191 |
+
def split(
|
192 |
+
a: S_co,
|
193 |
+
sep: None | S_co = ...,
|
194 |
+
maxsplit: None | i_co = ...,
|
195 |
+
) -> NDArray[object_]: ...
|
196 |
+
|
197 |
+
@overload
|
198 |
+
def splitlines(a: U_co, keepends: None | b_co = ...) -> NDArray[object_]: ...
|
199 |
+
@overload
|
200 |
+
def splitlines(a: S_co, keepends: None | b_co = ...) -> NDArray[object_]: ...
|
201 |
+
|
202 |
+
@overload
|
203 |
+
def strip(a: U_co, chars: None | U_co = ...) -> NDArray[str_]: ...
|
204 |
+
@overload
|
205 |
+
def strip(a: S_co, chars: None | S_co = ...) -> NDArray[bytes_]: ...
|
206 |
+
|
207 |
+
@overload
|
208 |
+
def swapcase(a: U_co) -> NDArray[str_]: ...
|
209 |
+
@overload
|
210 |
+
def swapcase(a: S_co) -> NDArray[bytes_]: ...
|
211 |
+
|
212 |
+
@overload
|
213 |
+
def title(a: U_co) -> NDArray[str_]: ...
|
214 |
+
@overload
|
215 |
+
def title(a: S_co) -> NDArray[bytes_]: ...
|
216 |
+
|
217 |
+
@overload
|
218 |
+
def translate(
|
219 |
+
a: U_co,
|
220 |
+
table: U_co,
|
221 |
+
deletechars: None | U_co = ...,
|
222 |
+
) -> NDArray[str_]: ...
|
223 |
+
@overload
|
224 |
+
def translate(
|
225 |
+
a: S_co,
|
226 |
+
table: S_co,
|
227 |
+
deletechars: None | S_co = ...,
|
228 |
+
) -> NDArray[bytes_]: ...
|
229 |
+
|
230 |
+
@overload
|
231 |
+
def upper(a: U_co) -> NDArray[str_]: ...
|
232 |
+
@overload
|
233 |
+
def upper(a: S_co) -> NDArray[bytes_]: ...
|
234 |
+
|
235 |
+
@overload
|
236 |
+
def zfill(a: U_co, width: i_co) -> NDArray[str_]: ...
|
237 |
+
@overload
|
238 |
+
def zfill(a: S_co, width: i_co) -> NDArray[bytes_]: ...
|
239 |
+
|
240 |
+
# String information
|
241 |
+
@overload
|
242 |
+
def count(
|
243 |
+
a: U_co,
|
244 |
+
sub: U_co,
|
245 |
+
start: i_co = ...,
|
246 |
+
end: None | i_co = ...,
|
247 |
+
) -> NDArray[int_]: ...
|
248 |
+
@overload
|
249 |
+
def count(
|
250 |
+
a: S_co,
|
251 |
+
sub: S_co,
|
252 |
+
start: i_co = ...,
|
253 |
+
end: None | i_co = ...,
|
254 |
+
) -> NDArray[int_]: ...
|
255 |
+
|
256 |
+
@overload
|
257 |
+
def endswith(
|
258 |
+
a: U_co,
|
259 |
+
suffix: U_co,
|
260 |
+
start: i_co = ...,
|
261 |
+
end: None | i_co = ...,
|
262 |
+
) -> NDArray[bool_]: ...
|
263 |
+
@overload
|
264 |
+
def endswith(
|
265 |
+
a: S_co,
|
266 |
+
suffix: S_co,
|
267 |
+
start: i_co = ...,
|
268 |
+
end: None | i_co = ...,
|
269 |
+
) -> NDArray[bool_]: ...
|
270 |
+
|
271 |
+
@overload
|
272 |
+
def find(
|
273 |
+
a: U_co,
|
274 |
+
sub: U_co,
|
275 |
+
start: i_co = ...,
|
276 |
+
end: None | i_co = ...,
|
277 |
+
) -> NDArray[int_]: ...
|
278 |
+
@overload
|
279 |
+
def find(
|
280 |
+
a: S_co,
|
281 |
+
sub: S_co,
|
282 |
+
start: i_co = ...,
|
283 |
+
end: None | i_co = ...,
|
284 |
+
) -> NDArray[int_]: ...
|
285 |
+
|
286 |
+
@overload
|
287 |
+
def index(
|
288 |
+
a: U_co,
|
289 |
+
sub: U_co,
|
290 |
+
start: i_co = ...,
|
291 |
+
end: None | i_co = ...,
|
292 |
+
) -> NDArray[int_]: ...
|
293 |
+
@overload
|
294 |
+
def index(
|
295 |
+
a: S_co,
|
296 |
+
sub: S_co,
|
297 |
+
start: i_co = ...,
|
298 |
+
end: None | i_co = ...,
|
299 |
+
) -> NDArray[int_]: ...
|
300 |
+
|
301 |
+
def isalpha(a: U_co | S_co) -> NDArray[bool_]: ...
|
302 |
+
def isalnum(a: U_co | S_co) -> NDArray[bool_]: ...
|
303 |
+
def isdecimal(a: U_co | S_co) -> NDArray[bool_]: ...
|
304 |
+
def isdigit(a: U_co | S_co) -> NDArray[bool_]: ...
|
305 |
+
def islower(a: U_co | S_co) -> NDArray[bool_]: ...
|
306 |
+
def isnumeric(a: U_co | S_co) -> NDArray[bool_]: ...
|
307 |
+
def isspace(a: U_co | S_co) -> NDArray[bool_]: ...
|
308 |
+
def istitle(a: U_co | S_co) -> NDArray[bool_]: ...
|
309 |
+
def isupper(a: U_co | S_co) -> NDArray[bool_]: ...
|
310 |
+
|
311 |
+
@overload
|
312 |
+
def rfind(
|
313 |
+
a: U_co,
|
314 |
+
sub: U_co,
|
315 |
+
start: i_co = ...,
|
316 |
+
end: None | i_co = ...,
|
317 |
+
) -> NDArray[int_]: ...
|
318 |
+
@overload
|
319 |
+
def rfind(
|
320 |
+
a: S_co,
|
321 |
+
sub: S_co,
|
322 |
+
start: i_co = ...,
|
323 |
+
end: None | i_co = ...,
|
324 |
+
) -> NDArray[int_]: ...
|
325 |
+
|
326 |
+
@overload
|
327 |
+
def rindex(
|
328 |
+
a: U_co,
|
329 |
+
sub: U_co,
|
330 |
+
start: i_co = ...,
|
331 |
+
end: None | i_co = ...,
|
332 |
+
) -> NDArray[int_]: ...
|
333 |
+
@overload
|
334 |
+
def rindex(
|
335 |
+
a: S_co,
|
336 |
+
sub: S_co,
|
337 |
+
start: i_co = ...,
|
338 |
+
end: None | i_co = ...,
|
339 |
+
) -> NDArray[int_]: ...
|
340 |
+
|
341 |
+
@overload
|
342 |
+
def startswith(
|
343 |
+
a: U_co,
|
344 |
+
prefix: U_co,
|
345 |
+
start: i_co = ...,
|
346 |
+
end: None | i_co = ...,
|
347 |
+
) -> NDArray[bool_]: ...
|
348 |
+
@overload
|
349 |
+
def startswith(
|
350 |
+
a: S_co,
|
351 |
+
prefix: S_co,
|
352 |
+
start: i_co = ...,
|
353 |
+
end: None | i_co = ...,
|
354 |
+
) -> NDArray[bool_]: ...
|
355 |
+
|
356 |
+
def str_len(A: U_co | S_co) -> NDArray[int_]: ...
|
357 |
+
|
358 |
+
# Overload 1 and 2: str- or bytes-based array-likes
|
359 |
+
# overload 3: arbitrary object with unicode=False (-> bytes_)
|
360 |
+
# overload 4: arbitrary object with unicode=True (-> str_)
|
361 |
+
@overload
|
362 |
+
def array(
|
363 |
+
obj: U_co,
|
364 |
+
itemsize: None | int = ...,
|
365 |
+
copy: bool = ...,
|
366 |
+
unicode: L[False] = ...,
|
367 |
+
order: _OrderKACF = ...,
|
368 |
+
) -> _CharArray[str_]: ...
|
369 |
+
@overload
|
370 |
+
def array(
|
371 |
+
obj: S_co,
|
372 |
+
itemsize: None | int = ...,
|
373 |
+
copy: bool = ...,
|
374 |
+
unicode: L[False] = ...,
|
375 |
+
order: _OrderKACF = ...,
|
376 |
+
) -> _CharArray[bytes_]: ...
|
377 |
+
@overload
|
378 |
+
def array(
|
379 |
+
obj: object,
|
380 |
+
itemsize: None | int = ...,
|
381 |
+
copy: bool = ...,
|
382 |
+
unicode: L[False] = ...,
|
383 |
+
order: _OrderKACF = ...,
|
384 |
+
) -> _CharArray[bytes_]: ...
|
385 |
+
@overload
|
386 |
+
def array(
|
387 |
+
obj: object,
|
388 |
+
itemsize: None | int = ...,
|
389 |
+
copy: bool = ...,
|
390 |
+
unicode: L[True] = ...,
|
391 |
+
order: _OrderKACF = ...,
|
392 |
+
) -> _CharArray[str_]: ...
|
393 |
+
|
394 |
+
@overload
|
395 |
+
def asarray(
|
396 |
+
obj: U_co,
|
397 |
+
itemsize: None | int = ...,
|
398 |
+
unicode: L[False] = ...,
|
399 |
+
order: _OrderKACF = ...,
|
400 |
+
) -> _CharArray[str_]: ...
|
401 |
+
@overload
|
402 |
+
def asarray(
|
403 |
+
obj: S_co,
|
404 |
+
itemsize: None | int = ...,
|
405 |
+
unicode: L[False] = ...,
|
406 |
+
order: _OrderKACF = ...,
|
407 |
+
) -> _CharArray[bytes_]: ...
|
408 |
+
@overload
|
409 |
+
def asarray(
|
410 |
+
obj: object,
|
411 |
+
itemsize: None | int = ...,
|
412 |
+
unicode: L[False] = ...,
|
413 |
+
order: _OrderKACF = ...,
|
414 |
+
) -> _CharArray[bytes_]: ...
|
415 |
+
@overload
|
416 |
+
def asarray(
|
417 |
+
obj: object,
|
418 |
+
itemsize: None | int = ...,
|
419 |
+
unicode: L[True] = ...,
|
420 |
+
order: _OrderKACF = ...,
|
421 |
+
) -> _CharArray[str_]: ...
|
env-llmeval/lib/python3.10/site-packages/numpy/core/einsumfunc.py
ADDED
@@ -0,0 +1,1443 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Implementation of optimized einsum.
|
3 |
+
|
4 |
+
"""
|
5 |
+
import itertools
|
6 |
+
import operator
|
7 |
+
|
8 |
+
from numpy.core.multiarray import c_einsum
|
9 |
+
from numpy.core.numeric import asanyarray, tensordot
|
10 |
+
from numpy.core.overrides import array_function_dispatch
|
11 |
+
|
12 |
+
__all__ = ['einsum', 'einsum_path']
|
13 |
+
|
14 |
+
einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
|
15 |
+
einsum_symbols_set = set(einsum_symbols)
|
16 |
+
|
17 |
+
|
18 |
+
def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
|
19 |
+
"""
|
20 |
+
Computes the number of FLOPS in the contraction.
|
21 |
+
|
22 |
+
Parameters
|
23 |
+
----------
|
24 |
+
idx_contraction : iterable
|
25 |
+
The indices involved in the contraction
|
26 |
+
inner : bool
|
27 |
+
Does this contraction require an inner product?
|
28 |
+
num_terms : int
|
29 |
+
The number of terms in a contraction
|
30 |
+
size_dictionary : dict
|
31 |
+
The size of each of the indices in idx_contraction
|
32 |
+
|
33 |
+
Returns
|
34 |
+
-------
|
35 |
+
flop_count : int
|
36 |
+
The total number of FLOPS required for the contraction.
|
37 |
+
|
38 |
+
Examples
|
39 |
+
--------
|
40 |
+
|
41 |
+
>>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
|
42 |
+
30
|
43 |
+
|
44 |
+
>>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
|
45 |
+
60
|
46 |
+
|
47 |
+
"""
|
48 |
+
|
49 |
+
overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
|
50 |
+
op_factor = max(1, num_terms - 1)
|
51 |
+
if inner:
|
52 |
+
op_factor += 1
|
53 |
+
|
54 |
+
return overall_size * op_factor
|
55 |
+
|
56 |
+
def _compute_size_by_dict(indices, idx_dict):
|
57 |
+
"""
|
58 |
+
Computes the product of the elements in indices based on the dictionary
|
59 |
+
idx_dict.
|
60 |
+
|
61 |
+
Parameters
|
62 |
+
----------
|
63 |
+
indices : iterable
|
64 |
+
Indices to base the product on.
|
65 |
+
idx_dict : dictionary
|
66 |
+
Dictionary of index sizes
|
67 |
+
|
68 |
+
Returns
|
69 |
+
-------
|
70 |
+
ret : int
|
71 |
+
The resulting product.
|
72 |
+
|
73 |
+
Examples
|
74 |
+
--------
|
75 |
+
>>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
|
76 |
+
90
|
77 |
+
|
78 |
+
"""
|
79 |
+
ret = 1
|
80 |
+
for i in indices:
|
81 |
+
ret *= idx_dict[i]
|
82 |
+
return ret
|
83 |
+
|
84 |
+
|
85 |
+
def _find_contraction(positions, input_sets, output_set):
|
86 |
+
"""
|
87 |
+
Finds the contraction for a given set of input and output sets.
|
88 |
+
|
89 |
+
Parameters
|
90 |
+
----------
|
91 |
+
positions : iterable
|
92 |
+
Integer positions of terms used in the contraction.
|
93 |
+
input_sets : list
|
94 |
+
List of sets that represent the lhs side of the einsum subscript
|
95 |
+
output_set : set
|
96 |
+
Set that represents the rhs side of the overall einsum subscript
|
97 |
+
|
98 |
+
Returns
|
99 |
+
-------
|
100 |
+
new_result : set
|
101 |
+
The indices of the resulting contraction
|
102 |
+
remaining : list
|
103 |
+
List of sets that have not been contracted, the new set is appended to
|
104 |
+
the end of this list
|
105 |
+
idx_removed : set
|
106 |
+
Indices removed from the entire contraction
|
107 |
+
idx_contraction : set
|
108 |
+
The indices used in the current contraction
|
109 |
+
|
110 |
+
Examples
|
111 |
+
--------
|
112 |
+
|
113 |
+
# A simple dot product test case
|
114 |
+
>>> pos = (0, 1)
|
115 |
+
>>> isets = [set('ab'), set('bc')]
|
116 |
+
>>> oset = set('ac')
|
117 |
+
>>> _find_contraction(pos, isets, oset)
|
118 |
+
({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
|
119 |
+
|
120 |
+
# A more complex case with additional terms in the contraction
|
121 |
+
>>> pos = (0, 2)
|
122 |
+
>>> isets = [set('abd'), set('ac'), set('bdc')]
|
123 |
+
>>> oset = set('ac')
|
124 |
+
>>> _find_contraction(pos, isets, oset)
|
125 |
+
({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
|
126 |
+
"""
|
127 |
+
|
128 |
+
idx_contract = set()
|
129 |
+
idx_remain = output_set.copy()
|
130 |
+
remaining = []
|
131 |
+
for ind, value in enumerate(input_sets):
|
132 |
+
if ind in positions:
|
133 |
+
idx_contract |= value
|
134 |
+
else:
|
135 |
+
remaining.append(value)
|
136 |
+
idx_remain |= value
|
137 |
+
|
138 |
+
new_result = idx_remain & idx_contract
|
139 |
+
idx_removed = (idx_contract - new_result)
|
140 |
+
remaining.append(new_result)
|
141 |
+
|
142 |
+
return (new_result, remaining, idx_removed, idx_contract)
|
143 |
+
|
144 |
+
|
145 |
+
def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
|
146 |
+
"""
|
147 |
+
Computes all possible pair contractions, sieves the results based
|
148 |
+
on ``memory_limit`` and returns the lowest cost path. This algorithm
|
149 |
+
scales factorial with respect to the elements in the list ``input_sets``.
|
150 |
+
|
151 |
+
Parameters
|
152 |
+
----------
|
153 |
+
input_sets : list
|
154 |
+
List of sets that represent the lhs side of the einsum subscript
|
155 |
+
output_set : set
|
156 |
+
Set that represents the rhs side of the overall einsum subscript
|
157 |
+
idx_dict : dictionary
|
158 |
+
Dictionary of index sizes
|
159 |
+
memory_limit : int
|
160 |
+
The maximum number of elements in a temporary array
|
161 |
+
|
162 |
+
Returns
|
163 |
+
-------
|
164 |
+
path : list
|
165 |
+
The optimal contraction order within the memory limit constraint.
|
166 |
+
|
167 |
+
Examples
|
168 |
+
--------
|
169 |
+
>>> isets = [set('abd'), set('ac'), set('bdc')]
|
170 |
+
>>> oset = set()
|
171 |
+
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
|
172 |
+
>>> _optimal_path(isets, oset, idx_sizes, 5000)
|
173 |
+
[(0, 2), (0, 1)]
|
174 |
+
"""
|
175 |
+
|
176 |
+
full_results = [(0, [], input_sets)]
|
177 |
+
for iteration in range(len(input_sets) - 1):
|
178 |
+
iter_results = []
|
179 |
+
|
180 |
+
# Compute all unique pairs
|
181 |
+
for curr in full_results:
|
182 |
+
cost, positions, remaining = curr
|
183 |
+
for con in itertools.combinations(range(len(input_sets) - iteration), 2):
|
184 |
+
|
185 |
+
# Find the contraction
|
186 |
+
cont = _find_contraction(con, remaining, output_set)
|
187 |
+
new_result, new_input_sets, idx_removed, idx_contract = cont
|
188 |
+
|
189 |
+
# Sieve the results based on memory_limit
|
190 |
+
new_size = _compute_size_by_dict(new_result, idx_dict)
|
191 |
+
if new_size > memory_limit:
|
192 |
+
continue
|
193 |
+
|
194 |
+
# Build (total_cost, positions, indices_remaining)
|
195 |
+
total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
|
196 |
+
new_pos = positions + [con]
|
197 |
+
iter_results.append((total_cost, new_pos, new_input_sets))
|
198 |
+
|
199 |
+
# Update combinatorial list, if we did not find anything return best
|
200 |
+
# path + remaining contractions
|
201 |
+
if iter_results:
|
202 |
+
full_results = iter_results
|
203 |
+
else:
|
204 |
+
path = min(full_results, key=lambda x: x[0])[1]
|
205 |
+
path += [tuple(range(len(input_sets) - iteration))]
|
206 |
+
return path
|
207 |
+
|
208 |
+
# If we have not found anything return single einsum contraction
|
209 |
+
if len(full_results) == 0:
|
210 |
+
return [tuple(range(len(input_sets)))]
|
211 |
+
|
212 |
+
path = min(full_results, key=lambda x: x[0])[1]
|
213 |
+
return path
|
214 |
+
|
215 |
+
def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
|
216 |
+
"""Compute the cost (removed size + flops) and resultant indices for
|
217 |
+
performing the contraction specified by ``positions``.
|
218 |
+
|
219 |
+
Parameters
|
220 |
+
----------
|
221 |
+
positions : tuple of int
|
222 |
+
The locations of the proposed tensors to contract.
|
223 |
+
input_sets : list of sets
|
224 |
+
The indices found on each tensors.
|
225 |
+
output_set : set
|
226 |
+
The output indices of the expression.
|
227 |
+
idx_dict : dict
|
228 |
+
Mapping of each index to its size.
|
229 |
+
memory_limit : int
|
230 |
+
The total allowed size for an intermediary tensor.
|
231 |
+
path_cost : int
|
232 |
+
The contraction cost so far.
|
233 |
+
naive_cost : int
|
234 |
+
The cost of the unoptimized expression.
|
235 |
+
|
236 |
+
Returns
|
237 |
+
-------
|
238 |
+
cost : (int, int)
|
239 |
+
A tuple containing the size of any indices removed, and the flop cost.
|
240 |
+
positions : tuple of int
|
241 |
+
The locations of the proposed tensors to contract.
|
242 |
+
new_input_sets : list of sets
|
243 |
+
The resulting new list of indices if this proposed contraction is performed.
|
244 |
+
|
245 |
+
"""
|
246 |
+
|
247 |
+
# Find the contraction
|
248 |
+
contract = _find_contraction(positions, input_sets, output_set)
|
249 |
+
idx_result, new_input_sets, idx_removed, idx_contract = contract
|
250 |
+
|
251 |
+
# Sieve the results based on memory_limit
|
252 |
+
new_size = _compute_size_by_dict(idx_result, idx_dict)
|
253 |
+
if new_size > memory_limit:
|
254 |
+
return None
|
255 |
+
|
256 |
+
# Build sort tuple
|
257 |
+
old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
|
258 |
+
removed_size = sum(old_sizes) - new_size
|
259 |
+
|
260 |
+
# NB: removed_size used to be just the size of any removed indices i.e.:
|
261 |
+
# helpers.compute_size_by_dict(idx_removed, idx_dict)
|
262 |
+
cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
|
263 |
+
sort = (-removed_size, cost)
|
264 |
+
|
265 |
+
# Sieve based on total cost as well
|
266 |
+
if (path_cost + cost) > naive_cost:
|
267 |
+
return None
|
268 |
+
|
269 |
+
# Add contraction to possible choices
|
270 |
+
return [sort, positions, new_input_sets]
|
271 |
+
|
272 |
+
|
273 |
+
def _update_other_results(results, best):
|
274 |
+
"""Update the positions and provisional input_sets of ``results`` based on
|
275 |
+
performing the contraction result ``best``. Remove any involving the tensors
|
276 |
+
contracted.
|
277 |
+
|
278 |
+
Parameters
|
279 |
+
----------
|
280 |
+
results : list
|
281 |
+
List of contraction results produced by ``_parse_possible_contraction``.
|
282 |
+
best : list
|
283 |
+
The best contraction of ``results`` i.e. the one that will be performed.
|
284 |
+
|
285 |
+
Returns
|
286 |
+
-------
|
287 |
+
mod_results : list
|
288 |
+
The list of modified results, updated with outcome of ``best`` contraction.
|
289 |
+
"""
|
290 |
+
|
291 |
+
best_con = best[1]
|
292 |
+
bx, by = best_con
|
293 |
+
mod_results = []
|
294 |
+
|
295 |
+
for cost, (x, y), con_sets in results:
|
296 |
+
|
297 |
+
# Ignore results involving tensors just contracted
|
298 |
+
if x in best_con or y in best_con:
|
299 |
+
continue
|
300 |
+
|
301 |
+
# Update the input_sets
|
302 |
+
del con_sets[by - int(by > x) - int(by > y)]
|
303 |
+
del con_sets[bx - int(bx > x) - int(bx > y)]
|
304 |
+
con_sets.insert(-1, best[2][-1])
|
305 |
+
|
306 |
+
# Update the position indices
|
307 |
+
mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
|
308 |
+
mod_results.append((cost, mod_con, con_sets))
|
309 |
+
|
310 |
+
return mod_results
|
311 |
+
|
312 |
+
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
|
313 |
+
"""
|
314 |
+
Finds the path by contracting the best pair until the input list is
|
315 |
+
exhausted. The best pair is found by minimizing the tuple
|
316 |
+
``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
|
317 |
+
matrix multiplication or inner product operations, then Hadamard like
|
318 |
+
operations, and finally outer operations. Outer products are limited by
|
319 |
+
``memory_limit``. This algorithm scales cubically with respect to the
|
320 |
+
number of elements in the list ``input_sets``.
|
321 |
+
|
322 |
+
Parameters
|
323 |
+
----------
|
324 |
+
input_sets : list
|
325 |
+
List of sets that represent the lhs side of the einsum subscript
|
326 |
+
output_set : set
|
327 |
+
Set that represents the rhs side of the overall einsum subscript
|
328 |
+
idx_dict : dictionary
|
329 |
+
Dictionary of index sizes
|
330 |
+
memory_limit : int
|
331 |
+
The maximum number of elements in a temporary array
|
332 |
+
|
333 |
+
Returns
|
334 |
+
-------
|
335 |
+
path : list
|
336 |
+
The greedy contraction order within the memory limit constraint.
|
337 |
+
|
338 |
+
Examples
|
339 |
+
--------
|
340 |
+
>>> isets = [set('abd'), set('ac'), set('bdc')]
|
341 |
+
>>> oset = set()
|
342 |
+
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
|
343 |
+
>>> _greedy_path(isets, oset, idx_sizes, 5000)
|
344 |
+
[(0, 2), (0, 1)]
|
345 |
+
"""
|
346 |
+
|
347 |
+
# Handle trivial cases that leaked through
|
348 |
+
if len(input_sets) == 1:
|
349 |
+
return [(0,)]
|
350 |
+
elif len(input_sets) == 2:
|
351 |
+
return [(0, 1)]
|
352 |
+
|
353 |
+
# Build up a naive cost
|
354 |
+
contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
|
355 |
+
idx_result, new_input_sets, idx_removed, idx_contract = contract
|
356 |
+
naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)
|
357 |
+
|
358 |
+
# Initially iterate over all pairs
|
359 |
+
comb_iter = itertools.combinations(range(len(input_sets)), 2)
|
360 |
+
known_contractions = []
|
361 |
+
|
362 |
+
path_cost = 0
|
363 |
+
path = []
|
364 |
+
|
365 |
+
for iteration in range(len(input_sets) - 1):
|
366 |
+
|
367 |
+
# Iterate over all pairs on first step, only previously found pairs on subsequent steps
|
368 |
+
for positions in comb_iter:
|
369 |
+
|
370 |
+
# Always initially ignore outer products
|
371 |
+
if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
|
372 |
+
continue
|
373 |
+
|
374 |
+
result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
|
375 |
+
naive_cost)
|
376 |
+
if result is not None:
|
377 |
+
known_contractions.append(result)
|
378 |
+
|
379 |
+
# If we do not have a inner contraction, rescan pairs including outer products
|
380 |
+
if len(known_contractions) == 0:
|
381 |
+
|
382 |
+
# Then check the outer products
|
383 |
+
for positions in itertools.combinations(range(len(input_sets)), 2):
|
384 |
+
result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
|
385 |
+
path_cost, naive_cost)
|
386 |
+
if result is not None:
|
387 |
+
known_contractions.append(result)
|
388 |
+
|
389 |
+
# If we still did not find any remaining contractions, default back to einsum like behavior
|
390 |
+
if len(known_contractions) == 0:
|
391 |
+
path.append(tuple(range(len(input_sets))))
|
392 |
+
break
|
393 |
+
|
394 |
+
# Sort based on first index
|
395 |
+
best = min(known_contractions, key=lambda x: x[0])
|
396 |
+
|
397 |
+
# Now propagate as many unused contractions as possible to next iteration
|
398 |
+
known_contractions = _update_other_results(known_contractions, best)
|
399 |
+
|
400 |
+
# Next iteration only compute contractions with the new tensor
|
401 |
+
# All other contractions have been accounted for
|
402 |
+
input_sets = best[2]
|
403 |
+
new_tensor_pos = len(input_sets) - 1
|
404 |
+
comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))
|
405 |
+
|
406 |
+
# Update path and total cost
|
407 |
+
path.append(best[1])
|
408 |
+
path_cost += best[0][1]
|
409 |
+
|
410 |
+
return path
|
411 |
+
|
412 |
+
|
413 |
+
def _can_dot(inputs, result, idx_removed):
|
414 |
+
"""
|
415 |
+
Checks if we can use BLAS (np.tensordot) call and its beneficial to do so.
|
416 |
+
|
417 |
+
Parameters
|
418 |
+
----------
|
419 |
+
inputs : list of str
|
420 |
+
Specifies the subscripts for summation.
|
421 |
+
result : str
|
422 |
+
Resulting summation.
|
423 |
+
idx_removed : set
|
424 |
+
Indices that are removed in the summation
|
425 |
+
|
426 |
+
|
427 |
+
Returns
|
428 |
+
-------
|
429 |
+
type : bool
|
430 |
+
Returns true if BLAS should and can be used, else False
|
431 |
+
|
432 |
+
Notes
|
433 |
+
-----
|
434 |
+
If the operations is BLAS level 1 or 2 and is not already aligned
|
435 |
+
we default back to einsum as the memory movement to copy is more
|
436 |
+
costly than the operation itself.
|
437 |
+
|
438 |
+
|
439 |
+
Examples
|
440 |
+
--------
|
441 |
+
|
442 |
+
# Standard GEMM operation
|
443 |
+
>>> _can_dot(['ij', 'jk'], 'ik', set('j'))
|
444 |
+
True
|
445 |
+
|
446 |
+
# Can use the standard BLAS, but requires odd data movement
|
447 |
+
>>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
|
448 |
+
False
|
449 |
+
|
450 |
+
# DDOT where the memory is not aligned
|
451 |
+
>>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
|
452 |
+
False
|
453 |
+
|
454 |
+
"""
|
455 |
+
|
456 |
+
# All `dot` calls remove indices
|
457 |
+
if len(idx_removed) == 0:
|
458 |
+
return False
|
459 |
+
|
460 |
+
# BLAS can only handle two operands
|
461 |
+
if len(inputs) != 2:
|
462 |
+
return False
|
463 |
+
|
464 |
+
input_left, input_right = inputs
|
465 |
+
|
466 |
+
for c in set(input_left + input_right):
|
467 |
+
# can't deal with repeated indices on same input or more than 2 total
|
468 |
+
nl, nr = input_left.count(c), input_right.count(c)
|
469 |
+
if (nl > 1) or (nr > 1) or (nl + nr > 2):
|
470 |
+
return False
|
471 |
+
|
472 |
+
# can't do implicit summation or dimension collapse e.g.
|
473 |
+
# "ab,bc->c" (implicitly sum over 'a')
|
474 |
+
# "ab,ca->ca" (take diagonal of 'a')
|
475 |
+
if nl + nr - 1 == int(c in result):
|
476 |
+
return False
|
477 |
+
|
478 |
+
# Build a few temporaries
|
479 |
+
set_left = set(input_left)
|
480 |
+
set_right = set(input_right)
|
481 |
+
keep_left = set_left - idx_removed
|
482 |
+
keep_right = set_right - idx_removed
|
483 |
+
rs = len(idx_removed)
|
484 |
+
|
485 |
+
# At this point we are a DOT, GEMV, or GEMM operation
|
486 |
+
|
487 |
+
# Handle inner products
|
488 |
+
|
489 |
+
# DDOT with aligned data
|
490 |
+
if input_left == input_right:
|
491 |
+
return True
|
492 |
+
|
493 |
+
# DDOT without aligned data (better to use einsum)
|
494 |
+
if set_left == set_right:
|
495 |
+
return False
|
496 |
+
|
497 |
+
# Handle the 4 possible (aligned) GEMV or GEMM cases
|
498 |
+
|
499 |
+
# GEMM or GEMV no transpose
|
500 |
+
if input_left[-rs:] == input_right[:rs]:
|
501 |
+
return True
|
502 |
+
|
503 |
+
# GEMM or GEMV transpose both
|
504 |
+
if input_left[:rs] == input_right[-rs:]:
|
505 |
+
return True
|
506 |
+
|
507 |
+
# GEMM or GEMV transpose right
|
508 |
+
if input_left[-rs:] == input_right[-rs:]:
|
        return True

    # GEMM or GEMV transpose left
    if input_left[:rs] == input_right[:rs]:
        return True

    # Einsum is faster than GEMV if we have to copy data
    if not keep_left or not keep_right:
        return False

    # We are a matrix-matrix product, but we need to copy data
    return True


def _parse_einsum_input(operands):
    """
    A reproduction of einsum c side einsum parsing in python.

    Returns
    -------
    input_strings : str
        Parsed input strings
    output_string : str
        Parsed output string
    operands : list of array_like
        The operands to use in the numpy contraction

    Examples
    --------
    The operand list is simplified to reduce printing:

    >>> np.random.seed(123)
    >>> a = np.random.rand(4, 4)
    >>> b = np.random.rand(4, 4, 4)
    >>> _parse_einsum_input(('...a,...a->...', a, b))
    ('za,xza', 'xz', [a, b]) # may vary

    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
    ('za,xza', 'xz', [a, b]) # may vary
    """

    if len(operands) == 0:
        raise ValueError("No input operands")

    if isinstance(operands[0], str):
        subscripts = operands[0].replace(" ", "")
        operands = [asanyarray(v) for v in operands[1:]]

        # Ensure all characters are valid
        for s in subscripts:
            if s in '.,->':
                continue
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)

    else:
        tmp_operands = list(operands)
        operand_list = []
        subscript_list = []
        for p in range(len(operands) // 2):
            operand_list.append(tmp_operands.pop(0))
            subscript_list.append(tmp_operands.pop(0))

        output_list = tmp_operands[-1] if len(tmp_operands) else None
        operands = [asanyarray(v) for v in operand_list]
        subscripts = ""
        last = len(subscript_list) - 1
        for num, sub in enumerate(subscript_list):
            for s in sub:
                if s is Ellipsis:
                    subscripts += "..."
                else:
                    try:
                        s = operator.index(s)
                    except TypeError as e:
                        raise TypeError("For this input type lists must contain "
                                        "either int or Ellipsis") from e
                    subscripts += einsum_symbols[s]
            if num != last:
                subscripts += ","

        if output_list is not None:
            subscripts += "->"
            for s in output_list:
                if s is Ellipsis:
                    subscripts += "..."
                else:
                    try:
                        s = operator.index(s)
                    except TypeError as e:
                        raise TypeError("For this input type lists must contain "
                                        "either int or Ellipsis") from e
                    subscripts += einsum_symbols[s]
    # Check for proper "->"
    if ("-" in subscripts) or (">" in subscripts):
        invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
        if invalid or (subscripts.count("->") != 1):
            raise ValueError("Subscripts can only contain one '->'.")

    # Parse ellipses
    if "." in subscripts:
        used = subscripts.replace(".", "").replace(",", "").replace("->", "")
        unused = list(einsum_symbols_set - set(used))
        ellipse_inds = "".join(unused)
        longest = 0

        if "->" in subscripts:
            input_tmp, output_sub = subscripts.split("->")
            split_subscripts = input_tmp.split(",")
            out_sub = True
        else:
            split_subscripts = subscripts.split(',')
            out_sub = False

        for num, sub in enumerate(split_subscripts):
            if "." in sub:
                if (sub.count(".") != 3) or (sub.count("...") != 1):
                    raise ValueError("Invalid Ellipses.")

                # Take into account numerical values
                if operands[num].shape == ():
                    ellipse_count = 0
                else:
                    ellipse_count = max(operands[num].ndim, 1)
                    ellipse_count -= (len(sub) - 3)

                if ellipse_count > longest:
                    longest = ellipse_count

                if ellipse_count < 0:
                    raise ValueError("Ellipses lengths do not match.")
                elif ellipse_count == 0:
                    split_subscripts[num] = sub.replace('...', '')
                else:
                    rep_inds = ellipse_inds[-ellipse_count:]
                    split_subscripts[num] = sub.replace('...', rep_inds)

        subscripts = ",".join(split_subscripts)
        if longest == 0:
            out_ellipse = ""
        else:
            out_ellipse = ellipse_inds[-longest:]

        if out_sub:
            subscripts += "->" + output_sub.replace("...", out_ellipse)
        else:
            # Special care for outputless ellipses
            output_subscript = ""
            tmp_subscripts = subscripts.replace(",", "")
            for s in sorted(set(tmp_subscripts)):
                if s not in (einsum_symbols):
                    raise ValueError("Character %s is not a valid symbol." % s)
                if tmp_subscripts.count(s) == 1:
                    output_subscript += s
            normal_inds = ''.join(sorted(set(output_subscript) -
                                         set(out_ellipse)))

            subscripts += "->" + out_ellipse + normal_inds

    # Build output string if does not exist
    if "->" in subscripts:
        input_subscripts, output_subscript = subscripts.split("->")
    else:
        input_subscripts = subscripts
        # Build output subscripts
        tmp_subscripts = subscripts.replace(",", "")
        output_subscript = ""
        for s in sorted(set(tmp_subscripts)):
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)
            if tmp_subscripts.count(s) == 1:
                output_subscript += s

    # Make sure output subscripts are in the input
    for char in output_subscript:
        if char not in input_subscripts:
            raise ValueError("Output character %s did not appear in the input"
                             % char)

    # Make sure number operands is equivalent to the number of terms
    if len(input_subscripts.split(',')) != len(operands):
        raise ValueError("Number of einsum subscripts must be equal to the "
                         "number of operands.")

    return (input_subscripts, output_subscript, operands)

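As a quick orientation aid (not part of the diff): the helper above normalizes the two public einsum calling conventions into one subscript string. A minimal sketch of those two equivalent spellings, with arbitrary example shapes:

    import numpy as np

    a = np.random.rand(4, 4)
    b = np.random.rand(4, 4, 4)

    # Subscript-string form: the first operand is the subscript string.
    r1 = np.einsum('...a,...a->...', a, b)

    # Interleaved form: each array is followed by its subscript list,
    # with integers standing in for labels and Ellipsis for '...'.
    r2 = np.einsum(a, [Ellipsis, 0], b, [Ellipsis, 0], [Ellipsis])

    assert np.allclose(r1, r2)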
def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
    # NOTE: technically, we should only dispatch on array-like arguments, not
    # subscripts (given as strings). But separating operands into
    # arrays/subscripts is a little tricky/slow (given einsum's two supported
    # signatures), so as a practical shortcut we dispatch on everything.
    # Strings will be ignored for dispatching since they don't define
    # __array_function__.
    return operands


@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
def einsum_path(*operands, optimize='greedy', einsum_call=False):
    """
    einsum_path(subscripts, *operands, optimize='greedy')

    Evaluates the lowest cost contraction order for an einsum expression by
    considering the creation of intermediate arrays.

    Parameters
    ----------
    subscripts : str
        Specifies the subscripts for summation.
    *operands : list of array_like
        These are the arrays for the operation.
    optimize : {bool, list, tuple, 'greedy', 'optimal'}
        Choose the type of path. If a tuple is provided, the second argument is
        assumed to be the maximum intermediate size created. If only a single
        argument is provided the largest input or output array size is used
        as a maximum intermediate size.

        * if a list is given that starts with ``einsum_path``, uses this as the
          contraction path
        * if False no optimization is taken
        * if True defaults to the 'greedy' algorithm
        * 'optimal' An algorithm that combinatorially explores all possible
          ways of contracting the listed tensors and chooses the least costly
          path. Scales exponentially with the number of terms in the
          contraction.
        * 'greedy' An algorithm that chooses the best pair contraction
          at each step. Effectively, this algorithm searches the largest inner,
          Hadamard, and then outer products at each step. Scales cubically with
          the number of terms in the contraction. Equivalent to the 'optimal'
          path for most contractions.

        Default is 'greedy'.

    Returns
    -------
    path : list of tuples
        A list representation of the einsum path.
    string_repr : str
        A printable representation of the einsum path.

    Notes
    -----
    The resulting path indicates which terms of the input contraction should be
    contracted first, the result of this contraction is then appended to the
    end of the contraction list. This list can then be iterated over until all
    intermediate contractions are complete.

    See Also
    --------
    einsum, linalg.multi_dot

    Examples
    --------

    We can begin with a chain dot example. In this case, it is optimal to
    contract the ``b`` and ``c`` tensors first as represented by the first
    element of the path ``(1, 2)``. The resulting tensor is added to the end
    of the contraction and the remaining contraction ``(0, 1)`` is then
    completed.

    >>> np.random.seed(123)
    >>> a = np.random.rand(2, 2)
    >>> b = np.random.rand(2, 5)
    >>> c = np.random.rand(5, 2)
    >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
    >>> print(path_info[0])
    ['einsum_path', (1, 2), (0, 1)]
    >>> print(path_info[1])
      Complete contraction:  ij,jk,kl->il # may vary
             Naive scaling:  4
         Optimized scaling:  3
          Naive FLOP count:  1.600e+02
      Optimized FLOP count:  5.600e+01
       Theoretical speedup:  2.857
      Largest intermediate:  4.000e+00 elements
    -------------------------------------------------------------------------
    scaling                  current                                remaining
    -------------------------------------------------------------------------
       3                   kl,jk->jl                                ij,jl->il
       3                   jl,ij->il                                   il->il


    A more complex index transformation example.

    >>> I = np.random.rand(10, 10, 10, 10)
    >>> C = np.random.rand(10, 10)
    >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
    ...                            optimize='greedy')

    >>> print(path_info[0])
    ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
    >>> print(path_info[1])
      Complete contraction:  ea,fb,abcd,gc,hd->efgh # may vary
             Naive scaling:  8
         Optimized scaling:  5
          Naive FLOP count:  8.000e+08
      Optimized FLOP count:  8.000e+05
       Theoretical speedup:  1000.000
      Largest intermediate:  1.000e+04 elements
    --------------------------------------------------------------------------
    scaling                  current                                remaining
    --------------------------------------------------------------------------
       5               abcd,ea->bcde                      fb,gc,hd,bcde->efgh
       5               bcde,fb->cdef                         gc,hd,cdef->efgh
       5               cdef,gc->defg                            hd,defg->efgh
       5               defg,hd->efgh                               efgh->efgh
    """

    # Figure out what the path really is
    path_type = optimize
    if path_type is True:
        path_type = 'greedy'
    if path_type is None:
        path_type = False

    explicit_einsum_path = False
    memory_limit = None

    # No optimization or a named path algorithm
    if (path_type is False) or isinstance(path_type, str):
        pass

    # Given an explicit path
    elif len(path_type) and (path_type[0] == 'einsum_path'):
        explicit_einsum_path = True

    # Path tuple with memory limit
    elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
            isinstance(path_type[1], (int, float))):
        memory_limit = int(path_type[1])
        path_type = path_type[0]

    else:
        raise TypeError("Did not understand the path: %s" % str(path_type))

    # Hidden option, only einsum should call this
    einsum_call_arg = einsum_call

    # Python side parsing
    input_subscripts, output_subscript, operands = _parse_einsum_input(operands)

    # Build a few useful list and sets
    input_list = input_subscripts.split(',')
    input_sets = [set(x) for x in input_list]
    output_set = set(output_subscript)
    indices = set(input_subscripts.replace(',', ''))

    # Get length of each unique dimension and ensure all dimensions are correct
    dimension_dict = {}
    broadcast_indices = [[] for x in range(len(input_list))]
    for tnum, term in enumerate(input_list):
        sh = operands[tnum].shape
        if len(sh) != len(term):
            raise ValueError("Einstein sum subscript %s does not contain the "
                             "correct number of indices for operand %d."
                             % (input_subscripts[tnum], tnum))
        for cnum, char in enumerate(term):
            dim = sh[cnum]

            # Build out broadcast indices
            if dim == 1:
                broadcast_indices[tnum].append(char)

            if char in dimension_dict.keys():
                # For broadcasting cases we always want the largest dim size
                if dimension_dict[char] == 1:
                    dimension_dict[char] = dim
                elif dim not in (1, dimension_dict[char]):
                    raise ValueError("Size of label '%s' for operand %d (%d) "
                                     "does not match previous terms (%d)."
                                     % (char, tnum, dimension_dict[char], dim))
            else:
                dimension_dict[char] = dim

    # Convert broadcast inds to sets
    broadcast_indices = [set(x) for x in broadcast_indices]

    # Compute size of each input array plus the output array
    size_list = [_compute_size_by_dict(term, dimension_dict)
                 for term in input_list + [output_subscript]]
    max_size = max(size_list)

    if memory_limit is None:
        memory_arg = max_size
    else:
        memory_arg = memory_limit

    # Compute naive cost
    # This isn't quite right, need to look into exactly how einsum does this
    inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
    naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)

    # Compute the path
    if explicit_einsum_path:
        path = path_type[1:]
    elif (
        (path_type is False)
        or (len(input_list) in [1, 2])
        or (indices == output_set)
    ):
        # Nothing to be optimized, leave it to einsum
        path = [tuple(range(len(input_list)))]
    elif path_type == "greedy":
        path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
    elif path_type == "optimal":
        path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
    else:
        raise KeyError("Path name %s not found", path_type)

    cost_list, scale_list, size_list, contraction_list = [], [], [], []

    # Build contraction tuple (positions, gemm, einsum_str, remaining)
    for cnum, contract_inds in enumerate(path):
        # Make sure we remove inds from right to left
        contract_inds = tuple(sorted(list(contract_inds), reverse=True))

        contract = _find_contraction(contract_inds, input_sets, output_set)
        out_inds, input_sets, idx_removed, idx_contract = contract

        cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
        cost_list.append(cost)
        scale_list.append(len(idx_contract))
        size_list.append(_compute_size_by_dict(out_inds, dimension_dict))

        bcast = set()
        tmp_inputs = []
        for x in contract_inds:
            tmp_inputs.append(input_list.pop(x))
            bcast |= broadcast_indices.pop(x)

        new_bcast_inds = bcast - idx_removed

        # If we're broadcasting, nix blas
        if not len(idx_removed & bcast):
            do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
        else:
            do_blas = False

        # Last contraction
        if (cnum - len(path)) == -1:
            idx_result = output_subscript
        else:
            sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
            idx_result = "".join([x[1] for x in sorted(sort_result)])

        input_list.append(idx_result)
        broadcast_indices.append(new_bcast_inds)
        einsum_str = ",".join(tmp_inputs) + "->" + idx_result

        contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
        contraction_list.append(contraction)

    opt_cost = sum(cost_list) + 1

    if len(input_list) != 1:
        # Explicit "einsum_path" is usually trusted, but we detect this kind of
        # mistake in order to prevent from returning an intermediate value.
        raise RuntimeError(
            "Invalid einsum_path is specified: {} more operands has to be "
            "contracted.".format(len(input_list) - 1))

    if einsum_call_arg:
        return (operands, contraction_list)

    # Return the path along with a nice string representation
    overall_contraction = input_subscripts + "->" + output_subscript
    header = ("scaling", "current", "remaining")

    speedup = naive_cost / opt_cost
    max_i = max(size_list)

    path_print = "  Complete contraction:  %s\n" % overall_contraction
    path_print += "         Naive scaling:  %d\n" % len(indices)
    path_print += "     Optimized scaling:  %d\n" % max(scale_list)
    path_print += "      Naive FLOP count:  %.3e\n" % naive_cost
    path_print += "  Optimized FLOP count:  %.3e\n" % opt_cost
    path_print += "   Theoretical speedup:  %3.3f\n" % speedup
    path_print += "  Largest intermediate:  %.3e elements\n" % max_i
    path_print += "-" * 74 + "\n"
    path_print += "%6s %24s %40s\n" % header
    path_print += "-" * 74

    for n, contraction in enumerate(contraction_list):
        inds, idx_rm, einsum_str, remaining, blas = contraction
        remaining_str = ",".join(remaining) + "->" + output_subscript
        path_run = (scale_list[n], einsum_str, remaining_str)
        path_print += "\n%4d    %24s %40s" % path_run

    path = ['einsum_path'] + path
    return (path, path_print)

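For reference, a minimal sketch of the reuse pattern the docstring above recommends: compute the contraction order once, then pass the explicit ``['einsum_path', ...]`` list back to ``einsum`` so later calls skip path optimization (shapes here are arbitrary):

    import numpy as np

    a = np.random.rand(2, 2)
    b = np.random.rand(2, 5)
    c = np.random.rand(5, 2)

    # Compute the contraction order once.
    path, info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='optimal')

    # Reuse it on subsequent calls with the same subscripts and shapes.
    result = np.einsum('ij,jk,kl->il', a, b, c, optimize=path)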
def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
    # Arguably we dispatch on more arguments than we really should; see note in
    # _einsum_path_dispatcher for why.
    yield from operands
    yield out


# Rewrite einsum to handle different cases
@array_function_dispatch(_einsum_dispatcher, module='numpy')
def einsum(*operands, out=None, optimize=False, **kwargs):
1011 |
+
"""
|
1012 |
+
einsum(subscripts, *operands, out=None, dtype=None, order='K',
|
1013 |
+
casting='safe', optimize=False)
|
1014 |
+
|
1015 |
+
Evaluates the Einstein summation convention on the operands.
|
1016 |
+
|
1017 |
+
Using the Einstein summation convention, many common multi-dimensional,
|
1018 |
+
linear algebraic array operations can be represented in a simple fashion.
|
1019 |
+
In *implicit* mode `einsum` computes these values.
|
1020 |
+
|
1021 |
+
In *explicit* mode, `einsum` provides further flexibility to compute
|
1022 |
+
other array operations that might not be considered classical Einstein
|
1023 |
+
summation operations, by disabling, or forcing summation over specified
|
1024 |
+
subscript labels.
|
1025 |
+
|
1026 |
+
See the notes and examples for clarification.
|
1027 |
+
|
1028 |
+
Parameters
|
1029 |
+
----------
|
1030 |
+
subscripts : str
|
1031 |
+
Specifies the subscripts for summation as comma separated list of
|
1032 |
+
subscript labels. An implicit (classical Einstein summation)
|
1033 |
+
calculation is performed unless the explicit indicator '->' is
|
1034 |
+
included as well as subscript labels of the precise output form.
|
1035 |
+
operands : list of array_like
|
1036 |
+
These are the arrays for the operation.
|
1037 |
+
out : ndarray, optional
|
1038 |
+
If provided, the calculation is done into this array.
|
1039 |
+
dtype : {data-type, None}, optional
|
1040 |
+
If provided, forces the calculation to use the data type specified.
|
1041 |
+
Note that you may have to also give a more liberal `casting`
|
1042 |
+
parameter to allow the conversions. Default is None.
|
1043 |
+
order : {'C', 'F', 'A', 'K'}, optional
|
1044 |
+
Controls the memory layout of the output. 'C' means it should
|
1045 |
+
be C contiguous. 'F' means it should be Fortran contiguous,
|
1046 |
+
'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
|
1047 |
+
'K' means it should be as close to the layout as the inputs as
|
1048 |
+
is possible, including arbitrarily permuted axes.
|
1049 |
+
Default is 'K'.
|
1050 |
+
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
|
1051 |
+
Controls what kind of data casting may occur. Setting this to
|
1052 |
+
'unsafe' is not recommended, as it can adversely affect accumulations.
|
1053 |
+
|
1054 |
+
* 'no' means the data types should not be cast at all.
|
1055 |
+
* 'equiv' means only byte-order changes are allowed.
|
1056 |
+
* 'safe' means only casts which can preserve values are allowed.
|
1057 |
+
* 'same_kind' means only safe casts or casts within a kind,
|
1058 |
+
like float64 to float32, are allowed.
|
1059 |
+
* 'unsafe' means any data conversions may be done.
|
1060 |
+
|
1061 |
+
Default is 'safe'.
|
1062 |
+
optimize : {False, True, 'greedy', 'optimal'}, optional
|
1063 |
+
Controls if intermediate optimization should occur. No optimization
|
1064 |
+
will occur if False and True will default to the 'greedy' algorithm.
|
1065 |
+
Also accepts an explicit contraction list from the ``np.einsum_path``
|
1066 |
+
function. See ``np.einsum_path`` for more details. Defaults to False.
|
1067 |
+
|
1068 |
+
Returns
|
1069 |
+
-------
|
1070 |
+
output : ndarray
|
1071 |
+
The calculation based on the Einstein summation convention.
|
1072 |
+
|
1073 |
+
See Also
|
1074 |
+
--------
|
1075 |
+
einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
|
1076 |
+
einops :
|
1077 |
+
similar verbose interface is provided by
|
1078 |
+
`einops <https://github.com/arogozhnikov/einops>`_ package to cover
|
1079 |
+
additional operations: transpose, reshape/flatten, repeat/tile,
|
1080 |
+
squeeze/unsqueeze and reductions.
|
1081 |
+
opt_einsum :
|
1082 |
+
`opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
|
1083 |
+
optimizes contraction order for einsum-like expressions
|
1084 |
+
in backend-agnostic manner.
|
1085 |
+
|
1086 |
+
Notes
|
1087 |
+
-----
|
1088 |
+
.. versionadded:: 1.6.0
|
1089 |
+
|
1090 |
+
The Einstein summation convention can be used to compute
|
1091 |
+
many multi-dimensional, linear algebraic array operations. `einsum`
|
1092 |
+
provides a succinct way of representing these.
|
1093 |
+
|
1094 |
+
A non-exhaustive list of these operations,
|
1095 |
+
which can be computed by `einsum`, is shown below along with examples:
|
1096 |
+
|
1097 |
+
* Trace of an array, :py:func:`numpy.trace`.
|
1098 |
+
* Return a diagonal, :py:func:`numpy.diag`.
|
1099 |
+
* Array axis summations, :py:func:`numpy.sum`.
|
1100 |
+
* Transpositions and permutations, :py:func:`numpy.transpose`.
|
1101 |
+
* Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
|
1102 |
+
* Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
|
1103 |
+
* Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
|
1104 |
+
* Tensor contractions, :py:func:`numpy.tensordot`.
|
1105 |
+
* Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
|
1106 |
+
|
1107 |
+
The subscripts string is a comma-separated list of subscript labels,
|
1108 |
+
where each label refers to a dimension of the corresponding operand.
|
1109 |
+
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
|
1110 |
+
is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
|
1111 |
+
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
|
1112 |
+
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
|
1113 |
+
describes traditional matrix multiplication and is equivalent to
|
1114 |
+
:py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
|
1115 |
+
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
|
1116 |
+
to :py:func:`np.trace(a) <numpy.trace>`.
|
1117 |
+
|
1118 |
+
In *implicit mode*, the chosen subscripts are important
|
1119 |
+
since the axes of the output are reordered alphabetically. This
|
1120 |
+
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
|
1121 |
+
``np.einsum('ji', a)`` takes its transpose. Additionally,
|
1122 |
+
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
|
1123 |
+
``np.einsum('ij,jh', a, b)`` returns the transpose of the
|
1124 |
+
multiplication since subscript 'h' precedes subscript 'i'.
|
1125 |
+
|
1126 |
+
In *explicit mode* the output can be directly controlled by
|
1127 |
+
specifying output subscript labels. This requires the
|
1128 |
+
identifier '->' as well as the list of output subscript labels.
|
1129 |
+
This feature increases the flexibility of the function since
|
1130 |
+
summing can be disabled or forced when required. The call
|
1131 |
+
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
|
1132 |
+
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
|
1133 |
+
The difference is that `einsum` does not allow broadcasting by default.
|
1134 |
+
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
|
1135 |
+
order of the output subscript labels and therefore returns matrix
|
1136 |
+
multiplication, unlike the example above in implicit mode.
|
1137 |
+
|
1138 |
+
To enable and control broadcasting, use an ellipsis. Default
|
1139 |
+
NumPy-style broadcasting is done by adding an ellipsis
|
1140 |
+
to the left of each term, like ``np.einsum('...ii->...i', a)``.
|
1141 |
+
To take the trace along the first and last axes,
|
1142 |
+
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
|
1143 |
+
product with the left-most indices instead of rightmost, one can do
|
1144 |
+
``np.einsum('ij...,jk...->ik...', a, b)``.
|
1145 |
+
|
1146 |
+
When there is only one operand, no axes are summed, and no output
|
1147 |
+
parameter is provided, a view into the operand is returned instead
|
1148 |
+
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
|
1149 |
+
produces a view (changed in version 1.10.0).
|
1150 |
+
|
1151 |
+
`einsum` also provides an alternative way to provide the subscripts
|
1152 |
+
and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
|
1153 |
+
If the output shape is not provided in this format `einsum` will be
|
1154 |
+
calculated in implicit mode, otherwise it will be performed explicitly.
|
1155 |
+
The examples below have corresponding `einsum` calls with the two
|
1156 |
+
parameter methods.
|
1157 |
+
|
1158 |
+
.. versionadded:: 1.10.0
|
1159 |
+
|
1160 |
+
Views returned from einsum are now writeable whenever the input array
|
1161 |
+
is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
|
1162 |
+
have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
|
1163 |
+
and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
|
1164 |
+
of a 2D array.
|
1165 |
+
|
1166 |
+
.. versionadded:: 1.12.0
|
1167 |
+
|
1168 |
+
Added the ``optimize`` argument which will optimize the contraction order
|
1169 |
+
of an einsum expression. For a contraction with three or more operands this
|
1170 |
+
can greatly increase the computational efficiency at the cost of a larger
|
1171 |
+
memory footprint during computation.
|
1172 |
+
|
1173 |
+
Typically a 'greedy' algorithm is applied which empirical tests have shown
|
1174 |
+
returns the optimal path in the majority of cases. In some cases 'optimal'
|
1175 |
+
will return the superlative path through a more expensive, exhaustive search.
|
1176 |
+
For iterative calculations it may be advisable to calculate the optimal path
|
1177 |
+
once and reuse that path by supplying it as an argument. An example is given
|
1178 |
+
below.
|
1179 |
+
|
1180 |
+
See :py:func:`numpy.einsum_path` for more details.
|
1181 |
+
|
1182 |
+
Examples
|
1183 |
+
--------
|
1184 |
+
>>> a = np.arange(25).reshape(5,5)
|
1185 |
+
>>> b = np.arange(5)
|
1186 |
+
>>> c = np.arange(6).reshape(2,3)
|
1187 |
+
|
1188 |
+
Trace of a matrix:
|
1189 |
+
|
1190 |
+
>>> np.einsum('ii', a)
|
1191 |
+
60
|
1192 |
+
>>> np.einsum(a, [0,0])
|
1193 |
+
60
|
1194 |
+
>>> np.trace(a)
|
1195 |
+
60
|
1196 |
+
|
1197 |
+
Extract the diagonal (requires explicit form):
|
1198 |
+
|
1199 |
+
>>> np.einsum('ii->i', a)
|
1200 |
+
array([ 0, 6, 12, 18, 24])
|
1201 |
+
>>> np.einsum(a, [0,0], [0])
|
1202 |
+
array([ 0, 6, 12, 18, 24])
|
1203 |
+
>>> np.diag(a)
|
1204 |
+
array([ 0, 6, 12, 18, 24])
|
1205 |
+
|
1206 |
+
Sum over an axis (requires explicit form):
|
1207 |
+
|
1208 |
+
>>> np.einsum('ij->i', a)
|
1209 |
+
array([ 10, 35, 60, 85, 110])
|
1210 |
+
>>> np.einsum(a, [0,1], [0])
|
1211 |
+
array([ 10, 35, 60, 85, 110])
|
1212 |
+
>>> np.sum(a, axis=1)
|
1213 |
+
array([ 10, 35, 60, 85, 110])
|
1214 |
+
|
1215 |
+
For higher dimensional arrays summing a single axis can be done with ellipsis:
|
1216 |
+
|
1217 |
+
>>> np.einsum('...j->...', a)
|
1218 |
+
array([ 10, 35, 60, 85, 110])
|
1219 |
+
>>> np.einsum(a, [Ellipsis,1], [Ellipsis])
|
1220 |
+
array([ 10, 35, 60, 85, 110])
|
1221 |
+
|
1222 |
+
Compute a matrix transpose, or reorder any number of axes:
|
1223 |
+
|
1224 |
+
>>> np.einsum('ji', c)
|
1225 |
+
array([[0, 3],
|
1226 |
+
[1, 4],
|
1227 |
+
[2, 5]])
|
1228 |
+
>>> np.einsum('ij->ji', c)
|
1229 |
+
array([[0, 3],
|
1230 |
+
[1, 4],
|
1231 |
+
[2, 5]])
|
1232 |
+
>>> np.einsum(c, [1,0])
|
1233 |
+
array([[0, 3],
|
1234 |
+
[1, 4],
|
1235 |
+
[2, 5]])
|
1236 |
+
>>> np.transpose(c)
|
1237 |
+
array([[0, 3],
|
1238 |
+
[1, 4],
|
1239 |
+
[2, 5]])
|
1240 |
+
|
1241 |
+
Vector inner products:
|
1242 |
+
|
1243 |
+
>>> np.einsum('i,i', b, b)
|
1244 |
+
30
|
1245 |
+
>>> np.einsum(b, [0], b, [0])
|
1246 |
+
30
|
1247 |
+
>>> np.inner(b,b)
|
1248 |
+
30
|
1249 |
+
|
1250 |
+
Matrix vector multiplication:
|
1251 |
+
|
1252 |
+
>>> np.einsum('ij,j', a, b)
|
1253 |
+
array([ 30, 80, 130, 180, 230])
|
1254 |
+
>>> np.einsum(a, [0,1], b, [1])
|
1255 |
+
array([ 30, 80, 130, 180, 230])
|
1256 |
+
>>> np.dot(a, b)
|
1257 |
+
array([ 30, 80, 130, 180, 230])
|
1258 |
+
>>> np.einsum('...j,j', a, b)
|
1259 |
+
array([ 30, 80, 130, 180, 230])
|
1260 |
+
|
1261 |
+
Broadcasting and scalar multiplication:
|
1262 |
+
|
1263 |
+
>>> np.einsum('..., ...', 3, c)
|
1264 |
+
array([[ 0, 3, 6],
|
1265 |
+
[ 9, 12, 15]])
|
1266 |
+
>>> np.einsum(',ij', 3, c)
|
1267 |
+
array([[ 0, 3, 6],
|
1268 |
+
[ 9, 12, 15]])
|
1269 |
+
>>> np.einsum(3, [Ellipsis], c, [Ellipsis])
|
1270 |
+
array([[ 0, 3, 6],
|
1271 |
+
[ 9, 12, 15]])
|
1272 |
+
>>> np.multiply(3, c)
|
1273 |
+
array([[ 0, 3, 6],
|
1274 |
+
[ 9, 12, 15]])
|
1275 |
+
|
1276 |
+
Vector outer product:
|
1277 |
+
|
1278 |
+
>>> np.einsum('i,j', np.arange(2)+1, b)
|
1279 |
+
array([[0, 1, 2, 3, 4],
|
1280 |
+
[0, 2, 4, 6, 8]])
|
1281 |
+
>>> np.einsum(np.arange(2)+1, [0], b, [1])
|
1282 |
+
array([[0, 1, 2, 3, 4],
|
1283 |
+
[0, 2, 4, 6, 8]])
|
1284 |
+
>>> np.outer(np.arange(2)+1, b)
|
1285 |
+
array([[0, 1, 2, 3, 4],
|
1286 |
+
[0, 2, 4, 6, 8]])
|
1287 |
+
|
1288 |
+
Tensor contraction:
|
1289 |
+
|
1290 |
+
>>> a = np.arange(60.).reshape(3,4,5)
|
1291 |
+
>>> b = np.arange(24.).reshape(4,3,2)
|
1292 |
+
>>> np.einsum('ijk,jil->kl', a, b)
|
1293 |
+
array([[4400., 4730.],
|
1294 |
+
[4532., 4874.],
|
1295 |
+
[4664., 5018.],
|
1296 |
+
[4796., 5162.],
|
1297 |
+
[4928., 5306.]])
|
1298 |
+
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
|
1299 |
+
array([[4400., 4730.],
|
1300 |
+
[4532., 4874.],
|
1301 |
+
[4664., 5018.],
|
1302 |
+
[4796., 5162.],
|
1303 |
+
[4928., 5306.]])
|
1304 |
+
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
|
1305 |
+
array([[4400., 4730.],
|
1306 |
+
[4532., 4874.],
|
1307 |
+
[4664., 5018.],
|
1308 |
+
[4796., 5162.],
|
1309 |
+
[4928., 5306.]])
|
1310 |
+
|
1311 |
+
Writeable returned arrays (since version 1.10.0):
|
1312 |
+
|
1313 |
+
>>> a = np.zeros((3, 3))
|
1314 |
+
>>> np.einsum('ii->i', a)[:] = 1
|
1315 |
+
>>> a
|
1316 |
+
array([[1., 0., 0.],
|
1317 |
+
[0., 1., 0.],
|
1318 |
+
[0., 0., 1.]])
|
1319 |
+
|
1320 |
+
Example of ellipsis use:
|
1321 |
+
|
1322 |
+
>>> a = np.arange(6).reshape((3,2))
|
1323 |
+
>>> b = np.arange(12).reshape((4,3))
|
1324 |
+
>>> np.einsum('ki,jk->ij', a, b)
|
1325 |
+
array([[10, 28, 46, 64],
|
1326 |
+
[13, 40, 67, 94]])
|
1327 |
+
>>> np.einsum('ki,...k->i...', a, b)
|
1328 |
+
array([[10, 28, 46, 64],
|
1329 |
+
[13, 40, 67, 94]])
|
1330 |
+
>>> np.einsum('k...,jk', a, b)
|
1331 |
+
array([[10, 28, 46, 64],
|
1332 |
+
[13, 40, 67, 94]])
|
1333 |
+
|
1334 |
+
Chained array operations. For more complicated contractions, speed ups
|
1335 |
+
might be achieved by repeatedly computing a 'greedy' path or pre-computing the
|
1336 |
+
'optimal' path and repeatedly applying it, using an
|
1337 |
+
`einsum_path` insertion (since version 1.12.0). Performance improvements can be
|
1338 |
+
particularly significant with larger arrays:
|
1339 |
+
|
1340 |
+
>>> a = np.ones(64).reshape(2,4,8)
|
1341 |
+
|
1342 |
+
Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.)
|
1343 |
+
|
1344 |
+
>>> for iteration in range(500):
|
1345 |
+
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
|
1346 |
+
|
1347 |
+
Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
|
1348 |
+
|
1349 |
+
>>> for iteration in range(500):
|
1350 |
+
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
|
1351 |
+
|
1352 |
+
Greedy `einsum` (faster optimal path approximation): ~160ms
|
1353 |
+
|
1354 |
+
>>> for iteration in range(500):
|
1355 |
+
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
|
1356 |
+
|
1357 |
+
Optimal `einsum` (best usage pattern in some use cases): ~110ms
|
1358 |
+
|
1359 |
+
>>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
|
1360 |
+
>>> for iteration in range(500):
|
1361 |
+
... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
|
1362 |
+
|
1363 |
+
"""
|
    # Special handling if out is specified
    specified_out = out is not None

    # If no optimization, run pure einsum
    if optimize is False:
        if specified_out:
            kwargs['out'] = out
        return c_einsum(*operands, **kwargs)

    # Check the kwargs to avoid a more cryptic error later, without having to
    # repeat default values here
    valid_einsum_kwargs = ['dtype', 'order', 'casting']
    unknown_kwargs = [k for (k, v) in kwargs.items() if
                      k not in valid_einsum_kwargs]
    if len(unknown_kwargs):
        raise TypeError("Did not understand the following kwargs: %s"
                        % unknown_kwargs)

    # Build the contraction list and operand
    operands, contraction_list = einsum_path(*operands, optimize=optimize,
                                             einsum_call=True)

    # Handle order kwarg for output array, c_einsum allows mixed case
    output_order = kwargs.pop('order', 'K')
    if output_order.upper() == 'A':
        if all(arr.flags.f_contiguous for arr in operands):
            output_order = 'F'
        else:
            output_order = 'C'

    # Start contraction loop
    for num, contraction in enumerate(contraction_list):
        inds, idx_rm, einsum_str, remaining, blas = contraction
        tmp_operands = [operands.pop(x) for x in inds]

        # Do we need to deal with the output?
        handle_out = specified_out and ((num + 1) == len(contraction_list))

        # Call tensordot if still possible
        if blas:
            # Checks have already been handled
            input_str, results_index = einsum_str.split('->')
            input_left, input_right = input_str.split(',')

            tensor_result = input_left + input_right
            for s in idx_rm:
                tensor_result = tensor_result.replace(s, "")

            # Find indices to contract over
            left_pos, right_pos = [], []
            for s in sorted(idx_rm):
                left_pos.append(input_left.find(s))
                right_pos.append(input_right.find(s))

            # Contract!
            new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))

            # Build a new view if needed
            if (tensor_result != results_index) or handle_out:
                if handle_out:
                    kwargs["out"] = out
                new_view = c_einsum(tensor_result + '->' + results_index, new_view, **kwargs)

        # Call einsum
        else:
            # If out was specified
            if handle_out:
                kwargs["out"] = out

            # Do the contraction
            new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)

        # Append new items and dereference what we can
        operands.append(new_view)
        del tmp_operands, new_view

    if specified_out:
        return out
    else:
        return asanyarray(operands[0], order=output_order)
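A side note on the tensordot branch above (a sketch, not part of the diff): when a pairwise contraction's removed labels each appear once per operand, the einsum string maps directly onto tensordot axes built from the label positions, which is what the loop over ``sorted(idx_rm)`` computes. Illustration with arbitrary shapes:

    import numpy as np

    a = np.random.rand(3, 4, 5)
    b = np.random.rand(4, 3, 2)

    # 'ijk,jil->kl': removed labels i and j sit at positions (0, 1) in a
    # and (1, 0) in b, so the same contraction is a tensordot over those axes.
    r_einsum = np.einsum('ijk,jil->kl', a, b)
    r_tdot = np.tensordot(a, b, axes=((0, 1), (1, 0)))

    assert np.allclose(r_einsum, r_tdot)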
env-llmeval/lib/python3.10/site-packages/numpy/core/getlimits.py
ADDED
@@ -0,0 +1,735 @@
"""Machine limits for Float32 and Float64 and (long double) if available...

"""
__all__ = ['finfo', 'iinfo']

import warnings

from .._utils import set_module
from ._machar import MachAr
from . import numeric
from . import numerictypes as ntypes
from .numeric import array, inf, NaN
from .umath import log10, exp2, nextafter, isnan


def _fr0(a):
    """fix rank-0 --> rank-1"""
    if a.ndim == 0:
        a = a.copy()
        a.shape = (1,)
    return a


def _fr1(a):
    """fix rank > 0 --> rank-0"""
    if a.size == 1:
        a = a.copy()
        a.shape = ()
    return a

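A quick sketch of what these two shape shims do (illustrative only, mirroring the helpers above rather than importing them):

    import numpy as np

    x = np.array(1.5)      # rank-0 array, shape ()
    x1 = x.copy()
    x1.shape = (1,)        # what _fr0 yields: a one-element rank-1 array

    y = np.array([2.5])    # one-element rank-1 array, shape (1,)
    y1 = y.copy()
    y1.shape = ()          # what _fr1 yields: back to a rank-0 array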
32 |
+
class MachArLike:
|
33 |
+
""" Object to simulate MachAr instance """
|
34 |
+
def __init__(self, ftype, *, eps, epsneg, huge, tiny,
|
35 |
+
ibeta, smallest_subnormal=None, **kwargs):
|
36 |
+
self.params = _MACHAR_PARAMS[ftype]
|
37 |
+
self.ftype = ftype
|
38 |
+
self.title = self.params['title']
|
39 |
+
# Parameter types same as for discovered MachAr object.
|
40 |
+
if not smallest_subnormal:
|
41 |
+
self._smallest_subnormal = nextafter(
|
42 |
+
self.ftype(0), self.ftype(1), dtype=self.ftype)
|
43 |
+
else:
|
44 |
+
self._smallest_subnormal = smallest_subnormal
|
45 |
+
self.epsilon = self.eps = self._float_to_float(eps)
|
46 |
+
self.epsneg = self._float_to_float(epsneg)
|
47 |
+
self.xmax = self.huge = self._float_to_float(huge)
|
48 |
+
self.xmin = self._float_to_float(tiny)
|
49 |
+
self.smallest_normal = self.tiny = self._float_to_float(tiny)
|
50 |
+
self.ibeta = self.params['itype'](ibeta)
|
51 |
+
self.__dict__.update(kwargs)
|
52 |
+
self.precision = int(-log10(self.eps))
|
53 |
+
self.resolution = self._float_to_float(
|
54 |
+
self._float_conv(10) ** (-self.precision))
|
55 |
+
self._str_eps = self._float_to_str(self.eps)
|
56 |
+
self._str_epsneg = self._float_to_str(self.epsneg)
|
57 |
+
self._str_xmin = self._float_to_str(self.xmin)
|
58 |
+
self._str_xmax = self._float_to_str(self.xmax)
|
59 |
+
self._str_resolution = self._float_to_str(self.resolution)
|
60 |
+
self._str_smallest_normal = self._float_to_str(self.xmin)
|
61 |
+
|
62 |
+
@property
|
63 |
+
def smallest_subnormal(self):
|
64 |
+
"""Return the value for the smallest subnormal.
|
65 |
+
|
66 |
+
Returns
|
67 |
+
-------
|
68 |
+
smallest_subnormal : float
|
69 |
+
value for the smallest subnormal.
|
70 |
+
|
71 |
+
Warns
|
72 |
+
-----
|
73 |
+
UserWarning
|
74 |
+
If the calculated value for the smallest subnormal is zero.
|
75 |
+
"""
|
76 |
+
# Check that the calculated value is not zero, in case it raises a
|
77 |
+
# warning.
|
78 |
+
value = self._smallest_subnormal
|
79 |
+
if self.ftype(0) == value:
|
80 |
+
warnings.warn(
|
81 |
+
'The value of the smallest subnormal for {} type '
|
82 |
+
'is zero.'.format(self.ftype), UserWarning, stacklevel=2)
|
83 |
+
|
84 |
+
return self._float_to_float(value)
|
85 |
+
|
86 |
+
@property
|
87 |
+
def _str_smallest_subnormal(self):
|
88 |
+
"""Return the string representation of the smallest subnormal."""
|
89 |
+
return self._float_to_str(self.smallest_subnormal)
|
90 |
+
|
91 |
+
def _float_to_float(self, value):
|
92 |
+
"""Converts float to float.
|
93 |
+
|
94 |
+
Parameters
|
95 |
+
----------
|
96 |
+
value : float
|
97 |
+
value to be converted.
|
98 |
+
"""
|
99 |
+
return _fr1(self._float_conv(value))
|
100 |
+
|
101 |
+
def _float_conv(self, value):
|
102 |
+
"""Converts float to conv.
|
103 |
+
|
104 |
+
Parameters
|
105 |
+
----------
|
106 |
+
value : float
|
107 |
+
value to be converted.
|
108 |
+
"""
|
109 |
+
return array([value], self.ftype)
|
110 |
+
|
111 |
+
def _float_to_str(self, value):
|
112 |
+
"""Converts float to str.
|
113 |
+
|
114 |
+
Parameters
|
115 |
+
----------
|
116 |
+
value : float
|
117 |
+
value to be converted.
|
118 |
+
"""
|
119 |
+
return self.params['fmt'] % array(_fr0(value)[0], self.ftype)
|
120 |
+
|
121 |
+
|
122 |
+
_convert_to_float = {
|
123 |
+
ntypes.csingle: ntypes.single,
|
124 |
+
ntypes.complex_: ntypes.float_,
|
125 |
+
ntypes.clongfloat: ntypes.longfloat
|
126 |
+
}
|
127 |
+
|
128 |
+
# Parameters for creating MachAr / MachAr-like objects
|
129 |
+
_title_fmt = 'numpy {} precision floating point number'
|
130 |
+
_MACHAR_PARAMS = {
|
131 |
+
ntypes.double: dict(
|
132 |
+
itype = ntypes.int64,
|
133 |
+
fmt = '%24.16e',
|
134 |
+
title = _title_fmt.format('double')),
|
135 |
+
ntypes.single: dict(
|
136 |
+
itype = ntypes.int32,
|
137 |
+
fmt = '%15.7e',
|
138 |
+
title = _title_fmt.format('single')),
|
139 |
+
ntypes.longdouble: dict(
|
140 |
+
itype = ntypes.longlong,
|
141 |
+
fmt = '%s',
|
142 |
+
title = _title_fmt.format('long double')),
|
143 |
+
ntypes.half: dict(
|
144 |
+
itype = ntypes.int16,
|
145 |
+
fmt = '%12.5e',
|
146 |
+
title = _title_fmt.format('half'))}
|
147 |
+
|
148 |
+
# Key to identify the floating point type. Key is result of
|
149 |
+
# ftype('-0.1').newbyteorder('<').tobytes()
|
150 |
+
#
|
151 |
+
# 20230201 - use (ftype(-1.0) / ftype(10.0)).newbyteorder('<').tobytes()
|
152 |
+
# instead because stold may have deficiencies on some platforms.
|
153 |
+
# See:
|
154 |
+
# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
|
155 |
+
|
156 |
+
_KNOWN_TYPES = {}
|
157 |
+
def _register_type(machar, bytepat):
|
158 |
+
_KNOWN_TYPES[bytepat] = machar
|
159 |
+
_float_ma = {}
|
160 |
+
|
161 |
+
|
162 |
+
def _register_known_types():
|
163 |
+
# Known parameters for float16
|
164 |
+
# See docstring of MachAr class for description of parameters.
|
165 |
+
f16 = ntypes.float16
|
166 |
+
float16_ma = MachArLike(f16,
|
167 |
+
machep=-10,
|
168 |
+
negep=-11,
|
169 |
+
minexp=-14,
|
170 |
+
maxexp=16,
|
171 |
+
it=10,
|
172 |
+
iexp=5,
|
173 |
+
ibeta=2,
|
174 |
+
irnd=5,
|
175 |
+
ngrd=0,
|
176 |
+
eps=exp2(f16(-10)),
|
177 |
+
epsneg=exp2(f16(-11)),
|
178 |
+
huge=f16(65504),
|
179 |
+
tiny=f16(2 ** -14))
|
180 |
+
_register_type(float16_ma, b'f\xae')
|
181 |
+
_float_ma[16] = float16_ma
|
182 |
+
|
183 |
+
# Known parameters for float32
|
184 |
+
f32 = ntypes.float32
|
185 |
+
float32_ma = MachArLike(f32,
|
186 |
+
machep=-23,
|
187 |
+
negep=-24,
|
188 |
+
minexp=-126,
|
189 |
+
maxexp=128,
|
190 |
+
it=23,
|
191 |
+
iexp=8,
|
192 |
+
ibeta=2,
|
193 |
+
irnd=5,
|
194 |
+
ngrd=0,
|
195 |
+
eps=exp2(f32(-23)),
|
196 |
+
epsneg=exp2(f32(-24)),
|
197 |
+
huge=f32((1 - 2 ** -24) * 2**128),
|
198 |
+
tiny=exp2(f32(-126)))
|
199 |
+
_register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
|
200 |
+
_float_ma[32] = float32_ma
|
201 |
+
|
202 |
+
# Known parameters for float64
|
203 |
+
f64 = ntypes.float64
|
204 |
+
epsneg_f64 = 2.0 ** -53.0
|
205 |
+
tiny_f64 = 2.0 ** -1022.0
|
206 |
+
float64_ma = MachArLike(f64,
|
207 |
+
machep=-52,
|
208 |
+
negep=-53,
|
209 |
+
minexp=-1022,
|
210 |
+
maxexp=1024,
|
211 |
+
it=52,
|
212 |
+
iexp=11,
|
213 |
+
ibeta=2,
|
214 |
+
irnd=5,
|
215 |
+
ngrd=0,
|
216 |
+
eps=2.0 ** -52.0,
|
217 |
+
epsneg=epsneg_f64,
|
218 |
+
huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
|
219 |
+
tiny=tiny_f64)
|
220 |
+
_register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
|
221 |
+
_float_ma[64] = float64_ma
|
222 |
+
|
223 |
+
# Known parameters for IEEE 754 128-bit binary float
|
224 |
+
ld = ntypes.longdouble
|
225 |
+
epsneg_f128 = exp2(ld(-113))
|
226 |
+
tiny_f128 = exp2(ld(-16382))
|
227 |
+
# Ignore runtime error when this is not f128
|
228 |
+
with numeric.errstate(all='ignore'):
|
229 |
+
huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
|
230 |
+
float128_ma = MachArLike(ld,
|
231 |
+
machep=-112,
|
232 |
+
negep=-113,
|
233 |
+
minexp=-16382,
|
234 |
+
maxexp=16384,
|
235 |
+
it=112,
|
236 |
+
iexp=15,
|
237 |
+
ibeta=2,
|
238 |
+
irnd=5,
|
239 |
+
ngrd=0,
|
240 |
+
eps=exp2(ld(-112)),
|
241 |
+
epsneg=epsneg_f128,
|
242 |
+
huge=huge_f128,
|
243 |
+
tiny=tiny_f128)
|
244 |
+
# IEEE 754 128-bit binary float
|
245 |
+
_register_type(float128_ma,
|
246 |
+
b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
|
247 |
+
_float_ma[128] = float128_ma
|
248 |
+
|
249 |
+
# Known parameters for float80 (Intel 80-bit extended precision)
|
250 |
+
epsneg_f80 = exp2(ld(-64))
|
251 |
+
tiny_f80 = exp2(ld(-16382))
|
252 |
+
# Ignore runtime error when this is not f80
|
253 |
+
with numeric.errstate(all='ignore'):
|
254 |
+
huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
|
255 |
+
float80_ma = MachArLike(ld,
|
256 |
+
machep=-63,
|
257 |
+
negep=-64,
|
258 |
+
minexp=-16382,
|
259 |
+
maxexp=16384,
|
260 |
+
it=63,
|
261 |
+
iexp=15,
|
262 |
+
ibeta=2,
|
263 |
+
irnd=5,
|
264 |
+
ngrd=0,
|
265 |
+
eps=exp2(ld(-63)),
|
266 |
+
epsneg=epsneg_f80,
|
267 |
+
huge=huge_f80,
|
268 |
+
tiny=tiny_f80)
|
269 |
+
# float80, first 10 bytes containing actual storage
|
270 |
+
_register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
|
271 |
+
_float_ma[80] = float80_ma
|
272 |
+
|
273 |
+
# Guessed / known parameters for double double; see:
|
274 |
+
# https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
|
275 |
+
# These numbers have the same exponent range as float64, but extended number of
|
276 |
+
# digits in the significand.
|
277 |
+
huge_dd = nextafter(ld(inf), ld(0), dtype=ld)
|
278 |
+
# As the smallest_normal in double double is so hard to calculate we set
|
279 |
+
# it to NaN.
|
280 |
+
smallest_normal_dd = NaN
|
281 |
+
# Leave the same value for the smallest subnormal as double
|
282 |
+
smallest_subnormal_dd = ld(nextafter(0., 1.))
|
283 |
+
float_dd_ma = MachArLike(ld,
|
284 |
+
machep=-105,
|
285 |
+
negep=-106,
|
286 |
+
minexp=-1022,
|
287 |
+
maxexp=1024,
|
288 |
+
it=105,
|
289 |
+
iexp=11,
|
290 |
+
ibeta=2,
|
291 |
+
irnd=5,
|
292 |
+
ngrd=0,
|
293 |
+
eps=exp2(ld(-105)),
|
294 |
+
epsneg=exp2(ld(-106)),
|
295 |
+
huge=huge_dd,
|
296 |
+
tiny=smallest_normal_dd,
|
297 |
+
smallest_subnormal=smallest_subnormal_dd)
|
298 |
+
# double double; low, high order (e.g. PPC 64)
|
299 |
+
_register_type(float_dd_ma,
|
300 |
+
b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
|
301 |
+
# double double; high, low order (e.g. PPC 64 le)
|
302 |
+
_register_type(float_dd_ma,
|
303 |
+
b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
|
304 |
+
_float_ma['dd'] = float_dd_ma
|
305 |
+
|
306 |
+
|
307 |
+
def _get_machar(ftype):
|
308 |
+
""" Get MachAr instance or MachAr-like instance
|
309 |
+
|
310 |
+
Get parameters for floating point type, by first trying signatures of
|
311 |
+
various known floating point types, then, if none match, attempting to
|
312 |
+
identify parameters by analysis.
|
313 |
+
|
314 |
+
Parameters
|
315 |
+
----------
|
316 |
+
ftype : class
|
317 |
+
Numpy floating point type class (e.g. ``np.float64``)
|
318 |
+
|
319 |
+
Returns
|
320 |
+
-------
|
321 |
+
ma_like : instance of :class:`MachAr` or :class:`MachArLike`
|
322 |
+
Object giving floating point parameters for `ftype`.
|
323 |
+
|
324 |
+
Warns
|
325 |
+
-----
|
326 |
+
UserWarning
|
327 |
+
If the binary signature of the float type is not in the dictionary of
|
328 |
+
known float types.
|
329 |
+
"""
|
330 |
+
params = _MACHAR_PARAMS.get(ftype)
|
331 |
+
if params is None:
|
332 |
+
raise ValueError(repr(ftype))
|
333 |
+
# Detect known / suspected types
|
334 |
+
# ftype(-1.0) / ftype(10.0) is better than ftype('-0.1') because stold
|
335 |
+
# may be deficient
|
336 |
+
key = (ftype(-1.0) / ftype(10.)).newbyteorder('<').tobytes()
|
337 |
+
ma_like = None
|
338 |
+
if ftype == ntypes.longdouble:
|
339 |
+
# Could be 80 bit == 10 byte extended precision, where last bytes can
|
340 |
+
# be random garbage.
|
341 |
+
# Comparing first 10 bytes to pattern first to avoid branching on the
|
342 |
+
# random garbage.
|
343 |
+
ma_like = _KNOWN_TYPES.get(key[:10])
|
344 |
+
if ma_like is None:
|
345 |
+
# see if the full key is known.
|
346 |
+
ma_like = _KNOWN_TYPES.get(key)
|
347 |
+
if ma_like is None and len(key) == 16:
|
348 |
+
# machine limits could be f80 masquerading as np.float128,
|
349 |
+
# find all keys with length 16 and make new dict, but make the keys
|
350 |
+
# only 10 bytes long, the last bytes can be random garbage
|
351 |
+
_kt = {k[:10]: v for k, v in _KNOWN_TYPES.items() if len(k) == 16}
|
352 |
+
ma_like = _kt.get(key[:10])
|
353 |
+
if ma_like is not None:
|
354 |
+
return ma_like
|
355 |
+
# Fall back to parameter discovery
|
356 |
+
warnings.warn(
|
357 |
+
f'Signature {key} for {ftype} does not match any known type: '
|
358 |
+
'falling back to type probe function.\n'
|
359 |
+
'This warnings indicates broken support for the dtype!',
|
360 |
+
UserWarning, stacklevel=2)
|
361 |
+
return _discovered_machar(ftype)
|
362 |
+
|
363 |
+
|
364 |
+
def _discovered_machar(ftype):
|
365 |
+
""" Create MachAr instance with found information on float types
|
366 |
+
|
367 |
+
TODO: MachAr should be retired completely ideally. We currently only
|
368 |
+
ever use it system with broken longdouble (valgrind, WSL).
|
369 |
+
"""
|
370 |
+
params = _MACHAR_PARAMS[ftype]
|
371 |
+
return MachAr(lambda v: array([v], ftype),
|
372 |
+
lambda v:_fr0(v.astype(params['itype']))[0],
|
373 |
+
lambda v:array(_fr0(v)[0], ftype),
|
374 |
+
lambda v: params['fmt'] % array(_fr0(v)[0], ftype),
|
375 |
+
params['title'])
|
376 |
+
|
377 |
+
|
378 |
+
@set_module('numpy')
|
379 |
+
class finfo:
|
380 |
+
"""
|
381 |
+
finfo(dtype)
|
382 |
+
|
383 |
+
Machine limits for floating point types.
|
384 |
+
|
385 |
+
Attributes
|
386 |
+
----------
|
387 |
+
bits : int
|
388 |
+
The number of bits occupied by the type.
|
389 |
+
dtype : dtype
|
390 |
+
Returns the dtype for which `finfo` returns information. For complex
|
391 |
+
input, the returned dtype is the associated ``float*`` dtype for its
|
392 |
+
real and complex components.
|
393 |
+
eps : float
|
394 |
+
The difference between 1.0 and the next smallest representable float
|
395 |
+
larger than 1.0. For example, for 64-bit binary floats in the IEEE-754
|
396 |
+
standard, ``eps = 2**-52``, approximately 2.22e-16.
|
397 |
+
epsneg : float
|
398 |
+
The difference between 1.0 and the next smallest representable float
|
399 |
+
less than 1.0. For example, for 64-bit binary floats in the IEEE-754
|
400 |
+
standard, ``epsneg = 2**-53``, approximately 1.11e-16.
|
401 |
+
iexp : int
|
402 |
+
The number of bits in the exponent portion of the floating point
|
403 |
+
representation.
|
404 |
+
machep : int
|
405 |
+
The exponent that yields `eps`.
|
406 |
+
max : floating point number of the appropriate type
|
407 |
+
The largest representable number.
|
408 |
+
maxexp : int
|
409 |
+
The smallest positive power of the base (2) that causes overflow.
|
410 |
+
min : floating point number of the appropriate type
|
411 |
+
The smallest representable number, typically ``-max``.
|
412 |
+
minexp : int
|
413 |
+
The most negative power of the base (2) consistent with there
|
414 |
+
being no leading 0's in the mantissa.
|
415 |
+
negep : int
|
416 |
+
The exponent that yields `epsneg`.
|
417 |
+
nexp : int
|
418 |
+
The number of bits in the exponent including its sign and bias.
|
419 |
+
nmant : int
|
420 |
+
The number of bits in the mantissa.
|
421 |
+
precision : int
|
422 |
+
The approximate number of decimal digits to which this kind of
|
423 |
+
float is precise.
|
424 |
+
resolution : floating point number of the appropriate type
|
425 |
+
The approximate decimal resolution of this type, i.e.,
|
426 |
+
``10**-precision``.
|
427 |
+
tiny : float
|
428 |
+
An alias for `smallest_normal`, kept for backwards compatibility.
|
429 |
+
smallest_normal : float
|
430 |
+
The smallest positive floating point number with 1 as leading bit in
|
431 |
+
the mantissa following IEEE-754 (see Notes).
|
432 |
+
smallest_subnormal : float
|
433 |
+
The smallest positive floating point number with 0 as leading bit in
|
434 |
+
the mantissa following IEEE-754.
|
435 |
+
|
436 |
+
Parameters
|
437 |
+
----------
|
438 |
+
dtype : float, dtype, or instance
|
439 |
+
Kind of floating point or complex floating point
|
440 |
+
data-type about which to get information.
|
441 |
+
|
442 |
+
See Also
|
443 |
+
--------
|
444 |
+
iinfo : The equivalent for integer data types.
|
445 |
+
spacing : The distance between a value and the nearest adjacent number
|
446 |
+
nextafter : The next floating point value after x1 towards x2
|
447 |
+
|
448 |
+
Notes
|
449 |
+
-----
|
450 |
+
For developers of NumPy: do not instantiate this at the module level.
|
451 |
+
The initial calculation of these parameters is expensive and negatively
|
452 |
+
impacts import times. These objects are cached, so calling ``finfo()``
|
453 |
+
repeatedly inside your functions is not a problem.
|
454 |
+
|
455 |
+
Note that ``smallest_normal`` is not actually the smallest positive
|
456 |
+
representable value in a NumPy floating point type. As in the IEEE-754
|
457 |
+
standard [1]_, NumPy floating point types make use of subnormal numbers to
|
458 |
+
fill the gap between 0 and ``smallest_normal``. However, subnormal numbers
|
459 |
+
may have significantly reduced precision [2]_.
|
460 |
+
|
461 |
+
This function can also be used for complex data types as well. If used,
|
462 |
+
the output will be the same as the corresponding real float type
|
463 |
+
(e.g. numpy.finfo(numpy.csingle) is the same as numpy.finfo(numpy.single)).
|
464 |
+
However, the output is true for the real and imaginary components.
|
465 |
+
|
466 |
+
References
|
467 |
+
----------
|
468 |
+
.. [1] IEEE Standard for Floating-Point Arithmetic, IEEE Std 754-2008,
|
469 |
+
pp.1-70, 2008, http://www.doi.org/10.1109/IEEESTD.2008.4610935
|
470 |
+
.. [2] Wikipedia, "Denormal Numbers",
|
471 |
+
https://en.wikipedia.org/wiki/Denormal_number
|
472 |
+
|
473 |
+
Examples
|
474 |
+
--------
|
475 |
+
>>> np.finfo(np.float64).dtype
|
476 |
+
dtype('float64')
|
477 |
+
>>> np.finfo(np.complex64).dtype
|
478 |
+
dtype('float32')
|
479 |
+
|
480 |
+
"""
|
481 |
+
|
482 |
+
_finfo_cache = {}
|
483 |
+
|
484 |
+
def __new__(cls, dtype):
|
485 |
+
try:
|
486 |
+
obj = cls._finfo_cache.get(dtype) # most common path
|
487 |
+
if obj is not None:
|
488 |
+
return obj
|
489 |
+
except TypeError:
|
490 |
+
pass
|
491 |
+
|
492 |
+
if dtype is None:
|
493 |
+
# Deprecated in NumPy 1.25, 2023-01-16
|
494 |
+
warnings.warn(
|
495 |
+
"finfo() dtype cannot be None. This behavior will "
|
496 |
+
"raise an error in the future. (Deprecated in NumPy 1.25)",
|
497 |
+
DeprecationWarning,
|
498 |
+
stacklevel=2
|
499 |
+
)
|
500 |
+
|
501 |
+
try:
|
502 |
+
dtype = numeric.dtype(dtype)
|
503 |
+
except TypeError:
|
504 |
+
# In case a float instance was given
|
505 |
+
dtype = numeric.dtype(type(dtype))
|
506 |
+
|
507 |
+
obj = cls._finfo_cache.get(dtype)
|
508 |
+
if obj is not None:
|
509 |
+
return obj
|
510 |
+
dtypes = [dtype]
|
511 |
+
newdtype = numeric.obj2sctype(dtype)
|
512 |
+
if newdtype is not dtype:
|
513 |
+
dtypes.append(newdtype)
|
514 |
+
dtype = newdtype
|
515 |
+
if not issubclass(dtype, numeric.inexact):
|
516 |
+
raise ValueError("data type %r not inexact" % (dtype))
|
517 |
+
obj = cls._finfo_cache.get(dtype)
|
518 |
+
if obj is not None:
|
519 |
+
return obj
|
520 |
+
if not issubclass(dtype, numeric.floating):
|
521 |
+
newdtype = _convert_to_float[dtype]
|
522 |
+
if newdtype is not dtype:
|
523 |
+
# dtype changed, for example from complex128 to float64
|
524 |
+
dtypes.append(newdtype)
|
525 |
+
dtype = newdtype
|
526 |
+
|
527 |
+
obj = cls._finfo_cache.get(dtype, None)
|
528 |
+
if obj is not None:
|
529 |
+
# the original dtype was not in the cache, but the new
|
530 |
+
# dtype is in the cache. we add the original dtypes to
|
531 |
+
# the cache and return the result
|
532 |
+
for dt in dtypes:
|
533 |
+
cls._finfo_cache[dt] = obj
|
534 |
+
return obj
|
535 |
+
obj = object.__new__(cls)._init(dtype)
|
536 |
+
for dt in dtypes:
|
537 |
+
cls._finfo_cache[dt] = obj
|
538 |
+
return obj
|
539 |
+
|
540 |
+
def _init(self, dtype):
|
541 |
+
self.dtype = numeric.dtype(dtype)
|
542 |
+
machar = _get_machar(dtype)
|
543 |
+
|
544 |
+
for word in ['precision', 'iexp',
|
545 |
+
'maxexp', 'minexp', 'negep',
|
546 |
+
'machep']:
|
547 |
+
setattr(self, word, getattr(machar, word))
|
548 |
+
for word in ['resolution', 'epsneg', 'smallest_subnormal']:
|
549 |
+
setattr(self, word, getattr(machar, word).flat[0])
|
550 |
+
self.bits = self.dtype.itemsize * 8
|
551 |
+
self.max = machar.huge.flat[0]
|
552 |
+
self.min = -self.max
|
553 |
+
self.eps = machar.eps.flat[0]
|
554 |
+
self.nexp = machar.iexp
|
555 |
+
self.nmant = machar.it
|
556 |
+
self._machar = machar
|
557 |
+
self._str_tiny = machar._str_xmin.strip()
|
558 |
+
self._str_max = machar._str_xmax.strip()
|
559 |
+
self._str_epsneg = machar._str_epsneg.strip()
|
560 |
+
self._str_eps = machar._str_eps.strip()
|
561 |
+
self._str_resolution = machar._str_resolution.strip()
|
562 |
+
self._str_smallest_normal = machar._str_smallest_normal.strip()
|
563 |
+
self._str_smallest_subnormal = machar._str_smallest_subnormal.strip()
|
564 |
+
return self
|
565 |
+
|
566 |
+
def __str__(self):
|
567 |
+
fmt = (
|
568 |
+
'Machine parameters for %(dtype)s\n'
|
569 |
+
'---------------------------------------------------------------\n'
|
570 |
+
'precision = %(precision)3s resolution = %(_str_resolution)s\n'
|
571 |
+
'machep = %(machep)6s eps = %(_str_eps)s\n'
|
572 |
+
'negep = %(negep)6s epsneg = %(_str_epsneg)s\n'
|
573 |
+
'minexp = %(minexp)6s tiny = %(_str_tiny)s\n'
|
574 |
+
'maxexp = %(maxexp)6s max = %(_str_max)s\n'
|
575 |
+
'nexp = %(nexp)6s min = -max\n'
|
576 |
+
'smallest_normal = %(_str_smallest_normal)s '
|
577 |
+
'smallest_subnormal = %(_str_smallest_subnormal)s\n'
|
578 |
+
'---------------------------------------------------------------\n'
|
579 |
+
)
|
580 |
+
return fmt % self.__dict__
|
581 |
+
|
582 |
+
def __repr__(self):
|
583 |
+
c = self.__class__.__name__
|
584 |
+
d = self.__dict__.copy()
|
585 |
+
d['klass'] = c
|
586 |
+
return (("%(klass)s(resolution=%(resolution)s, min=-%(_str_max)s,"
|
587 |
+
" max=%(_str_max)s, dtype=%(dtype)s)") % d)
|
588 |
+
|
589 |
+
@property
|
590 |
+
def smallest_normal(self):
|
591 |
+
"""Return the value for the smallest normal.
|
592 |
+
|
593 |
+
Returns
|
594 |
+
-------
|
595 |
+
smallest_normal : float
|
596 |
+
Value for the smallest normal.
|
597 |
+
|
598 |
+
Warns
|
599 |
+
-----
|
600 |
+
UserWarning
|
601 |
+
If the calculated value for the smallest normal is requested for
|
602 |
+
double-double.
|
603 |
+
"""
|
604 |
+
# This check is necessary because the value for smallest_normal is
|
605 |
+
# platform dependent for longdouble types.
|
606 |
+
if isnan(self._machar.smallest_normal.flat[0]):
|
607 |
+
warnings.warn(
|
608 |
+
'The value of smallest normal is undefined for double double',
|
609 |
+
UserWarning, stacklevel=2)
|
610 |
+
return self._machar.smallest_normal.flat[0]
|
611 |
+
|
612 |
+
@property
|
613 |
+
def tiny(self):
|
614 |
+
"""Return the value for tiny, alias of smallest_normal.
|
615 |
+
|
616 |
+
Returns
|
617 |
+
-------
|
618 |
+
tiny : float
|
619 |
+
Value for the smallest normal, alias of smallest_normal.
|
620 |
+
|
621 |
+
Warns
|
622 |
+
-----
|
623 |
+
UserWarning
|
624 |
+
If the calculated value for the smallest normal is requested for
|
625 |
+
double-double.
|
626 |
+
"""
|
627 |
+
return self.smallest_normal
|
628 |
+
|
629 |
+
|
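
A short, illustrative usage sketch (an editorial addition, not part of the numpy source above; `machine_limits` is a hypothetical helper name): because `finfo` instances are cached per dtype, as the Notes explain, calling it inside a function is cheap after the first call, so there is no need to instantiate it at module level.

import numpy as np

def machine_limits(x):
    # Cached per dtype, so repeated calls are inexpensive.
    info = np.finfo(x.dtype)
    return info.eps, info.smallest_normal, info.max

eps, smallest, largest = machine_limits(np.ones(3, dtype=np.float64))
# eps      == 2.220446049250313e-16
# smallest == 2.2250738585072014e-308
# largest  == 1.7976931348623157e+308
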
630 |
+
@set_module('numpy')
|
631 |
+
class iinfo:
|
632 |
+
"""
|
633 |
+
iinfo(type)
|
634 |
+
|
635 |
+
Machine limits for integer types.
|
636 |
+
|
637 |
+
Attributes
|
638 |
+
----------
|
639 |
+
bits : int
|
640 |
+
The number of bits occupied by the type.
|
641 |
+
dtype : dtype
|
642 |
+
Returns the dtype for which `iinfo` returns information.
|
643 |
+
min : int
|
644 |
+
The smallest integer expressible by the type.
|
645 |
+
max : int
|
646 |
+
The largest integer expressible by the type.
|
647 |
+
|
648 |
+
Parameters
|
649 |
+
----------
|
650 |
+
int_type : integer type, dtype, or instance
|
651 |
+
The kind of integer data type to get information about.
|
652 |
+
|
653 |
+
See Also
|
654 |
+
--------
|
655 |
+
finfo : The equivalent for floating point data types.
|
656 |
+
|
657 |
+
Examples
|
658 |
+
--------
|
659 |
+
With types:
|
660 |
+
|
661 |
+
>>> ii16 = np.iinfo(np.int16)
|
662 |
+
>>> ii16.min
|
663 |
+
-32768
|
664 |
+
>>> ii16.max
|
665 |
+
32767
|
666 |
+
>>> ii32 = np.iinfo(np.int32)
|
667 |
+
>>> ii32.min
|
668 |
+
-2147483648
|
669 |
+
>>> ii32.max
|
670 |
+
2147483647
|
671 |
+
|
672 |
+
With instances:
|
673 |
+
|
674 |
+
>>> ii32 = np.iinfo(np.int32(10))
|
675 |
+
>>> ii32.min
|
676 |
+
-2147483648
|
677 |
+
>>> ii32.max
|
678 |
+
2147483647
|
679 |
+
|
680 |
+
"""
|
681 |
+
|
682 |
+
_min_vals = {}
|
683 |
+
_max_vals = {}
|
684 |
+
|
685 |
+
def __init__(self, int_type):
|
686 |
+
try:
|
687 |
+
self.dtype = numeric.dtype(int_type)
|
688 |
+
except TypeError:
|
689 |
+
self.dtype = numeric.dtype(type(int_type))
|
690 |
+
self.kind = self.dtype.kind
|
691 |
+
self.bits = self.dtype.itemsize * 8
|
692 |
+
self.key = "%s%d" % (self.kind, self.bits)
|
693 |
+
if self.kind not in 'iu':
|
694 |
+
raise ValueError("Invalid integer data type %r." % (self.kind,))
|
695 |
+
|
696 |
+
@property
|
697 |
+
def min(self):
|
698 |
+
"""Minimum value of given dtype."""
|
699 |
+
if self.kind == 'u':
|
700 |
+
return 0
|
701 |
+
else:
|
702 |
+
try:
|
703 |
+
val = iinfo._min_vals[self.key]
|
704 |
+
except KeyError:
|
705 |
+
val = int(-(1 << (self.bits-1)))
|
706 |
+
iinfo._min_vals[self.key] = val
|
707 |
+
return val
|
708 |
+
|
709 |
+
@property
|
710 |
+
def max(self):
|
711 |
+
"""Maximum value of given dtype."""
|
712 |
+
try:
|
713 |
+
val = iinfo._max_vals[self.key]
|
714 |
+
except KeyError:
|
715 |
+
if self.kind == 'u':
|
716 |
+
val = int((1 << self.bits) - 1)
|
717 |
+
else:
|
718 |
+
val = int((1 << (self.bits-1)) - 1)
|
719 |
+
iinfo._max_vals[self.key] = val
|
720 |
+
return val
|
721 |
+
|
722 |
+
def __str__(self):
|
723 |
+
"""String representation."""
|
724 |
+
fmt = (
|
725 |
+
'Machine parameters for %(dtype)s\n'
|
726 |
+
'---------------------------------------------------------------\n'
|
727 |
+
'min = %(min)s\n'
|
728 |
+
'max = %(max)s\n'
|
729 |
+
'---------------------------------------------------------------\n'
|
730 |
+
)
|
731 |
+
return fmt % {'dtype': self.dtype, 'min': self.min, 'max': self.max}
|
732 |
+
|
733 |
+
def __repr__(self):
|
734 |
+
return "%s(min=%s, max=%s, dtype=%s)" % (self.__class__.__name__,
|
735 |
+
self.min, self.max, self.dtype)
|
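
Another brief sketch (editorial addition, hypothetical helper name) showing a typical use of `iinfo` together with the `min`/`max` properties defined above:

import numpy as np

def fits_in(dtype, value):
    # True if a Python int is representable by the given integer dtype.
    info = np.iinfo(dtype)
    return info.min <= value <= info.max

fits_in(np.int16, 40000)    # False: int16 tops out at 32767
fits_in(np.uint16, 40000)   # True:  uint16 ranges up to 65535
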
env-llmeval/lib/python3.10/site-packages/numpy/core/getlimits.pyi
ADDED
@@ -0,0 +1,6 @@
from numpy import (
    finfo as finfo,
    iinfo as iinfo,
)

__all__: list[str]
env-llmeval/lib/python3.10/site-packages/numpy/core/multiarray.py
ADDED
@@ -0,0 +1,1715 @@
1 |
+
"""
|
2 |
+
Create the numpy.core.multiarray namespace for backward compatibility. In v1.16
|
3 |
+
the multiarray and umath c-extension modules were merged into a single
|
4 |
+
_multiarray_umath extension module. So we replicate the old namespace
|
5 |
+
by importing from the extension module.
|
6 |
+
|
7 |
+
"""
|
8 |
+
|
9 |
+
import functools
|
10 |
+
from . import overrides
|
11 |
+
from . import _multiarray_umath
|
12 |
+
from ._multiarray_umath import * # noqa: F403
|
13 |
+
# These imports are needed for backward compatibility,
|
14 |
+
# do not change them. issue gh-15518
|
15 |
+
# _get_ndarray_c_version is semi-public, on purpose not added to __all__
|
16 |
+
from ._multiarray_umath import (
|
17 |
+
fastCopyAndTranspose, _flagdict, from_dlpack, _place, _reconstruct,
|
18 |
+
_vec_string, _ARRAY_API, _monotonicity, _get_ndarray_c_version,
|
19 |
+
_get_madvise_hugepage, _set_madvise_hugepage,
|
20 |
+
_get_promotion_state, _set_promotion_state, _using_numpy2_behavior
|
21 |
+
)
|
22 |
+
|
23 |
+
__all__ = [
|
24 |
+
'_ARRAY_API', 'ALLOW_THREADS', 'BUFSIZE', 'CLIP', 'DATETIMEUNITS',
|
25 |
+
'ITEM_HASOBJECT', 'ITEM_IS_POINTER', 'LIST_PICKLE', 'MAXDIMS',
|
26 |
+
'MAY_SHARE_BOUNDS', 'MAY_SHARE_EXACT', 'NEEDS_INIT', 'NEEDS_PYAPI',
|
27 |
+
'RAISE', 'USE_GETITEM', 'USE_SETITEM', 'WRAP',
|
28 |
+
'_flagdict', 'from_dlpack', '_place', '_reconstruct', '_vec_string',
|
29 |
+
'_monotonicity', 'add_docstring', 'arange', 'array', 'asarray',
|
30 |
+
'asanyarray', 'ascontiguousarray', 'asfortranarray', 'bincount',
|
31 |
+
'broadcast', 'busday_count', 'busday_offset', 'busdaycalendar', 'can_cast',
|
32 |
+
'compare_chararrays', 'concatenate', 'copyto', 'correlate', 'correlate2',
|
33 |
+
'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
|
34 |
+
'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
|
35 |
+
'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
|
36 |
+
'frombuffer', 'fromfile', 'fromiter', 'fromstring',
|
37 |
+
'get_handler_name', 'get_handler_version', 'inner', 'interp',
|
38 |
+
'interp_complex', 'is_busday', 'lexsort', 'matmul', 'may_share_memory',
|
39 |
+
'min_scalar_type', 'ndarray', 'nditer', 'nested_iters',
|
40 |
+
'normalize_axis_index', 'packbits', 'promote_types', 'putmask',
|
41 |
+
'ravel_multi_index', 'result_type', 'scalar', 'set_datetimeparse_function',
|
42 |
+
'set_legacy_print_mode', 'set_numeric_ops', 'set_string_function',
|
43 |
+
'set_typeDict', 'shares_memory', 'tracemalloc_domain', 'typeinfo',
|
44 |
+
'unpackbits', 'unravel_index', 'vdot', 'where', 'zeros',
|
45 |
+
'_get_promotion_state', '_set_promotion_state', '_using_numpy2_behavior']
|
46 |
+
|
47 |
+
# For backward compatibility, make sure pickle imports these functions from here
|
48 |
+
_reconstruct.__module__ = 'numpy.core.multiarray'
|
49 |
+
scalar.__module__ = 'numpy.core.multiarray'
|
50 |
+
|
51 |
+
|
52 |
+
from_dlpack.__module__ = 'numpy'
|
53 |
+
arange.__module__ = 'numpy'
|
54 |
+
array.__module__ = 'numpy'
|
55 |
+
asarray.__module__ = 'numpy'
|
56 |
+
asanyarray.__module__ = 'numpy'
|
57 |
+
ascontiguousarray.__module__ = 'numpy'
|
58 |
+
asfortranarray.__module__ = 'numpy'
|
59 |
+
datetime_data.__module__ = 'numpy'
|
60 |
+
empty.__module__ = 'numpy'
|
61 |
+
frombuffer.__module__ = 'numpy'
|
62 |
+
fromfile.__module__ = 'numpy'
|
63 |
+
fromiter.__module__ = 'numpy'
|
64 |
+
frompyfunc.__module__ = 'numpy'
|
65 |
+
fromstring.__module__ = 'numpy'
|
66 |
+
geterrobj.__module__ = 'numpy'
|
67 |
+
may_share_memory.__module__ = 'numpy'
|
68 |
+
nested_iters.__module__ = 'numpy'
|
69 |
+
promote_types.__module__ = 'numpy'
|
70 |
+
set_numeric_ops.__module__ = 'numpy'
|
71 |
+
seterrobj.__module__ = 'numpy'
|
72 |
+
zeros.__module__ = 'numpy'
|
73 |
+
_get_promotion_state.__module__ = 'numpy'
|
74 |
+
_set_promotion_state.__module__ = 'numpy'
|
75 |
+
_using_numpy2_behavior.__module__ = 'numpy'
|
76 |
+
|
77 |
+
|
78 |
+
# We can't verify dispatcher signatures because NumPy's C functions don't
|
79 |
+
# support introspection.
|
80 |
+
array_function_from_c_func_and_dispatcher = functools.partial(
|
81 |
+
overrides.array_function_from_dispatcher,
|
82 |
+
module='numpy', docs_from_dispatcher=True, verify=False)
|
83 |
+
|
84 |
+
|
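
The decorated functions that follow are dispatchers only: each one simply returns the tuple of arguments that the `__array_function__` override machinery should inspect, while the real work is done by the C implementation in `_multiarray_umath`. A rough, simplified sketch of that machinery is shown below; `dispatch_like_numpy` is a hypothetical name, and the sketch glosses over details such as `NotImplemented` handling and subclass ordering.

import numpy as np

def dispatch_like_numpy(public_func, dispatcher, implementation, args, kwargs):
    # Collect the array-like arguments the dispatcher names ...
    relevant = [a for a in dispatcher(*args, **kwargs) if a is not None]
    types = tuple({type(a) for a in relevant})
    # ... and let the first non-ndarray type implementing the protocol
    # handle the call; otherwise fall back to the default implementation.
    for arg in relevant:
        handler = getattr(type(arg), '__array_function__', None)
        if handler is not None and type(arg) is not np.ndarray:
            return handler(arg, public_func, types, args, kwargs)
    return implementation(*args, **kwargs)
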
85 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
|
86 |
+
def empty_like(prototype, dtype=None, order=None, subok=None, shape=None):
|
87 |
+
"""
|
88 |
+
empty_like(prototype, dtype=None, order='K', subok=True, shape=None)
|
89 |
+
|
90 |
+
Return a new array with the same shape and type as a given array.
|
91 |
+
|
92 |
+
Parameters
|
93 |
+
----------
|
94 |
+
prototype : array_like
|
95 |
+
The shape and data-type of `prototype` define these same attributes
|
96 |
+
of the returned array.
|
97 |
+
dtype : data-type, optional
|
98 |
+
Overrides the data type of the result.
|
99 |
+
|
100 |
+
.. versionadded:: 1.6.0
|
101 |
+
order : {'C', 'F', 'A', or 'K'}, optional
|
102 |
+
Overrides the memory layout of the result. 'C' means C-order,
|
103 |
+
'F' means F-order, 'A' means 'F' if `prototype` is Fortran
|
104 |
+
contiguous, 'C' otherwise. 'K' means match the layout of `prototype`
|
105 |
+
as closely as possible.
|
106 |
+
|
107 |
+
.. versionadded:: 1.6.0
|
108 |
+
subok : bool, optional.
|
109 |
+
If True, then the newly created array will use the sub-class
|
110 |
+
type of `prototype`, otherwise it will be a base-class array. Defaults
|
111 |
+
to True.
|
112 |
+
shape : int or sequence of ints, optional.
|
113 |
+
Overrides the shape of the result. If order='K' and the number of
|
114 |
+
dimensions is unchanged, will try to keep order, otherwise,
|
115 |
+
order='C' is implied.
|
116 |
+
|
117 |
+
.. versionadded:: 1.17.0
|
118 |
+
|
119 |
+
Returns
|
120 |
+
-------
|
121 |
+
out : ndarray
|
122 |
+
Array of uninitialized (arbitrary) data with the same
|
123 |
+
shape and type as `prototype`.
|
124 |
+
|
125 |
+
See Also
|
126 |
+
--------
|
127 |
+
ones_like : Return an array of ones with shape and type of input.
|
128 |
+
zeros_like : Return an array of zeros with shape and type of input.
|
129 |
+
full_like : Return a new array with shape of input filled with value.
|
130 |
+
empty : Return a new uninitialized array.
|
131 |
+
|
132 |
+
Notes
|
133 |
+
-----
|
134 |
+
This function does *not* initialize the returned array; to do that use
|
135 |
+
`zeros_like` or `ones_like` instead. It may be marginally faster than
|
136 |
+
the functions that do set the array values.
|
137 |
+
|
138 |
+
Examples
|
139 |
+
--------
|
140 |
+
>>> a = ([1,2,3], [4,5,6]) # a is array-like
|
141 |
+
>>> np.empty_like(a)
|
142 |
+
array([[-1073741821, -1073741821, 3], # uninitialized
|
143 |
+
[ 0, 0, -1073741821]])
|
144 |
+
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
|
145 |
+
>>> np.empty_like(a)
|
146 |
+
array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized
|
147 |
+
[ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
|
148 |
+
|
149 |
+
"""
|
150 |
+
return (prototype,)
|
151 |
+
|
152 |
+
|
153 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
|
154 |
+
def concatenate(arrays, axis=None, out=None, *, dtype=None, casting=None):
|
155 |
+
"""
|
156 |
+
concatenate((a1, a2, ...), axis=0, out=None, dtype=None, casting="same_kind")
|
157 |
+
|
158 |
+
Join a sequence of arrays along an existing axis.
|
159 |
+
|
160 |
+
Parameters
|
161 |
+
----------
|
162 |
+
a1, a2, ... : sequence of array_like
|
163 |
+
The arrays must have the same shape, except in the dimension
|
164 |
+
corresponding to `axis` (the first, by default).
|
165 |
+
axis : int, optional
|
166 |
+
The axis along which the arrays will be joined. If axis is None,
|
167 |
+
arrays are flattened before use. Default is 0.
|
168 |
+
out : ndarray, optional
|
169 |
+
If provided, the destination to place the result. The shape must be
|
170 |
+
correct, matching that of what concatenate would have returned if no
|
171 |
+
out argument were specified.
|
172 |
+
dtype : str or dtype
|
173 |
+
If provided, the destination array will have this dtype. Cannot be
|
174 |
+
provided together with `out`.
|
175 |
+
|
176 |
+
.. versionadded:: 1.20.0
|
177 |
+
|
178 |
+
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
|
179 |
+
Controls what kind of data casting may occur. Defaults to 'same_kind'.
|
180 |
+
|
181 |
+
.. versionadded:: 1.20.0
|
182 |
+
|
183 |
+
Returns
|
184 |
+
-------
|
185 |
+
res : ndarray
|
186 |
+
The concatenated array.
|
187 |
+
|
188 |
+
See Also
|
189 |
+
--------
|
190 |
+
ma.concatenate : Concatenate function that preserves input masks.
|
191 |
+
array_split : Split an array into multiple sub-arrays of equal or
|
192 |
+
near-equal size.
|
193 |
+
split : Split array into a list of multiple sub-arrays of equal size.
|
194 |
+
hsplit : Split array into multiple sub-arrays horizontally (column wise).
|
195 |
+
vsplit : Split array into multiple sub-arrays vertically (row wise).
|
196 |
+
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
|
197 |
+
stack : Stack a sequence of arrays along a new axis.
|
198 |
+
block : Assemble arrays from blocks.
|
199 |
+
hstack : Stack arrays in sequence horizontally (column wise).
|
200 |
+
vstack : Stack arrays in sequence vertically (row wise).
|
201 |
+
dstack : Stack arrays in sequence depth wise (along third dimension).
|
202 |
+
column_stack : Stack 1-D arrays as columns into a 2-D array.
|
203 |
+
|
204 |
+
Notes
|
205 |
+
-----
|
206 |
+
When one or more of the arrays to be concatenated is a MaskedArray,
|
207 |
+
this function will return a MaskedArray object instead of an ndarray,
|
208 |
+
but the input masks are *not* preserved. In cases where a MaskedArray
|
209 |
+
is expected as input, use the ma.concatenate function from the masked
|
210 |
+
array module instead.
|
211 |
+
|
212 |
+
Examples
|
213 |
+
--------
|
214 |
+
>>> a = np.array([[1, 2], [3, 4]])
|
215 |
+
>>> b = np.array([[5, 6]])
|
216 |
+
>>> np.concatenate((a, b), axis=0)
|
217 |
+
array([[1, 2],
|
218 |
+
[3, 4],
|
219 |
+
[5, 6]])
|
220 |
+
>>> np.concatenate((a, b.T), axis=1)
|
221 |
+
array([[1, 2, 5],
|
222 |
+
[3, 4, 6]])
|
223 |
+
>>> np.concatenate((a, b), axis=None)
|
224 |
+
array([1, 2, 3, 4, 5, 6])
|
225 |
+
|
226 |
+
This function will not preserve masking of MaskedArray inputs.
|
227 |
+
|
228 |
+
>>> a = np.ma.arange(3)
|
229 |
+
>>> a[1] = np.ma.masked
|
230 |
+
>>> b = np.arange(2, 5)
|
231 |
+
>>> a
|
232 |
+
masked_array(data=[0, --, 2],
|
233 |
+
mask=[False, True, False],
|
234 |
+
fill_value=999999)
|
235 |
+
>>> b
|
236 |
+
array([2, 3, 4])
|
237 |
+
>>> np.concatenate([a, b])
|
238 |
+
masked_array(data=[0, 1, 2, 2, 3, 4],
|
239 |
+
mask=False,
|
240 |
+
fill_value=999999)
|
241 |
+
>>> np.ma.concatenate([a, b])
|
242 |
+
masked_array(data=[0, --, 2, 2, 3, 4],
|
243 |
+
mask=[False, True, False, False, False, False],
|
244 |
+
fill_value=999999)
|
245 |
+
|
246 |
+
"""
|
247 |
+
if out is not None:
|
248 |
+
# optimize for the typical case where only arrays is provided
|
249 |
+
arrays = list(arrays)
|
250 |
+
arrays.append(out)
|
251 |
+
return arrays
|
252 |
+
|
253 |
+
|
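
A small usage sketch (editorial addition) for the two newer keyword arguments documented above; note that `out=` and `dtype=` cannot be combined in a single call:

import numpy as np

a = np.array([[1, 2], [3, 4]])
b = np.array([[5, 6]])

# Preallocated destination: shape and dtype must already match the result.
out = np.empty((3, 2), dtype=a.dtype)
np.concatenate((a, b), axis=0, out=out)

# Requesting a result dtype directly (NumPy >= 1.20).
c = np.concatenate((a, b), axis=0, dtype=np.float64, casting='same_kind')
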
254 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
|
255 |
+
def inner(a, b):
|
256 |
+
"""
|
257 |
+
inner(a, b, /)
|
258 |
+
|
259 |
+
Inner product of two arrays.
|
260 |
+
|
261 |
+
Ordinary inner product of vectors for 1-D arrays (without complex
|
262 |
+
conjugation), in higher dimensions a sum product over the last axes.
|
263 |
+
|
264 |
+
Parameters
|
265 |
+
----------
|
266 |
+
a, b : array_like
|
267 |
+
If `a` and `b` are nonscalar, their last dimensions must match.
|
268 |
+
|
269 |
+
Returns
|
270 |
+
-------
|
271 |
+
out : ndarray
|
272 |
+
If `a` and `b` are both
|
273 |
+
scalars or both 1-D arrays then a scalar is returned; otherwise
|
274 |
+
an array is returned.
|
275 |
+
``out.shape = (*a.shape[:-1], *b.shape[:-1])``
|
276 |
+
|
277 |
+
Raises
|
278 |
+
------
|
279 |
+
ValueError
|
280 |
+
If both `a` and `b` are nonscalar and their last dimensions have
|
281 |
+
different sizes.
|
282 |
+
|
283 |
+
See Also
|
284 |
+
--------
|
285 |
+
tensordot : Sum products over arbitrary axes.
|
286 |
+
dot : Generalised matrix product, using second last dimension of `b`.
|
287 |
+
einsum : Einstein summation convention.
|
288 |
+
|
289 |
+
Notes
|
290 |
+
-----
|
291 |
+
For vectors (1-D arrays) it computes the ordinary inner-product::
|
292 |
+
|
293 |
+
np.inner(a, b) = sum(a[:]*b[:])
|
294 |
+
|
295 |
+
More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``::
|
296 |
+
|
297 |
+
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
|
298 |
+
|
299 |
+
or explicitly::
|
300 |
+
|
301 |
+
np.inner(a, b)[i0,...,ir-2,j0,...,js-2]
|
302 |
+
= sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:])
|
303 |
+
|
304 |
+
In addition `a` or `b` may be scalars, in which case::
|
305 |
+
|
306 |
+
np.inner(a,b) = a*b
|
307 |
+
|
308 |
+
Examples
|
309 |
+
--------
|
310 |
+
Ordinary inner product for vectors:
|
311 |
+
|
312 |
+
>>> a = np.array([1,2,3])
|
313 |
+
>>> b = np.array([0,1,0])
|
314 |
+
>>> np.inner(a, b)
|
315 |
+
2
|
316 |
+
|
317 |
+
Some multidimensional examples:
|
318 |
+
|
319 |
+
>>> a = np.arange(24).reshape((2,3,4))
|
320 |
+
>>> b = np.arange(4)
|
321 |
+
>>> c = np.inner(a, b)
|
322 |
+
>>> c.shape
|
323 |
+
(2, 3)
|
324 |
+
>>> c
|
325 |
+
array([[ 14, 38, 62],
|
326 |
+
[ 86, 110, 134]])
|
327 |
+
|
328 |
+
>>> a = np.arange(2).reshape((1,1,2))
|
329 |
+
>>> b = np.arange(6).reshape((3,2))
|
330 |
+
>>> c = np.inner(a, b)
|
331 |
+
>>> c.shape
|
332 |
+
(1, 1, 3)
|
333 |
+
>>> c
|
334 |
+
array([[[1, 3, 5]]])
|
335 |
+
|
336 |
+
An example where `b` is a scalar:
|
337 |
+
|
338 |
+
>>> np.inner(np.eye(2), 7)
|
339 |
+
array([[7., 0.],
|
340 |
+
[0., 7.]])
|
341 |
+
|
342 |
+
"""
|
343 |
+
return (a, b)
|
344 |
+
|
345 |
+
|
346 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
|
347 |
+
def where(condition, x=None, y=None):
|
348 |
+
"""
|
349 |
+
where(condition, [x, y], /)
|
350 |
+
|
351 |
+
Return elements chosen from `x` or `y` depending on `condition`.
|
352 |
+
|
353 |
+
.. note::
|
354 |
+
When only `condition` is provided, this function is a shorthand for
|
355 |
+
``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
|
356 |
+
preferred, as it behaves correctly for subclasses. The rest of this
|
357 |
+
documentation covers only the case where all three arguments are
|
358 |
+
provided.
|
359 |
+
|
360 |
+
Parameters
|
361 |
+
----------
|
362 |
+
condition : array_like, bool
|
363 |
+
Where True, yield `x`, otherwise yield `y`.
|
364 |
+
x, y : array_like
|
365 |
+
Values from which to choose. `x`, `y` and `condition` need to be
|
366 |
+
broadcastable to some shape.
|
367 |
+
|
368 |
+
Returns
|
369 |
+
-------
|
370 |
+
out : ndarray
|
371 |
+
An array with elements from `x` where `condition` is True, and elements
|
372 |
+
from `y` elsewhere.
|
373 |
+
|
374 |
+
See Also
|
375 |
+
--------
|
376 |
+
choose
|
377 |
+
nonzero : The function that is called when x and y are omitted
|
378 |
+
|
379 |
+
Notes
|
380 |
+
-----
|
381 |
+
If all the arrays are 1-D, `where` is equivalent to::
|
382 |
+
|
383 |
+
[xv if c else yv
|
384 |
+
for c, xv, yv in zip(condition, x, y)]
|
385 |
+
|
386 |
+
Examples
|
387 |
+
--------
|
388 |
+
>>> a = np.arange(10)
|
389 |
+
>>> a
|
390 |
+
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
|
391 |
+
>>> np.where(a < 5, a, 10*a)
|
392 |
+
array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
|
393 |
+
|
394 |
+
This can be used on multidimensional arrays too:
|
395 |
+
|
396 |
+
>>> np.where([[True, False], [True, True]],
|
397 |
+
... [[1, 2], [3, 4]],
|
398 |
+
... [[9, 8], [7, 6]])
|
399 |
+
array([[1, 8],
|
400 |
+
[3, 4]])
|
401 |
+
|
402 |
+
The shapes of x, y, and the condition are broadcast together:
|
403 |
+
|
404 |
+
>>> x, y = np.ogrid[:3, :4]
|
405 |
+
>>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
|
406 |
+
array([[10, 0, 0, 0],
|
407 |
+
[10, 11, 1, 1],
|
408 |
+
[10, 11, 12, 2]])
|
409 |
+
|
410 |
+
>>> a = np.array([[0, 1, 2],
|
411 |
+
... [0, 2, 4],
|
412 |
+
... [0, 3, 6]])
|
413 |
+
>>> np.where(a < 4, a, -1) # -1 is broadcast
|
414 |
+
array([[ 0, 1, 2],
|
415 |
+
[ 0, 2, -1],
|
416 |
+
[ 0, 3, -1]])
|
417 |
+
"""
|
418 |
+
return (condition, x, y)
|
419 |
+
|
420 |
+
|
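
A brief sketch (editorial addition) contrasting the three-argument form with the one-argument form that the note above discourages:

import numpy as np

a = np.arange(10)

# Vectorized if/else with broadcasting.
np.where(a % 2 == 0, a, -a)        # array([ 0, -1,  2, -3,  4, -5,  6, -7,  8, -9])

# The one-argument form is just np.asarray(cond).nonzero();
# calling nonzero() directly is preferred.
np.where(a > 6)                    # (array([7, 8, 9]),)
np.asarray(a > 6).nonzero()        # same result
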
421 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
|
422 |
+
def lexsort(keys, axis=None):
|
423 |
+
"""
|
424 |
+
lexsort(keys, axis=-1)
|
425 |
+
|
426 |
+
Perform an indirect stable sort using a sequence of keys.
|
427 |
+
|
428 |
+
Given multiple sorting keys, which can be interpreted as columns in a
|
429 |
+
spreadsheet, lexsort returns an array of integer indices that describes
|
430 |
+
the sort order by multiple columns. The last key in the sequence is used
|
431 |
+
for the primary sort order, the second-to-last key for the secondary sort
|
432 |
+
order, and so on. The keys argument must be a sequence of objects that
|
433 |
+
can be converted to arrays of the same shape. If a 2D array is provided
|
434 |
+
for the keys argument, its rows are interpreted as the sorting keys and
|
435 |
+
sorting is according to the last row, second last row etc.
|
436 |
+
|
437 |
+
Parameters
|
438 |
+
----------
|
439 |
+
keys : (k, N) array or tuple containing k (N,)-shaped sequences
|
440 |
+
The `k` different "columns" to be sorted. The last column (or row if
|
441 |
+
`keys` is a 2D array) is the primary sort key.
|
442 |
+
axis : int, optional
|
443 |
+
Axis to be indirectly sorted. By default, sort over the last axis.
|
444 |
+
|
445 |
+
Returns
|
446 |
+
-------
|
447 |
+
indices : (N,) ndarray of ints
|
448 |
+
Array of indices that sort the keys along the specified axis.
|
449 |
+
|
450 |
+
See Also
|
451 |
+
--------
|
452 |
+
argsort : Indirect sort.
|
453 |
+
ndarray.sort : In-place sort.
|
454 |
+
sort : Return a sorted copy of an array.
|
455 |
+
|
456 |
+
Examples
|
457 |
+
--------
|
458 |
+
Sort names: first by surname, then by name.
|
459 |
+
|
460 |
+
>>> surnames = ('Hertz', 'Galilei', 'Hertz')
|
461 |
+
>>> first_names = ('Heinrich', 'Galileo', 'Gustav')
|
462 |
+
>>> ind = np.lexsort((first_names, surnames))
|
463 |
+
>>> ind
|
464 |
+
array([1, 2, 0])
|
465 |
+
|
466 |
+
>>> [surnames[i] + ", " + first_names[i] for i in ind]
|
467 |
+
['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
|
468 |
+
|
469 |
+
Sort two columns of numbers:
|
470 |
+
|
471 |
+
>>> a = [1,5,1,4,3,4,4] # First column
|
472 |
+
>>> b = [9,4,0,4,0,2,1] # Second column
|
473 |
+
>>> ind = np.lexsort((b,a)) # Sort by a, then by b
|
474 |
+
>>> ind
|
475 |
+
array([2, 0, 4, 6, 5, 3, 1])
|
476 |
+
|
477 |
+
>>> [(a[i],b[i]) for i in ind]
|
478 |
+
[(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
|
479 |
+
|
480 |
+
Note that sorting is first according to the elements of ``a``.
|
481 |
+
Secondary sorting is according to the elements of ``b``.
|
482 |
+
|
483 |
+
A normal ``argsort`` would have yielded:
|
484 |
+
|
485 |
+
>>> [(a[i],b[i]) for i in np.argsort(a)]
|
486 |
+
[(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
|
487 |
+
|
488 |
+
Structured arrays are sorted lexically by ``argsort``:
|
489 |
+
|
490 |
+
>>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
|
491 |
+
... dtype=np.dtype([('x', int), ('y', int)]))
|
492 |
+
|
493 |
+
>>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
|
494 |
+
array([2, 0, 4, 6, 5, 3, 1])
|
495 |
+
|
496 |
+
"""
|
497 |
+
if isinstance(keys, tuple):
|
498 |
+
return keys
|
499 |
+
else:
|
500 |
+
return (keys,)
|
501 |
+
|
502 |
+
|
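
A short sketch (editorial addition) checking the documented key order against plain Python sorting, using the same names as the docstring example:

import numpy as np

surnames    = np.array(['Hertz', 'Galilei', 'Hertz'])
first_names = np.array(['Heinrich', 'Galileo', 'Gustav'])

# The last key passed is the primary one, so the secondary key goes first.
order = np.lexsort((first_names, surnames))

# Equivalent pure-Python ordering by (surname, first name).
expected = sorted(range(len(surnames)),
                  key=lambda i: (surnames[i], first_names[i]))
assert list(order) == expected      # both give [1, 2, 0]
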
503 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
|
504 |
+
def can_cast(from_, to, casting=None):
|
505 |
+
"""
|
506 |
+
can_cast(from_, to, casting='safe')
|
507 |
+
|
508 |
+
Returns True if cast between data types can occur according to the
|
509 |
+
casting rule. If from is a scalar or array scalar, also returns
|
510 |
+
True if the scalar value can be cast without overflow or truncation
|
511 |
+
to an integer.
|
512 |
+
|
513 |
+
Parameters
|
514 |
+
----------
|
515 |
+
from_ : dtype, dtype specifier, scalar, or array
|
516 |
+
Data type, scalar, or array to cast from.
|
517 |
+
to : dtype or dtype specifier
|
518 |
+
Data type to cast to.
|
519 |
+
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
|
520 |
+
Controls what kind of data casting may occur.
|
521 |
+
|
522 |
+
* 'no' means the data types should not be cast at all.
|
523 |
+
* 'equiv' means only byte-order changes are allowed.
|
524 |
+
* 'safe' means only casts which can preserve values are allowed.
|
525 |
+
* 'same_kind' means only safe casts or casts within a kind,
|
526 |
+
like float64 to float32, are allowed.
|
527 |
+
* 'unsafe' means any data conversions may be done.
|
528 |
+
|
529 |
+
Returns
|
530 |
+
-------
|
531 |
+
out : bool
|
532 |
+
True if cast can occur according to the casting rule.
|
533 |
+
|
534 |
+
Notes
|
535 |
+
-----
|
536 |
+
.. versionchanged:: 1.17.0
|
537 |
+
Casting between a simple data type and a structured one is possible only
|
538 |
+
for "unsafe" casting. Casting to multiple fields is allowed, but
|
539 |
+
casting from multiple fields is not.
|
540 |
+
|
541 |
+
.. versionchanged:: 1.9.0
|
542 |
+
Casting from numeric to string types in 'safe' casting mode requires
|
543 |
+
that the string dtype length is long enough to store the maximum
|
544 |
+
integer/float value converted.
|
545 |
+
|
546 |
+
See also
|
547 |
+
--------
|
548 |
+
dtype, result_type
|
549 |
+
|
550 |
+
Examples
|
551 |
+
--------
|
552 |
+
Basic examples
|
553 |
+
|
554 |
+
>>> np.can_cast(np.int32, np.int64)
|
555 |
+
True
|
556 |
+
>>> np.can_cast(np.float64, complex)
|
557 |
+
True
|
558 |
+
>>> np.can_cast(complex, float)
|
559 |
+
False
|
560 |
+
|
561 |
+
>>> np.can_cast('i8', 'f8')
|
562 |
+
True
|
563 |
+
>>> np.can_cast('i8', 'f4')
|
564 |
+
False
|
565 |
+
>>> np.can_cast('i4', 'S4')
|
566 |
+
False
|
567 |
+
|
568 |
+
Casting scalars
|
569 |
+
|
570 |
+
>>> np.can_cast(100, 'i1')
|
571 |
+
True
|
572 |
+
>>> np.can_cast(150, 'i1')
|
573 |
+
False
|
574 |
+
>>> np.can_cast(150, 'u1')
|
575 |
+
True
|
576 |
+
|
577 |
+
>>> np.can_cast(3.5e100, np.float32)
|
578 |
+
False
|
579 |
+
>>> np.can_cast(1000.0, np.float32)
|
580 |
+
True
|
581 |
+
|
582 |
+
Array scalar checks the value, array does not
|
583 |
+
|
584 |
+
>>> np.can_cast(np.array(1000.0), np.float32)
|
585 |
+
True
|
586 |
+
>>> np.can_cast(np.array([1000.0]), np.float32)
|
587 |
+
False
|
588 |
+
|
589 |
+
Using the casting rules
|
590 |
+
|
591 |
+
>>> np.can_cast('i8', 'i8', 'no')
|
592 |
+
True
|
593 |
+
>>> np.can_cast('<i8', '>i8', 'no')
|
594 |
+
False
|
595 |
+
|
596 |
+
>>> np.can_cast('<i8', '>i8', 'equiv')
|
597 |
+
True
|
598 |
+
>>> np.can_cast('<i4', '>i8', 'equiv')
|
599 |
+
False
|
600 |
+
|
601 |
+
>>> np.can_cast('<i4', '>i8', 'safe')
|
602 |
+
True
|
603 |
+
>>> np.can_cast('<i8', '>i4', 'safe')
|
604 |
+
False
|
605 |
+
|
606 |
+
>>> np.can_cast('<i8', '>i4', 'same_kind')
|
607 |
+
True
|
608 |
+
>>> np.can_cast('<i8', '>u4', 'same_kind')
|
609 |
+
False
|
610 |
+
|
611 |
+
>>> np.can_cast('<i8', '>u4', 'unsafe')
|
612 |
+
True
|
613 |
+
|
614 |
+
"""
|
615 |
+
return (from_,)
|
616 |
+
|
617 |
+
|
618 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
|
619 |
+
def min_scalar_type(a):
|
620 |
+
"""
|
621 |
+
min_scalar_type(a, /)
|
622 |
+
|
623 |
+
For scalar ``a``, returns the data type with the smallest size
|
624 |
+
and smallest scalar kind which can hold its value. For non-scalar
|
625 |
+
array ``a``, returns the vector's dtype unmodified.
|
626 |
+
|
627 |
+
Floating point values are not demoted to integers,
|
628 |
+
and complex values are not demoted to floats.
|
629 |
+
|
630 |
+
Parameters
|
631 |
+
----------
|
632 |
+
a : scalar or array_like
|
633 |
+
The value whose minimal data type is to be found.
|
634 |
+
|
635 |
+
Returns
|
636 |
+
-------
|
637 |
+
out : dtype
|
638 |
+
The minimal data type.
|
639 |
+
|
640 |
+
Notes
|
641 |
+
-----
|
642 |
+
.. versionadded:: 1.6.0
|
643 |
+
|
644 |
+
See Also
|
645 |
+
--------
|
646 |
+
result_type, promote_types, dtype, can_cast
|
647 |
+
|
648 |
+
Examples
|
649 |
+
--------
|
650 |
+
>>> np.min_scalar_type(10)
|
651 |
+
dtype('uint8')
|
652 |
+
|
653 |
+
>>> np.min_scalar_type(-260)
|
654 |
+
dtype('int16')
|
655 |
+
|
656 |
+
>>> np.min_scalar_type(3.1)
|
657 |
+
dtype('float16')
|
658 |
+
|
659 |
+
>>> np.min_scalar_type(1e50)
|
660 |
+
dtype('float64')
|
661 |
+
|
662 |
+
>>> np.min_scalar_type(np.arange(4,dtype='f8'))
|
663 |
+
dtype('float64')
|
664 |
+
|
665 |
+
"""
|
666 |
+
return (a,)
|
667 |
+
|
668 |
+
|
669 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
|
670 |
+
def result_type(*arrays_and_dtypes):
|
671 |
+
"""
|
672 |
+
result_type(*arrays_and_dtypes)
|
673 |
+
|
674 |
+
Returns the type that results from applying the NumPy
|
675 |
+
type promotion rules to the arguments.
|
676 |
+
|
677 |
+
Type promotion in NumPy works similarly to the rules in languages
|
678 |
+
like C++, with some slight differences. When both scalars and
|
679 |
+
arrays are used, the array's type takes precedence and the actual value
|
680 |
+
of the scalar is taken into account.
|
681 |
+
|
682 |
+
For example, calculating 3*a, where a is an array of 32-bit floats,
|
683 |
+
intuitively should result in a 32-bit float output. If the 3 is a
|
684 |
+
32-bit integer, the NumPy rules indicate it can't convert losslessly
|
685 |
+
into a 32-bit float, so a 64-bit float should be the result type.
|
686 |
+
By examining the value of the constant, '3', we see that it fits in
|
687 |
+
an 8-bit integer, which can be cast losslessly into the 32-bit float.
|
688 |
+
|
689 |
+
Parameters
|
690 |
+
----------
|
691 |
+
arrays_and_dtypes : list of arrays and dtypes
|
692 |
+
The operands of some operation whose result type is needed.
|
693 |
+
|
694 |
+
Returns
|
695 |
+
-------
|
696 |
+
out : dtype
|
697 |
+
The result type.
|
698 |
+
|
699 |
+
See also
|
700 |
+
--------
|
701 |
+
dtype, promote_types, min_scalar_type, can_cast
|
702 |
+
|
703 |
+
Notes
|
704 |
+
-----
|
705 |
+
.. versionadded:: 1.6.0
|
706 |
+
|
707 |
+
The specific algorithm used is as follows.
|
708 |
+
|
709 |
+
Categories are determined by first checking which of boolean,
|
710 |
+
integer (int/uint), or floating point (float/complex) the maximum
|
711 |
+
kind of all the arrays and the scalars are.
|
712 |
+
|
713 |
+
If there are only scalars or the maximum category of the scalars
|
714 |
+
is higher than the maximum category of the arrays,
|
715 |
+
the data types are combined with :func:`promote_types`
|
716 |
+
to produce the return value.
|
717 |
+
|
718 |
+
Otherwise, `min_scalar_type` is called on each scalar, and
|
719 |
+
the resulting data types are all combined with :func:`promote_types`
|
720 |
+
to produce the return value.
|
721 |
+
|
722 |
+
The set of int values is not a subset of the uint values for types
|
723 |
+
with the same number of bits, something not reflected in
|
724 |
+
:func:`min_scalar_type`, but handled as a special case in `result_type`.
|
725 |
+
|
726 |
+
Examples
|
727 |
+
--------
|
728 |
+
>>> np.result_type(3, np.arange(7, dtype='i1'))
|
729 |
+
dtype('int8')
|
730 |
+
|
731 |
+
>>> np.result_type('i4', 'c8')
|
732 |
+
dtype('complex128')
|
733 |
+
|
734 |
+
>>> np.result_type(3.0, -2)
|
735 |
+
dtype('float64')
|
736 |
+
|
737 |
+
"""
|
738 |
+
return arrays_and_dtypes
|
739 |
+
|
740 |
+
|
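
A brief sketch (editorial addition) of the promotion behaviour described in the Notes, under the legacy value-based rules this module implements:

import numpy as np

# The scalar 3 fits in int8, so it does not widen the int8 array.
np.result_type(3, np.arange(7, dtype='i1'))   # dtype('int8')

# With only dtypes, promotion reduces to promote_types.
np.promote_types('i1', 'i4')                  # dtype('int32')
np.result_type(np.int8, np.int32)             # dtype('int32')

# min_scalar_type is what gets applied to each scalar first.
np.min_scalar_type(3)                         # dtype('uint8')
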
741 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
|
742 |
+
def dot(a, b, out=None):
|
743 |
+
"""
|
744 |
+
dot(a, b, out=None)
|
745 |
+
|
746 |
+
Dot product of two arrays. Specifically,
|
747 |
+
|
748 |
+
- If both `a` and `b` are 1-D arrays, it is inner product of vectors
|
749 |
+
(without complex conjugation).
|
750 |
+
|
751 |
+
- If both `a` and `b` are 2-D arrays, it is matrix multiplication,
|
752 |
+
but using :func:`matmul` or ``a @ b`` is preferred.
|
753 |
+
|
754 |
+
- If either `a` or `b` is 0-D (scalar), it is equivalent to
|
755 |
+
:func:`multiply` and using ``numpy.multiply(a, b)`` or ``a * b`` is
|
756 |
+
preferred.
|
757 |
+
|
758 |
+
- If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
|
759 |
+
the last axis of `a` and `b`.
|
760 |
+
|
761 |
+
- If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
|
762 |
+
sum product over the last axis of `a` and the second-to-last axis of
|
763 |
+
`b`::
|
764 |
+
|
765 |
+
dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
|
766 |
+
|
767 |
+
It uses an optimized BLAS library when possible (see `numpy.linalg`).
|
768 |
+
|
769 |
+
Parameters
|
770 |
+
----------
|
771 |
+
a : array_like
|
772 |
+
First argument.
|
773 |
+
b : array_like
|
774 |
+
Second argument.
|
775 |
+
out : ndarray, optional
|
776 |
+
Output argument. This must have the exact kind that would be returned
|
777 |
+
if it was not used. In particular, it must have the right type, must be
|
778 |
+
C-contiguous, and its dtype must be the dtype that would be returned
|
779 |
+
for `dot(a,b)`. This is a performance feature. Therefore, if these
|
780 |
+
conditions are not met, an exception is raised, instead of attempting
|
781 |
+
to be flexible.
|
782 |
+
|
783 |
+
Returns
|
784 |
+
-------
|
785 |
+
output : ndarray
|
786 |
+
Returns the dot product of `a` and `b`. If `a` and `b` are both
|
787 |
+
scalars or both 1-D arrays then a scalar is returned; otherwise
|
788 |
+
an array is returned.
|
789 |
+
If `out` is given, then it is returned.
|
790 |
+
|
791 |
+
Raises
|
792 |
+
------
|
793 |
+
ValueError
|
794 |
+
If the last dimension of `a` is not the same size as
|
795 |
+
the second-to-last dimension of `b`.
|
796 |
+
|
797 |
+
See Also
|
798 |
+
--------
|
799 |
+
vdot : Complex-conjugating dot product.
|
800 |
+
tensordot : Sum products over arbitrary axes.
|
801 |
+
einsum : Einstein summation convention.
|
802 |
+
matmul : '@' operator as method with out parameter.
|
803 |
+
linalg.multi_dot : Chained dot product.
|
804 |
+
|
805 |
+
Examples
|
806 |
+
--------
|
807 |
+
>>> np.dot(3, 4)
|
808 |
+
12
|
809 |
+
|
810 |
+
Neither argument is complex-conjugated:
|
811 |
+
|
812 |
+
>>> np.dot([2j, 3j], [2j, 3j])
|
813 |
+
(-13+0j)
|
814 |
+
|
815 |
+
For 2-D arrays it is the matrix product:
|
816 |
+
|
817 |
+
>>> a = [[1, 0], [0, 1]]
|
818 |
+
>>> b = [[4, 1], [2, 2]]
|
819 |
+
>>> np.dot(a, b)
|
820 |
+
array([[4, 1],
|
821 |
+
[2, 2]])
|
822 |
+
|
823 |
+
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
|
824 |
+
>>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
|
825 |
+
>>> np.dot(a, b)[2,3,2,1,2,2]
|
826 |
+
499128
|
827 |
+
>>> sum(a[2,3,2,:] * b[1,2,:,2])
|
828 |
+
499128
|
829 |
+
|
830 |
+
"""
|
831 |
+
return (a, b, out)
|
832 |
+
|
833 |
+
|
834 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
|
835 |
+
def vdot(a, b):
|
836 |
+
"""
|
837 |
+
vdot(a, b, /)
|
838 |
+
|
839 |
+
Return the dot product of two vectors.
|
840 |
+
|
841 |
+
The vdot(`a`, `b`) function handles complex numbers differently than
|
842 |
+
dot(`a`, `b`). If the first argument is complex the complex conjugate
|
843 |
+
of the first argument is used for the calculation of the dot product.
|
844 |
+
|
845 |
+
Note that `vdot` handles multidimensional arrays differently than `dot`:
|
846 |
+
it does *not* perform a matrix product, but flattens input arguments
|
847 |
+
to 1-D vectors first. Consequently, it should only be used for vectors.
|
848 |
+
|
849 |
+
Parameters
|
850 |
+
----------
|
851 |
+
a : array_like
|
852 |
+
If `a` is complex the complex conjugate is taken before calculation
|
853 |
+
of the dot product.
|
854 |
+
b : array_like
|
855 |
+
Second argument to the dot product.
|
856 |
+
|
857 |
+
Returns
|
858 |
+
-------
|
859 |
+
output : ndarray
|
860 |
+
Dot product of `a` and `b`. Can be an int, float, or
|
861 |
+
complex depending on the types of `a` and `b`.
|
862 |
+
|
863 |
+
See Also
|
864 |
+
--------
|
865 |
+
dot : Return the dot product without using the complex conjugate of the
|
866 |
+
first argument.
|
867 |
+
|
868 |
+
Examples
|
869 |
+
--------
|
870 |
+
>>> a = np.array([1+2j,3+4j])
|
871 |
+
>>> b = np.array([5+6j,7+8j])
|
872 |
+
>>> np.vdot(a, b)
|
873 |
+
(70-8j)
|
874 |
+
>>> np.vdot(b, a)
|
875 |
+
(70+8j)
|
876 |
+
|
877 |
+
Note that higher-dimensional arrays are flattened!
|
878 |
+
|
879 |
+
>>> a = np.array([[1, 4], [5, 6]])
|
880 |
+
>>> b = np.array([[4, 1], [2, 2]])
|
881 |
+
>>> np.vdot(a, b)
|
882 |
+
30
|
883 |
+
>>> np.vdot(b, a)
|
884 |
+
30
|
885 |
+
>>> 1*4 + 4*1 + 5*2 + 6*2
|
886 |
+
30
|
887 |
+
|
888 |
+
"""
|
889 |
+
return (a, b)
|
890 |
+
|
891 |
+
|
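
A one-line equivalence (editorial addition) that makes the flattening-and-conjugation behaviour described above explicit:

import numpy as np

a = np.array([[1 + 2j, 3 + 4j]])
b = np.array([[5 + 6j, 7 + 8j]])

# vdot conjugates its first argument and flattens both inputs, so it equals
# an ordinary dot of the conjugated, ravelled vectors.
assert np.vdot(a, b) == np.dot(a.conj().ravel(), b.ravel())   # (70-8j)
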
892 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
|
893 |
+
def bincount(x, weights=None, minlength=None):
|
894 |
+
"""
|
895 |
+
bincount(x, /, weights=None, minlength=0)
|
896 |
+
|
897 |
+
Count number of occurrences of each value in array of non-negative ints.
|
898 |
+
|
899 |
+
The number of bins (of size 1) is one larger than the largest value in
|
900 |
+
`x`. If `minlength` is specified, there will be at least this number
|
901 |
+
of bins in the output array (though it will be longer if necessary,
|
902 |
+
depending on the contents of `x`).
|
903 |
+
Each bin gives the number of occurrences of its index value in `x`.
|
904 |
+
If `weights` is specified the input array is weighted by it, i.e. if a
|
905 |
+
value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
|
906 |
+
of ``out[n] += 1``.
|
907 |
+
|
908 |
+
Parameters
|
909 |
+
----------
|
910 |
+
x : array_like, 1 dimension, nonnegative ints
|
911 |
+
Input array.
|
912 |
+
weights : array_like, optional
|
913 |
+
Weights, array of the same shape as `x`.
|
914 |
+
minlength : int, optional
|
915 |
+
A minimum number of bins for the output array.
|
916 |
+
|
917 |
+
.. versionadded:: 1.6.0
|
918 |
+
|
919 |
+
Returns
|
920 |
+
-------
|
921 |
+
out : ndarray of ints
|
922 |
+
The result of binning the input array.
|
923 |
+
The length of `out` is equal to ``np.amax(x)+1``.
|
924 |
+
|
925 |
+
Raises
|
926 |
+
------
|
927 |
+
ValueError
|
928 |
+
If the input is not 1-dimensional, or contains elements with negative
|
929 |
+
values, or if `minlength` is negative.
|
930 |
+
TypeError
|
931 |
+
If the type of the input is float or complex.
|
932 |
+
|
933 |
+
See Also
|
934 |
+
--------
|
935 |
+
histogram, digitize, unique
|
936 |
+
|
937 |
+
Examples
|
938 |
+
--------
|
939 |
+
>>> np.bincount(np.arange(5))
|
940 |
+
array([1, 1, 1, 1, 1])
|
941 |
+
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
|
942 |
+
array([1, 3, 1, 1, 0, 0, 0, 1])
|
943 |
+
|
944 |
+
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
|
945 |
+
>>> np.bincount(x).size == np.amax(x)+1
|
946 |
+
True
|
947 |
+
|
948 |
+
The input array needs to be of integer dtype, otherwise a
|
949 |
+
TypeError is raised:
|
950 |
+
|
951 |
+
>>> np.bincount(np.arange(5, dtype=float))
|
952 |
+
Traceback (most recent call last):
|
953 |
+
...
|
954 |
+
TypeError: Cannot cast array data from dtype('float64') to dtype('int64')
|
955 |
+
according to the rule 'safe'
|
956 |
+
|
957 |
+
A possible use of ``bincount`` is to perform sums over
|
958 |
+
variable-size chunks of an array, using the ``weights`` keyword.
|
959 |
+
|
960 |
+
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
|
961 |
+
>>> x = np.array([0, 1, 1, 2, 2, 2])
|
962 |
+
>>> np.bincount(x, weights=w)
|
963 |
+
array([ 0.3, 0.7, 1.1])
|
964 |
+
|
965 |
+
"""
|
966 |
+
return (x, weights)
|
967 |
+
|
968 |
+
|
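
A small sketch (editorial addition) showing `minlength`, which the examples above do not cover:

import numpy as np

labels = np.array([0, 2, 2, 5])

# Without minlength the output stops at max(labels) + 1.
np.bincount(labels)                 # array([1, 0, 2, 0, 0, 1])

# minlength fixes the number of bins, useful when the label set is known
# in advance (say, 8 classes) but not all of them occur.
np.bincount(labels, minlength=8)    # array([1, 0, 2, 0, 0, 1, 0, 0])
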
969 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
|
970 |
+
def ravel_multi_index(multi_index, dims, mode=None, order=None):
|
971 |
+
"""
|
972 |
+
ravel_multi_index(multi_index, dims, mode='raise', order='C')
|
973 |
+
|
974 |
+
Converts a tuple of index arrays into an array of flat
|
975 |
+
indices, applying boundary modes to the multi-index.
|
976 |
+
|
977 |
+
Parameters
|
978 |
+
----------
|
979 |
+
multi_index : tuple of array_like
|
980 |
+
A tuple of integer arrays, one array for each dimension.
|
981 |
+
dims : tuple of ints
|
982 |
+
The shape of array into which the indices from ``multi_index`` apply.
|
983 |
+
mode : {'raise', 'wrap', 'clip'}, optional
|
984 |
+
Specifies how out-of-bounds indices are handled. Can specify
|
985 |
+
either one mode or a tuple of modes, one mode per index.
|
986 |
+
|
987 |
+
* 'raise' -- raise an error (default)
|
988 |
+
* 'wrap' -- wrap around
|
989 |
+
* 'clip' -- clip to the range
|
990 |
+
|
991 |
+
In 'clip' mode, a negative index which would normally
|
992 |
+
wrap will clip to 0 instead.
|
993 |
+
order : {'C', 'F'}, optional
|
994 |
+
Determines whether the multi-index should be viewed as
|
995 |
+
indexing in row-major (C-style) or column-major
|
996 |
+
(Fortran-style) order.
|
997 |
+
|
998 |
+
Returns
|
999 |
+
-------
|
1000 |
+
raveled_indices : ndarray
|
1001 |
+
An array of indices into the flattened version of an array
|
1002 |
+
of dimensions ``dims``.
|
1003 |
+
|
1004 |
+
See Also
|
1005 |
+
--------
|
1006 |
+
unravel_index
|
1007 |
+
|
1008 |
+
Notes
|
1009 |
+
-----
|
1010 |
+
.. versionadded:: 1.6.0
|
1011 |
+
|
1012 |
+
Examples
|
1013 |
+
--------
|
1014 |
+
>>> arr = np.array([[3,6,6],[4,5,1]])
|
1015 |
+
>>> np.ravel_multi_index(arr, (7,6))
|
1016 |
+
array([22, 41, 37])
|
1017 |
+
>>> np.ravel_multi_index(arr, (7,6), order='F')
|
1018 |
+
array([31, 41, 13])
|
1019 |
+
>>> np.ravel_multi_index(arr, (4,6), mode='clip')
|
1020 |
+
array([22, 23, 19])
|
1021 |
+
>>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
|
1022 |
+
array([12, 13, 13])
|
1023 |
+
|
1024 |
+
>>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
|
1025 |
+
1621
|
1026 |
+
"""
|
1027 |
+
return multi_index
|
1028 |
+
|
1029 |
+
|
1030 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
|
1031 |
+
def unravel_index(indices, shape=None, order=None):
|
1032 |
+
"""
|
1033 |
+
unravel_index(indices, shape, order='C')
|
1034 |
+
|
1035 |
+
Converts a flat index or array of flat indices into a tuple
|
1036 |
+
of coordinate arrays.
|
1037 |
+
|
1038 |
+
Parameters
|
1039 |
+
----------
|
1040 |
+
indices : array_like
|
1041 |
+
An integer array whose elements are indices into the flattened
|
1042 |
+
version of an array of dimensions ``shape``. Before version 1.6.0,
|
1043 |
+
this function accepted just one index value.
|
1044 |
+
shape : tuple of ints
|
1045 |
+
The shape of the array to use for unraveling ``indices``.
|
1046 |
+
|
1047 |
+
.. versionchanged:: 1.16.0
|
1048 |
+
Renamed from ``dims`` to ``shape``.
|
1049 |
+
|
1050 |
+
order : {'C', 'F'}, optional
|
1051 |
+
Determines whether the indices should be viewed as indexing in
|
1052 |
+
row-major (C-style) or column-major (Fortran-style) order.
|
1053 |
+
|
1054 |
+
.. versionadded:: 1.6.0
|
1055 |
+
|
1056 |
+
Returns
|
1057 |
+
-------
|
1058 |
+
unraveled_coords : tuple of ndarray
|
1059 |
+
Each array in the tuple has the same shape as the ``indices``
|
1060 |
+
array.
|
1061 |
+
|
1062 |
+
See Also
|
1063 |
+
--------
|
1064 |
+
ravel_multi_index
|
1065 |
+
|
1066 |
+
Examples
|
1067 |
+
--------
|
1068 |
+
>>> np.unravel_index([22, 41, 37], (7,6))
|
1069 |
+
(array([3, 6, 6]), array([4, 5, 1]))
|
1070 |
+
>>> np.unravel_index([31, 41, 13], (7,6), order='F')
|
1071 |
+
(array([3, 6, 6]), array([4, 5, 1]))
|
1072 |
+
|
1073 |
+
>>> np.unravel_index(1621, (6,7,8,9))
|
1074 |
+
(3, 1, 4, 1)
|
1075 |
+
|
1076 |
+
"""
|
1077 |
+
return (indices,)
|
1078 |
+
|
1079 |
+
|
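
A short round-trip sketch (editorial addition) tying `unravel_index` to `ravel_multi_index`:

import numpy as np

a = np.array([[3, 7, 1],
              [9, 2, 8]])

# argmax indexes the flattened array; unravel_index recovers the 2-D
# coordinates, and ravel_multi_index inverts the mapping.
flat = a.argmax()                        # 3
ij = np.unravel_index(flat, a.shape)     # (1, 0)
assert np.ravel_multi_index(ij, a.shape) == flat
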
1080 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
|
1081 |
+
def copyto(dst, src, casting=None, where=None):
|
1082 |
+
"""
|
1083 |
+
copyto(dst, src, casting='same_kind', where=True)
|
1084 |
+
|
1085 |
+
Copies values from one array to another, broadcasting as necessary.
|
1086 |
+
|
1087 |
+
Raises a TypeError if the `casting` rule is violated, and if
|
1088 |
+
`where` is provided, it selects which elements to copy.
|
1089 |
+
|
1090 |
+
.. versionadded:: 1.7.0
|
1091 |
+
|
1092 |
+
Parameters
|
1093 |
+
----------
|
1094 |
+
dst : ndarray
|
1095 |
+
The array into which values are copied.
|
1096 |
+
src : array_like
|
1097 |
+
The array from which values are copied.
|
1098 |
+
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
|
1099 |
+
Controls what kind of data casting may occur when copying.
|
1100 |
+
|
1101 |
+
* 'no' means the data types should not be cast at all.
|
1102 |
+
* 'equiv' means only byte-order changes are allowed.
|
1103 |
+
* 'safe' means only casts which can preserve values are allowed.
|
1104 |
+
* 'same_kind' means only safe casts or casts within a kind,
|
1105 |
+
like float64 to float32, are allowed.
|
1106 |
+
* 'unsafe' means any data conversions may be done.
|
1107 |
+
where : array_like of bool, optional
|
1108 |
+
A boolean array which is broadcasted to match the dimensions
|
1109 |
+
of `dst`, and selects elements to copy from `src` to `dst`
|
1110 |
+
wherever it contains the value True.
|
1111 |
+
|
1112 |
+
Examples
|
1113 |
+
--------
|
1114 |
+
>>> A = np.array([4, 5, 6])
|
1115 |
+
>>> B = [1, 2, 3]
|
1116 |
+
>>> np.copyto(A, B)
|
1117 |
+
>>> A
|
1118 |
+
array([1, 2, 3])
|
1119 |
+
|
1120 |
+
>>> A = np.array([[1, 2, 3], [4, 5, 6]])
|
1121 |
+
>>> B = [[4, 5, 6], [7, 8, 9]]
|
1122 |
+
>>> np.copyto(A, B)
|
1123 |
+
>>> A
|
1124 |
+
array([[4, 5, 6],
|
1125 |
+
[7, 8, 9]])
|
1126 |
+
|
1127 |
+
"""
|
1128 |
+
return (dst, src, where)
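# --- Hedged usage sketch (editor's addition): masked copy with `where` ---
import numpy as np

dst = np.zeros(5, dtype=int)
src = np.arange(5)
np.copyto(dst, src, where=src % 2 == 1)   # copy only positions 1 and 3
# dst is now array([0, 1, 0, 3, 0])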
|
1129 |
+
|
1130 |
+
|
1131 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
|
1132 |
+
def putmask(a, /, mask, values):
|
1133 |
+
"""
|
1134 |
+
putmask(a, mask, values)
|
1135 |
+
|
1136 |
+
Changes elements of an array based on conditional and input values.
|
1137 |
+
|
1138 |
+
Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
|
1139 |
+
|
1140 |
+
If `values` is not the same size as `a` and `mask` then it will repeat.
|
1141 |
+
This gives behavior different from ``a[mask] = values``.
|
1142 |
+
|
1143 |
+
Parameters
|
1144 |
+
----------
|
1145 |
+
a : ndarray
|
1146 |
+
Target array.
|
1147 |
+
mask : array_like
|
1148 |
+
Boolean mask array. It has to be the same shape as `a`.
|
1149 |
+
values : array_like
|
1150 |
+
Values to put into `a` where `mask` is True. If `values` is smaller
|
1151 |
+
than `a` it will be repeated.
|
1152 |
+
|
1153 |
+
See Also
|
1154 |
+
--------
|
1155 |
+
place, put, take, copyto
|
1156 |
+
|
1157 |
+
Examples
|
1158 |
+
--------
|
1159 |
+
>>> x = np.arange(6).reshape(2, 3)
|
1160 |
+
>>> np.putmask(x, x>2, x**2)
|
1161 |
+
>>> x
|
1162 |
+
array([[ 0, 1, 2],
|
1163 |
+
[ 9, 16, 25]])
|
1164 |
+
|
1165 |
+
If `values` is smaller than `a` it is repeated:
|
1166 |
+
|
1167 |
+
>>> x = np.arange(5)
|
1168 |
+
>>> np.putmask(x, x>1, [-33, -44])
|
1169 |
+
>>> x
|
1170 |
+
array([ 0, 1, -33, -44, -33])
|
1171 |
+
|
1172 |
+
"""
|
1173 |
+
return (a, mask, values)
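# --- Hedged usage sketch (editor's addition): `values` shorter than `a` repeats ---
import numpy as np

a = np.arange(6)
np.putmask(a, a > 2, [10, 20])
# a is now array([ 0,  1,  2, 20, 10, 20]); a[a > 2] = [10, 20] would raise instead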
|
1174 |
+
|
1175 |
+
|
1176 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
|
1177 |
+
def packbits(a, axis=None, bitorder='big'):
|
1178 |
+
"""
|
1179 |
+
packbits(a, /, axis=None, bitorder='big')
|
1180 |
+
|
1181 |
+
Packs the elements of a binary-valued array into bits in a uint8 array.
|
1182 |
+
|
1183 |
+
The result is padded to full bytes by inserting zero bits at the end.
|
1184 |
+
|
1185 |
+
Parameters
|
1186 |
+
----------
|
1187 |
+
a : array_like
|
1188 |
+
An array of integers or booleans whose elements should be packed to
|
1189 |
+
bits.
|
1190 |
+
axis : int, optional
|
1191 |
+
The dimension over which bit-packing is done.
|
1192 |
+
``None`` implies packing the flattened array.
|
1193 |
+
bitorder : {'big', 'little'}, optional
|
1194 |
+
The order of the input bits. 'big' will mimic bin(val),
|
1195 |
+
``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will
|
1196 |
+
reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``.
|
1197 |
+
Defaults to 'big'.
|
1198 |
+
|
1199 |
+
.. versionadded:: 1.17.0
|
1200 |
+
|
1201 |
+
Returns
|
1202 |
+
-------
|
1203 |
+
packed : ndarray
|
1204 |
+
Array of type uint8 whose elements represent bits corresponding to the
|
1205 |
+
logical (0 or nonzero) value of the input elements. The shape of
|
1206 |
+
`packed` has the same number of dimensions as the input (unless `axis`
|
1207 |
+
is None, in which case the output is 1-D).
|
1208 |
+
|
1209 |
+
See Also
|
1210 |
+
--------
|
1211 |
+
unpackbits: Unpacks elements of a uint8 array into a binary-valued output
|
1212 |
+
array.
|
1213 |
+
|
1214 |
+
Examples
|
1215 |
+
--------
|
1216 |
+
>>> a = np.array([[[1,0,1],
|
1217 |
+
... [0,1,0]],
|
1218 |
+
... [[1,1,0],
|
1219 |
+
... [0,0,1]]])
|
1220 |
+
>>> b = np.packbits(a, axis=-1)
|
1221 |
+
>>> b
|
1222 |
+
array([[[160],
|
1223 |
+
[ 64]],
|
1224 |
+
[[192],
|
1225 |
+
[ 32]]], dtype=uint8)
|
1226 |
+
|
1227 |
+
Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
|
1228 |
+
and 32 = 0010 0000.
|
1229 |
+
|
1230 |
+
"""
|
1231 |
+
return (a,)
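# --- Hedged usage sketch (editor's addition): bit order and zero padding ---
import numpy as np

bits = np.array([1, 1, 0, 1], dtype=np.uint8)
np.packbits(bits)                      # array([208], dtype=uint8)  (0b11010000)
np.packbits(bits, bitorder='little')   # array([11], dtype=uint8)   (0b00001011)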
|
1232 |
+
|
1233 |
+
|
1234 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
|
1235 |
+
def unpackbits(a, axis=None, count=None, bitorder='big'):
|
1236 |
+
"""
|
1237 |
+
unpackbits(a, /, axis=None, count=None, bitorder='big')
|
1238 |
+
|
1239 |
+
Unpacks elements of a uint8 array into a binary-valued output array.
|
1240 |
+
|
1241 |
+
Each element of `a` represents a bit-field that should be unpacked
|
1242 |
+
into a binary-valued output array. The shape of the output array is
|
1243 |
+
either 1-D (if `axis` is ``None``) or the same shape as the input
|
1244 |
+
array with unpacking done along the axis specified.
|
1245 |
+
|
1246 |
+
Parameters
|
1247 |
+
----------
|
1248 |
+
a : ndarray, uint8 type
|
1249 |
+
Input array.
|
1250 |
+
axis : int, optional
|
1251 |
+
The dimension over which bit-unpacking is done.
|
1252 |
+
``None`` implies unpacking the flattened array.
|
1253 |
+
count : int or None, optional
|
1254 |
+
The number of elements to unpack along `axis`, provided as a way
|
1255 |
+
of undoing the effect of packing a size that is not a multiple
|
1256 |
+
of eight. A non-negative number means to only unpack `count`
|
1257 |
+
bits. A negative number means to trim off that many bits from
|
1258 |
+
the end. ``None`` means to unpack the entire array (the
|
1259 |
+
default). Counts larger than the available number of bits will
|
1260 |
+
add zero padding to the output. Negative counts must not
|
1261 |
+
exceed the available number of bits.
|
1262 |
+
|
1263 |
+
.. versionadded:: 1.17.0
|
1264 |
+
|
1265 |
+
bitorder : {'big', 'little'}, optional
|
1266 |
+
The order of the returned bits. 'big' will mimic bin(val),
|
1267 |
+
``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse
|
1268 |
+
the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``.
|
1269 |
+
Defaults to 'big'.
|
1270 |
+
|
1271 |
+
.. versionadded:: 1.17.0
|
1272 |
+
|
1273 |
+
Returns
|
1274 |
+
-------
|
1275 |
+
unpacked : ndarray, uint8 type
|
1276 |
+
The elements are binary-valued (0 or 1).
|
1277 |
+
|
1278 |
+
See Also
|
1279 |
+
--------
|
1280 |
+
packbits : Packs the elements of a binary-valued array into bits in
|
1281 |
+
a uint8 array.
|
1282 |
+
|
1283 |
+
Examples
|
1284 |
+
--------
|
1285 |
+
>>> a = np.array([[2], [7], [23]], dtype=np.uint8)
|
1286 |
+
>>> a
|
1287 |
+
array([[ 2],
|
1288 |
+
[ 7],
|
1289 |
+
[23]], dtype=uint8)
|
1290 |
+
>>> b = np.unpackbits(a, axis=1)
|
1291 |
+
>>> b
|
1292 |
+
array([[0, 0, 0, 0, 0, 0, 1, 0],
|
1293 |
+
[0, 0, 0, 0, 0, 1, 1, 1],
|
1294 |
+
[0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
|
1295 |
+
>>> c = np.unpackbits(a, axis=1, count=-3)
|
1296 |
+
>>> c
|
1297 |
+
array([[0, 0, 0, 0, 0],
|
1298 |
+
[0, 0, 0, 0, 0],
|
1299 |
+
[0, 0, 0, 1, 0]], dtype=uint8)
|
1300 |
+
|
1301 |
+
>>> p = np.packbits(b, axis=0)
|
1302 |
+
>>> np.unpackbits(p, axis=0)
|
1303 |
+
array([[0, 0, 0, 0, 0, 0, 1, 0],
|
1304 |
+
[0, 0, 0, 0, 0, 1, 1, 1],
|
1305 |
+
[0, 0, 0, 1, 0, 1, 1, 1],
|
1306 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
1307 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
1308 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
1309 |
+
[0, 0, 0, 0, 0, 0, 0, 0],
|
1310 |
+
[0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
|
1311 |
+
>>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0]))
|
1312 |
+
True
|
1313 |
+
|
1314 |
+
"""
|
1315 |
+
return (a,)
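# --- Hedged usage sketch (editor's addition): round trip with `count` ---
import numpy as np

bits = np.array([1, 0, 1, 1, 0], dtype=np.uint8)
packed = np.packbits(bits)                  # padded with three zero bits
restored = np.unpackbits(packed, count=5)   # drop the padding again
assert (restored == bits).all()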
|
1316 |
+
|
1317 |
+
|
1318 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
|
1319 |
+
def shares_memory(a, b, max_work=None):
|
1320 |
+
"""
|
1321 |
+
shares_memory(a, b, /, max_work=None)
|
1322 |
+
|
1323 |
+
Determine if two arrays share memory.
|
1324 |
+
|
1325 |
+
.. warning::
|
1326 |
+
|
1327 |
+
This function can be exponentially slow for some inputs, unless
|
1328 |
+
`max_work` is set to a finite number or ``MAY_SHARE_BOUNDS``.
|
1329 |
+
If in doubt, use `numpy.may_share_memory` instead.
|
1330 |
+
|
1331 |
+
Parameters
|
1332 |
+
----------
|
1333 |
+
a, b : ndarray
|
1334 |
+
Input arrays
|
1335 |
+
max_work : int, optional
|
1336 |
+
Effort to spend on solving the overlap problem (maximum number
|
1337 |
+
of candidate solutions to consider). The following special
|
1338 |
+
values are recognized:
|
1339 |
+
|
1340 |
+
max_work=MAY_SHARE_EXACT (default)
|
1341 |
+
The problem is solved exactly. In this case, the function returns
|
1342 |
+
True only if there is an element shared between the arrays. Finding
|
1343 |
+
the exact solution may take extremely long in some cases.
|
1344 |
+
max_work=MAY_SHARE_BOUNDS
|
1345 |
+
Only the memory bounds of a and b are checked.
|
1346 |
+
|
1347 |
+
Raises
|
1348 |
+
------
|
1349 |
+
numpy.exceptions.TooHardError
|
1350 |
+
Exceeded max_work.
|
1351 |
+
|
1352 |
+
Returns
|
1353 |
+
-------
|
1354 |
+
out : bool
|
1355 |
+
|
1356 |
+
See Also
|
1357 |
+
--------
|
1358 |
+
may_share_memory
|
1359 |
+
|
1360 |
+
Examples
|
1361 |
+
--------
|
1362 |
+
>>> x = np.array([1, 2, 3, 4])
|
1363 |
+
>>> np.shares_memory(x, np.array([5, 6, 7]))
|
1364 |
+
False
|
1365 |
+
>>> np.shares_memory(x[::2], x)
|
1366 |
+
True
|
1367 |
+
>>> np.shares_memory(x[::2], x[1::2])
|
1368 |
+
False
|
1369 |
+
|
1370 |
+
Checking whether two arrays share memory is NP-complete, and
|
1371 |
+
runtime may increase exponentially in the number of
|
1372 |
+
dimensions. Hence, `max_work` should generally be set to a finite
|
1373 |
+
number, as it is possible to construct examples that take
|
1374 |
+
extremely long to run:
|
1375 |
+
|
1376 |
+
>>> from numpy.lib.stride_tricks import as_strided
|
1377 |
+
>>> x = np.zeros([192163377], dtype=np.int8)
|
1378 |
+
>>> x1 = as_strided(x, strides=(36674, 61119, 85569), shape=(1049, 1049, 1049))
|
1379 |
+
>>> x2 = as_strided(x[64023025:], strides=(12223, 12224, 1), shape=(1049, 1049, 1))
|
1380 |
+
>>> np.shares_memory(x1, x2, max_work=1000)
|
1381 |
+
Traceback (most recent call last):
|
1382 |
+
...
|
1383 |
+
numpy.exceptions.TooHardError: Exceeded max_work
|
1384 |
+
|
1385 |
+
Running ``np.shares_memory(x1, x2)`` without `max_work` set takes
|
1386 |
+
around 1 minute for this case. It is possible to find problems
|
1387 |
+
that take still significantly longer.
|
1388 |
+
|
1389 |
+
"""
|
1390 |
+
return (a, b)
|
1391 |
+
|
1392 |
+
|
1393 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
|
1394 |
+
def may_share_memory(a, b, max_work=None):
|
1395 |
+
"""
|
1396 |
+
may_share_memory(a, b, /, max_work=None)
|
1397 |
+
|
1398 |
+
Determine if two arrays might share memory
|
1399 |
+
|
1400 |
+
A return of True does not necessarily mean that the two arrays
|
1401 |
+
share any element. It just means that they *might*.
|
1402 |
+
|
1403 |
+
Only the memory bounds of a and b are checked by default.
|
1404 |
+
|
1405 |
+
Parameters
|
1406 |
+
----------
|
1407 |
+
a, b : ndarray
|
1408 |
+
Input arrays
|
1409 |
+
max_work : int, optional
|
1410 |
+
Effort to spend on solving the overlap problem. See
|
1411 |
+
`shares_memory` for details. Default for ``may_share_memory``
|
1412 |
+
is to do a bounds check.
|
1413 |
+
|
1414 |
+
Returns
|
1415 |
+
-------
|
1416 |
+
out : bool
|
1417 |
+
|
1418 |
+
See Also
|
1419 |
+
--------
|
1420 |
+
shares_memory
|
1421 |
+
|
1422 |
+
Examples
|
1423 |
+
--------
|
1424 |
+
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
|
1425 |
+
False
|
1426 |
+
>>> x = np.zeros([3, 4])
|
1427 |
+
>>> np.may_share_memory(x[:,0], x[:,1])
|
1428 |
+
True
|
1429 |
+
|
1430 |
+
"""
|
1431 |
+
return (a, b)
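# --- Hedged usage sketch (editor's addition): bounds check vs exact check ---
import numpy as np

x = np.arange(8)
a, b = x[::2], x[1::2]        # interleaved, disjoint views of one buffer
np.may_share_memory(a, b)     # True: only the memory bounds are compared
np.shares_memory(a, b)        # False: the exact check finds no shared element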
|
1432 |
+
|
1433 |
+
|
1434 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
|
1435 |
+
def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
|
1436 |
+
"""
|
1437 |
+
is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
|
1438 |
+
|
1439 |
+
Calculates which of the given dates are valid days, and which are not.
|
1440 |
+
|
1441 |
+
.. versionadded:: 1.7.0
|
1442 |
+
|
1443 |
+
Parameters
|
1444 |
+
----------
|
1445 |
+
dates : array_like of datetime64[D]
|
1446 |
+
The array of dates to process.
|
1447 |
+
weekmask : str or array_like of bool, optional
|
1448 |
+
A seven-element array indicating which of Monday through Sunday are
|
1449 |
+
valid days. May be specified as a length-seven list or array, like
|
1450 |
+
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
|
1451 |
+
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
|
1452 |
+
weekdays, optionally separated by white space. Valid abbreviations
|
1453 |
+
are: Mon Tue Wed Thu Fri Sat Sun
|
1454 |
+
holidays : array_like of datetime64[D], optional
|
1455 |
+
An array of dates to consider as invalid dates. They may be
|
1456 |
+
specified in any order, and NaT (not-a-time) dates are ignored.
|
1457 |
+
This list is saved in a normalized form that is suited for
|
1458 |
+
fast calculations of valid days.
|
1459 |
+
busdaycal : busdaycalendar, optional
|
1460 |
+
A `busdaycalendar` object which specifies the valid days. If this
|
1461 |
+
parameter is provided, neither weekmask nor holidays may be
|
1462 |
+
provided.
|
1463 |
+
out : array of bool, optional
|
1464 |
+
If provided, this array is filled with the result.
|
1465 |
+
|
1466 |
+
Returns
|
1467 |
+
-------
|
1468 |
+
out : array of bool
|
1469 |
+
An array with the same shape as ``dates``, containing True for
|
1470 |
+
each valid day, and False for each invalid day.
|
1471 |
+
|
1472 |
+
See Also
|
1473 |
+
--------
|
1474 |
+
busdaycalendar : An object that specifies a custom set of valid days.
|
1475 |
+
busday_offset : Applies an offset counted in valid days.
|
1476 |
+
busday_count : Counts how many valid days are in a half-open date range.
|
1477 |
+
|
1478 |
+
Examples
|
1479 |
+
--------
|
1480 |
+
>>> # The weekdays are Friday, Saturday, and Monday
|
1481 |
+
... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
|
1482 |
+
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
|
1483 |
+
array([False, False, True])
|
1484 |
+
"""
|
1485 |
+
return (dates, weekmask, holidays, out)
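# --- Hedged usage sketch (editor's addition): weekmask as day abbreviations ---
import numpy as np

np.is_busday('2011-07-16', weekmask='Sat Sun')   # True: 2011-07-16 is a Saturday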
|
1486 |
+
|
1487 |
+
|
1488 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
|
1489 |
+
def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
|
1490 |
+
busdaycal=None, out=None):
|
1491 |
+
"""
|
1492 |
+
busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
|
1493 |
+
|
1494 |
+
First adjusts the date to fall on a valid day according to
|
1495 |
+
the ``roll`` rule, then applies offsets to the given dates
|
1496 |
+
counted in valid days.
|
1497 |
+
|
1498 |
+
.. versionadded:: 1.7.0
|
1499 |
+
|
1500 |
+
Parameters
|
1501 |
+
----------
|
1502 |
+
dates : array_like of datetime64[D]
|
1503 |
+
The array of dates to process.
|
1504 |
+
offsets : array_like of int
|
1505 |
+
The array of offsets, which is broadcast with ``dates``.
|
1506 |
+
roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
|
1507 |
+
How to treat dates that do not fall on a valid day. The default
|
1508 |
+
is 'raise'.
|
1509 |
+
|
1510 |
+
* 'raise' means to raise an exception for an invalid day.
|
1511 |
+
* 'nat' means to return a NaT (not-a-time) for an invalid day.
|
1512 |
+
* 'forward' and 'following' mean to take the first valid day
|
1513 |
+
later in time.
|
1514 |
+
* 'backward' and 'preceding' mean to take the first valid day
|
1515 |
+
earlier in time.
|
1516 |
+
* 'modifiedfollowing' means to take the first valid day
|
1517 |
+
later in time unless it is across a Month boundary, in which
|
1518 |
+
case to take the first valid day earlier in time.
|
1519 |
+
* 'modifiedpreceding' means to take the first valid day
|
1520 |
+
earlier in time unless it is across a Month boundary, in which
|
1521 |
+
case to take the first valid day later in time.
|
1522 |
+
weekmask : str or array_like of bool, optional
|
1523 |
+
A seven-element array indicating which of Monday through Sunday are
|
1524 |
+
valid days. May be specified as a length-seven list or array, like
|
1525 |
+
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
|
1526 |
+
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
|
1527 |
+
weekdays, optionally separated by white space. Valid abbreviations
|
1528 |
+
are: Mon Tue Wed Thu Fri Sat Sun
|
1529 |
+
holidays : array_like of datetime64[D], optional
|
1530 |
+
An array of dates to consider as invalid dates. They may be
|
1531 |
+
specified in any order, and NaT (not-a-time) dates are ignored.
|
1532 |
+
This list is saved in a normalized form that is suited for
|
1533 |
+
fast calculations of valid days.
|
1534 |
+
busdaycal : busdaycalendar, optional
|
1535 |
+
A `busdaycalendar` object which specifies the valid days. If this
|
1536 |
+
parameter is provided, neither weekmask nor holidays may be
|
1537 |
+
provided.
|
1538 |
+
out : array of datetime64[D], optional
|
1539 |
+
If provided, this array is filled with the result.
|
1540 |
+
|
1541 |
+
Returns
|
1542 |
+
-------
|
1543 |
+
out : array of datetime64[D]
|
1544 |
+
An array with a shape from broadcasting ``dates`` and ``offsets``
|
1545 |
+
together, containing the dates with offsets applied.
|
1546 |
+
|
1547 |
+
See Also
|
1548 |
+
--------
|
1549 |
+
busdaycalendar : An object that specifies a custom set of valid days.
|
1550 |
+
is_busday : Returns a boolean array indicating valid days.
|
1551 |
+
busday_count : Counts how many valid days are in a half-open date range.
|
1552 |
+
|
1553 |
+
Examples
|
1554 |
+
--------
|
1555 |
+
>>> # First business day in October 2011 (not accounting for holidays)
|
1556 |
+
... np.busday_offset('2011-10', 0, roll='forward')
|
1557 |
+
numpy.datetime64('2011-10-03')
|
1558 |
+
>>> # Last business day in February 2012 (not accounting for holidays)
|
1559 |
+
... np.busday_offset('2012-03', -1, roll='forward')
|
1560 |
+
numpy.datetime64('2012-02-29')
|
1561 |
+
>>> # Third Wednesday in January 2011
|
1562 |
+
... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
|
1563 |
+
numpy.datetime64('2011-01-19')
|
1564 |
+
>>> # 2012 Mother's Day in Canada and the U.S.
|
1565 |
+
... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
|
1566 |
+
numpy.datetime64('2012-05-13')
|
1567 |
+
|
1568 |
+
>>> # First business day on or after a date
|
1569 |
+
... np.busday_offset('2011-03-20', 0, roll='forward')
|
1570 |
+
numpy.datetime64('2011-03-21')
|
1571 |
+
>>> np.busday_offset('2011-03-22', 0, roll='forward')
|
1572 |
+
numpy.datetime64('2011-03-22')
|
1573 |
+
>>> # First business day after a date
|
1574 |
+
... np.busday_offset('2011-03-20', 1, roll='backward')
|
1575 |
+
numpy.datetime64('2011-03-21')
|
1576 |
+
>>> np.busday_offset('2011-03-22', 1, roll='backward')
|
1577 |
+
numpy.datetime64('2011-03-23')
|
1578 |
+
"""
|
1579 |
+
return (dates, offsets, weekmask, holidays, out)
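# --- Hedged usage sketch (editor's addition): roll first, then offset ---
import numpy as np

# 2011-07-02 is a Saturday: roll back to Friday, then move one business day forward.
np.busday_offset('2011-07-02', 1, roll='backward')   # numpy.datetime64('2011-07-04')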
|
1580 |
+
|
1581 |
+
|
1582 |
+
@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
|
1583 |
+
def busday_count(begindates, enddates, weekmask=None, holidays=None,
|
1584 |
+
busdaycal=None, out=None):
|
1585 |
+
"""
|
1586 |
+
busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
|
1587 |
+
|
1588 |
+
Counts the number of valid days between `begindates` and
|
1589 |
+
`enddates`, not including the day of `enddates`.
|
1590 |
+
|
1591 |
+
If ``enddates`` specifies a date value that is earlier than the
|
1592 |
+
corresponding ``begindates`` date value, the count will be negative.
|
1593 |
+
|
1594 |
+
.. versionadded:: 1.7.0
|
1595 |
+
|
1596 |
+
Parameters
|
1597 |
+
----------
|
1598 |
+
begindates : array_like of datetime64[D]
|
1599 |
+
The array of the first dates for counting.
|
1600 |
+
enddates : array_like of datetime64[D]
|
1601 |
+
The array of the end dates for counting, which are excluded
|
1602 |
+
from the count themselves.
|
1603 |
+
weekmask : str or array_like of bool, optional
|
1604 |
+
A seven-element array indicating which of Monday through Sunday are
|
1605 |
+
valid days. May be specified as a length-seven list or array, like
|
1606 |
+
[1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
|
1607 |
+
like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
|
1608 |
+
weekdays, optionally separated by white space. Valid abbreviations
|
1609 |
+
are: Mon Tue Wed Thu Fri Sat Sun
|
1610 |
+
holidays : array_like of datetime64[D], optional
|
1611 |
+
An array of dates to consider as invalid dates. They may be
|
1612 |
+
specified in any order, and NaT (not-a-time) dates are ignored.
|
1613 |
+
This list is saved in a normalized form that is suited for
|
1614 |
+
fast calculations of valid days.
|
1615 |
+
busdaycal : busdaycalendar, optional
|
1616 |
+
A `busdaycalendar` object which specifies the valid days. If this
|
1617 |
+
parameter is provided, neither weekmask nor holidays may be
|
1618 |
+
provided.
|
1619 |
+
out : array of int, optional
|
1620 |
+
If provided, this array is filled with the result.
|
1621 |
+
|
1622 |
+
Returns
|
1623 |
+
-------
|
1624 |
+
out : array of int
|
1625 |
+
An array with a shape from broadcasting ``begindates`` and ``enddates``
|
1626 |
+
together, containing the number of valid days between
|
1627 |
+
the begin and end dates.
|
1628 |
+
|
1629 |
+
See Also
|
1630 |
+
--------
|
1631 |
+
busdaycalendar : An object that specifies a custom set of valid days.
|
1632 |
+
is_busday : Returns a boolean array indicating valid days.
|
1633 |
+
busday_offset : Applies an offset counted in valid days.
|
1634 |
+
|
1635 |
+
Examples
|
1636 |
+
--------
|
1637 |
+
>>> # Number of weekdays in January 2011
|
1638 |
+
... np.busday_count('2011-01', '2011-02')
|
1639 |
+
21
|
1640 |
+
>>> # Number of weekdays in 2011
|
1641 |
+
>>> np.busday_count('2011', '2012')
|
1642 |
+
260
|
1643 |
+
>>> # Number of Saturdays in 2011
|
1644 |
+
... np.busday_count('2011', '2012', weekmask='Sat')
|
1645 |
+
53
|
1646 |
+
"""
|
1647 |
+
return (begindates, enddates, weekmask, holidays, out)
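# --- Hedged usage sketch (editor's addition): holidays inside a half-open range ---
import numpy as np

# Mon 2011-07-11 up to (but not including) Mon 2011-07-18, minus one holiday.
np.busday_count('2011-07-11', '2011-07-18', holidays=['2011-07-14'])   # 4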
|
1648 |
+
|
1649 |
+
|
1650 |
+
@array_function_from_c_func_and_dispatcher(
|
1651 |
+
_multiarray_umath.datetime_as_string)
|
1652 |
+
def datetime_as_string(arr, unit=None, timezone=None, casting=None):
|
1653 |
+
"""
|
1654 |
+
datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
|
1655 |
+
|
1656 |
+
Convert an array of datetimes into an array of strings.
|
1657 |
+
|
1658 |
+
Parameters
|
1659 |
+
----------
|
1660 |
+
arr : array_like of datetime64
|
1661 |
+
The array of UTC timestamps to format.
|
1662 |
+
unit : str
|
1663 |
+
One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
|
1664 |
+
timezone : {'naive', 'UTC', 'local'} or tzinfo
|
1665 |
+
Timezone information to use when displaying the datetime. If 'UTC', end
|
1666 |
+
with a Z to indicate UTC time. If 'local', convert to the local timezone
|
1667 |
+
first, and suffix with a +-#### timezone offset. If a tzinfo object,
|
1668 |
+
then do as with 'local', but use the specified timezone.
|
1669 |
+
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
|
1670 |
+
Casting to allow when changing between datetime units.
|
1671 |
+
|
1672 |
+
Returns
|
1673 |
+
-------
|
1674 |
+
str_arr : ndarray
|
1675 |
+
An array of strings the same shape as `arr`.
|
1676 |
+
|
1677 |
+
Examples
|
1678 |
+
--------
|
1679 |
+
>>> import pytz
|
1680 |
+
>>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
|
1681 |
+
>>> d
|
1682 |
+
array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
|
1683 |
+
'2002-10-27T07:30'], dtype='datetime64[m]')
|
1684 |
+
|
1685 |
+
Setting the timezone to UTC shows the same information, but with a Z suffix
|
1686 |
+
|
1687 |
+
>>> np.datetime_as_string(d, timezone='UTC')
|
1688 |
+
array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
|
1689 |
+
'2002-10-27T07:30Z'], dtype='<U35')
|
1690 |
+
|
1691 |
+
Note that we picked datetimes that cross a DST boundary. Passing in a
|
1692 |
+
``pytz`` timezone object will print the appropriate offset
|
1693 |
+
|
1694 |
+
>>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
|
1695 |
+
array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
|
1696 |
+
'2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
|
1697 |
+
|
1698 |
+
Passing in a unit will change the precision
|
1699 |
+
|
1700 |
+
>>> np.datetime_as_string(d, unit='h')
|
1701 |
+
array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
|
1702 |
+
dtype='<U32')
|
1703 |
+
>>> np.datetime_as_string(d, unit='s')
|
1704 |
+
array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
|
1705 |
+
'2002-10-27T07:30:00'], dtype='<U38')
|
1706 |
+
|
1707 |
+
'casting' can be used to specify whether precision can be changed
|
1708 |
+
|
1709 |
+
>>> np.datetime_as_string(d, unit='h', casting='safe')
|
1710 |
+
Traceback (most recent call last):
|
1711 |
+
...
|
1712 |
+
TypeError: Cannot create a datetime string as units 'h' from a NumPy
|
1713 |
+
datetime with units 'm' according to the rule 'safe'
|
1714 |
+
"""
|
1715 |
+
return (arr,)
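# --- Hedged usage sketch (editor's addition): a coarser unit truncates the output ---
import numpy as np

t = np.datetime64('2002-10-27T04:30')
np.datetime_as_string(t, unit='h')   # '2002-10-27T04' (default casting='same_kind')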
|
env-llmeval/lib/python3.10/site-packages/numpy/core/numeric.py
ADDED
@@ -0,0 +1,2530 @@
1 |
+
import functools
|
2 |
+
import itertools
|
3 |
+
import operator
|
4 |
+
import sys
|
5 |
+
import warnings
|
6 |
+
import numbers
|
7 |
+
import builtins
|
8 |
+
|
9 |
+
import numpy as np
|
10 |
+
from . import multiarray
|
11 |
+
from .multiarray import (
|
12 |
+
fastCopyAndTranspose, ALLOW_THREADS,
|
13 |
+
BUFSIZE, CLIP, MAXDIMS, MAY_SHARE_BOUNDS, MAY_SHARE_EXACT, RAISE,
|
14 |
+
WRAP, arange, array, asarray, asanyarray, ascontiguousarray,
|
15 |
+
asfortranarray, broadcast, can_cast, compare_chararrays,
|
16 |
+
concatenate, copyto, dot, dtype, empty,
|
17 |
+
empty_like, flatiter, frombuffer, from_dlpack, fromfile, fromiter,
|
18 |
+
fromstring, inner, lexsort, matmul, may_share_memory,
|
19 |
+
min_scalar_type, ndarray, nditer, nested_iters, promote_types,
|
20 |
+
putmask, result_type, set_numeric_ops, shares_memory, vdot, where,
|
21 |
+
zeros, normalize_axis_index, _get_promotion_state, _set_promotion_state,
|
22 |
+
_using_numpy2_behavior)
|
23 |
+
|
24 |
+
from . import overrides
|
25 |
+
from . import umath
|
26 |
+
from . import shape_base
|
27 |
+
from .overrides import set_array_function_like_doc, set_module
|
28 |
+
from .umath import (multiply, invert, sin, PINF, NAN)
|
29 |
+
from . import numerictypes
|
30 |
+
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
|
31 |
+
from ..exceptions import ComplexWarning, TooHardError, AxisError
|
32 |
+
from ._ufunc_config import errstate, _no_nep50_warning
|
33 |
+
|
34 |
+
bitwise_not = invert
|
35 |
+
ufunc = type(sin)
|
36 |
+
newaxis = None
|
37 |
+
|
38 |
+
array_function_dispatch = functools.partial(
|
39 |
+
overrides.array_function_dispatch, module='numpy')
|
40 |
+
|
41 |
+
|
42 |
+
__all__ = [
|
43 |
+
'newaxis', 'ndarray', 'flatiter', 'nditer', 'nested_iters', 'ufunc',
|
44 |
+
'arange', 'array', 'asarray', 'asanyarray', 'ascontiguousarray',
|
45 |
+
'asfortranarray', 'zeros', 'count_nonzero', 'empty', 'broadcast', 'dtype',
|
46 |
+
'fromstring', 'fromfile', 'frombuffer', 'from_dlpack', 'where',
|
47 |
+
'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
|
48 |
+
'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
|
49 |
+
'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
|
50 |
+
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
|
51 |
+
'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian',
|
52 |
+
'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
|
53 |
+
'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones',
|
54 |
+
'identity', 'allclose', 'compare_chararrays', 'putmask',
|
55 |
+
'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
|
56 |
+
'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
|
57 |
+
'BUFSIZE', 'ALLOW_THREADS', 'full', 'full_like',
|
58 |
+
'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
|
59 |
+
'MAY_SHARE_EXACT', '_get_promotion_state', '_set_promotion_state',
|
60 |
+
'_using_numpy2_behavior']
|
61 |
+
|
62 |
+
|
63 |
+
def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
|
64 |
+
return (a,)
|
65 |
+
|
66 |
+
|
67 |
+
@array_function_dispatch(_zeros_like_dispatcher)
|
68 |
+
def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
|
69 |
+
"""
|
70 |
+
Return an array of zeros with the same shape and type as a given array.
|
71 |
+
|
72 |
+
Parameters
|
73 |
+
----------
|
74 |
+
a : array_like
|
75 |
+
The shape and data-type of `a` define these same attributes of
|
76 |
+
the returned array.
|
77 |
+
dtype : data-type, optional
|
78 |
+
Overrides the data type of the result.
|
79 |
+
|
80 |
+
.. versionadded:: 1.6.0
|
81 |
+
order : {'C', 'F', 'A', or 'K'}, optional
|
82 |
+
Overrides the memory layout of the result. 'C' means C-order,
|
83 |
+
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
|
84 |
+
'C' otherwise. 'K' means match the layout of `a` as closely
|
85 |
+
as possible.
|
86 |
+
|
87 |
+
.. versionadded:: 1.6.0
|
88 |
+
subok : bool, optional.
|
89 |
+
If True, then the newly created array will use the sub-class
|
90 |
+
type of `a`, otherwise it will be a base-class array. Defaults
|
91 |
+
to True.
|
92 |
+
shape : int or sequence of ints, optional.
|
93 |
+
Overrides the shape of the result. If order='K' and the number of
|
94 |
+
dimensions is unchanged, will try to keep order, otherwise,
|
95 |
+
order='C' is implied.
|
96 |
+
|
97 |
+
.. versionadded:: 1.17.0
|
98 |
+
|
99 |
+
Returns
|
100 |
+
-------
|
101 |
+
out : ndarray
|
102 |
+
Array of zeros with the same shape and type as `a`.
|
103 |
+
|
104 |
+
See Also
|
105 |
+
--------
|
106 |
+
empty_like : Return an empty array with shape and type of input.
|
107 |
+
ones_like : Return an array of ones with shape and type of input.
|
108 |
+
full_like : Return a new array with shape of input filled with value.
|
109 |
+
zeros : Return a new array setting values to zero.
|
110 |
+
|
111 |
+
Examples
|
112 |
+
--------
|
113 |
+
>>> x = np.arange(6)
|
114 |
+
>>> x = x.reshape((2, 3))
|
115 |
+
>>> x
|
116 |
+
array([[0, 1, 2],
|
117 |
+
[3, 4, 5]])
|
118 |
+
>>> np.zeros_like(x)
|
119 |
+
array([[0, 0, 0],
|
120 |
+
[0, 0, 0]])
|
121 |
+
|
122 |
+
>>> y = np.arange(3, dtype=float)
|
123 |
+
>>> y
|
124 |
+
array([0., 1., 2.])
|
125 |
+
>>> np.zeros_like(y)
|
126 |
+
array([0., 0., 0.])
|
127 |
+
|
128 |
+
"""
|
129 |
+
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
|
130 |
+
# needed instead of a 0 to get same result as zeros for string dtypes
|
131 |
+
z = zeros(1, dtype=res.dtype)
|
132 |
+
multiarray.copyto(res, z, casting='unsafe')
|
133 |
+
return res
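# --- Hedged usage sketch (editor's addition): `shape` overrides the template ---
import numpy as np

template = np.ones((2, 3), dtype=np.float32)
np.zeros_like(template, shape=(4,))   # array([0., 0., 0., 0.], dtype=float32)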
|
134 |
+
|
135 |
+
|
136 |
+
@set_array_function_like_doc
|
137 |
+
@set_module('numpy')
|
138 |
+
def ones(shape, dtype=None, order='C', *, like=None):
|
139 |
+
"""
|
140 |
+
Return a new array of given shape and type, filled with ones.
|
141 |
+
|
142 |
+
Parameters
|
143 |
+
----------
|
144 |
+
shape : int or sequence of ints
|
145 |
+
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
|
146 |
+
dtype : data-type, optional
|
147 |
+
The desired data-type for the array, e.g., `numpy.int8`. Default is
|
148 |
+
`numpy.float64`.
|
149 |
+
order : {'C', 'F'}, optional, default: C
|
150 |
+
Whether to store multi-dimensional data in row-major
|
151 |
+
(C-style) or column-major (Fortran-style) order in
|
152 |
+
memory.
|
153 |
+
${ARRAY_FUNCTION_LIKE}
|
154 |
+
|
155 |
+
.. versionadded:: 1.20.0
|
156 |
+
|
157 |
+
Returns
|
158 |
+
-------
|
159 |
+
out : ndarray
|
160 |
+
Array of ones with the given shape, dtype, and order.
|
161 |
+
|
162 |
+
See Also
|
163 |
+
--------
|
164 |
+
ones_like : Return an array of ones with shape and type of input.
|
165 |
+
empty : Return a new uninitialized array.
|
166 |
+
zeros : Return a new array setting values to zero.
|
167 |
+
full : Return a new array of given shape filled with value.
|
168 |
+
|
169 |
+
|
170 |
+
Examples
|
171 |
+
--------
|
172 |
+
>>> np.ones(5)
|
173 |
+
array([1., 1., 1., 1., 1.])
|
174 |
+
|
175 |
+
>>> np.ones((5,), dtype=int)
|
176 |
+
array([1, 1, 1, 1, 1])
|
177 |
+
|
178 |
+
>>> np.ones((2, 1))
|
179 |
+
array([[1.],
|
180 |
+
[1.]])
|
181 |
+
|
182 |
+
>>> s = (2,2)
|
183 |
+
>>> np.ones(s)
|
184 |
+
array([[1., 1.],
|
185 |
+
[1., 1.]])
|
186 |
+
|
187 |
+
"""
|
188 |
+
if like is not None:
|
189 |
+
return _ones_with_like(like, shape, dtype=dtype, order=order)
|
190 |
+
|
191 |
+
a = empty(shape, dtype, order)
|
192 |
+
multiarray.copyto(a, 1, casting='unsafe')
|
193 |
+
return a
|
194 |
+
|
195 |
+
|
196 |
+
_ones_with_like = array_function_dispatch()(ones)
|
197 |
+
|
198 |
+
|
199 |
+
def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
|
200 |
+
return (a,)
|
201 |
+
|
202 |
+
|
203 |
+
@array_function_dispatch(_ones_like_dispatcher)
|
204 |
+
def ones_like(a, dtype=None, order='K', subok=True, shape=None):
|
205 |
+
"""
|
206 |
+
Return an array of ones with the same shape and type as a given array.
|
207 |
+
|
208 |
+
Parameters
|
209 |
+
----------
|
210 |
+
a : array_like
|
211 |
+
The shape and data-type of `a` define these same attributes of
|
212 |
+
the returned array.
|
213 |
+
dtype : data-type, optional
|
214 |
+
Overrides the data type of the result.
|
215 |
+
|
216 |
+
.. versionadded:: 1.6.0
|
217 |
+
order : {'C', 'F', 'A', or 'K'}, optional
|
218 |
+
Overrides the memory layout of the result. 'C' means C-order,
|
219 |
+
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
|
220 |
+
'C' otherwise. 'K' means match the layout of `a` as closely
|
221 |
+
as possible.
|
222 |
+
|
223 |
+
.. versionadded:: 1.6.0
|
224 |
+
subok : bool, optional.
|
225 |
+
If True, then the newly created array will use the sub-class
|
226 |
+
type of `a`, otherwise it will be a base-class array. Defaults
|
227 |
+
to True.
|
228 |
+
shape : int or sequence of ints, optional.
|
229 |
+
Overrides the shape of the result. If order='K' and the number of
|
230 |
+
dimensions is unchanged, will try to keep order, otherwise,
|
231 |
+
order='C' is implied.
|
232 |
+
|
233 |
+
.. versionadded:: 1.17.0
|
234 |
+
|
235 |
+
Returns
|
236 |
+
-------
|
237 |
+
out : ndarray
|
238 |
+
Array of ones with the same shape and type as `a`.
|
239 |
+
|
240 |
+
See Also
|
241 |
+
--------
|
242 |
+
empty_like : Return an empty array with shape and type of input.
|
243 |
+
zeros_like : Return an array of zeros with shape and type of input.
|
244 |
+
full_like : Return a new array with shape of input filled with value.
|
245 |
+
ones : Return a new array setting values to one.
|
246 |
+
|
247 |
+
Examples
|
248 |
+
--------
|
249 |
+
>>> x = np.arange(6)
|
250 |
+
>>> x = x.reshape((2, 3))
|
251 |
+
>>> x
|
252 |
+
array([[0, 1, 2],
|
253 |
+
[3, 4, 5]])
|
254 |
+
>>> np.ones_like(x)
|
255 |
+
array([[1, 1, 1],
|
256 |
+
[1, 1, 1]])
|
257 |
+
|
258 |
+
>>> y = np.arange(3, dtype=float)
|
259 |
+
>>> y
|
260 |
+
array([0., 1., 2.])
|
261 |
+
>>> np.ones_like(y)
|
262 |
+
array([1., 1., 1.])
|
263 |
+
|
264 |
+
"""
|
265 |
+
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
|
266 |
+
multiarray.copyto(res, 1, casting='unsafe')
|
267 |
+
return res
|
268 |
+
|
269 |
+
|
270 |
+
def _full_dispatcher(shape, fill_value, dtype=None, order=None, *, like=None):
|
271 |
+
return(like,)
|
272 |
+
|
273 |
+
|
274 |
+
@set_array_function_like_doc
|
275 |
+
@set_module('numpy')
|
276 |
+
def full(shape, fill_value, dtype=None, order='C', *, like=None):
|
277 |
+
"""
|
278 |
+
Return a new array of given shape and type, filled with `fill_value`.
|
279 |
+
|
280 |
+
Parameters
|
281 |
+
----------
|
282 |
+
shape : int or sequence of ints
|
283 |
+
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
|
284 |
+
fill_value : scalar or array_like
|
285 |
+
Fill value.
|
286 |
+
dtype : data-type, optional
|
287 |
+
The desired data-type for the array The default, None, means
|
288 |
+
``np.array(fill_value).dtype``.
|
289 |
+
order : {'C', 'F'}, optional
|
290 |
+
Whether to store multidimensional data in C- or Fortran-contiguous
|
291 |
+
(row- or column-wise) order in memory.
|
292 |
+
${ARRAY_FUNCTION_LIKE}
|
293 |
+
|
294 |
+
.. versionadded:: 1.20.0
|
295 |
+
|
296 |
+
Returns
|
297 |
+
-------
|
298 |
+
out : ndarray
|
299 |
+
Array of `fill_value` with the given shape, dtype, and order.
|
300 |
+
|
301 |
+
See Also
|
302 |
+
--------
|
303 |
+
full_like : Return a new array with shape of input filled with value.
|
304 |
+
empty : Return a new uninitialized array.
|
305 |
+
ones : Return a new array setting values to one.
|
306 |
+
zeros : Return a new array setting values to zero.
|
307 |
+
|
308 |
+
Examples
|
309 |
+
--------
|
310 |
+
>>> np.full((2, 2), np.inf)
|
311 |
+
array([[inf, inf],
|
312 |
+
[inf, inf]])
|
313 |
+
>>> np.full((2, 2), 10)
|
314 |
+
array([[10, 10],
|
315 |
+
[10, 10]])
|
316 |
+
|
317 |
+
>>> np.full((2, 2), [1, 2])
|
318 |
+
array([[1, 2],
|
319 |
+
[1, 2]])
|
320 |
+
|
321 |
+
"""
|
322 |
+
if like is not None:
|
323 |
+
return _full_with_like(
|
324 |
+
like, shape, fill_value, dtype=dtype, order=order)
|
325 |
+
|
326 |
+
if dtype is None:
|
327 |
+
fill_value = asarray(fill_value)
|
328 |
+
dtype = fill_value.dtype
|
329 |
+
a = empty(shape, dtype, order)
|
330 |
+
multiarray.copyto(a, fill_value, casting='unsafe')
|
331 |
+
return a
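# --- Hedged usage sketch (editor's addition): dtype defaults to fill_value's dtype ---
import numpy as np

np.full(3, 7).dtype     # integer dtype inferred from the fill value (platform dependent)
np.full(3, 7.0).dtype   # dtype('float64')
np.full(3, True)        # array([ True,  True,  True])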
|
332 |
+
|
333 |
+
|
334 |
+
_full_with_like = array_function_dispatch()(full)
|
335 |
+
|
336 |
+
|
337 |
+
def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None):
|
338 |
+
return (a,)
|
339 |
+
|
340 |
+
|
341 |
+
@array_function_dispatch(_full_like_dispatcher)
|
342 |
+
def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
|
343 |
+
"""
|
344 |
+
Return a full array with the same shape and type as a given array.
|
345 |
+
|
346 |
+
Parameters
|
347 |
+
----------
|
348 |
+
a : array_like
|
349 |
+
The shape and data-type of `a` define these same attributes of
|
350 |
+
the returned array.
|
351 |
+
fill_value : array_like
|
352 |
+
Fill value.
|
353 |
+
dtype : data-type, optional
|
354 |
+
Overrides the data type of the result.
|
355 |
+
order : {'C', 'F', 'A', or 'K'}, optional
|
356 |
+
Overrides the memory layout of the result. 'C' means C-order,
|
357 |
+
'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
|
358 |
+
'C' otherwise. 'K' means match the layout of `a` as closely
|
359 |
+
as possible.
|
360 |
+
subok : bool, optional.
|
361 |
+
If True, then the newly created array will use the sub-class
|
362 |
+
type of `a`, otherwise it will be a base-class array. Defaults
|
363 |
+
to True.
|
364 |
+
shape : int or sequence of ints, optional.
|
365 |
+
Overrides the shape of the result. If order='K' and the number of
|
366 |
+
dimensions is unchanged, will try to keep order, otherwise,
|
367 |
+
order='C' is implied.
|
368 |
+
|
369 |
+
.. versionadded:: 1.17.0
|
370 |
+
|
371 |
+
Returns
|
372 |
+
-------
|
373 |
+
out : ndarray
|
374 |
+
Array of `fill_value` with the same shape and type as `a`.
|
375 |
+
|
376 |
+
See Also
|
377 |
+
--------
|
378 |
+
empty_like : Return an empty array with shape and type of input.
|
379 |
+
ones_like : Return an array of ones with shape and type of input.
|
380 |
+
zeros_like : Return an array of zeros with shape and type of input.
|
381 |
+
full : Return a new array of given shape filled with value.
|
382 |
+
|
383 |
+
Examples
|
384 |
+
--------
|
385 |
+
>>> x = np.arange(6, dtype=int)
|
386 |
+
>>> np.full_like(x, 1)
|
387 |
+
array([1, 1, 1, 1, 1, 1])
|
388 |
+
>>> np.full_like(x, 0.1)
|
389 |
+
array([0, 0, 0, 0, 0, 0])
|
390 |
+
>>> np.full_like(x, 0.1, dtype=np.double)
|
391 |
+
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
|
392 |
+
>>> np.full_like(x, np.nan, dtype=np.double)
|
393 |
+
array([nan, nan, nan, nan, nan, nan])
|
394 |
+
|
395 |
+
>>> y = np.arange(6, dtype=np.double)
|
396 |
+
>>> np.full_like(y, 0.1)
|
397 |
+
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
|
398 |
+
|
399 |
+
>>> y = np.zeros([2, 2, 3], dtype=int)
|
400 |
+
>>> np.full_like(y, [0, 0, 255])
|
401 |
+
array([[[ 0, 0, 255],
|
402 |
+
[ 0, 0, 255]],
|
403 |
+
[[ 0, 0, 255],
|
404 |
+
[ 0, 0, 255]]])
|
405 |
+
"""
|
406 |
+
res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
|
407 |
+
multiarray.copyto(res, fill_value, casting='unsafe')
|
408 |
+
return res
|
409 |
+
|
410 |
+
|
411 |
+
def _count_nonzero_dispatcher(a, axis=None, *, keepdims=None):
|
412 |
+
return (a,)
|
413 |
+
|
414 |
+
|
415 |
+
@array_function_dispatch(_count_nonzero_dispatcher)
|
416 |
+
def count_nonzero(a, axis=None, *, keepdims=False):
|
417 |
+
"""
|
418 |
+
Counts the number of non-zero values in the array ``a``.
|
419 |
+
|
420 |
+
The word "non-zero" is in reference to the Python 2.x
|
421 |
+
built-in method ``__nonzero__()`` (renamed ``__bool__()``
|
422 |
+
in Python 3.x) of Python objects that tests an object's
|
423 |
+
"truthfulness". For example, any number is considered
|
424 |
+
truthful if it is nonzero, whereas any string is considered
|
425 |
+
truthful if it is not the empty string. Thus, this function
|
426 |
+
(recursively) counts how many elements in ``a`` (and in
|
427 |
+
sub-arrays thereof) have their ``__nonzero__()`` or ``__bool__()``
|
428 |
+
method evaluated to ``True``.
|
429 |
+
|
430 |
+
Parameters
|
431 |
+
----------
|
432 |
+
a : array_like
|
433 |
+
The array for which to count non-zeros.
|
434 |
+
axis : int or tuple, optional
|
435 |
+
Axis or tuple of axes along which to count non-zeros.
|
436 |
+
Default is None, meaning that non-zeros will be counted
|
437 |
+
along a flattened version of ``a``.
|
438 |
+
|
439 |
+
.. versionadded:: 1.12.0
|
440 |
+
|
441 |
+
keepdims : bool, optional
|
442 |
+
If this is set to True, the axes that are counted are left
|
443 |
+
in the result as dimensions with size one. With this option,
|
444 |
+
the result will broadcast correctly against the input array.
|
445 |
+
|
446 |
+
.. versionadded:: 1.19.0
|
447 |
+
|
448 |
+
Returns
|
449 |
+
-------
|
450 |
+
count : int or array of int
|
451 |
+
Number of non-zero values in the array along a given axis.
|
452 |
+
Otherwise, the total number of non-zero values in the array
|
453 |
+
is returned.
|
454 |
+
|
455 |
+
See Also
|
456 |
+
--------
|
457 |
+
nonzero : Return the coordinates of all the non-zero values.
|
458 |
+
|
459 |
+
Examples
|
460 |
+
--------
|
461 |
+
>>> np.count_nonzero(np.eye(4))
|
462 |
+
4
|
463 |
+
>>> a = np.array([[0, 1, 7, 0],
|
464 |
+
... [3, 0, 2, 19]])
|
465 |
+
>>> np.count_nonzero(a)
|
466 |
+
5
|
467 |
+
>>> np.count_nonzero(a, axis=0)
|
468 |
+
array([1, 1, 2, 1])
|
469 |
+
>>> np.count_nonzero(a, axis=1)
|
470 |
+
array([2, 3])
|
471 |
+
>>> np.count_nonzero(a, axis=1, keepdims=True)
|
472 |
+
array([[2],
|
473 |
+
[3]])
|
474 |
+
"""
|
475 |
+
if axis is None and not keepdims:
|
476 |
+
return multiarray.count_nonzero(a)
|
477 |
+
|
478 |
+
a = asanyarray(a)
|
479 |
+
|
480 |
+
# TODO: this works around .astype(bool) not working properly (gh-9847)
|
481 |
+
if np.issubdtype(a.dtype, np.character):
|
482 |
+
a_bool = a != a.dtype.type()
|
483 |
+
else:
|
484 |
+
a_bool = a.astype(np.bool_, copy=False)
|
485 |
+
|
486 |
+
return a_bool.sum(axis=axis, dtype=np.intp, keepdims=keepdims)
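# --- Hedged usage sketch (editor's addition): keepdims keeps the reduced axis ---
import numpy as np

a = np.array([[0, 1, 7, 0],
              [3, 0, 2, 19]])
counts = np.count_nonzero(a, axis=1, keepdims=True)   # array([[2], [3]])
scaled = a / np.maximum(counts, 1)                    # broadcasts back row-wise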
|
487 |
+
|
488 |
+
|
489 |
+
@set_module('numpy')
|
490 |
+
def isfortran(a):
|
491 |
+
"""
|
492 |
+
Check if the array is Fortran contiguous but *not* C contiguous.
|
493 |
+
|
494 |
+
This function is obsolete and, because of changes due to relaxed stride
|
495 |
+
checking, its return value for the same array may differ for versions
|
496 |
+
of NumPy >= 1.10.0 and previous versions. If you only want to check if an
|
497 |
+
array is Fortran contiguous use ``a.flags.f_contiguous`` instead.
|
498 |
+
|
499 |
+
Parameters
|
500 |
+
----------
|
501 |
+
a : ndarray
|
502 |
+
Input array.
|
503 |
+
|
504 |
+
Returns
|
505 |
+
-------
|
506 |
+
isfortran : bool
|
507 |
+
Returns True if the array is Fortran contiguous but *not* C contiguous.
|
508 |
+
|
509 |
+
|
510 |
+
Examples
|
511 |
+
--------
|
512 |
+
|
513 |
+
np.array allows to specify whether the array is written in C-contiguous
|
514 |
+
order (last index varies the fastest), or FORTRAN-contiguous order in
|
515 |
+
memory (first index varies the fastest).
|
516 |
+
|
517 |
+
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
|
518 |
+
>>> a
|
519 |
+
array([[1, 2, 3],
|
520 |
+
[4, 5, 6]])
|
521 |
+
>>> np.isfortran(a)
|
522 |
+
False
|
523 |
+
|
524 |
+
>>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F')
|
525 |
+
>>> b
|
526 |
+
array([[1, 2, 3],
|
527 |
+
[4, 5, 6]])
|
528 |
+
>>> np.isfortran(b)
|
529 |
+
True
|
530 |
+
|
531 |
+
|
532 |
+
The transpose of a C-ordered array is a FORTRAN-ordered array.
|
533 |
+
|
534 |
+
>>> a = np.array([[1, 2, 3], [4, 5, 6]], order='C')
|
535 |
+
>>> a
|
536 |
+
array([[1, 2, 3],
|
537 |
+
[4, 5, 6]])
|
538 |
+
>>> np.isfortran(a)
|
539 |
+
False
|
540 |
+
>>> b = a.T
|
541 |
+
>>> b
|
542 |
+
array([[1, 4],
|
543 |
+
[2, 5],
|
544 |
+
[3, 6]])
|
545 |
+
>>> np.isfortran(b)
|
546 |
+
True
|
547 |
+
|
548 |
+
C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.
|
549 |
+
|
550 |
+
>>> np.isfortran(np.array([1, 2], order='F'))
|
551 |
+
False
|
552 |
+
|
553 |
+
"""
|
554 |
+
return a.flags.fnc
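# --- Hedged usage sketch (editor's addition): prefer the flags attribute ---
import numpy as np

b = np.asfortranarray(np.ones((2, 3)))
b.flags.f_contiguous   # True  -- the recommended check
np.isfortran(b)        # True, but only because b is not also C contiguous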
|
555 |
+
|
556 |
+
|
557 |
+
def _argwhere_dispatcher(a):
|
558 |
+
return (a,)
|
559 |
+
|
560 |
+
|
561 |
+
@array_function_dispatch(_argwhere_dispatcher)
|
562 |
+
def argwhere(a):
|
563 |
+
"""
|
564 |
+
Find the indices of array elements that are non-zero, grouped by element.
|
565 |
+
|
566 |
+
Parameters
|
567 |
+
----------
|
568 |
+
a : array_like
|
569 |
+
Input data.
|
570 |
+
|
571 |
+
Returns
|
572 |
+
-------
|
573 |
+
index_array : (N, a.ndim) ndarray
|
574 |
+
Indices of elements that are non-zero. Indices are grouped by element.
|
575 |
+
This array will have shape ``(N, a.ndim)`` where ``N`` is the number of
|
576 |
+
non-zero items.
|
577 |
+
|
578 |
+
See Also
|
579 |
+
--------
|
580 |
+
where, nonzero
|
581 |
+
|
582 |
+
Notes
|
583 |
+
-----
|
584 |
+
``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``,
|
585 |
+
but produces a result of the correct shape for a 0D array.
|
586 |
+
|
587 |
+
The output of ``argwhere`` is not suitable for indexing arrays.
|
588 |
+
For this purpose use ``nonzero(a)`` instead.
|
589 |
+
|
590 |
+
Examples
|
591 |
+
--------
|
592 |
+
>>> x = np.arange(6).reshape(2,3)
|
593 |
+
>>> x
|
594 |
+
array([[0, 1, 2],
|
595 |
+
[3, 4, 5]])
|
596 |
+
>>> np.argwhere(x>1)
|
597 |
+
array([[0, 2],
|
598 |
+
[1, 0],
|
599 |
+
[1, 1],
|
600 |
+
[1, 2]])
|
601 |
+
|
602 |
+
"""
|
603 |
+
    # nonzero does not behave well on 0d, so promote to 1d
    if np.ndim(a) == 0:
        a = shape_base.atleast_1d(a)
        # then remove the added dimension
        return argwhere(a)[:,:0]
    return transpose(nonzero(a))
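# Rough sketch of the 0-D special case handled above (numpy assumed imported
# as np): the scalar is promoted to 1-D and the added index column is then
# sliced away, so the result has zero coordinate columns rather than a bogus one.
# >>> np.argwhere(7).shape     # one "non-zero" element, zero index columns
# (1, 0)
# >>> np.argwhere(0).shape
# (0, 0)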
|
610 |
+
|
611 |
+
def _flatnonzero_dispatcher(a):
|
612 |
+
return (a,)
|
613 |
+
|
614 |
+
|
615 |
+
@array_function_dispatch(_flatnonzero_dispatcher)
|
616 |
+
def flatnonzero(a):
|
617 |
+
"""
|
618 |
+
Return indices that are non-zero in the flattened version of a.
|
619 |
+
|
620 |
+
This is equivalent to ``np.nonzero(np.ravel(a))[0]``.
|
621 |
+
|
622 |
+
Parameters
|
623 |
+
----------
|
624 |
+
a : array_like
|
625 |
+
Input data.
|
626 |
+
|
627 |
+
Returns
|
628 |
+
-------
|
629 |
+
res : ndarray
|
630 |
+
Output array, containing the indices of the elements of ``a.ravel()``
|
631 |
+
that are non-zero.
|
632 |
+
|
633 |
+
See Also
|
634 |
+
--------
|
635 |
+
nonzero : Return the indices of the non-zero elements of the input array.
|
636 |
+
ravel : Return a 1-D array containing the elements of the input array.
|
637 |
+
|
638 |
+
Examples
|
639 |
+
--------
|
640 |
+
>>> x = np.arange(-2, 3)
|
641 |
+
>>> x
|
642 |
+
array([-2, -1, 0, 1, 2])
|
643 |
+
>>> np.flatnonzero(x)
|
644 |
+
array([0, 1, 3, 4])
|
645 |
+
|
646 |
+
Use the indices of the non-zero elements as an index array to extract
|
647 |
+
these elements:
|
648 |
+
|
649 |
+
>>> x.ravel()[np.flatnonzero(x)]
|
650 |
+
array([-2, -1, 1, 2])
|
651 |
+
|
652 |
+
"""
|
653 |
+
return np.nonzero(np.ravel(a))[0]
|
654 |
+
|
655 |
+
|
656 |
+
def _correlate_dispatcher(a, v, mode=None):
|
657 |
+
return (a, v)
|
658 |
+
|
659 |
+
|
660 |
+
@array_function_dispatch(_correlate_dispatcher)
|
661 |
+
def correlate(a, v, mode='valid'):
|
662 |
+
r"""
|
663 |
+
Cross-correlation of two 1-dimensional sequences.
|
664 |
+
|
665 |
+
This function computes the correlation as generally defined in signal
|
666 |
+
processing texts:
|
667 |
+
|
668 |
+
.. math:: c_k = \sum_n a_{n+k} \cdot \overline{v}_n
|
669 |
+
|
670 |
+
with a and v sequences being zero-padded where necessary and
|
671 |
+
:math:`\overline x` denoting complex conjugation.
|
672 |
+
|
673 |
+
Parameters
|
674 |
+
----------
|
675 |
+
a, v : array_like
|
676 |
+
Input sequences.
|
677 |
+
mode : {'valid', 'same', 'full'}, optional
|
678 |
+
Refer to the `convolve` docstring. Note that the default
|
679 |
+
is 'valid', unlike `convolve`, which uses 'full'.
|
680 |
+
old_behavior : bool
|
681 |
+
`old_behavior` was removed in NumPy 1.10. If you need the old
|
682 |
+
behavior, use `multiarray.correlate`.
|
683 |
+
|
684 |
+
Returns
|
685 |
+
-------
|
686 |
+
out : ndarray
|
687 |
+
Discrete cross-correlation of `a` and `v`.
|
688 |
+
|
689 |
+
See Also
|
690 |
+
--------
|
691 |
+
convolve : Discrete, linear convolution of two one-dimensional sequences.
|
692 |
+
multiarray.correlate : Old, no conjugate, version of correlate.
|
693 |
+
scipy.signal.correlate : uses FFT which has superior performance on large arrays.
|
694 |
+
|
695 |
+
Notes
|
696 |
+
-----
|
697 |
+
The definition of correlation above is not unique and sometimes correlation
|
698 |
+
may be defined differently. Another common definition is:
|
699 |
+
|
700 |
+
.. math:: c'_k = \sum_n a_{n} \cdot \overline{v_{n+k}}
|
701 |
+
|
702 |
+
which is related to :math:`c_k` by :math:`c'_k = c_{-k}`.
|
703 |
+
|
704 |
+
`numpy.correlate` may perform slowly in large arrays (e.g. n = 1e5) because it does
|
705 |
+
not use the FFT to compute the convolution; in that case, `scipy.signal.correlate` might
|
706 |
+
be preferable.
|
707 |
+
|
708 |
+
|
709 |
+
Examples
|
710 |
+
--------
|
711 |
+
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
|
712 |
+
array([3.5])
|
713 |
+
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
|
714 |
+
array([2. , 3.5, 3. ])
|
715 |
+
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
|
716 |
+
array([0.5, 2. , 3.5, 3. , 0. ])
|
717 |
+
|
718 |
+
Using complex sequences:
|
719 |
+
|
720 |
+
>>> np.correlate([1+1j, 2, 3-1j], [0, 1, 0.5j], 'full')
|
721 |
+
array([ 0.5-0.5j, 1.0+0.j , 1.5-1.5j, 3.0-1.j , 0.0+0.j ])
|
722 |
+
|
723 |
+
Note that you get the time reversed, complex conjugated result
|
724 |
+
(:math:`\overline{c_{-k}}`) when the two input sequences a and v change
|
725 |
+
places:
|
726 |
+
|
727 |
+
>>> np.correlate([0, 1, 0.5j], [1+1j, 2, 3-1j], 'full')
|
728 |
+
array([ 0.0+0.j , 3.0+1.j , 1.5+1.5j, 1.0+0.j , 0.5+0.5j])
|
729 |
+
|
730 |
+
"""
|
731 |
+
    return multiarray.correlate2(a, v, mode)
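# Worked check of the docstring formula c_k = sum_n a_{n+k} * conj(v_n) for the
# 'valid' case with equal-length inputs (illustrative values only, numpy assumed
# imported as np): there is a single full-overlap term.
# >>> a, v = np.array([1., 2., 3.]), np.array([0., 1., 0.5])
# >>> float(np.sum(a * np.conj(v)))
# 3.5
# >>> np.correlate(a, v)        # mode='valid' by default
# array([3.5])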
|
733 |
+
|
734 |
+
def _convolve_dispatcher(a, v, mode=None):
|
735 |
+
return (a, v)
|
736 |
+
|
737 |
+
|
738 |
+
@array_function_dispatch(_convolve_dispatcher)
|
739 |
+
def convolve(a, v, mode='full'):
|
740 |
+
"""
|
741 |
+
Returns the discrete, linear convolution of two one-dimensional sequences.
|
742 |
+
|
743 |
+
The convolution operator is often seen in signal processing, where it
|
744 |
+
models the effect of a linear time-invariant system on a signal [1]_. In
|
745 |
+
probability theory, the sum of two independent random variables is
|
746 |
+
distributed according to the convolution of their individual
|
747 |
+
distributions.
|
748 |
+
|
749 |
+
If `v` is longer than `a`, the arrays are swapped before computation.
|
750 |
+
|
751 |
+
Parameters
|
752 |
+
----------
|
753 |
+
a : (N,) array_like
|
754 |
+
First one-dimensional input array.
|
755 |
+
v : (M,) array_like
|
756 |
+
Second one-dimensional input array.
|
757 |
+
mode : {'full', 'valid', 'same'}, optional
|
758 |
+
'full':
|
759 |
+
By default, mode is 'full'. This returns the convolution
|
760 |
+
at each point of overlap, with an output shape of (N+M-1,). At
|
761 |
+
the end-points of the convolution, the signals do not overlap
|
762 |
+
completely, and boundary effects may be seen.
|
763 |
+
|
764 |
+
'same':
|
765 |
+
Mode 'same' returns output of length ``max(M, N)``. Boundary
|
766 |
+
effects are still visible.
|
767 |
+
|
768 |
+
'valid':
|
769 |
+
Mode 'valid' returns output of length
|
770 |
+
``max(M, N) - min(M, N) + 1``. The convolution product is only given
|
771 |
+
for points where the signals overlap completely. Values outside
|
772 |
+
the signal boundary have no effect.
|
773 |
+
|
774 |
+
Returns
|
775 |
+
-------
|
776 |
+
out : ndarray
|
777 |
+
Discrete, linear convolution of `a` and `v`.
|
778 |
+
|
779 |
+
See Also
|
780 |
+
--------
|
781 |
+
scipy.signal.fftconvolve : Convolve two arrays using the Fast Fourier
|
782 |
+
Transform.
|
783 |
+
scipy.linalg.toeplitz : Used to construct the convolution operator.
|
784 |
+
polymul : Polynomial multiplication. Same output as convolve, but also
|
785 |
+
accepts poly1d objects as input.
|
786 |
+
|
787 |
+
Notes
|
788 |
+
-----
|
789 |
+
The discrete convolution operation is defined as
|
790 |
+
|
791 |
+
.. math:: (a * v)_n = \\sum_{m = -\\infty}^{\\infty} a_m v_{n - m}
|
792 |
+
|
793 |
+
It can be shown that a convolution :math:`x(t) * y(t)` in time/space
|
794 |
+
is equivalent to the multiplication :math:`X(f) Y(f)` in the Fourier
|
795 |
+
domain, after appropriate padding (padding is necessary to prevent
|
796 |
+
circular convolution). Since multiplication is more efficient (faster)
|
797 |
+
than convolution, the function `scipy.signal.fftconvolve` exploits the
|
798 |
+
FFT to calculate the convolution of large data-sets.
|
799 |
+
|
800 |
+
References
|
801 |
+
----------
|
802 |
+
.. [1] Wikipedia, "Convolution",
|
803 |
+
https://en.wikipedia.org/wiki/Convolution
|
804 |
+
|
805 |
+
Examples
|
806 |
+
--------
|
807 |
+
Note how the convolution operator flips the second array
|
808 |
+
before "sliding" the two across one another:
|
809 |
+
|
810 |
+
>>> np.convolve([1, 2, 3], [0, 1, 0.5])
|
811 |
+
array([0. , 1. , 2.5, 4. , 1.5])
|
812 |
+
|
813 |
+
Only return the middle values of the convolution.
|
814 |
+
Contains boundary effects, where zeros are taken
|
815 |
+
into account:
|
816 |
+
|
817 |
+
>>> np.convolve([1,2,3],[0,1,0.5], 'same')
|
818 |
+
array([1. , 2.5, 4. ])
|
819 |
+
|
820 |
+
The two arrays are of the same length, so there
|
821 |
+
is only one position where they completely overlap:
|
822 |
+
|
823 |
+
>>> np.convolve([1,2,3],[0,1,0.5], 'valid')
|
824 |
+
array([2.5])
|
825 |
+
|
826 |
+
"""
|
827 |
+
    a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1)
    if (len(v) > len(a)):
        a, v = v, a
    if len(a) == 0:
        raise ValueError('a cannot be empty')
    if len(v) == 0:
        raise ValueError('v cannot be empty')
    return multiarray.correlate(a, v[::-1], mode)
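# Rough sketch of the FFT equivalence mentioned in the Notes (numpy assumed
# imported as np): zero-padding both inputs to length N+M-1 turns the pointwise
# FFT product into the same linear convolution computed above.
# >>> a, v = np.array([1., 2., 3.]), np.array([0., 1., 0.5])
# >>> n = len(a) + len(v) - 1
# >>> via_fft = np.fft.irfft(np.fft.rfft(a, n) * np.fft.rfft(v, n), n)
# >>> np.allclose(via_fft, np.convolve(a, v))
# True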
|
836 |
+
|
837 |
+
def _outer_dispatcher(a, b, out=None):
|
838 |
+
return (a, b, out)
|
839 |
+
|
840 |
+
|
841 |
+
@array_function_dispatch(_outer_dispatcher)
|
842 |
+
def outer(a, b, out=None):
|
843 |
+
"""
|
844 |
+
Compute the outer product of two vectors.
|
845 |
+
|
846 |
+
Given two vectors `a` and `b` of length ``M`` and ``N``, respectively,
|
847 |
+
the outer product [1]_ is::
|
848 |
+
|
849 |
+
[[a_0*b_0 a_0*b_1 ... a_0*b_{N-1} ]
|
850 |
+
[a_1*b_0 .
|
851 |
+
[ ... .
|
852 |
+
[a_{M-1}*b_0 a_{M-1}*b_{N-1} ]]
|
853 |
+
|
854 |
+
Parameters
|
855 |
+
----------
|
856 |
+
a : (M,) array_like
|
857 |
+
First input vector. Input is flattened if
|
858 |
+
not already 1-dimensional.
|
859 |
+
b : (N,) array_like
|
860 |
+
Second input vector. Input is flattened if
|
861 |
+
not already 1-dimensional.
|
862 |
+
out : (M, N) ndarray, optional
|
863 |
+
A location where the result is stored
|
864 |
+
|
865 |
+
.. versionadded:: 1.9.0
|
866 |
+
|
867 |
+
Returns
|
868 |
+
-------
|
869 |
+
out : (M, N) ndarray
|
870 |
+
``out[i, j] = a[i] * b[j]``
|
871 |
+
|
872 |
+
See also
|
873 |
+
--------
|
874 |
+
inner
|
875 |
+
einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
|
876 |
+
ufunc.outer : A generalization to dimensions other than 1D and other
|
877 |
+
operations. ``np.multiply.outer(a.ravel(), b.ravel())``
|
878 |
+
is the equivalent.
|
879 |
+
tensordot : ``np.tensordot(a.ravel(), b.ravel(), axes=((), ()))``
|
880 |
+
is the equivalent.
|
881 |
+
|
882 |
+
References
|
883 |
+
----------
|
884 |
+
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
|
885 |
+
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
|
886 |
+
pg. 8.
|
887 |
+
|
888 |
+
Examples
|
889 |
+
--------
|
890 |
+
Make a (*very* coarse) grid for computing a Mandelbrot set:
|
891 |
+
|
892 |
+
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
|
893 |
+
>>> rl
|
894 |
+
array([[-2., -1., 0., 1., 2.],
|
895 |
+
[-2., -1., 0., 1., 2.],
|
896 |
+
[-2., -1., 0., 1., 2.],
|
897 |
+
[-2., -1., 0., 1., 2.],
|
898 |
+
[-2., -1., 0., 1., 2.]])
|
899 |
+
>>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
|
900 |
+
>>> im
|
901 |
+
array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
|
902 |
+
[0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
|
903 |
+
[0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
|
904 |
+
[0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
|
905 |
+
[0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
|
906 |
+
>>> grid = rl + im
|
907 |
+
>>> grid
|
908 |
+
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
|
909 |
+
[-2.+1.j, -1.+1.j, 0.+1.j, 1.+1.j, 2.+1.j],
|
910 |
+
[-2.+0.j, -1.+0.j, 0.+0.j, 1.+0.j, 2.+0.j],
|
911 |
+
[-2.-1.j, -1.-1.j, 0.-1.j, 1.-1.j, 2.-1.j],
|
912 |
+
[-2.-2.j, -1.-2.j, 0.-2.j, 1.-2.j, 2.-2.j]])
|
913 |
+
|
914 |
+
An example using a "vector" of letters:
|
915 |
+
|
916 |
+
>>> x = np.array(['a', 'b', 'c'], dtype=object)
|
917 |
+
>>> np.outer(x, [1, 2, 3])
|
918 |
+
array([['a', 'aa', 'aaa'],
|
919 |
+
['b', 'bb', 'bbb'],
|
920 |
+
['c', 'cc', 'ccc']], dtype=object)
|
921 |
+
|
922 |
+
"""
|
923 |
+
a = asarray(a)
|
924 |
+
b = asarray(b)
|
925 |
+
return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out)
|
926 |
+
|
927 |
+
|
928 |
+
def _tensordot_dispatcher(a, b, axes=None):
|
929 |
+
return (a, b)
|
930 |
+
|
931 |
+
|
932 |
+
@array_function_dispatch(_tensordot_dispatcher)
|
933 |
+
def tensordot(a, b, axes=2):
|
934 |
+
"""
|
935 |
+
Compute tensor dot product along specified axes.
|
936 |
+
|
937 |
+
Given two tensors, `a` and `b`, and an array_like object containing
|
938 |
+
two array_like objects, ``(a_axes, b_axes)``, sum the products of
|
939 |
+
`a`'s and `b`'s elements (components) over the axes specified by
|
940 |
+
``a_axes`` and ``b_axes``. The third argument can be a single non-negative
|
941 |
+
integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions
|
942 |
+
of `a` and the first ``N`` dimensions of `b` are summed over.
|
943 |
+
|
944 |
+
Parameters
|
945 |
+
----------
|
946 |
+
a, b : array_like
|
947 |
+
Tensors to "dot".
|
948 |
+
|
949 |
+
axes : int or (2,) array_like
|
950 |
+
* integer_like
|
951 |
+
If an int N, sum over the last N axes of `a` and the first N axes
|
952 |
+
of `b` in order. The sizes of the corresponding axes must match.
|
953 |
+
* (2,) array_like
|
954 |
+
Or, a list of axes to be summed over, first sequence applying to `a`,
|
955 |
+
second to `b`. Both elements array_like must be of the same length.
|
956 |
+
|
957 |
+
Returns
|
958 |
+
-------
|
959 |
+
output : ndarray
|
960 |
+
The tensor dot product of the input.
|
961 |
+
|
962 |
+
See Also
|
963 |
+
--------
|
964 |
+
dot, einsum
|
965 |
+
|
966 |
+
Notes
|
967 |
+
-----
|
968 |
+
Three common use cases are:
|
969 |
+
* ``axes = 0`` : tensor product :math:`a\\otimes b`
|
970 |
+
* ``axes = 1`` : tensor dot product :math:`a\\cdot b`
|
971 |
+
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
|
972 |
+
|
973 |
+
When `axes` is integer_like, the sequence for evaluation will be: first
|
974 |
+
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
|
975 |
+
Nth axis in `b` last.
|
976 |
+
|
977 |
+
When there is more than one axis to sum over - and they are not the last
|
978 |
+
(first) axes of `a` (`b`) - the argument `axes` should consist of
|
979 |
+
two sequences of the same length, with the first axis to sum over given
|
980 |
+
first in both sequences, the second axis second, and so forth.
|
981 |
+
|
982 |
+
The shape of the result consists of the non-contracted axes of the
|
983 |
+
first tensor, followed by the non-contracted axes of the second.
|
984 |
+
|
985 |
+
Examples
|
986 |
+
--------
|
987 |
+
A "traditional" example:
|
988 |
+
|
989 |
+
>>> a = np.arange(60.).reshape(3,4,5)
|
990 |
+
>>> b = np.arange(24.).reshape(4,3,2)
|
991 |
+
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
|
992 |
+
>>> c.shape
|
993 |
+
(5, 2)
|
994 |
+
>>> c
|
995 |
+
array([[4400., 4730.],
|
996 |
+
[4532., 4874.],
|
997 |
+
[4664., 5018.],
|
998 |
+
[4796., 5162.],
|
999 |
+
[4928., 5306.]])
|
1000 |
+
>>> # A slower but equivalent way of computing the same...
|
1001 |
+
>>> d = np.zeros((5,2))
|
1002 |
+
>>> for i in range(5):
|
1003 |
+
... for j in range(2):
|
1004 |
+
... for k in range(3):
|
1005 |
+
... for n in range(4):
|
1006 |
+
... d[i,j] += a[k,n,i] * b[n,k,j]
|
1007 |
+
>>> c == d
|
1008 |
+
array([[ True, True],
|
1009 |
+
[ True, True],
|
1010 |
+
[ True, True],
|
1011 |
+
[ True, True],
|
1012 |
+
[ True, True]])
|
1013 |
+
|
1014 |
+
An extended example taking advantage of the overloading of + and \\*:
|
1015 |
+
|
1016 |
+
>>> a = np.array(range(1, 9))
|
1017 |
+
>>> a.shape = (2, 2, 2)
|
1018 |
+
>>> A = np.array(('a', 'b', 'c', 'd'), dtype=object)
|
1019 |
+
>>> A.shape = (2, 2)
|
1020 |
+
>>> a; A
|
1021 |
+
array([[[1, 2],
|
1022 |
+
[3, 4]],
|
1023 |
+
[[5, 6],
|
1024 |
+
[7, 8]]])
|
1025 |
+
array([['a', 'b'],
|
1026 |
+
['c', 'd']], dtype=object)
|
1027 |
+
|
1028 |
+
>>> np.tensordot(a, A) # third argument default is 2 for double-contraction
|
1029 |
+
array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object)
|
1030 |
+
|
1031 |
+
>>> np.tensordot(a, A, 1)
|
1032 |
+
array([[['acc', 'bdd'],
|
1033 |
+
['aaacccc', 'bbbdddd']],
|
1034 |
+
[['aaaaacccccc', 'bbbbbdddddd'],
|
1035 |
+
['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object)
|
1036 |
+
|
1037 |
+
>>> np.tensordot(a, A, 0) # tensor product (result too long to incl.)
|
1038 |
+
array([[[[['a', 'b'],
|
1039 |
+
['c', 'd']],
|
1040 |
+
...
|
1041 |
+
|
1042 |
+
>>> np.tensordot(a, A, (0, 1))
|
1043 |
+
array([[['abbbbb', 'cddddd'],
|
1044 |
+
['aabbbbbb', 'ccdddddd']],
|
1045 |
+
[['aaabbbbbbb', 'cccddddddd'],
|
1046 |
+
['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object)
|
1047 |
+
|
1048 |
+
>>> np.tensordot(a, A, (2, 1))
|
1049 |
+
array([[['abb', 'cdd'],
|
1050 |
+
['aaabbbb', 'cccdddd']],
|
1051 |
+
[['aaaaabbbbbb', 'cccccdddddd'],
|
1052 |
+
['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object)
|
1053 |
+
|
1054 |
+
>>> np.tensordot(a, A, ((0, 1), (0, 1)))
|
1055 |
+
array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object)
|
1056 |
+
|
1057 |
+
>>> np.tensordot(a, A, ((2, 1), (1, 0)))
|
1058 |
+
array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object)
|
1059 |
+
|
1060 |
+
"""
|
1061 |
+
try:
|
1062 |
+
iter(axes)
|
1063 |
+
except Exception:
|
1064 |
+
axes_a = list(range(-axes, 0))
|
1065 |
+
axes_b = list(range(0, axes))
|
1066 |
+
else:
|
1067 |
+
axes_a, axes_b = axes
|
1068 |
+
try:
|
1069 |
+
na = len(axes_a)
|
1070 |
+
axes_a = list(axes_a)
|
1071 |
+
except TypeError:
|
1072 |
+
axes_a = [axes_a]
|
1073 |
+
na = 1
|
1074 |
+
try:
|
1075 |
+
nb = len(axes_b)
|
1076 |
+
axes_b = list(axes_b)
|
1077 |
+
except TypeError:
|
1078 |
+
axes_b = [axes_b]
|
1079 |
+
nb = 1
|
1080 |
+
|
1081 |
+
a, b = asarray(a), asarray(b)
|
1082 |
+
as_ = a.shape
|
1083 |
+
nda = a.ndim
|
1084 |
+
bs = b.shape
|
1085 |
+
ndb = b.ndim
|
1086 |
+
equal = True
|
1087 |
+
if na != nb:
|
1088 |
+
equal = False
|
1089 |
+
else:
|
1090 |
+
for k in range(na):
|
1091 |
+
if as_[axes_a[k]] != bs[axes_b[k]]:
|
1092 |
+
equal = False
|
1093 |
+
break
|
1094 |
+
if axes_a[k] < 0:
|
1095 |
+
axes_a[k] += nda
|
1096 |
+
if axes_b[k] < 0:
|
1097 |
+
axes_b[k] += ndb
|
1098 |
+
if not equal:
|
1099 |
+
raise ValueError("shape-mismatch for sum")
|
1100 |
+
|
1101 |
+
# Move the axes to sum over to the end of "a"
|
1102 |
+
# and to the front of "b"
|
1103 |
+
notin = [k for k in range(nda) if k not in axes_a]
|
1104 |
+
newaxes_a = notin + axes_a
|
1105 |
+
N2 = 1
|
1106 |
+
for axis in axes_a:
|
1107 |
+
N2 *= as_[axis]
|
1108 |
+
newshape_a = (int(multiply.reduce([as_[ax] for ax in notin])), N2)
|
1109 |
+
olda = [as_[axis] for axis in notin]
|
1110 |
+
|
1111 |
+
notin = [k for k in range(ndb) if k not in axes_b]
|
1112 |
+
newaxes_b = axes_b + notin
|
1113 |
+
N2 = 1
|
1114 |
+
for axis in axes_b:
|
1115 |
+
N2 *= bs[axis]
|
1116 |
+
newshape_b = (N2, int(multiply.reduce([bs[ax] for ax in notin])))
|
1117 |
+
oldb = [bs[axis] for axis in notin]
|
1118 |
+
|
1119 |
+
    at = a.transpose(newaxes_a).reshape(newshape_a)
    bt = b.transpose(newaxes_b).reshape(newshape_b)
    res = dot(at, bt)
    return res.reshape(olda + oldb)
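# Rough sketch of the reduction performed above, written out for the docstring
# example axes=([1, 0], [0, 1]) (numpy assumed imported as np): the free axes
# are moved to one side, the summed axes are flattened, and a single 2-D dot
# does the contraction.
# >>> a = np.arange(60.).reshape(3, 4, 5)
# >>> b = np.arange(24.).reshape(4, 3, 2)
# >>> at = a.transpose([2, 1, 0]).reshape(5, 12)   # free axis first, summed axes last
# >>> bt = b.transpose([0, 1, 2]).reshape(12, 2)   # summed axes first, free axis last
# >>> np.allclose(np.dot(at, bt), np.tensordot(a, b, axes=([1, 0], [0, 1])))
# True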
|
1124 |
+
|
1125 |
+
def _roll_dispatcher(a, shift, axis=None):
|
1126 |
+
return (a,)
|
1127 |
+
|
1128 |
+
|
1129 |
+
@array_function_dispatch(_roll_dispatcher)
|
1130 |
+
def roll(a, shift, axis=None):
|
1131 |
+
"""
|
1132 |
+
Roll array elements along a given axis.
|
1133 |
+
|
1134 |
+
Elements that roll beyond the last position are re-introduced at
|
1135 |
+
the first.
|
1136 |
+
|
1137 |
+
Parameters
|
1138 |
+
----------
|
1139 |
+
a : array_like
|
1140 |
+
Input array.
|
1141 |
+
shift : int or tuple of ints
|
1142 |
+
The number of places by which elements are shifted. If a tuple,
|
1143 |
+
then `axis` must be a tuple of the same size, and each of the
|
1144 |
+
given axes is shifted by the corresponding number. If an int
|
1145 |
+
while `axis` is a tuple of ints, then the same value is used for
|
1146 |
+
all given axes.
|
1147 |
+
axis : int or tuple of ints, optional
|
1148 |
+
Axis or axes along which elements are shifted. By default, the
|
1149 |
+
array is flattened before shifting, after which the original
|
1150 |
+
shape is restored.
|
1151 |
+
|
1152 |
+
Returns
|
1153 |
+
-------
|
1154 |
+
res : ndarray
|
1155 |
+
Output array, with the same shape as `a`.
|
1156 |
+
|
1157 |
+
See Also
|
1158 |
+
--------
|
1159 |
+
rollaxis : Roll the specified axis backwards, until it lies in a
|
1160 |
+
given position.
|
1161 |
+
|
1162 |
+
Notes
|
1163 |
+
-----
|
1164 |
+
.. versionadded:: 1.12.0
|
1165 |
+
|
1166 |
+
Supports rolling over multiple dimensions simultaneously.
|
1167 |
+
|
1168 |
+
Examples
|
1169 |
+
--------
|
1170 |
+
>>> x = np.arange(10)
|
1171 |
+
>>> np.roll(x, 2)
|
1172 |
+
array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
|
1173 |
+
>>> np.roll(x, -2)
|
1174 |
+
array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
|
1175 |
+
|
1176 |
+
>>> x2 = np.reshape(x, (2, 5))
|
1177 |
+
>>> x2
|
1178 |
+
array([[0, 1, 2, 3, 4],
|
1179 |
+
[5, 6, 7, 8, 9]])
|
1180 |
+
>>> np.roll(x2, 1)
|
1181 |
+
array([[9, 0, 1, 2, 3],
|
1182 |
+
[4, 5, 6, 7, 8]])
|
1183 |
+
>>> np.roll(x2, -1)
|
1184 |
+
array([[1, 2, 3, 4, 5],
|
1185 |
+
[6, 7, 8, 9, 0]])
|
1186 |
+
>>> np.roll(x2, 1, axis=0)
|
1187 |
+
array([[5, 6, 7, 8, 9],
|
1188 |
+
[0, 1, 2, 3, 4]])
|
1189 |
+
>>> np.roll(x2, -1, axis=0)
|
1190 |
+
array([[5, 6, 7, 8, 9],
|
1191 |
+
[0, 1, 2, 3, 4]])
|
1192 |
+
>>> np.roll(x2, 1, axis=1)
|
1193 |
+
array([[4, 0, 1, 2, 3],
|
1194 |
+
[9, 5, 6, 7, 8]])
|
1195 |
+
>>> np.roll(x2, -1, axis=1)
|
1196 |
+
array([[1, 2, 3, 4, 0],
|
1197 |
+
[6, 7, 8, 9, 5]])
|
1198 |
+
>>> np.roll(x2, (1, 1), axis=(1, 0))
|
1199 |
+
array([[9, 5, 6, 7, 8],
|
1200 |
+
[4, 0, 1, 2, 3]])
|
1201 |
+
>>> np.roll(x2, (2, 1), axis=(1, 0))
|
1202 |
+
array([[8, 9, 5, 6, 7],
|
1203 |
+
[3, 4, 0, 1, 2]])
|
1204 |
+
|
1205 |
+
"""
|
1206 |
+
    a = asanyarray(a)
    if axis is None:
        return roll(a.ravel(), shift, 0).reshape(a.shape)

    else:
        axis = normalize_axis_tuple(axis, a.ndim, allow_duplicate=True)
        broadcasted = broadcast(shift, axis)
        if broadcasted.ndim > 1:
            raise ValueError(
                "'shift' and 'axis' should be scalars or 1D sequences")
        shifts = {ax: 0 for ax in range(a.ndim)}
        for sh, ax in broadcasted:
            shifts[ax] += sh

        rolls = [((slice(None), slice(None)),)] * a.ndim
        for ax, offset in shifts.items():
            offset %= a.shape[ax] or 1  # If `a` is empty, nothing matters.
            if offset:
                # (original, result), (original, result)
                rolls[ax] = ((slice(None, -offset), slice(offset, None)),
                             (slice(-offset, None), slice(None, offset)))

        result = empty_like(a)
        for indices in itertools.product(*rolls):
            arr_index, res_index = zip(*indices)
            result[res_index] = a[arr_index]

        return result
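# Illustrative check of what the slice bookkeeping above computes for a single
# axis (numpy assumed imported as np): rolling by `s` just moves the tail block
# of length `s` in front of the head block.
# >>> x = np.arange(10)
# >>> np.array_equal(np.roll(x, 2), np.concatenate((x[-2:], x[:-2])))
# True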
|
1235 |
+
|
1236 |
+
def _rollaxis_dispatcher(a, axis, start=None):
|
1237 |
+
return (a,)
|
1238 |
+
|
1239 |
+
|
1240 |
+
@array_function_dispatch(_rollaxis_dispatcher)
|
1241 |
+
def rollaxis(a, axis, start=0):
|
1242 |
+
"""
|
1243 |
+
Roll the specified axis backwards, until it lies in a given position.
|
1244 |
+
|
1245 |
+
This function continues to be supported for backward compatibility, but you
|
1246 |
+
should prefer `moveaxis`. The `moveaxis` function was added in NumPy
|
1247 |
+
1.11.
|
1248 |
+
|
1249 |
+
Parameters
|
1250 |
+
----------
|
1251 |
+
a : ndarray
|
1252 |
+
Input array.
|
1253 |
+
axis : int
|
1254 |
+
The axis to be rolled. The positions of the other axes do not
|
1255 |
+
change relative to one another.
|
1256 |
+
start : int, optional
|
1257 |
+
When ``start <= axis``, the axis is rolled back until it lies in
|
1258 |
+
this position. When ``start > axis``, the axis is rolled until it
|
1259 |
+
lies before this position. The default, 0, results in a "complete"
|
1260 |
+
roll. The following table describes how negative values of ``start``
|
1261 |
+
are interpreted:
|
1262 |
+
|
1263 |
+
.. table::
|
1264 |
+
:align: left
|
1265 |
+
|
1266 |
+
+-------------------+----------------------+
|
1267 |
+
| ``start`` | Normalized ``start`` |
|
1268 |
+
+===================+======================+
|
1269 |
+
| ``-(arr.ndim+1)`` | raise ``AxisError`` |
|
1270 |
+
+-------------------+----------------------+
|
1271 |
+
| ``-arr.ndim`` | 0 |
|
1272 |
+
+-------------------+----------------------+
|
1273 |
+
| |vdots| | |vdots| |
|
1274 |
+
+-------------------+----------------------+
|
1275 |
+
| ``-1`` | ``arr.ndim-1`` |
|
1276 |
+
+-------------------+----------------------+
|
1277 |
+
| ``0`` | ``0`` |
|
1278 |
+
+-------------------+----------------------+
|
1279 |
+
| |vdots| | |vdots| |
|
1280 |
+
+-------------------+----------------------+
|
1281 |
+
| ``arr.ndim`` | ``arr.ndim`` |
|
1282 |
+
+-------------------+----------------------+
|
1283 |
+
| ``arr.ndim + 1`` | raise ``AxisError`` |
|
1284 |
+
+-------------------+----------------------+
|
1285 |
+
|
1286 |
+
.. |vdots| unicode:: U+22EE .. Vertical Ellipsis
|
1287 |
+
|
1288 |
+
Returns
|
1289 |
+
-------
|
1290 |
+
res : ndarray
|
1291 |
+
For NumPy >= 1.10.0 a view of `a` is always returned. For earlier
|
1292 |
+
NumPy versions a view of `a` is returned only if the order of the
|
1293 |
+
axes is changed, otherwise the input array is returned.
|
1294 |
+
|
1295 |
+
See Also
|
1296 |
+
--------
|
1297 |
+
moveaxis : Move array axes to new positions.
|
1298 |
+
roll : Roll the elements of an array by a number of positions along a
|
1299 |
+
given axis.
|
1300 |
+
|
1301 |
+
Examples
|
1302 |
+
--------
|
1303 |
+
>>> a = np.ones((3,4,5,6))
|
1304 |
+
>>> np.rollaxis(a, 3, 1).shape
|
1305 |
+
(3, 6, 4, 5)
|
1306 |
+
>>> np.rollaxis(a, 2).shape
|
1307 |
+
(5, 3, 4, 6)
|
1308 |
+
>>> np.rollaxis(a, 1, 4).shape
|
1309 |
+
(3, 5, 6, 4)
|
1310 |
+
|
1311 |
+
"""
|
1312 |
+
n = a.ndim
|
1313 |
+
axis = normalize_axis_index(axis, n)
|
1314 |
+
if start < 0:
|
1315 |
+
start += n
|
1316 |
+
msg = "'%s' arg requires %d <= %s < %d, but %d was passed in"
|
1317 |
+
if not (0 <= start < n + 1):
|
1318 |
+
raise AxisError(msg % ('start', -n, 'start', n + 1, start))
|
1319 |
+
if axis < start:
|
1320 |
+
# it's been removed
|
1321 |
+
start -= 1
|
1322 |
+
if axis == start:
|
1323 |
+
return a[...]
|
1324 |
+
    axes = list(range(0, n))
    axes.remove(axis)
    axes.insert(start, axis)
    return a.transpose(axes)
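# One possible translation of the docstring examples to the recommended
# `moveaxis` (illustrative only, numpy assumed imported as np); note that when
# ``start > axis`` the corresponding destination position is ``start - 1``.
# >>> a = np.ones((3, 4, 5, 6))
# >>> np.rollaxis(a, 3, 1).shape == np.moveaxis(a, 3, 1).shape
# True
# >>> np.rollaxis(a, 1, 4).shape == np.moveaxis(a, 1, 3).shape
# True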
|
1329 |
+
|
1330 |
+
def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
|
1331 |
+
"""
|
1332 |
+
Normalizes an axis argument into a tuple of non-negative integer axes.
|
1333 |
+
|
1334 |
+
This handles shorthands such as ``1`` and converts them to ``(1,)``,
|
1335 |
+
as well as performing the handling of negative indices covered by
|
1336 |
+
`normalize_axis_index`.
|
1337 |
+
|
1338 |
+
By default, this forbids axes from being specified multiple times.
|
1339 |
+
|
1340 |
+
Used internally by multi-axis-checking logic.
|
1341 |
+
|
1342 |
+
.. versionadded:: 1.13.0
|
1343 |
+
|
1344 |
+
Parameters
|
1345 |
+
----------
|
1346 |
+
axis : int, iterable of int
|
1347 |
+
The un-normalized index or indices of the axis.
|
1348 |
+
ndim : int
|
1349 |
+
The number of dimensions of the array that `axis` should be normalized
|
1350 |
+
against.
|
1351 |
+
argname : str, optional
|
1352 |
+
A prefix to put before the error message, typically the name of the
|
1353 |
+
argument.
|
1354 |
+
allow_duplicate : bool, optional
|
1355 |
+
If False, the default, disallow an axis from being specified twice.
|
1356 |
+
|
1357 |
+
Returns
|
1358 |
+
-------
|
1359 |
+
normalized_axes : tuple of int
|
1360 |
+
The normalized axis index, such that `0 <= normalized_axis < ndim`
|
1361 |
+
|
1362 |
+
Raises
|
1363 |
+
------
|
1364 |
+
AxisError
|
1365 |
+
If any axis provided is out of range
|
1366 |
+
ValueError
|
1367 |
+
If an axis is repeated
|
1368 |
+
|
1369 |
+
See also
|
1370 |
+
--------
|
1371 |
+
normalize_axis_index : normalizing a single scalar axis
|
1372 |
+
"""
|
1373 |
+
# Optimization to speed-up the most common cases.
|
1374 |
+
if type(axis) not in (tuple, list):
|
1375 |
+
try:
|
1376 |
+
axis = [operator.index(axis)]
|
1377 |
+
except TypeError:
|
1378 |
+
pass
|
1379 |
+
# Going via an iterator directly is slower than via list comprehension.
|
1380 |
+
axis = tuple([normalize_axis_index(ax, ndim, argname) for ax in axis])
|
1381 |
+
    if not allow_duplicate and len(set(axis)) != len(axis):
        if argname:
            raise ValueError('repeated axis in `{}` argument'.format(argname))
        else:
            raise ValueError('repeated axis')
    return axis
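# Rough behavioural sketch of the helper above (values illustrative; the
# function is called without a prefix since it lives in this module): scalars
# are wrapped in a tuple, negative axes are normalized, duplicates are rejected.
# >>> normalize_axis_tuple(-1, 3)
# (2,)
# >>> normalize_axis_tuple((0, -2), 3)
# (0, 1)
# >>> normalize_axis_tuple((0, 0), 3)
# Traceback (most recent call last):
#     ...
# ValueError: repeated axis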
|
1388 |
+
|
1389 |
+
def _moveaxis_dispatcher(a, source, destination):
|
1390 |
+
return (a,)
|
1391 |
+
|
1392 |
+
|
1393 |
+
@array_function_dispatch(_moveaxis_dispatcher)
|
1394 |
+
def moveaxis(a, source, destination):
|
1395 |
+
"""
|
1396 |
+
Move axes of an array to new positions.
|
1397 |
+
|
1398 |
+
Other axes remain in their original order.
|
1399 |
+
|
1400 |
+
.. versionadded:: 1.11.0
|
1401 |
+
|
1402 |
+
Parameters
|
1403 |
+
----------
|
1404 |
+
a : np.ndarray
|
1405 |
+
The array whose axes should be reordered.
|
1406 |
+
source : int or sequence of int
|
1407 |
+
Original positions of the axes to move. These must be unique.
|
1408 |
+
destination : int or sequence of int
|
1409 |
+
Destination positions for each of the original axes. These must also be
|
1410 |
+
unique.
|
1411 |
+
|
1412 |
+
Returns
|
1413 |
+
-------
|
1414 |
+
result : np.ndarray
|
1415 |
+
Array with moved axes. This array is a view of the input array.
|
1416 |
+
|
1417 |
+
See Also
|
1418 |
+
--------
|
1419 |
+
transpose : Permute the dimensions of an array.
|
1420 |
+
swapaxes : Interchange two axes of an array.
|
1421 |
+
|
1422 |
+
Examples
|
1423 |
+
--------
|
1424 |
+
>>> x = np.zeros((3, 4, 5))
|
1425 |
+
>>> np.moveaxis(x, 0, -1).shape
|
1426 |
+
(4, 5, 3)
|
1427 |
+
>>> np.moveaxis(x, -1, 0).shape
|
1428 |
+
(5, 3, 4)
|
1429 |
+
|
1430 |
+
These all achieve the same result:
|
1431 |
+
|
1432 |
+
>>> np.transpose(x).shape
|
1433 |
+
(5, 4, 3)
|
1434 |
+
>>> np.swapaxes(x, 0, -1).shape
|
1435 |
+
(5, 4, 3)
|
1436 |
+
>>> np.moveaxis(x, [0, 1], [-1, -2]).shape
|
1437 |
+
(5, 4, 3)
|
1438 |
+
>>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
|
1439 |
+
(5, 4, 3)
|
1440 |
+
|
1441 |
+
"""
|
1442 |
+
try:
|
1443 |
+
# allow duck-array types if they define transpose
|
1444 |
+
transpose = a.transpose
|
1445 |
+
except AttributeError:
|
1446 |
+
a = asarray(a)
|
1447 |
+
transpose = a.transpose
|
1448 |
+
|
1449 |
+
source = normalize_axis_tuple(source, a.ndim, 'source')
|
1450 |
+
destination = normalize_axis_tuple(destination, a.ndim, 'destination')
|
1451 |
+
if len(source) != len(destination):
|
1452 |
+
raise ValueError('`source` and `destination` arguments must have '
|
1453 |
+
'the same number of elements')
|
1454 |
+
|
1455 |
+
order = [n for n in range(a.ndim) if n not in source]
|
1456 |
+
|
1457 |
+
for dest, src in sorted(zip(destination, source)):
|
1458 |
+
order.insert(dest, src)
|
1459 |
+
|
1460 |
+
result = transpose(order)
|
1461 |
+
return result
|
1462 |
+
|
1463 |
+
|
1464 |
+
def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None):
|
1465 |
+
return (a, b)
|
1466 |
+
|
1467 |
+
|
1468 |
+
@array_function_dispatch(_cross_dispatcher)
|
1469 |
+
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
|
1470 |
+
"""
|
1471 |
+
Return the cross product of two (arrays of) vectors.
|
1472 |
+
|
1473 |
+
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
|
1474 |
+
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
|
1475 |
+
are defined by the last axis of `a` and `b` by default, and these axes
|
1476 |
+
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
|
1477 |
+
2, the third component of the input vector is assumed to be zero and the
|
1478 |
+
cross product calculated accordingly. In cases where both input vectors
|
1479 |
+
have dimension 2, the z-component of the cross product is returned.
|
1480 |
+
|
1481 |
+
Parameters
|
1482 |
+
----------
|
1483 |
+
a : array_like
|
1484 |
+
Components of the first vector(s).
|
1485 |
+
b : array_like
|
1486 |
+
Components of the second vector(s).
|
1487 |
+
axisa : int, optional
|
1488 |
+
Axis of `a` that defines the vector(s). By default, the last axis.
|
1489 |
+
axisb : int, optional
|
1490 |
+
Axis of `b` that defines the vector(s). By default, the last axis.
|
1491 |
+
axisc : int, optional
|
1492 |
+
Axis of `c` containing the cross product vector(s). Ignored if
|
1493 |
+
both input vectors have dimension 2, as the return is scalar.
|
1494 |
+
By default, the last axis.
|
1495 |
+
axis : int, optional
|
1496 |
+
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
|
1497 |
+
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
|
1498 |
+
|
1499 |
+
Returns
|
1500 |
+
-------
|
1501 |
+
c : ndarray
|
1502 |
+
Vector cross product(s).
|
1503 |
+
|
1504 |
+
Raises
|
1505 |
+
------
|
1506 |
+
ValueError
|
1507 |
+
When the dimension of the vector(s) in `a` and/or `b` does not
|
1508 |
+
equal 2 or 3.
|
1509 |
+
|
1510 |
+
See Also
|
1511 |
+
--------
|
1512 |
+
inner : Inner product
|
1513 |
+
outer : Outer product.
|
1514 |
+
ix_ : Construct index arrays.
|
1515 |
+
|
1516 |
+
Notes
|
1517 |
+
-----
|
1518 |
+
.. versionadded:: 1.9.0
|
1519 |
+
|
1520 |
+
Supports full broadcasting of the inputs.
|
1521 |
+
|
1522 |
+
Examples
|
1523 |
+
--------
|
1524 |
+
Vector cross-product.
|
1525 |
+
|
1526 |
+
>>> x = [1, 2, 3]
|
1527 |
+
>>> y = [4, 5, 6]
|
1528 |
+
>>> np.cross(x, y)
|
1529 |
+
array([-3, 6, -3])
|
1530 |
+
|
1531 |
+
One vector with dimension 2.
|
1532 |
+
|
1533 |
+
>>> x = [1, 2]
|
1534 |
+
>>> y = [4, 5, 6]
|
1535 |
+
>>> np.cross(x, y)
|
1536 |
+
array([12, -6, -3])
|
1537 |
+
|
1538 |
+
Equivalently:
|
1539 |
+
|
1540 |
+
>>> x = [1, 2, 0]
|
1541 |
+
>>> y = [4, 5, 6]
|
1542 |
+
>>> np.cross(x, y)
|
1543 |
+
array([12, -6, -3])
|
1544 |
+
|
1545 |
+
Both vectors with dimension 2.
|
1546 |
+
|
1547 |
+
>>> x = [1,2]
|
1548 |
+
>>> y = [4,5]
|
1549 |
+
>>> np.cross(x, y)
|
1550 |
+
array(-3)
|
1551 |
+
|
1552 |
+
Multiple vector cross-products. Note that the direction of the cross
|
1553 |
+
product vector is defined by the *right-hand rule*.
|
1554 |
+
|
1555 |
+
>>> x = np.array([[1,2,3], [4,5,6]])
|
1556 |
+
>>> y = np.array([[4,5,6], [1,2,3]])
|
1557 |
+
>>> np.cross(x, y)
|
1558 |
+
array([[-3, 6, -3],
|
1559 |
+
[ 3, -6, 3]])
|
1560 |
+
|
1561 |
+
The orientation of `c` can be changed using the `axisc` keyword.
|
1562 |
+
|
1563 |
+
>>> np.cross(x, y, axisc=0)
|
1564 |
+
array([[-3, 3],
|
1565 |
+
[ 6, -6],
|
1566 |
+
[-3, 3]])
|
1567 |
+
|
1568 |
+
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
|
1569 |
+
|
1570 |
+
>>> x = np.array([[1,2,3], [4,5,6], [7, 8, 9]])
|
1571 |
+
>>> y = np.array([[7, 8, 9], [4,5,6], [1,2,3]])
|
1572 |
+
>>> np.cross(x, y)
|
1573 |
+
array([[ -6, 12, -6],
|
1574 |
+
[ 0, 0, 0],
|
1575 |
+
[ 6, -12, 6]])
|
1576 |
+
>>> np.cross(x, y, axisa=0, axisb=0)
|
1577 |
+
array([[-24, 48, -24],
|
1578 |
+
[-30, 60, -30],
|
1579 |
+
[-36, 72, -36]])
|
1580 |
+
|
1581 |
+
"""
|
1582 |
+
if axis is not None:
|
1583 |
+
axisa, axisb, axisc = (axis,) * 3
|
1584 |
+
a = asarray(a)
|
1585 |
+
b = asarray(b)
|
1586 |
+
# Check axisa and axisb are within bounds
|
1587 |
+
axisa = normalize_axis_index(axisa, a.ndim, msg_prefix='axisa')
|
1588 |
+
axisb = normalize_axis_index(axisb, b.ndim, msg_prefix='axisb')
|
1589 |
+
|
1590 |
+
# Move working axis to the end of the shape
|
1591 |
+
a = moveaxis(a, axisa, -1)
|
1592 |
+
b = moveaxis(b, axisb, -1)
|
1593 |
+
msg = ("incompatible dimensions for cross product\n"
|
1594 |
+
"(dimension must be 2 or 3)")
|
1595 |
+
if a.shape[-1] not in (2, 3) or b.shape[-1] not in (2, 3):
|
1596 |
+
raise ValueError(msg)
|
1597 |
+
|
1598 |
+
# Create the output array
|
1599 |
+
shape = broadcast(a[..., 0], b[..., 0]).shape
|
1600 |
+
if a.shape[-1] == 3 or b.shape[-1] == 3:
|
1601 |
+
shape += (3,)
|
1602 |
+
# Check axisc is within bounds
|
1603 |
+
axisc = normalize_axis_index(axisc, len(shape), msg_prefix='axisc')
|
1604 |
+
dtype = promote_types(a.dtype, b.dtype)
|
1605 |
+
cp = empty(shape, dtype)
|
1606 |
+
|
1607 |
+
# recast arrays as dtype
|
1608 |
+
a = a.astype(dtype)
|
1609 |
+
b = b.astype(dtype)
|
1610 |
+
|
1611 |
+
# create local aliases for readability
|
1612 |
+
a0 = a[..., 0]
|
1613 |
+
a1 = a[..., 1]
|
1614 |
+
if a.shape[-1] == 3:
|
1615 |
+
a2 = a[..., 2]
|
1616 |
+
b0 = b[..., 0]
|
1617 |
+
b1 = b[..., 1]
|
1618 |
+
if b.shape[-1] == 3:
|
1619 |
+
b2 = b[..., 2]
|
1620 |
+
if cp.ndim != 0 and cp.shape[-1] == 3:
|
1621 |
+
cp0 = cp[..., 0]
|
1622 |
+
cp1 = cp[..., 1]
|
1623 |
+
cp2 = cp[..., 2]
|
1624 |
+
|
1625 |
+
if a.shape[-1] == 2:
|
1626 |
+
if b.shape[-1] == 2:
|
1627 |
+
# a0 * b1 - a1 * b0
|
1628 |
+
multiply(a0, b1, out=cp)
|
1629 |
+
cp -= a1 * b0
|
1630 |
+
return cp
|
1631 |
+
else:
|
1632 |
+
assert b.shape[-1] == 3
|
1633 |
+
# cp0 = a1 * b2 - 0 (a2 = 0)
|
1634 |
+
# cp1 = 0 - a0 * b2 (a2 = 0)
|
1635 |
+
# cp2 = a0 * b1 - a1 * b0
|
1636 |
+
multiply(a1, b2, out=cp0)
|
1637 |
+
multiply(a0, b2, out=cp1)
|
1638 |
+
negative(cp1, out=cp1)
|
1639 |
+
multiply(a0, b1, out=cp2)
|
1640 |
+
cp2 -= a1 * b0
|
1641 |
+
else:
|
1642 |
+
assert a.shape[-1] == 3
|
1643 |
+
if b.shape[-1] == 3:
|
1644 |
+
# cp0 = a1 * b2 - a2 * b1
|
1645 |
+
# cp1 = a2 * b0 - a0 * b2
|
1646 |
+
# cp2 = a0 * b1 - a1 * b0
|
1647 |
+
multiply(a1, b2, out=cp0)
|
1648 |
+
tmp = array(a2 * b1)
|
1649 |
+
cp0 -= tmp
|
1650 |
+
multiply(a2, b0, out=cp1)
|
1651 |
+
multiply(a0, b2, out=tmp)
|
1652 |
+
cp1 -= tmp
|
1653 |
+
multiply(a0, b1, out=cp2)
|
1654 |
+
multiply(a1, b0, out=tmp)
|
1655 |
+
cp2 -= tmp
|
1656 |
+
else:
|
1657 |
+
assert b.shape[-1] == 2
|
1658 |
+
# cp0 = 0 - a2 * b1 (b2 = 0)
|
1659 |
+
# cp1 = a2 * b0 - 0 (b2 = 0)
|
1660 |
+
# cp2 = a0 * b1 - a1 * b0
|
1661 |
+
multiply(a2, b1, out=cp0)
|
1662 |
+
negative(cp0, out=cp0)
|
1663 |
+
multiply(a2, b0, out=cp1)
|
1664 |
+
multiply(a0, b1, out=cp2)
|
1665 |
+
cp2 -= a1 * b0
|
1666 |
+
|
1667 |
+
    return moveaxis(cp, -1, axisc)
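# Worked check of the 3-D component formulas written in the comments above
# (illustrative vectors, numpy assumed imported as np).
# >>> u, w = np.array([1., 2., 3.]), np.array([4., 5., 6.])
# >>> manual = np.array([u[1]*w[2] - u[2]*w[1],
# ...                    u[2]*w[0] - u[0]*w[2],
# ...                    u[0]*w[1] - u[1]*w[0]])
# >>> np.array_equal(manual, np.cross(u, w))
# True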
|
1669 |
+
|
1670 |
+
little_endian = (sys.byteorder == 'little')
|
1671 |
+
|
1672 |
+
|
1673 |
+
@set_module('numpy')
|
1674 |
+
def indices(dimensions, dtype=int, sparse=False):
|
1675 |
+
"""
|
1676 |
+
Return an array representing the indices of a grid.
|
1677 |
+
|
1678 |
+
Compute an array where the subarrays contain index values 0, 1, ...
|
1679 |
+
varying only along the corresponding axis.
|
1680 |
+
|
1681 |
+
Parameters
|
1682 |
+
----------
|
1683 |
+
dimensions : sequence of ints
|
1684 |
+
The shape of the grid.
|
1685 |
+
dtype : dtype, optional
|
1686 |
+
Data type of the result.
|
1687 |
+
sparse : boolean, optional
|
1688 |
+
Return a sparse representation of the grid instead of a dense
|
1689 |
+
representation. Default is False.
|
1690 |
+
|
1691 |
+
.. versionadded:: 1.17
|
1692 |
+
|
1693 |
+
Returns
|
1694 |
+
-------
|
1695 |
+
grid : one ndarray or tuple of ndarrays
|
1696 |
+
If sparse is False:
|
1697 |
+
Returns one array of grid indices,
|
1698 |
+
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
|
1699 |
+
If sparse is True:
|
1700 |
+
Returns a tuple of arrays, with
|
1701 |
+
``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
|
1702 |
+
dimensions[i] in the ith place
|
1703 |
+
|
1704 |
+
See Also
|
1705 |
+
--------
|
1706 |
+
mgrid, ogrid, meshgrid
|
1707 |
+
|
1708 |
+
Notes
|
1709 |
+
-----
|
1710 |
+
The output shape in the dense case is obtained by prepending the number
|
1711 |
+
of dimensions in front of the tuple of dimensions, i.e. if `dimensions`
|
1712 |
+
is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is
|
1713 |
+
``(N, r0, ..., rN-1)``.
|
1714 |
+
|
1715 |
+
The subarrays ``grid[k]`` contains the N-D array of indices along the
|
1716 |
+
``k-th`` axis. Explicitly::
|
1717 |
+
|
1718 |
+
grid[k, i0, i1, ..., iN-1] = ik
|
1719 |
+
|
1720 |
+
Examples
|
1721 |
+
--------
|
1722 |
+
>>> grid = np.indices((2, 3))
|
1723 |
+
>>> grid.shape
|
1724 |
+
(2, 2, 3)
|
1725 |
+
>>> grid[0] # row indices
|
1726 |
+
array([[0, 0, 0],
|
1727 |
+
[1, 1, 1]])
|
1728 |
+
>>> grid[1] # column indices
|
1729 |
+
array([[0, 1, 2],
|
1730 |
+
[0, 1, 2]])
|
1731 |
+
|
1732 |
+
The indices can be used as an index into an array.
|
1733 |
+
|
1734 |
+
>>> x = np.arange(20).reshape(5, 4)
|
1735 |
+
>>> row, col = np.indices((2, 3))
|
1736 |
+
>>> x[row, col]
|
1737 |
+
array([[0, 1, 2],
|
1738 |
+
[4, 5, 6]])
|
1739 |
+
|
1740 |
+
Note that it would be more straightforward in the above example to
|
1741 |
+
extract the required elements directly with ``x[:2, :3]``.
|
1742 |
+
|
1743 |
+
If sparse is set to true, the grid will be returned in a sparse
|
1744 |
+
representation.
|
1745 |
+
|
1746 |
+
>>> i, j = np.indices((2, 3), sparse=True)
|
1747 |
+
>>> i.shape
|
1748 |
+
(2, 1)
|
1749 |
+
>>> j.shape
|
1750 |
+
(1, 3)
|
1751 |
+
>>> i # row indices
|
1752 |
+
array([[0],
|
1753 |
+
[1]])
|
1754 |
+
>>> j # column indices
|
1755 |
+
array([[0, 1, 2]])
|
1756 |
+
|
1757 |
+
"""
|
1758 |
+
dimensions = tuple(dimensions)
|
1759 |
+
N = len(dimensions)
|
1760 |
+
shape = (1,)*N
|
1761 |
+
if sparse:
|
1762 |
+
res = tuple()
|
1763 |
+
else:
|
1764 |
+
res = empty((N,)+dimensions, dtype=dtype)
|
1765 |
+
for i, dim in enumerate(dimensions):
|
1766 |
+
idx = arange(dim, dtype=dtype).reshape(
|
1767 |
+
shape[:i] + (dim,) + shape[i+1:]
|
1768 |
+
)
|
1769 |
+
        if sparse:
            res = res + (idx,)
        else:
            res[i] = idx
    return res
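# Rough equivalence sketch for the dense case (numpy assumed imported as np):
# the stacked grid matches an 'ij'-indexed meshgrid over the same ranges.
# >>> dense = np.indices((2, 3))
# >>> mesh = np.array(np.meshgrid(np.arange(2), np.arange(3), indexing='ij'))
# >>> np.array_equal(dense, mesh)
# True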
|
1775 |
+
|
1776 |
+
@set_array_function_like_doc
|
1777 |
+
@set_module('numpy')
|
1778 |
+
def fromfunction(function, shape, *, dtype=float, like=None, **kwargs):
|
1779 |
+
"""
|
1780 |
+
Construct an array by executing a function over each coordinate.
|
1781 |
+
|
1782 |
+
The resulting array therefore has a value ``fn(x, y, z)`` at
|
1783 |
+
coordinate ``(x, y, z)``.
|
1784 |
+
|
1785 |
+
Parameters
|
1786 |
+
----------
|
1787 |
+
function : callable
|
1788 |
+
The function is called with N parameters, where N is the rank of
|
1789 |
+
`shape`. Each parameter represents the coordinates of the array
|
1790 |
+
varying along a specific axis. For example, if `shape`
|
1791 |
+
were ``(2, 2)``, then the parameters would be
|
1792 |
+
``array([[0, 0], [1, 1]])`` and ``array([[0, 1], [0, 1]])``
|
1793 |
+
shape : (N,) tuple of ints
|
1794 |
+
Shape of the output array, which also determines the shape of
|
1795 |
+
the coordinate arrays passed to `function`.
|
1796 |
+
dtype : data-type, optional
|
1797 |
+
Data-type of the coordinate arrays passed to `function`.
|
1798 |
+
By default, `dtype` is float.
|
1799 |
+
${ARRAY_FUNCTION_LIKE}
|
1800 |
+
|
1801 |
+
.. versionadded:: 1.20.0
|
1802 |
+
|
1803 |
+
Returns
|
1804 |
+
-------
|
1805 |
+
fromfunction : any
|
1806 |
+
The result of the call to `function` is passed back directly.
|
1807 |
+
Therefore the shape of `fromfunction` is completely determined by
|
1808 |
+
`function`. If `function` returns a scalar value, the shape of
|
1809 |
+
`fromfunction` would not match the `shape` parameter.
|
1810 |
+
|
1811 |
+
See Also
|
1812 |
+
--------
|
1813 |
+
indices, meshgrid
|
1814 |
+
|
1815 |
+
Notes
|
1816 |
+
-----
|
1817 |
+
Keywords other than `dtype` and `like` are passed to `function`.
|
1818 |
+
|
1819 |
+
Examples
|
1820 |
+
--------
|
1821 |
+
>>> np.fromfunction(lambda i, j: i, (2, 2), dtype=float)
|
1822 |
+
array([[0., 0.],
|
1823 |
+
[1., 1.]])
|
1824 |
+
|
1825 |
+
>>> np.fromfunction(lambda i, j: j, (2, 2), dtype=float)
|
1826 |
+
array([[0., 1.],
|
1827 |
+
[0., 1.]])
|
1828 |
+
|
1829 |
+
>>> np.fromfunction(lambda i, j: i == j, (3, 3), dtype=int)
|
1830 |
+
array([[ True, False, False],
|
1831 |
+
[False, True, False],
|
1832 |
+
[False, False, True]])
|
1833 |
+
|
1834 |
+
>>> np.fromfunction(lambda i, j: i + j, (3, 3), dtype=int)
|
1835 |
+
array([[0, 1, 2],
|
1836 |
+
[1, 2, 3],
|
1837 |
+
[2, 3, 4]])
|
1838 |
+
|
1839 |
+
"""
|
1840 |
+
    if like is not None:
        return _fromfunction_with_like(
            like, function, shape, dtype=dtype, **kwargs)

    args = indices(shape, dtype=dtype)
    return function(*args, **kwargs)
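# Rough sketch of the call above (numpy assumed imported as np, `f` purely
# illustrative): without `like`, fromfunction is just the function applied to
# the coordinate arrays produced by `indices`.
# >>> f = lambda i, j: i * 10 + j
# >>> np.array_equal(np.fromfunction(f, (2, 3), dtype=int),
# ...                f(*np.indices((2, 3), dtype=int)))
# True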
|
1847 |
+
|
1848 |
+
_fromfunction_with_like = array_function_dispatch()(fromfunction)
|
1849 |
+
|
1850 |
+
|
1851 |
+
def _frombuffer(buf, dtype, shape, order):
|
1852 |
+
return frombuffer(buf, dtype=dtype).reshape(shape, order=order)
|
1853 |
+
|
1854 |
+
|
1855 |
+
@set_module('numpy')
|
1856 |
+
def isscalar(element):
|
1857 |
+
"""
|
1858 |
+
Returns True if the type of `element` is a scalar type.
|
1859 |
+
|
1860 |
+
Parameters
|
1861 |
+
----------
|
1862 |
+
element : any
|
1863 |
+
Input argument, can be of any type and shape.
|
1864 |
+
|
1865 |
+
Returns
|
1866 |
+
-------
|
1867 |
+
val : bool
|
1868 |
+
True if `element` is a scalar type, False if it is not.
|
1869 |
+
|
1870 |
+
See Also
|
1871 |
+
--------
|
1872 |
+
ndim : Get the number of dimensions of an array
|
1873 |
+
|
1874 |
+
Notes
|
1875 |
+
-----
|
1876 |
+
If you need a stricter way to identify a *numerical* scalar, use
|
1877 |
+
``isinstance(x, numbers.Number)``, as that returns ``False`` for most
|
1878 |
+
non-numerical elements such as strings.
|
1879 |
+
|
1880 |
+
In most cases ``np.ndim(x) == 0`` should be used instead of this function,
|
1881 |
+
as that will also return true for 0d arrays. This is how numpy overloads
|
1882 |
+
functions in the style of the ``dx`` arguments to `gradient` and the ``bins``
|
1883 |
+
argument to `histogram`. Some key differences:
|
1884 |
+
|
1885 |
+
+--------------------------------------+---------------+-------------------+
|
1886 |
+
| x |``isscalar(x)``|``np.ndim(x) == 0``|
|
1887 |
+
+======================================+===============+===================+
|
1888 |
+
| PEP 3141 numeric objects (including | ``True`` | ``True`` |
|
1889 |
+
| builtins) | | |
|
1890 |
+
+--------------------------------------+---------------+-------------------+
|
1891 |
+
| builtin string and buffer objects | ``True`` | ``True`` |
|
1892 |
+
+--------------------------------------+---------------+-------------------+
|
1893 |
+
| other builtin objects, like | ``False`` | ``True`` |
|
1894 |
+
| `pathlib.Path`, `Exception`, | | |
|
1895 |
+
| the result of `re.compile` | | |
|
1896 |
+
+--------------------------------------+---------------+-------------------+
|
1897 |
+
| third-party objects like | ``False`` | ``True`` |
|
1898 |
+
| `matplotlib.figure.Figure` | | |
|
1899 |
+
+--------------------------------------+---------------+-------------------+
|
1900 |
+
| zero-dimensional numpy arrays | ``False`` | ``True`` |
|
1901 |
+
+--------------------------------------+---------------+-------------------+
|
1902 |
+
| other numpy arrays | ``False`` | ``False`` |
|
1903 |
+
+--------------------------------------+---------------+-------------------+
|
1904 |
+
| `list`, `tuple`, and other sequence | ``False`` | ``False`` |
|
1905 |
+
| objects | | |
|
1906 |
+
+--------------------------------------+---------------+-------------------+
|
1907 |
+
|
1908 |
+
Examples
|
1909 |
+
--------
|
1910 |
+
>>> np.isscalar(3.1)
|
1911 |
+
True
|
1912 |
+
>>> np.isscalar(np.array(3.1))
|
1913 |
+
False
|
1914 |
+
>>> np.isscalar([3.1])
|
1915 |
+
False
|
1916 |
+
>>> np.isscalar(False)
|
1917 |
+
True
|
1918 |
+
>>> np.isscalar('numpy')
|
1919 |
+
True
|
1920 |
+
|
1921 |
+
NumPy supports PEP 3141 numbers:
|
1922 |
+
|
1923 |
+
>>> from fractions import Fraction
|
1924 |
+
>>> np.isscalar(Fraction(5, 17))
|
1925 |
+
True
|
1926 |
+
>>> from numbers import Number
|
1927 |
+
>>> np.isscalar(Number())
|
1928 |
+
True
|
1929 |
+
|
1930 |
+
"""
|
1931 |
+
return (isinstance(element, generic)
|
1932 |
+
or type(element) in ScalarType
|
1933 |
+
or isinstance(element, numbers.Number))
|
1934 |
+
|
1935 |
+
|
1936 |
+
@set_module('numpy')
|
1937 |
+
def binary_repr(num, width=None):
|
1938 |
+
"""
|
1939 |
+
Return the binary representation of the input number as a string.
|
1940 |
+
|
1941 |
+
For negative numbers, if width is not given, a minus sign is added to the
|
1942 |
+
front. If width is given, the two's complement of the number is
|
1943 |
+
returned, with respect to that width.
|
1944 |
+
|
1945 |
+
In a two's-complement system negative numbers are represented by the two's
|
1946 |
+
complement of the absolute value. This is the most common method of
|
1947 |
+
representing signed integers on computers [1]_. A N-bit two's-complement
|
1948 |
+
system can represent every integer in the range
|
1949 |
+
:math:`-2^{N-1}` to :math:`+2^{N-1}-1`.
|
1950 |
+
|
1951 |
+
Parameters
|
1952 |
+
----------
|
1953 |
+
num : int
|
1954 |
+
Only an integer decimal number can be used.
|
1955 |
+
width : int, optional
|
1956 |
+
The length of the returned string if `num` is positive, or the length
|
1957 |
+
of the two's complement if `num` is negative, provided that `width` is
|
1958 |
+
at least a sufficient number of bits for `num` to be represented in the
|
1959 |
+
designated form.
|
1960 |
+
|
1961 |
+
If the `width` value is insufficient, it will be ignored, and `num` will
|
1962 |
+
be returned in binary (`num` > 0) or two's complement (`num` < 0) form
|
1963 |
+
with its width equal to the minimum number of bits needed to represent
|
1964 |
+
the number in the designated form. This behavior is deprecated and will
|
1965 |
+
later raise an error.
|
1966 |
+
|
1967 |
+
.. deprecated:: 1.12.0
|
1968 |
+
|
1969 |
+
Returns
|
1970 |
+
-------
|
1971 |
+
bin : str
|
1972 |
+
Binary representation of `num` or two's complement of `num`.
|
1973 |
+
|
1974 |
+
See Also
|
1975 |
+
--------
|
1976 |
+
base_repr: Return a string representation of a number in the given base
|
1977 |
+
system.
|
1978 |
+
bin: Python's built-in binary representation generator of an integer.
|
1979 |
+
|
1980 |
+
Notes
|
1981 |
+
-----
|
1982 |
+
`binary_repr` is equivalent to using `base_repr` with base 2, but about 25x
|
1983 |
+
faster.
|
1984 |
+
|
1985 |
+
References
|
1986 |
+
----------
|
1987 |
+
.. [1] Wikipedia, "Two's complement",
|
1988 |
+
https://en.wikipedia.org/wiki/Two's_complement
|
1989 |
+
|
1990 |
+
Examples
|
1991 |
+
--------
|
1992 |
+
>>> np.binary_repr(3)
|
1993 |
+
'11'
|
1994 |
+
>>> np.binary_repr(-3)
|
1995 |
+
'-11'
|
1996 |
+
>>> np.binary_repr(3, width=4)
|
1997 |
+
'0011'
|
1998 |
+
|
1999 |
+
The two's complement is returned when the input number is negative and
|
2000 |
+
width is specified:
|
2001 |
+
|
2002 |
+
>>> np.binary_repr(-3, width=3)
|
2003 |
+
'101'
|
2004 |
+
>>> np.binary_repr(-3, width=5)
|
2005 |
+
'11101'
|
2006 |
+
|
2007 |
+
"""
|
2008 |
+
def warn_if_insufficient(width, binwidth):
|
2009 |
+
if width is not None and width < binwidth:
|
2010 |
+
warnings.warn(
|
2011 |
+
"Insufficient bit width provided. This behavior "
|
2012 |
+
"will raise an error in the future.", DeprecationWarning,
|
2013 |
+
stacklevel=3)
|
2014 |
+
|
2015 |
+
# Ensure that num is a Python integer to avoid overflow or unwanted
|
2016 |
+
# casts to floating point.
|
2017 |
+
num = operator.index(num)
|
2018 |
+
|
2019 |
+
if num == 0:
|
2020 |
+
return '0' * (width or 1)
|
2021 |
+
|
2022 |
+
elif num > 0:
|
2023 |
+
binary = bin(num)[2:]
|
2024 |
+
binwidth = len(binary)
|
2025 |
+
outwidth = (binwidth if width is None
|
2026 |
+
else builtins.max(binwidth, width))
|
2027 |
+
warn_if_insufficient(width, binwidth)
|
2028 |
+
return binary.zfill(outwidth)
|
2029 |
+
|
2030 |
+
    else:
        if width is None:
            return '-' + bin(-num)[2:]

        else:
            poswidth = len(bin(-num)[2:])

            # See gh-8679: remove extra digit
            # for numbers at boundaries.
            if 2**(poswidth - 1) == -num:
                poswidth -= 1

            twocomp = 2**(poswidth + 1) + num
            binary = bin(twocomp)[2:]
            binwidth = len(binary)

            outwidth = builtins.max(binwidth, width)
            warn_if_insufficient(width, binwidth)
            return '1' * (outwidth - binwidth) + binary
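# Worked sketch of the negative branch above for num = -3 and width = 5
# (values illustrative):
#     poswidth = len(bin(3)[2:])            # 2 bits for abs(num)
#     twocomp  = 2**(poswidth + 1) + num    # 8 - 3 = 5  ->  bin(5)[2:] == '101'
#     result   = '1' * (5 - 3) + '101'      # sign-extend to the requested width
# which reproduces the docstring example np.binary_repr(-3, width=5) == '11101'.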
|
2050 |
+
|
2051 |
+
@set_module('numpy')
|
2052 |
+
def base_repr(number, base=2, padding=0):
|
2053 |
+
"""
|
2054 |
+
Return a string representation of a number in the given base system.
|
2055 |
+
|
2056 |
+
Parameters
|
2057 |
+
----------
|
2058 |
+
number : int
|
2059 |
+
The value to convert. Positive and negative values are handled.
|
2060 |
+
base : int, optional
|
2061 |
+
Convert `number` to the `base` number system. The valid range is 2-36,
|
2062 |
+
the default value is 2.
|
2063 |
+
padding : int, optional
|
2064 |
+
Number of zeros padded on the left. Default is 0 (no padding).
|
2065 |
+
|
2066 |
+
Returns
|
2067 |
+
-------
|
2068 |
+
out : str
|
2069 |
+
String representation of `number` in `base` system.
|
2070 |
+
|
2071 |
+
See Also
|
2072 |
+
--------
|
2073 |
+
binary_repr : Faster version of `base_repr` for base 2.
|
2074 |
+
|
2075 |
+
Examples
|
2076 |
+
--------
|
2077 |
+
>>> np.base_repr(5)
|
2078 |
+
'101'
|
2079 |
+
>>> np.base_repr(6, 5)
|
2080 |
+
'11'
|
2081 |
+
>>> np.base_repr(7, base=5, padding=3)
|
2082 |
+
'00012'
|
2083 |
+
|
2084 |
+
>>> np.base_repr(10, base=16)
|
2085 |
+
'A'
|
2086 |
+
>>> np.base_repr(32, base=16)
|
2087 |
+
'20'
|
2088 |
+
|
2089 |
+
"""
|
2090 |
+
digits = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
|
2091 |
+
if base > len(digits):
|
2092 |
+
raise ValueError("Bases greater than 36 not handled in base_repr.")
|
2093 |
+
elif base < 2:
|
2094 |
+
raise ValueError("Bases less than 2 not handled in base_repr.")
|
2095 |
+
|
2096 |
+
num = abs(number)
|
2097 |
+
res = []
|
2098 |
+
while num:
|
2099 |
+
res.append(digits[num % base])
|
2100 |
+
num //= base
|
2101 |
+
if padding:
|
2102 |
+
res.append('0' * padding)
|
2103 |
+
if number < 0:
|
2104 |
+
res.append('-')
|
2105 |
+
return ''.join(reversed(res or '0'))
|
2106 |
+
|
2107 |
+
|
2108 |
+
# These are all essentially abbreviations
|
2109 |
+
# These might wind up in a special abbreviations module
|
2110 |
+
|
2111 |
+
|
2112 |
+
def _maketup(descr, val):
|
2113 |
+
dt = dtype(descr)
|
2114 |
+
# Place val in all scalar tuples:
|
2115 |
+
fields = dt.fields
|
2116 |
+
if fields is None:
|
2117 |
+
return val
|
2118 |
+
else:
|
2119 |
+
res = [_maketup(fields[name][0], val) for name in dt.names]
|
2120 |
+
return tuple(res)
|
2121 |
+
|
2122 |
+
|
2123 |
+
@set_array_function_like_doc
|
2124 |
+
@set_module('numpy')
|
2125 |
+
def identity(n, dtype=None, *, like=None):
|
2126 |
+
"""
|
2127 |
+
Return the identity array.
|
2128 |
+
|
2129 |
+
The identity array is a square array with ones on
|
2130 |
+
the main diagonal.
|
2131 |
+
|
2132 |
+
Parameters
|
2133 |
+
----------
|
2134 |
+
n : int
|
2135 |
+
Number of rows (and columns) in `n` x `n` output.
|
2136 |
+
dtype : data-type, optional
|
2137 |
+
Data-type of the output. Defaults to ``float``.
|
2138 |
+
${ARRAY_FUNCTION_LIKE}
|
2139 |
+
|
2140 |
+
.. versionadded:: 1.20.0
|
2141 |
+
|
2142 |
+
Returns
|
2143 |
+
-------
|
2144 |
+
out : ndarray
|
2145 |
+
`n` x `n` array with its main diagonal set to one,
|
2146 |
+
and all other elements 0.
|
2147 |
+
|
2148 |
+
Examples
|
2149 |
+
--------
|
2150 |
+
>>> np.identity(3)
|
2151 |
+
array([[1., 0., 0.],
|
2152 |
+
[0., 1., 0.],
|
2153 |
+
[0., 0., 1.]])
|
2154 |
+
|
2155 |
+
"""
|
2156 |
+
if like is not None:
|
2157 |
+
return _identity_with_like(like, n, dtype=dtype)
|
2158 |
+
|
2159 |
+
from numpy import eye
|
2160 |
+
return eye(n, dtype=dtype, like=like)
|
2161 |
+
|
2162 |
+
|
2163 |
+
_identity_with_like = array_function_dispatch()(identity)


def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
    return (a, b)


@array_function_dispatch(_allclose_dispatcher)
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    """
    Returns True if two arrays are element-wise equal within a tolerance.

    The tolerance values are positive, typically very small numbers. The
    relative difference (`rtol` * abs(`b`)) and the absolute difference
    `atol` are added together to compare against the absolute difference
    between `a` and `b`.

    NaNs are treated as equal if they are in the same place and if
    ``equal_nan=True``. Infs are treated as equal if they are in the same
    place and of the same sign in both arrays.

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter (see Notes).
    atol : float
        The absolute tolerance parameter (see Notes).
    equal_nan : bool
        Whether to compare NaN's as equal. If True, NaN's in `a` will be
        considered equal to NaN's in `b` in the output array.

        .. versionadded:: 1.10.0

    Returns
    -------
    allclose : bool
        Returns True if the two arrays are equal within the given
        tolerance; False otherwise.

    See Also
    --------
    isclose, all, any, equal

    Notes
    -----
    If the following equation is element-wise True, then allclose returns
    True.

     absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))

    The above equation is not symmetric in `a` and `b`, so that
    ``allclose(a, b)`` might be different from ``allclose(b, a)`` in
    some rare cases.

    The comparison of `a` and `b` uses standard broadcasting, which
    means that `a` and `b` need not have the same shape in order for
    ``allclose(a, b)`` to evaluate to True. The same is true for
    `equal` but not `array_equal`.

    `allclose` is not defined for non-numeric data types.
    `bool` is considered a numeric data-type for this purpose.

    Examples
    --------
    >>> np.allclose([1e10,1e-7], [1.00001e10,1e-8])
    False
    >>> np.allclose([1e10,1e-8], [1.00001e10,1e-9])
    True
    >>> np.allclose([1e10,1e-8], [1.0001e10,1e-9])
    False
    >>> np.allclose([1.0, np.nan], [1.0, np.nan])
    False
    >>> np.allclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
    True

    """
    res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
    return bool(res)
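Because the tolerance is scaled by ``abs(b)`` only, the comparison is asymmetric, as the Notes above say. A small illustration with values chosen so that only one direction passes (assuming the default behavior described above):

    import numpy as np

    # |100 - 95| = 5;  rtol * |95|  = 4.75 -> not close
    print(np.allclose(100, 95, rtol=0.05, atol=0.0))   # False
    # |95 - 100| = 5;  rtol * |100| = 5.0  -> close (<= is inclusive)
    print(np.allclose(95, 100, rtol=0.05, atol=0.0))   # True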


def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
    return (a, b)


@array_function_dispatch(_isclose_dispatcher)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
    """
    Returns a boolean array where two arrays are element-wise equal within a
    tolerance.

    The tolerance values are positive, typically very small numbers. The
    relative difference (`rtol` * abs(`b`)) and the absolute difference
    `atol` are added together to compare against the absolute difference
    between `a` and `b`.

    .. warning:: The default `atol` is not appropriate for comparing numbers
                 that are much smaller than one (see Notes).

    Parameters
    ----------
    a, b : array_like
        Input arrays to compare.
    rtol : float
        The relative tolerance parameter (see Notes).
    atol : float
        The absolute tolerance parameter (see Notes).
    equal_nan : bool
        Whether to compare NaN's as equal. If True, NaN's in `a` will be
        considered equal to NaN's in `b` in the output array.

    Returns
    -------
    y : array_like
        Returns a boolean array of where `a` and `b` are equal within the
        given tolerance. If both `a` and `b` are scalars, returns a single
        boolean value.

    See Also
    --------
    allclose
    math.isclose

    Notes
    -----
    .. versionadded:: 1.7.0

    For finite values, isclose uses the following equation to test whether
    two floating point values are equivalent.

     absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))

    Unlike the built-in `math.isclose`, the above equation is not symmetric
    in `a` and `b` -- it assumes `b` is the reference value -- so that
    `isclose(a, b)` might be different from `isclose(b, a)`. Furthermore,
    the default value of atol is not zero, and is used to determine what
    small values should be considered close to zero. The default value is
    appropriate for expected values of order unity: if the expected values
    are significantly smaller than one, it can result in false positives.
    `atol` should be carefully selected for the use case at hand. A zero value
    for `atol` will result in `False` if either `a` or `b` is zero.

    `isclose` is not defined for non-numeric data types.
    `bool` is considered a numeric data-type for this purpose.

    Examples
    --------
    >>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
    array([ True, False])
    >>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])
    array([ True, True])
    >>> np.isclose([1e10,1e-8], [1.0001e10,1e-9])
    array([False, True])
    >>> np.isclose([1.0, np.nan], [1.0, np.nan])
    array([ True, False])
    >>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
    array([ True, True])
    >>> np.isclose([1e-8, 1e-7], [0.0, 0.0])
    array([ True, False])
    >>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0)
    array([False, False])
    >>> np.isclose([1e-10, 1e-10], [1e-20, 0.0])
    array([ True, True])
    >>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0)
    array([False, True])
    """
    def within_tol(x, y, atol, rtol):
        with errstate(invalid='ignore'), _no_nep50_warning():
            return less_equal(abs(x-y), atol + rtol * abs(y))

    x = asanyarray(a)
    y = asanyarray(b)

    # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT).
    # This will cause casting of x later. Also, make sure to allow subclasses
    # (e.g., for numpy.ma).
    # NOTE: We explicitly allow timedelta, which used to work. This could
    #       possibly be deprecated. See also gh-18286.
    #       timedelta works if `atol` is an integer or also a timedelta.
    #       Although, the default tolerances are unlikely to be useful
    if y.dtype.kind != "m":
        dt = multiarray.result_type(y, 1.)
        y = asanyarray(y, dtype=dt)

    xfin = isfinite(x)
    yfin = isfinite(y)
    if all(xfin) and all(yfin):
        return within_tol(x, y, atol, rtol)
    else:
        finite = xfin & yfin
        cond = zeros_like(finite, subok=True)
        # Because we're using boolean indexing, x & y must be the same shape.
        # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in
        # lib.stride_tricks, though, so we can't import it here.
        x = x * ones_like(cond)
        y = y * ones_like(cond)
        # Avoid subtraction with infinite/nan values...
        cond[finite] = within_tol(x[finite], y[finite], atol, rtol)
        # Check for equality of infinite values...
        cond[~finite] = (x[~finite] == y[~finite])
        if equal_nan:
            # Make NaN == NaN
            both_nan = isnan(x) & isnan(y)

            # Needed to treat masked arrays correctly. = True would not work.
            cond[both_nan] = both_nan[both_nan]

        return cond[()]  # Flatten 0d arrays to scalars
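The warning about `atol` can be made concrete: with the default ``atol=1e-8``, any two values well below 1e-8 compare as close regardless of their relative error. A brief sketch:

    import numpy as np

    print(np.isclose(1e-9, 2e-9))            # True: 1e-9 <= 1e-8 + 1e-5 * 2e-9
    print(np.isclose(1e-9, 2e-9, atol=0.0))  # False: only the relative term remains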


def _array_equal_dispatcher(a1, a2, equal_nan=None):
    return (a1, a2)


@array_function_dispatch(_array_equal_dispatcher)
def array_equal(a1, a2, equal_nan=False):
    """
    True if two arrays have the same shape and elements, False otherwise.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.
    equal_nan : bool
        Whether to compare NaN's as equal. If the dtype of a1 and a2 is
        complex, values will be considered equal if either the real or the
        imaginary component of a given value is ``nan``.

        .. versionadded:: 1.19.0

    Returns
    -------
    b : bool
        Returns True if the arrays are equal.

    See Also
    --------
    allclose: Returns True if two arrays are element-wise equal within a
              tolerance.
    array_equiv: Returns True if input arrays are shape consistent and all
                 elements equal.

    Examples
    --------
    >>> np.array_equal([1, 2], [1, 2])
    True
    >>> np.array_equal(np.array([1, 2]), np.array([1, 2]))
    True
    >>> np.array_equal([1, 2], [1, 2, 3])
    False
    >>> np.array_equal([1, 2], [1, 4])
    False
    >>> a = np.array([1, np.nan])
    >>> np.array_equal(a, a)
    False
    >>> np.array_equal(a, a, equal_nan=True)
    True

    When ``equal_nan`` is True, complex values with nan components are
    considered equal if either the real *or* the imaginary components are nan.

    >>> a = np.array([1 + 1j])
    >>> b = a.copy()
    >>> a.real = np.nan
    >>> b.imag = np.nan
    >>> np.array_equal(a, b, equal_nan=True)
    True
    """
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        return False
    if a1.shape != a2.shape:
        return False
    if not equal_nan:
        return bool(asarray(a1 == a2).all())
    # Handling NaN values if equal_nan is True
    a1nan, a2nan = isnan(a1), isnan(a2)
    # NaN's occur at different locations
    if not (a1nan == a2nan).all():
        return False
    # Shapes of a1, a2 and masks are guaranteed to be consistent by this point
    return bool(asarray(a1[~a1nan] == a2[~a1nan]).all())
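Note that ``equal_nan=True`` only equates NaNs sitting at the same positions; the location-mask comparison above rejects mismatched placements. For example:

    import numpy as np

    a = np.array([1.0, np.nan])
    b = np.array([np.nan, 1.0])
    print(np.array_equal(a, b, equal_nan=True))  # False: NaNs are in different slots
    print(np.array_equal(a, a, equal_nan=True))  # True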


def _array_equiv_dispatcher(a1, a2):
    return (a1, a2)


@array_function_dispatch(_array_equiv_dispatcher)
def array_equiv(a1, a2):
    """
    Returns True if input arrays are shape consistent and all elements equal.

    Shape consistent means they are either the same shape, or one input array
    can be broadcasted to create the same shape as the other one.

    Parameters
    ----------
    a1, a2 : array_like
        Input arrays.

    Returns
    -------
    out : bool
        True if equivalent, False otherwise.

    Examples
    --------
    >>> np.array_equiv([1, 2], [1, 2])
    True
    >>> np.array_equiv([1, 2], [1, 3])
    False

    Showing the shape equivalence:

    >>> np.array_equiv([1, 2], [[1, 2], [1, 2]])
    True
    >>> np.array_equiv([1, 2], [[1, 2, 1, 2], [1, 2, 1, 2]])
    False

    >>> np.array_equiv([1, 2], [[1, 2], [1, 3]])
    False

    """
    try:
        a1, a2 = asarray(a1), asarray(a2)
    except Exception:
        return False
    try:
        multiarray.broadcast(a1, a2)
    except Exception:
        return False

    return bool(asarray(a1 == a2).all())
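The practical difference from `array_equal` is the broadcast check above: shapes only need to be broadcast-compatible, not identical. A quick contrast:

    import numpy as np

    grid = np.full((2, 2), 3)
    print(np.array_equal(3, grid))  # False: shapes () and (2, 2) differ
    print(np.array_equiv(3, grid))  # True: the scalar broadcasts and every element matches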


Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
True_ = bool_(True)


def extend_all(module):
    existing = set(__all__)
    mall = getattr(module, '__all__')
    for a in mall:
        if a not in existing:
            __all__.append(a)


from .umath import *
from .numerictypes import *
from . import fromnumeric
from .fromnumeric import *
from . import arrayprint
from .arrayprint import *
from . import _asarray
from ._asarray import *
from . import _ufunc_config
from ._ufunc_config import *
extend_all(fromnumeric)
extend_all(umath)
extend_all(numerictypes)
extend_all(arrayprint)
extend_all(_asarray)
extend_all(_ufunc_config)
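For illustration, a toy stand-in showing what `extend_all` accomplishes (the `fake` module below is hypothetical, not NumPy code):

    import types

    __all__ = ['identity', 'allclose']

    def extend_all(module):                  # same logic as the helper above
        existing = set(__all__)
        for a in module.__all__:
            if a not in existing:
                __all__.append(a)

    fake = types.ModuleType('fake')
    fake.__all__ = ['allclose', 'isclose']
    extend_all(fake)
    print(__all__)   # ['identity', 'allclose', 'isclose'] - duplicates are skipped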
env-llmeval/lib/python3.10/site-packages/numpy/core/numeric.pyi
ADDED
@@ -0,0 +1,660 @@
from collections.abc import Callable, Sequence
|
2 |
+
from typing import (
|
3 |
+
Any,
|
4 |
+
overload,
|
5 |
+
TypeVar,
|
6 |
+
Literal,
|
7 |
+
SupportsAbs,
|
8 |
+
SupportsIndex,
|
9 |
+
NoReturn,
|
10 |
+
)
|
11 |
+
if sys.version_info >= (3, 10):
|
12 |
+
from typing import TypeGuard
|
13 |
+
else:
|
14 |
+
from typing_extensions import TypeGuard
|
15 |
+
|
16 |
+
from numpy import (
|
17 |
+
ComplexWarning as ComplexWarning,
|
18 |
+
generic,
|
19 |
+
unsignedinteger,
|
20 |
+
signedinteger,
|
21 |
+
floating,
|
22 |
+
complexfloating,
|
23 |
+
bool_,
|
24 |
+
int_,
|
25 |
+
intp,
|
26 |
+
float64,
|
27 |
+
timedelta64,
|
28 |
+
object_,
|
29 |
+
_OrderKACF,
|
30 |
+
_OrderCF,
|
31 |
+
)
|
32 |
+
|
33 |
+
from numpy._typing import (
|
34 |
+
ArrayLike,
|
35 |
+
NDArray,
|
36 |
+
DTypeLike,
|
37 |
+
_ShapeLike,
|
38 |
+
_DTypeLike,
|
39 |
+
_ArrayLike,
|
40 |
+
_SupportsArrayFunc,
|
41 |
+
_ScalarLike_co,
|
42 |
+
_ArrayLikeBool_co,
|
43 |
+
_ArrayLikeUInt_co,
|
44 |
+
_ArrayLikeInt_co,
|
45 |
+
_ArrayLikeFloat_co,
|
46 |
+
_ArrayLikeComplex_co,
|
47 |
+
_ArrayLikeTD64_co,
|
48 |
+
_ArrayLikeObject_co,
|
49 |
+
_ArrayLikeUnknown,
|
50 |
+
)
|
51 |
+
|
52 |
+
_T = TypeVar("_T")
|
53 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
54 |
+
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
|
55 |
+
|
56 |
+
_CorrelateMode = Literal["valid", "same", "full"]
|
57 |
+
|
58 |
+
__all__: list[str]
|
59 |
+
|
60 |
+
@overload
|
61 |
+
def zeros_like(
|
62 |
+
a: _ArrayType,
|
63 |
+
dtype: None = ...,
|
64 |
+
order: _OrderKACF = ...,
|
65 |
+
subok: Literal[True] = ...,
|
66 |
+
shape: None = ...,
|
67 |
+
) -> _ArrayType: ...
|
68 |
+
@overload
|
69 |
+
def zeros_like(
|
70 |
+
a: _ArrayLike[_SCT],
|
71 |
+
dtype: None = ...,
|
72 |
+
order: _OrderKACF = ...,
|
73 |
+
subok: bool = ...,
|
74 |
+
shape: None | _ShapeLike = ...,
|
75 |
+
) -> NDArray[_SCT]: ...
|
76 |
+
@overload
|
77 |
+
def zeros_like(
|
78 |
+
a: object,
|
79 |
+
dtype: None = ...,
|
80 |
+
order: _OrderKACF = ...,
|
81 |
+
subok: bool = ...,
|
82 |
+
shape: None | _ShapeLike= ...,
|
83 |
+
) -> NDArray[Any]: ...
|
84 |
+
@overload
|
85 |
+
def zeros_like(
|
86 |
+
a: Any,
|
87 |
+
dtype: _DTypeLike[_SCT],
|
88 |
+
order: _OrderKACF = ...,
|
89 |
+
subok: bool = ...,
|
90 |
+
shape: None | _ShapeLike= ...,
|
91 |
+
) -> NDArray[_SCT]: ...
|
92 |
+
@overload
|
93 |
+
def zeros_like(
|
94 |
+
a: Any,
|
95 |
+
dtype: DTypeLike,
|
96 |
+
order: _OrderKACF = ...,
|
97 |
+
subok: bool = ...,
|
98 |
+
shape: None | _ShapeLike= ...,
|
99 |
+
) -> NDArray[Any]: ...
|
100 |
+
|
101 |
+
@overload
|
102 |
+
def ones(
|
103 |
+
shape: _ShapeLike,
|
104 |
+
dtype: None = ...,
|
105 |
+
order: _OrderCF = ...,
|
106 |
+
*,
|
107 |
+
like: _SupportsArrayFunc = ...,
|
108 |
+
) -> NDArray[float64]: ...
|
109 |
+
@overload
|
110 |
+
def ones(
|
111 |
+
shape: _ShapeLike,
|
112 |
+
dtype: _DTypeLike[_SCT],
|
113 |
+
order: _OrderCF = ...,
|
114 |
+
*,
|
115 |
+
like: _SupportsArrayFunc = ...,
|
116 |
+
) -> NDArray[_SCT]: ...
|
117 |
+
@overload
|
118 |
+
def ones(
|
119 |
+
shape: _ShapeLike,
|
120 |
+
dtype: DTypeLike,
|
121 |
+
order: _OrderCF = ...,
|
122 |
+
*,
|
123 |
+
like: _SupportsArrayFunc = ...,
|
124 |
+
) -> NDArray[Any]: ...
|
125 |
+
|
126 |
+
@overload
|
127 |
+
def ones_like(
|
128 |
+
a: _ArrayType,
|
129 |
+
dtype: None = ...,
|
130 |
+
order: _OrderKACF = ...,
|
131 |
+
subok: Literal[True] = ...,
|
132 |
+
shape: None = ...,
|
133 |
+
) -> _ArrayType: ...
|
134 |
+
@overload
|
135 |
+
def ones_like(
|
136 |
+
a: _ArrayLike[_SCT],
|
137 |
+
dtype: None = ...,
|
138 |
+
order: _OrderKACF = ...,
|
139 |
+
subok: bool = ...,
|
140 |
+
shape: None | _ShapeLike = ...,
|
141 |
+
) -> NDArray[_SCT]: ...
|
142 |
+
@overload
|
143 |
+
def ones_like(
|
144 |
+
a: object,
|
145 |
+
dtype: None = ...,
|
146 |
+
order: _OrderKACF = ...,
|
147 |
+
subok: bool = ...,
|
148 |
+
shape: None | _ShapeLike= ...,
|
149 |
+
) -> NDArray[Any]: ...
|
150 |
+
@overload
|
151 |
+
def ones_like(
|
152 |
+
a: Any,
|
153 |
+
dtype: _DTypeLike[_SCT],
|
154 |
+
order: _OrderKACF = ...,
|
155 |
+
subok: bool = ...,
|
156 |
+
shape: None | _ShapeLike= ...,
|
157 |
+
) -> NDArray[_SCT]: ...
|
158 |
+
@overload
|
159 |
+
def ones_like(
|
160 |
+
a: Any,
|
161 |
+
dtype: DTypeLike,
|
162 |
+
order: _OrderKACF = ...,
|
163 |
+
subok: bool = ...,
|
164 |
+
shape: None | _ShapeLike= ...,
|
165 |
+
) -> NDArray[Any]: ...
|
166 |
+
|
167 |
+
@overload
|
168 |
+
def full(
|
169 |
+
shape: _ShapeLike,
|
170 |
+
fill_value: Any,
|
171 |
+
dtype: None = ...,
|
172 |
+
order: _OrderCF = ...,
|
173 |
+
*,
|
174 |
+
like: _SupportsArrayFunc = ...,
|
175 |
+
) -> NDArray[Any]: ...
|
176 |
+
@overload
|
177 |
+
def full(
|
178 |
+
shape: _ShapeLike,
|
179 |
+
fill_value: Any,
|
180 |
+
dtype: _DTypeLike[_SCT],
|
181 |
+
order: _OrderCF = ...,
|
182 |
+
*,
|
183 |
+
like: _SupportsArrayFunc = ...,
|
184 |
+
) -> NDArray[_SCT]: ...
|
185 |
+
@overload
|
186 |
+
def full(
|
187 |
+
shape: _ShapeLike,
|
188 |
+
fill_value: Any,
|
189 |
+
dtype: DTypeLike,
|
190 |
+
order: _OrderCF = ...,
|
191 |
+
*,
|
192 |
+
like: _SupportsArrayFunc = ...,
|
193 |
+
) -> NDArray[Any]: ...
|
194 |
+
|
195 |
+
@overload
|
196 |
+
def full_like(
|
197 |
+
a: _ArrayType,
|
198 |
+
fill_value: Any,
|
199 |
+
dtype: None = ...,
|
200 |
+
order: _OrderKACF = ...,
|
201 |
+
subok: Literal[True] = ...,
|
202 |
+
shape: None = ...,
|
203 |
+
) -> _ArrayType: ...
|
204 |
+
@overload
|
205 |
+
def full_like(
|
206 |
+
a: _ArrayLike[_SCT],
|
207 |
+
fill_value: Any,
|
208 |
+
dtype: None = ...,
|
209 |
+
order: _OrderKACF = ...,
|
210 |
+
subok: bool = ...,
|
211 |
+
shape: None | _ShapeLike = ...,
|
212 |
+
) -> NDArray[_SCT]: ...
|
213 |
+
@overload
|
214 |
+
def full_like(
|
215 |
+
a: object,
|
216 |
+
fill_value: Any,
|
217 |
+
dtype: None = ...,
|
218 |
+
order: _OrderKACF = ...,
|
219 |
+
subok: bool = ...,
|
220 |
+
shape: None | _ShapeLike= ...,
|
221 |
+
) -> NDArray[Any]: ...
|
222 |
+
@overload
|
223 |
+
def full_like(
|
224 |
+
a: Any,
|
225 |
+
fill_value: Any,
|
226 |
+
dtype: _DTypeLike[_SCT],
|
227 |
+
order: _OrderKACF = ...,
|
228 |
+
subok: bool = ...,
|
229 |
+
shape: None | _ShapeLike= ...,
|
230 |
+
) -> NDArray[_SCT]: ...
|
231 |
+
@overload
|
232 |
+
def full_like(
|
233 |
+
a: Any,
|
234 |
+
fill_value: Any,
|
235 |
+
dtype: DTypeLike,
|
236 |
+
order: _OrderKACF = ...,
|
237 |
+
subok: bool = ...,
|
238 |
+
shape: None | _ShapeLike= ...,
|
239 |
+
) -> NDArray[Any]: ...
|
240 |
+
|
241 |
+
@overload
|
242 |
+
def count_nonzero(
|
243 |
+
a: ArrayLike,
|
244 |
+
axis: None = ...,
|
245 |
+
*,
|
246 |
+
keepdims: Literal[False] = ...,
|
247 |
+
) -> int: ...
|
248 |
+
@overload
|
249 |
+
def count_nonzero(
|
250 |
+
a: ArrayLike,
|
251 |
+
axis: _ShapeLike = ...,
|
252 |
+
*,
|
253 |
+
keepdims: bool = ...,
|
254 |
+
) -> Any: ... # TODO: np.intp or ndarray[np.intp]
|
255 |
+
|
256 |
+
def isfortran(a: NDArray[Any] | generic) -> bool: ...
|
257 |
+
|
258 |
+
def argwhere(a: ArrayLike) -> NDArray[intp]: ...
|
259 |
+
|
260 |
+
def flatnonzero(a: ArrayLike) -> NDArray[intp]: ...
|
261 |
+
|
262 |
+
@overload
|
263 |
+
def correlate(
|
264 |
+
a: _ArrayLikeUnknown,
|
265 |
+
v: _ArrayLikeUnknown,
|
266 |
+
mode: _CorrelateMode = ...,
|
267 |
+
) -> NDArray[Any]: ...
|
268 |
+
@overload
|
269 |
+
def correlate(
|
270 |
+
a: _ArrayLikeBool_co,
|
271 |
+
v: _ArrayLikeBool_co,
|
272 |
+
mode: _CorrelateMode = ...,
|
273 |
+
) -> NDArray[bool_]: ...
|
274 |
+
@overload
|
275 |
+
def correlate(
|
276 |
+
a: _ArrayLikeUInt_co,
|
277 |
+
v: _ArrayLikeUInt_co,
|
278 |
+
mode: _CorrelateMode = ...,
|
279 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
280 |
+
@overload
|
281 |
+
def correlate(
|
282 |
+
a: _ArrayLikeInt_co,
|
283 |
+
v: _ArrayLikeInt_co,
|
284 |
+
mode: _CorrelateMode = ...,
|
285 |
+
) -> NDArray[signedinteger[Any]]: ...
|
286 |
+
@overload
|
287 |
+
def correlate(
|
288 |
+
a: _ArrayLikeFloat_co,
|
289 |
+
v: _ArrayLikeFloat_co,
|
290 |
+
mode: _CorrelateMode = ...,
|
291 |
+
) -> NDArray[floating[Any]]: ...
|
292 |
+
@overload
|
293 |
+
def correlate(
|
294 |
+
a: _ArrayLikeComplex_co,
|
295 |
+
v: _ArrayLikeComplex_co,
|
296 |
+
mode: _CorrelateMode = ...,
|
297 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
298 |
+
@overload
|
299 |
+
def correlate(
|
300 |
+
a: _ArrayLikeTD64_co,
|
301 |
+
v: _ArrayLikeTD64_co,
|
302 |
+
mode: _CorrelateMode = ...,
|
303 |
+
) -> NDArray[timedelta64]: ...
|
304 |
+
@overload
|
305 |
+
def correlate(
|
306 |
+
a: _ArrayLikeObject_co,
|
307 |
+
v: _ArrayLikeObject_co,
|
308 |
+
mode: _CorrelateMode = ...,
|
309 |
+
) -> NDArray[object_]: ...
|
310 |
+
|
311 |
+
@overload
|
312 |
+
def convolve(
|
313 |
+
a: _ArrayLikeUnknown,
|
314 |
+
v: _ArrayLikeUnknown,
|
315 |
+
mode: _CorrelateMode = ...,
|
316 |
+
) -> NDArray[Any]: ...
|
317 |
+
@overload
|
318 |
+
def convolve(
|
319 |
+
a: _ArrayLikeBool_co,
|
320 |
+
v: _ArrayLikeBool_co,
|
321 |
+
mode: _CorrelateMode = ...,
|
322 |
+
) -> NDArray[bool_]: ...
|
323 |
+
@overload
|
324 |
+
def convolve(
|
325 |
+
a: _ArrayLikeUInt_co,
|
326 |
+
v: _ArrayLikeUInt_co,
|
327 |
+
mode: _CorrelateMode = ...,
|
328 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
329 |
+
@overload
|
330 |
+
def convolve(
|
331 |
+
a: _ArrayLikeInt_co,
|
332 |
+
v: _ArrayLikeInt_co,
|
333 |
+
mode: _CorrelateMode = ...,
|
334 |
+
) -> NDArray[signedinteger[Any]]: ...
|
335 |
+
@overload
|
336 |
+
def convolve(
|
337 |
+
a: _ArrayLikeFloat_co,
|
338 |
+
v: _ArrayLikeFloat_co,
|
339 |
+
mode: _CorrelateMode = ...,
|
340 |
+
) -> NDArray[floating[Any]]: ...
|
341 |
+
@overload
|
342 |
+
def convolve(
|
343 |
+
a: _ArrayLikeComplex_co,
|
344 |
+
v: _ArrayLikeComplex_co,
|
345 |
+
mode: _CorrelateMode = ...,
|
346 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
347 |
+
@overload
|
348 |
+
def convolve(
|
349 |
+
a: _ArrayLikeTD64_co,
|
350 |
+
v: _ArrayLikeTD64_co,
|
351 |
+
mode: _CorrelateMode = ...,
|
352 |
+
) -> NDArray[timedelta64]: ...
|
353 |
+
@overload
|
354 |
+
def convolve(
|
355 |
+
a: _ArrayLikeObject_co,
|
356 |
+
v: _ArrayLikeObject_co,
|
357 |
+
mode: _CorrelateMode = ...,
|
358 |
+
) -> NDArray[object_]: ...
|
359 |
+
|
360 |
+
@overload
|
361 |
+
def outer(
|
362 |
+
a: _ArrayLikeUnknown,
|
363 |
+
b: _ArrayLikeUnknown,
|
364 |
+
out: None = ...,
|
365 |
+
) -> NDArray[Any]: ...
|
366 |
+
@overload
|
367 |
+
def outer(
|
368 |
+
a: _ArrayLikeBool_co,
|
369 |
+
b: _ArrayLikeBool_co,
|
370 |
+
out: None = ...,
|
371 |
+
) -> NDArray[bool_]: ...
|
372 |
+
@overload
|
373 |
+
def outer(
|
374 |
+
a: _ArrayLikeUInt_co,
|
375 |
+
b: _ArrayLikeUInt_co,
|
376 |
+
out: None = ...,
|
377 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
378 |
+
@overload
|
379 |
+
def outer(
|
380 |
+
a: _ArrayLikeInt_co,
|
381 |
+
b: _ArrayLikeInt_co,
|
382 |
+
out: None = ...,
|
383 |
+
) -> NDArray[signedinteger[Any]]: ...
|
384 |
+
@overload
|
385 |
+
def outer(
|
386 |
+
a: _ArrayLikeFloat_co,
|
387 |
+
b: _ArrayLikeFloat_co,
|
388 |
+
out: None = ...,
|
389 |
+
) -> NDArray[floating[Any]]: ...
|
390 |
+
@overload
|
391 |
+
def outer(
|
392 |
+
a: _ArrayLikeComplex_co,
|
393 |
+
b: _ArrayLikeComplex_co,
|
394 |
+
out: None = ...,
|
395 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
396 |
+
@overload
|
397 |
+
def outer(
|
398 |
+
a: _ArrayLikeTD64_co,
|
399 |
+
b: _ArrayLikeTD64_co,
|
400 |
+
out: None = ...,
|
401 |
+
) -> NDArray[timedelta64]: ...
|
402 |
+
@overload
|
403 |
+
def outer(
|
404 |
+
a: _ArrayLikeObject_co,
|
405 |
+
b: _ArrayLikeObject_co,
|
406 |
+
out: None = ...,
|
407 |
+
) -> NDArray[object_]: ...
|
408 |
+
@overload
|
409 |
+
def outer(
|
410 |
+
a: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
|
411 |
+
b: _ArrayLikeComplex_co | _ArrayLikeTD64_co | _ArrayLikeObject_co,
|
412 |
+
out: _ArrayType,
|
413 |
+
) -> _ArrayType: ...
|
414 |
+
|
415 |
+
@overload
|
416 |
+
def tensordot(
|
417 |
+
a: _ArrayLikeUnknown,
|
418 |
+
b: _ArrayLikeUnknown,
|
419 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
420 |
+
) -> NDArray[Any]: ...
|
421 |
+
@overload
|
422 |
+
def tensordot(
|
423 |
+
a: _ArrayLikeBool_co,
|
424 |
+
b: _ArrayLikeBool_co,
|
425 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
426 |
+
) -> NDArray[bool_]: ...
|
427 |
+
@overload
|
428 |
+
def tensordot(
|
429 |
+
a: _ArrayLikeUInt_co,
|
430 |
+
b: _ArrayLikeUInt_co,
|
431 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
432 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
433 |
+
@overload
|
434 |
+
def tensordot(
|
435 |
+
a: _ArrayLikeInt_co,
|
436 |
+
b: _ArrayLikeInt_co,
|
437 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
438 |
+
) -> NDArray[signedinteger[Any]]: ...
|
439 |
+
@overload
|
440 |
+
def tensordot(
|
441 |
+
a: _ArrayLikeFloat_co,
|
442 |
+
b: _ArrayLikeFloat_co,
|
443 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
444 |
+
) -> NDArray[floating[Any]]: ...
|
445 |
+
@overload
|
446 |
+
def tensordot(
|
447 |
+
a: _ArrayLikeComplex_co,
|
448 |
+
b: _ArrayLikeComplex_co,
|
449 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
450 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
451 |
+
@overload
|
452 |
+
def tensordot(
|
453 |
+
a: _ArrayLikeTD64_co,
|
454 |
+
b: _ArrayLikeTD64_co,
|
455 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
456 |
+
) -> NDArray[timedelta64]: ...
|
457 |
+
@overload
|
458 |
+
def tensordot(
|
459 |
+
a: _ArrayLikeObject_co,
|
460 |
+
b: _ArrayLikeObject_co,
|
461 |
+
axes: int | tuple[_ShapeLike, _ShapeLike] = ...,
|
462 |
+
) -> NDArray[object_]: ...
|
463 |
+
|
464 |
+
@overload
|
465 |
+
def roll(
|
466 |
+
a: _ArrayLike[_SCT],
|
467 |
+
shift: _ShapeLike,
|
468 |
+
axis: None | _ShapeLike = ...,
|
469 |
+
) -> NDArray[_SCT]: ...
|
470 |
+
@overload
|
471 |
+
def roll(
|
472 |
+
a: ArrayLike,
|
473 |
+
shift: _ShapeLike,
|
474 |
+
axis: None | _ShapeLike = ...,
|
475 |
+
) -> NDArray[Any]: ...
|
476 |
+
|
477 |
+
def rollaxis(
|
478 |
+
a: NDArray[_SCT],
|
479 |
+
axis: int,
|
480 |
+
start: int = ...,
|
481 |
+
) -> NDArray[_SCT]: ...
|
482 |
+
|
483 |
+
def moveaxis(
|
484 |
+
a: NDArray[_SCT],
|
485 |
+
source: _ShapeLike,
|
486 |
+
destination: _ShapeLike,
|
487 |
+
) -> NDArray[_SCT]: ...
|
488 |
+
|
489 |
+
@overload
|
490 |
+
def cross(
|
491 |
+
a: _ArrayLikeUnknown,
|
492 |
+
b: _ArrayLikeUnknown,
|
493 |
+
axisa: int = ...,
|
494 |
+
axisb: int = ...,
|
495 |
+
axisc: int = ...,
|
496 |
+
axis: None | int = ...,
|
497 |
+
) -> NDArray[Any]: ...
|
498 |
+
@overload
|
499 |
+
def cross(
|
500 |
+
a: _ArrayLikeBool_co,
|
501 |
+
b: _ArrayLikeBool_co,
|
502 |
+
axisa: int = ...,
|
503 |
+
axisb: int = ...,
|
504 |
+
axisc: int = ...,
|
505 |
+
axis: None | int = ...,
|
506 |
+
) -> NoReturn: ...
|
507 |
+
@overload
|
508 |
+
def cross(
|
509 |
+
a: _ArrayLikeUInt_co,
|
510 |
+
b: _ArrayLikeUInt_co,
|
511 |
+
axisa: int = ...,
|
512 |
+
axisb: int = ...,
|
513 |
+
axisc: int = ...,
|
514 |
+
axis: None | int = ...,
|
515 |
+
) -> NDArray[unsignedinteger[Any]]: ...
|
516 |
+
@overload
|
517 |
+
def cross(
|
518 |
+
a: _ArrayLikeInt_co,
|
519 |
+
b: _ArrayLikeInt_co,
|
520 |
+
axisa: int = ...,
|
521 |
+
axisb: int = ...,
|
522 |
+
axisc: int = ...,
|
523 |
+
axis: None | int = ...,
|
524 |
+
) -> NDArray[signedinteger[Any]]: ...
|
525 |
+
@overload
|
526 |
+
def cross(
|
527 |
+
a: _ArrayLikeFloat_co,
|
528 |
+
b: _ArrayLikeFloat_co,
|
529 |
+
axisa: int = ...,
|
530 |
+
axisb: int = ...,
|
531 |
+
axisc: int = ...,
|
532 |
+
axis: None | int = ...,
|
533 |
+
) -> NDArray[floating[Any]]: ...
|
534 |
+
@overload
|
535 |
+
def cross(
|
536 |
+
a: _ArrayLikeComplex_co,
|
537 |
+
b: _ArrayLikeComplex_co,
|
538 |
+
axisa: int = ...,
|
539 |
+
axisb: int = ...,
|
540 |
+
axisc: int = ...,
|
541 |
+
axis: None | int = ...,
|
542 |
+
) -> NDArray[complexfloating[Any, Any]]: ...
|
543 |
+
@overload
|
544 |
+
def cross(
|
545 |
+
a: _ArrayLikeObject_co,
|
546 |
+
b: _ArrayLikeObject_co,
|
547 |
+
axisa: int = ...,
|
548 |
+
axisb: int = ...,
|
549 |
+
axisc: int = ...,
|
550 |
+
axis: None | int = ...,
|
551 |
+
) -> NDArray[object_]: ...
|
552 |
+
|
553 |
+
@overload
|
554 |
+
def indices(
|
555 |
+
dimensions: Sequence[int],
|
556 |
+
dtype: type[int] = ...,
|
557 |
+
sparse: Literal[False] = ...,
|
558 |
+
) -> NDArray[int_]: ...
|
559 |
+
@overload
|
560 |
+
def indices(
|
561 |
+
dimensions: Sequence[int],
|
562 |
+
dtype: type[int] = ...,
|
563 |
+
sparse: Literal[True] = ...,
|
564 |
+
) -> tuple[NDArray[int_], ...]: ...
|
565 |
+
@overload
|
566 |
+
def indices(
|
567 |
+
dimensions: Sequence[int],
|
568 |
+
dtype: _DTypeLike[_SCT],
|
569 |
+
sparse: Literal[False] = ...,
|
570 |
+
) -> NDArray[_SCT]: ...
|
571 |
+
@overload
|
572 |
+
def indices(
|
573 |
+
dimensions: Sequence[int],
|
574 |
+
dtype: _DTypeLike[_SCT],
|
575 |
+
sparse: Literal[True],
|
576 |
+
) -> tuple[NDArray[_SCT], ...]: ...
|
577 |
+
@overload
|
578 |
+
def indices(
|
579 |
+
dimensions: Sequence[int],
|
580 |
+
dtype: DTypeLike,
|
581 |
+
sparse: Literal[False] = ...,
|
582 |
+
) -> NDArray[Any]: ...
|
583 |
+
@overload
|
584 |
+
def indices(
|
585 |
+
dimensions: Sequence[int],
|
586 |
+
dtype: DTypeLike,
|
587 |
+
sparse: Literal[True],
|
588 |
+
) -> tuple[NDArray[Any], ...]: ...
|
589 |
+
|
590 |
+
def fromfunction(
|
591 |
+
function: Callable[..., _T],
|
592 |
+
shape: Sequence[int],
|
593 |
+
*,
|
594 |
+
dtype: DTypeLike = ...,
|
595 |
+
like: _SupportsArrayFunc = ...,
|
596 |
+
**kwargs: Any,
|
597 |
+
) -> _T: ...
|
598 |
+
|
599 |
+
def isscalar(element: object) -> TypeGuard[
|
600 |
+
generic | bool | int | float | complex | str | bytes | memoryview
|
601 |
+
]: ...
|
602 |
+
|
603 |
+
def binary_repr(num: SupportsIndex, width: None | int = ...) -> str: ...
|
604 |
+
|
605 |
+
def base_repr(
|
606 |
+
number: SupportsAbs[float],
|
607 |
+
base: float = ...,
|
608 |
+
padding: SupportsIndex = ...,
|
609 |
+
) -> str: ...
|
610 |
+
|
611 |
+
@overload
|
612 |
+
def identity(
|
613 |
+
n: int,
|
614 |
+
dtype: None = ...,
|
615 |
+
*,
|
616 |
+
like: _SupportsArrayFunc = ...,
|
617 |
+
) -> NDArray[float64]: ...
|
618 |
+
@overload
|
619 |
+
def identity(
|
620 |
+
n: int,
|
621 |
+
dtype: _DTypeLike[_SCT],
|
622 |
+
*,
|
623 |
+
like: _SupportsArrayFunc = ...,
|
624 |
+
) -> NDArray[_SCT]: ...
|
625 |
+
@overload
|
626 |
+
def identity(
|
627 |
+
n: int,
|
628 |
+
dtype: DTypeLike,
|
629 |
+
*,
|
630 |
+
like: _SupportsArrayFunc = ...,
|
631 |
+
) -> NDArray[Any]: ...
|
632 |
+
|
633 |
+
def allclose(
|
634 |
+
a: ArrayLike,
|
635 |
+
b: ArrayLike,
|
636 |
+
rtol: float = ...,
|
637 |
+
atol: float = ...,
|
638 |
+
equal_nan: bool = ...,
|
639 |
+
) -> bool: ...
|
640 |
+
|
641 |
+
@overload
|
642 |
+
def isclose(
|
643 |
+
a: _ScalarLike_co,
|
644 |
+
b: _ScalarLike_co,
|
645 |
+
rtol: float = ...,
|
646 |
+
atol: float = ...,
|
647 |
+
equal_nan: bool = ...,
|
648 |
+
) -> bool_: ...
|
649 |
+
@overload
|
650 |
+
def isclose(
|
651 |
+
a: ArrayLike,
|
652 |
+
b: ArrayLike,
|
653 |
+
rtol: float = ...,
|
654 |
+
atol: float = ...,
|
655 |
+
equal_nan: bool = ...,
|
656 |
+
) -> NDArray[bool_]: ...
|
657 |
+
|
658 |
+
def array_equal(a1: ArrayLike, a2: ArrayLike, equal_nan: bool = ...) -> bool: ...
|
659 |
+
|
660 |
+
def array_equiv(a1: ArrayLike, a2: ArrayLike) -> bool: ...
|
env-llmeval/lib/python3.10/site-packages/numpy/core/shape_base.pyi
ADDED
@@ -0,0 +1,123 @@
from collections.abc import Sequence
|
2 |
+
from typing import TypeVar, overload, Any, SupportsIndex
|
3 |
+
|
4 |
+
from numpy import generic, _CastingKind
|
5 |
+
from numpy._typing import (
|
6 |
+
NDArray,
|
7 |
+
ArrayLike,
|
8 |
+
DTypeLike,
|
9 |
+
_ArrayLike,
|
10 |
+
_DTypeLike,
|
11 |
+
)
|
12 |
+
|
13 |
+
_SCT = TypeVar("_SCT", bound=generic)
|
14 |
+
_ArrayType = TypeVar("_ArrayType", bound=NDArray[Any])
|
15 |
+
|
16 |
+
__all__: list[str]
|
17 |
+
|
18 |
+
@overload
|
19 |
+
def atleast_1d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
|
20 |
+
@overload
|
21 |
+
def atleast_1d(arys: ArrayLike, /) -> NDArray[Any]: ...
|
22 |
+
@overload
|
23 |
+
def atleast_1d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
|
24 |
+
|
25 |
+
@overload
|
26 |
+
def atleast_2d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
|
27 |
+
@overload
|
28 |
+
def atleast_2d(arys: ArrayLike, /) -> NDArray[Any]: ...
|
29 |
+
@overload
|
30 |
+
def atleast_2d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
|
31 |
+
|
32 |
+
@overload
|
33 |
+
def atleast_3d(arys: _ArrayLike[_SCT], /) -> NDArray[_SCT]: ...
|
34 |
+
@overload
|
35 |
+
def atleast_3d(arys: ArrayLike, /) -> NDArray[Any]: ...
|
36 |
+
@overload
|
37 |
+
def atleast_3d(*arys: ArrayLike) -> list[NDArray[Any]]: ...
|
38 |
+
|
39 |
+
@overload
|
40 |
+
def vstack(
|
41 |
+
tup: Sequence[_ArrayLike[_SCT]],
|
42 |
+
*,
|
43 |
+
dtype: None = ...,
|
44 |
+
casting: _CastingKind = ...
|
45 |
+
) -> NDArray[_SCT]: ...
|
46 |
+
@overload
|
47 |
+
def vstack(
|
48 |
+
tup: Sequence[ArrayLike],
|
49 |
+
*,
|
50 |
+
dtype: _DTypeLike[_SCT],
|
51 |
+
casting: _CastingKind = ...
|
52 |
+
) -> NDArray[_SCT]: ...
|
53 |
+
@overload
|
54 |
+
def vstack(
|
55 |
+
tup: Sequence[ArrayLike],
|
56 |
+
*,
|
57 |
+
dtype: DTypeLike = ...,
|
58 |
+
casting: _CastingKind = ...
|
59 |
+
) -> NDArray[Any]: ...
|
60 |
+
|
61 |
+
@overload
|
62 |
+
def hstack(
|
63 |
+
tup: Sequence[_ArrayLike[_SCT]],
|
64 |
+
*,
|
65 |
+
dtype: None = ...,
|
66 |
+
casting: _CastingKind = ...
|
67 |
+
) -> NDArray[_SCT]: ...
|
68 |
+
@overload
|
69 |
+
def hstack(
|
70 |
+
tup: Sequence[ArrayLike],
|
71 |
+
*,
|
72 |
+
dtype: _DTypeLike[_SCT],
|
73 |
+
casting: _CastingKind = ...
|
74 |
+
) -> NDArray[_SCT]: ...
|
75 |
+
@overload
|
76 |
+
def hstack(
|
77 |
+
tup: Sequence[ArrayLike],
|
78 |
+
*,
|
79 |
+
dtype: DTypeLike = ...,
|
80 |
+
casting: _CastingKind = ...
|
81 |
+
) -> NDArray[Any]: ...
|
82 |
+
|
83 |
+
@overload
|
84 |
+
def stack(
|
85 |
+
arrays: Sequence[_ArrayLike[_SCT]],
|
86 |
+
axis: SupportsIndex = ...,
|
87 |
+
out: None = ...,
|
88 |
+
*,
|
89 |
+
dtype: None = ...,
|
90 |
+
casting: _CastingKind = ...
|
91 |
+
) -> NDArray[_SCT]: ...
|
92 |
+
@overload
|
93 |
+
def stack(
|
94 |
+
arrays: Sequence[ArrayLike],
|
95 |
+
axis: SupportsIndex = ...,
|
96 |
+
out: None = ...,
|
97 |
+
*,
|
98 |
+
dtype: _DTypeLike[_SCT],
|
99 |
+
casting: _CastingKind = ...
|
100 |
+
) -> NDArray[_SCT]: ...
|
101 |
+
@overload
|
102 |
+
def stack(
|
103 |
+
arrays: Sequence[ArrayLike],
|
104 |
+
axis: SupportsIndex = ...,
|
105 |
+
out: None = ...,
|
106 |
+
*,
|
107 |
+
dtype: DTypeLike = ...,
|
108 |
+
casting: _CastingKind = ...
|
109 |
+
) -> NDArray[Any]: ...
|
110 |
+
@overload
|
111 |
+
def stack(
|
112 |
+
arrays: Sequence[ArrayLike],
|
113 |
+
axis: SupportsIndex = ...,
|
114 |
+
out: _ArrayType = ...,
|
115 |
+
*,
|
116 |
+
dtype: DTypeLike = ...,
|
117 |
+
casting: _CastingKind = ...
|
118 |
+
) -> _ArrayType: ...
|
119 |
+
|
120 |
+
@overload
|
121 |
+
def block(arrays: _ArrayLike[_SCT]) -> NDArray[_SCT]: ...
|
122 |
+
@overload
|
123 |
+
def block(arrays: ArrayLike) -> NDArray[Any]: ...
|
env-llmeval/lib/python3.10/site-packages/numpy/core/umath.py
ADDED
@@ -0,0 +1,36 @@
"""
Create the numpy.core.umath namespace for backward compatibility. In v1.16
the multiarray and umath c-extension modules were merged into a single
_multiarray_umath extension module. So we replicate the old namespace
by importing from the extension module.

"""

from . import _multiarray_umath
from ._multiarray_umath import *  # noqa: F403
# These imports are needed for backward compatibility,
# do not change them. issue gh-11862
# _ones_like is semi-public, on purpose not added to __all__
from ._multiarray_umath import _UFUNC_API, _add_newdoc_ufunc, _ones_like

__all__ = [
    '_UFUNC_API', 'ERR_CALL', 'ERR_DEFAULT', 'ERR_IGNORE', 'ERR_LOG',
    'ERR_PRINT', 'ERR_RAISE', 'ERR_WARN', 'FLOATING_POINT_SUPPORT',
    'FPE_DIVIDEBYZERO', 'FPE_INVALID', 'FPE_OVERFLOW', 'FPE_UNDERFLOW', 'NAN',
    'NINF', 'NZERO', 'PINF', 'PZERO', 'SHIFT_DIVIDEBYZERO', 'SHIFT_INVALID',
    'SHIFT_OVERFLOW', 'SHIFT_UNDERFLOW', 'UFUNC_BUFSIZE_DEFAULT',
    'UFUNC_PYVALS_NAME', '_add_newdoc_ufunc', 'absolute', 'add',
    'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
    'bitwise_and', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj',
    'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', 'divide',
    'divmod', 'e', 'equal', 'euler_gamma', 'exp', 'exp2', 'expm1', 'fabs',
    'floor', 'floor_divide', 'float_power', 'fmax', 'fmin', 'fmod', 'frexp',
    'frompyfunc', 'gcd', 'geterrobj', 'greater', 'greater_equal', 'heaviside',
    'hypot', 'invert', 'isfinite', 'isinf', 'isnan', 'isnat', 'lcm', 'ldexp',
    'left_shift', 'less', 'less_equal', 'log', 'log10', 'log1p', 'log2',
    'logaddexp', 'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
    'logical_xor', 'maximum', 'minimum', 'mod', 'modf', 'multiply', 'negative',
    'nextafter', 'not_equal', 'pi', 'positive', 'power', 'rad2deg', 'radians',
    'reciprocal', 'remainder', 'right_shift', 'rint', 'seterrobj', 'sign',
    'signbit', 'sin', 'sinh', 'spacing', 'sqrt', 'square', 'subtract', 'tan',
    'tanh', 'true_divide', 'trunc']
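A quick check of the re-export, for illustration (the object-identity comparison reflects how the shim imports from `_multiarray_umath` and is expected to hold, but is shown here as an assumption rather than a guarantee):

    import numpy as np
    import numpy.core.umath as umath

    print(umath.add(1, 2))       # 3
    print(umath.add is np.add)   # expected True: both names point at the merged extension's ufunc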
env-llmeval/lib/python3.10/site-packages/numpy/core/umath_tests.py
ADDED
@@ -0,0 +1,13 @@
"""
Shim for _umath_tests to allow a deprecation period for the new name.

"""
import warnings

# 2018-04-04, numpy 1.15.0
warnings.warn(("numpy.core.umath_tests is an internal NumPy "
               "module and should not be imported. It will "
               "be removed in a future NumPy release."),
              category=DeprecationWarning, stacklevel=2)

from ._umath_tests import *
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimd.c
ADDED
@@ -0,0 +1,27 @@
#ifdef _MSC_VER
|
2 |
+
#include <Intrin.h>
|
3 |
+
#endif
|
4 |
+
#include <arm_neon.h>
|
5 |
+
|
6 |
+
int main(int argc, char **argv)
|
7 |
+
{
|
8 |
+
float *src = (float*)argv[argc-1];
|
9 |
+
float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
|
10 |
+
/* MAXMIN */
|
11 |
+
int ret = (int)vgetq_lane_f32(vmaxnmq_f32(v1, v2), 0);
|
12 |
+
ret += (int)vgetq_lane_f32(vminnmq_f32(v1, v2), 0);
|
13 |
+
/* ROUNDING */
|
14 |
+
ret += (int)vgetq_lane_f32(vrndq_f32(v1), 0);
|
15 |
+
#ifdef __aarch64__
|
16 |
+
{
|
17 |
+
double *src2 = (double*)argv[argc-1];
|
18 |
+
float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
|
19 |
+
/* MAXMIN */
|
20 |
+
ret += (int)vgetq_lane_f64(vmaxnmq_f64(vd1, vd2), 0);
|
21 |
+
ret += (int)vgetq_lane_f64(vminnmq_f64(vd1, vd2), 0);
|
22 |
+
/* ROUNDING */
|
23 |
+
ret += (int)vgetq_lane_f64(vrndq_f64(vd1), 0);
|
24 |
+
}
|
25 |
+
#endif
|
26 |
+
return ret;
|
27 |
+
}
|
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_asimdhp.c
ADDED
@@ -0,0 +1,15 @@
#ifdef _MSC_VER
|
2 |
+
#include <Intrin.h>
|
3 |
+
#endif
|
4 |
+
#include <arm_neon.h>
|
5 |
+
|
6 |
+
int main(int argc, char **argv)
|
7 |
+
{
|
8 |
+
float16_t *src = (float16_t*)argv[argc-1];
|
9 |
+
float16x8_t vhp = vdupq_n_f16(src[0]);
|
10 |
+
float16x4_t vlhp = vdup_n_f16(src[1]);
|
11 |
+
|
12 |
+
int ret = (int)vgetq_lane_f16(vabdq_f16(vhp, vhp), 0);
|
13 |
+
ret += (int)vget_lane_f16(vabd_f16(vlhp, vlhp), 0);
|
14 |
+
return ret;
|
15 |
+
}
|
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx.c
ADDED
@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
2 |
+
/*
|
3 |
+
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
4 |
+
* whether or not the build options for those features are specified.
|
5 |
+
* Therefore, we must test #definitions of CPU features when option native/host
|
6 |
+
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
7 |
+
* the test will be broken and leads to enable all possible features.
|
8 |
+
*/
|
9 |
+
#ifndef __AVX__
|
10 |
+
#error "HOST/ARCH doesn't support AVX"
|
11 |
+
#endif
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#include <immintrin.h>
|
15 |
+
|
16 |
+
int main(int argc, char **argv)
|
17 |
+
{
|
18 |
+
__m256 a = _mm256_add_ps(_mm256_loadu_ps((const float*)argv[argc-1]), _mm256_loadu_ps((const float*)argv[1]));
|
19 |
+
return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
|
20 |
+
}
|
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx2.c
ADDED
@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
2 |
+
/*
|
3 |
+
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
4 |
+
* whether or not the build options for those features are specified.
|
5 |
+
* Therefore, we must test #definitions of CPU features when option native/host
|
6 |
+
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
7 |
+
* the test will be broken and leads to enable all possible features.
|
8 |
+
*/
|
9 |
+
#ifndef __AVX2__
|
10 |
+
#error "HOST/ARCH doesn't support AVX2"
|
11 |
+
#endif
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#include <immintrin.h>
|
15 |
+
|
16 |
+
int main(int argc, char **argv)
|
17 |
+
{
|
18 |
+
__m256i a = _mm256_abs_epi16(_mm256_loadu_si256((const __m256i*)argv[argc-1]));
|
19 |
+
return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
|
20 |
+
}
|
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_clx.c
ADDED
@@ -0,0 +1,22 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
2 |
+
/*
|
3 |
+
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
4 |
+
* whether or not the build options for those features are specified.
|
5 |
+
* Therefore, we must test #definitions of CPU features when option native/host
|
6 |
+
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
7 |
+
* the test will be broken and leads to enable all possible features.
|
8 |
+
*/
|
9 |
+
#ifndef __AVX512VNNI__
|
10 |
+
#error "HOST/ARCH doesn't support CascadeLake AVX512 features"
|
11 |
+
#endif
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#include <immintrin.h>
|
15 |
+
|
16 |
+
int main(int argc, char **argv)
|
17 |
+
{
|
18 |
+
/* VNNI */
|
19 |
+
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
|
20 |
+
a = _mm512_dpbusd_epi32(a, _mm512_setzero_si512(), a);
|
21 |
+
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
22 |
+
}
|
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_cnl.c
ADDED
@@ -0,0 +1,24 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
2 |
+
/*
|
3 |
+
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
4 |
+
* whether or not the build options for those features are specified.
|
5 |
+
* Therefore, we must test #definitions of CPU features when option native/host
|
6 |
+
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
7 |
+
* the test will be broken and leads to enable all possible features.
|
8 |
+
*/
|
9 |
+
#if !defined(__AVX512VBMI__) || !defined(__AVX512IFMA__)
|
10 |
+
#error "HOST/ARCH doesn't support CannonLake AVX512 features"
|
11 |
+
#endif
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#include <immintrin.h>
|
15 |
+
|
16 |
+
int main(int argc, char **argv)
|
17 |
+
{
|
18 |
+
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
|
19 |
+
/* IFMA */
|
20 |
+
a = _mm512_madd52hi_epu64(a, a, _mm512_setzero_si512());
|
21 |
+
/* VMBI */
|
22 |
+
a = _mm512_permutex2var_epi8(a, _mm512_setzero_si512(), a);
|
23 |
+
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
24 |
+
}
|
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_icl.c
ADDED
@@ -0,0 +1,26 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
2 |
+
/*
|
3 |
+
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
4 |
+
* whether or not the build options for those features are specified.
|
5 |
+
* Therefore, we must test #definitions of CPU features when option native/host
|
6 |
+
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
7 |
+
* the test will be broken and leads to enable all possible features.
|
8 |
+
*/
|
9 |
+
#if !defined(__AVX512VPOPCNTDQ__) || !defined(__AVX512BITALG__) || !defined(__AVX512VPOPCNTDQ__)
|
10 |
+
#error "HOST/ARCH doesn't support IceLake AVX512 features"
|
11 |
+
#endif
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#include <immintrin.h>
|
15 |
+
|
16 |
+
int main(int argc, char **argv)
|
17 |
+
{
|
18 |
+
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
|
19 |
+
/* VBMI2 */
|
20 |
+
a = _mm512_shrdv_epi64(a, a, _mm512_setzero_si512());
|
21 |
+
/* BITLAG */
|
22 |
+
a = _mm512_popcnt_epi8(a);
|
23 |
+
/* VPOPCNTDQ */
|
24 |
+
a = _mm512_popcnt_epi64(a);
|
25 |
+
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
26 |
+
}
|
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knl.c
ADDED
@@ -0,0 +1,25 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
2 |
+
/*
|
3 |
+
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
4 |
+
* whether or not the build options for those features are specified.
|
5 |
+
* Therefore, we must test #definitions of CPU features when option native/host
|
6 |
+
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
7 |
+
* the test will be broken and leads to enable all possible features.
|
8 |
+
*/
|
9 |
+
#if !defined(__AVX512ER__) || !defined(__AVX512PF__)
|
10 |
+
#error "HOST/ARCH doesn't support Knights Landing AVX512 features"
|
11 |
+
#endif
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#include <immintrin.h>
|
15 |
+
|
16 |
+
int main(int argc, char **argv)
|
17 |
+
{
|
18 |
+
int base[128]={};
|
19 |
+
__m512d ad = _mm512_loadu_pd((const __m512d*)argv[argc-1]);
|
20 |
+
/* ER */
|
21 |
+
__m512i a = _mm512_castpd_si512(_mm512_exp2a23_pd(ad));
|
22 |
+
/* PF */
|
23 |
+
_mm512_mask_prefetch_i64scatter_pd(base, _mm512_cmpeq_epi64_mask(a, a), a, 1, _MM_HINT_T1);
|
24 |
+
return base[0];
|
25 |
+
}
|
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_knm.c
ADDED
@@ -0,0 +1,30 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
2 |
+
/*
|
3 |
+
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
4 |
+
* whether or not the build options for those features are specified.
|
5 |
+
* Therefore, we must test #definitions of CPU features when option native/host
|
6 |
+
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
7 |
+
* the test will be broken and leads to enable all possible features.
|
8 |
+
*/
|
9 |
+
#if !defined(__AVX5124FMAPS__) || !defined(__AVX5124VNNIW__) || !defined(__AVX512VPOPCNTDQ__)
|
10 |
+
#error "HOST/ARCH doesn't support Knights Mill AVX512 features"
|
11 |
+
#endif
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#include <immintrin.h>
|
15 |
+
|
16 |
+
int main(int argc, char **argv)
|
17 |
+
{
|
18 |
+
__m512i a = _mm512_loadu_si512((const __m512i*)argv[argc-1]);
|
19 |
+
__m512 b = _mm512_loadu_ps((const __m512*)argv[argc-2]);
|
20 |
+
|
21 |
+
/* 4FMAPS */
|
22 |
+
b = _mm512_4fmadd_ps(b, b, b, b, b, NULL);
|
23 |
+
/* 4VNNIW */
|
24 |
+
a = _mm512_4dpwssd_epi32(a, a, a, a, a, NULL);
|
25 |
+
/* VPOPCNTDQ */
|
26 |
+
a = _mm512_popcnt_epi64(a);
|
27 |
+
|
28 |
+
a = _mm512_add_epi32(a, _mm512_castps_si512(b));
|
29 |
+
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
30 |
+
}
|
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512_skx.c
ADDED
@@ -0,0 +1,26 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
2 |
+
/*
|
3 |
+
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
4 |
+
* whether or not the build options for those features are specified.
|
5 |
+
* Therefore, we must test #definitions of CPU features when option native/host
|
6 |
+
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
7 |
+
* the test will be broken and leads to enable all possible features.
|
8 |
+
*/
|
9 |
+
#if !defined(__AVX512VL__) || !defined(__AVX512BW__) || !defined(__AVX512DQ__)
|
10 |
+
#error "HOST/ARCH doesn't support SkyLake AVX512 features"
|
11 |
+
#endif
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#include <immintrin.h>
|
15 |
+
|
16 |
+
int main(int argc, char **argv)
|
17 |
+
{
|
18 |
+
__m512i aa = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
|
19 |
+
/* VL */
|
20 |
+
__m256i a = _mm256_abs_epi64(_mm512_extracti64x4_epi64(aa, 1));
|
21 |
+
/* DQ */
|
22 |
+
__m512i b = _mm512_broadcast_i32x8(a);
|
23 |
+
/* BW */
|
24 |
+
b = _mm512_abs_epi16(b);
|
25 |
+
return _mm_cvtsi128_si32(_mm512_castsi512_si128(b));
|
26 |
+
}
|
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512cd.c
ADDED
@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
2 |
+
/*
|
3 |
+
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
4 |
+
* whether or not the build options for those features are specified.
|
5 |
+
* Therefore, we must test #definitions of CPU features when option native/host
|
6 |
+
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
7 |
+
* the test will be broken and leads to enable all possible features.
|
8 |
+
*/
|
9 |
+
#ifndef __AVX512CD__
|
10 |
+
#error "HOST/ARCH doesn't support AVX512CD"
|
11 |
+
#endif
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#include <immintrin.h>
|
15 |
+
|
16 |
+
int main(int argc, char **argv)
|
17 |
+
{
|
18 |
+
__m512i a = _mm512_lzcnt_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
|
19 |
+
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
20 |
+
}
|
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_avx512f.c
ADDED
@@ -0,0 +1,20 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
2 |
+
/*
|
3 |
+
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
4 |
+
* whether or not the build options for those features are specified.
|
5 |
+
* Therefore, we must test #definitions of CPU features when option native/host
|
6 |
+
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
7 |
+
* the test will be broken and leads to enable all possible features.
|
8 |
+
*/
|
9 |
+
#ifndef __AVX512F__
|
10 |
+
#error "HOST/ARCH doesn't support AVX512F"
|
11 |
+
#endif
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#include <immintrin.h>
|
15 |
+
|
16 |
+
int main(int argc, char **argv)
|
17 |
+
{
|
18 |
+
__m512i a = _mm512_abs_epi32(_mm512_loadu_si512((const __m512i*)argv[argc-1]));
|
19 |
+
return _mm_cvtsi128_si32(_mm512_castsi512_si128(a));
|
20 |
+
}
|
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_f16c.c
ADDED
@@ -0,0 +1,22 @@
#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
|
2 |
+
/*
|
3 |
+
* Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
|
4 |
+
* whether or not the build options for those features are specified.
|
5 |
+
* Therefore, we must test #definitions of CPU features when option native/host
|
6 |
+
* is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
|
7 |
+
* the test will be broken and leads to enable all possible features.
|
8 |
+
*/
|
9 |
+
#ifndef __F16C__
|
10 |
+
#error "HOST/ARCH doesn't support F16C"
|
11 |
+
#endif
|
12 |
+
#endif
|
13 |
+
|
14 |
+
#include <emmintrin.h>
|
15 |
+
#include <immintrin.h>
|
16 |
+
|
17 |
+
int main(int argc, char **argv)
|
18 |
+
{
|
19 |
+
__m128 a = _mm_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-1]));
|
20 |
+
__m256 a8 = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*)argv[argc-2]));
|
21 |
+
return (int)(_mm_cvtss_f32(a) + _mm_cvtss_f32(_mm256_castps256_ps128(a8)));
|
22 |
+
}
|
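For reference, the half-to-float conversion that F16C performs in hardware (`_mm_cvtph_ps`) can be expressed in portable C. The sketch below is illustrative only, flushes subnormal halves to zero to stay short, and is not part of the check file.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Illustrative IEEE binary16 -> binary32 conversion (subnormals flushed to zero). */
static float half_to_float(uint16_t h)
{
    uint32_t sign = (uint32_t)(h >> 15) << 31;
    uint32_t exp  = (h >> 10) & 0x1F;
    uint32_t mant = (uint32_t)(h & 0x3FF) << 13;
    uint32_t bits;

    if (exp == 0)        bits = sign;                        /* zero (subnormals flushed) */
    else if (exp == 31)  bits = sign | 0x7F800000u | mant;   /* inf / NaN */
    else                 bits = sign | ((exp - 15 + 127) << 23) | mant;

    float f;
    memcpy(&f, &bits, sizeof f);
    return f;
}

int main(void)
{
    printf("%f\n", half_to_float(0x3C00)); /* 0x3C00 is 1.0 in binary16 */
    return 0;
}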
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_fma3.c
ADDED
@@ -0,0 +1,22 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__FMA__) && !defined(__AVX2__)
+        #error "HOST/ARCH doesn't support FMA3"
+    #endif
+#endif
+
+#include <xmmintrin.h>
+#include <immintrin.h>
+
+int main(int argc, char **argv)
+{
+    __m256 a = _mm256_loadu_ps((const float*)argv[argc-1]);
+    a = _mm256_fmadd_ps(a, a, a);
+    return (int)_mm_cvtss_f32(_mm256_castps256_ps128(a));
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon.c
ADDED
@@ -0,0 +1,19 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    // passing from untraced pointers to avoid optimizing out any constants
+    // so we can test against the linker.
+    float *src = (float*)argv[argc-1];
+    float32x4_t v1 = vdupq_n_f32(src[0]), v2 = vdupq_n_f32(src[1]);
+    int ret = (int)vgetq_lane_f32(vmulq_f32(v1, v2), 0);
+#ifdef __aarch64__
+    double *src2 = (double*)argv[argc-2];
+    float64x2_t vd1 = vdupq_n_f64(src2[0]), vd2 = vdupq_n_f64(src2[1]);
+    ret += (int)vgetq_lane_f64(vmulq_f64(vd1, vd2), 0);
+#endif
+    return ret;
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_fp16.c
ADDED
@@ -0,0 +1,11 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    short *src = (short*)argv[argc-1];
+    float32x4_t v_z4 = vcvt_f32_f16((float16x4_t)vld1_s16(src));
+    return (int)vgetq_lane_f32(v_z4, 0);
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_neon_vfpv4.c
ADDED
@@ -0,0 +1,21 @@
+#ifdef _MSC_VER
+    #include <Intrin.h>
+#endif
+#include <arm_neon.h>
+
+int main(int argc, char **argv)
+{
+    float *src = (float*)argv[argc-1];
+    float32x4_t v1 = vdupq_n_f32(src[0]);
+    float32x4_t v2 = vdupq_n_f32(src[1]);
+    float32x4_t v3 = vdupq_n_f32(src[2]);
+    int ret = (int)vgetq_lane_f32(vfmaq_f32(v1, v2, v3), 0);
+#ifdef __aarch64__
+    double *src2 = (double*)argv[argc-2];
+    float64x2_t vd1 = vdupq_n_f64(src2[0]);
+    float64x2_t vd2 = vdupq_n_f64(src2[1]);
+    float64x2_t vd3 = vdupq_n_f64(src2[2]);
+    ret += (int)vgetq_lane_f64(vfmaq_f64(vd1, vd2, vd3), 0);
+#endif
+    return ret;
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_popcnt.c
ADDED
@@ -0,0 +1,32 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #if !defined(__SSE4_2__) && !defined(__POPCNT__)
+        #error "HOST/ARCH doesn't support POPCNT"
+    #endif
+#endif
+
+#ifdef _MSC_VER
+    #include <nmmintrin.h>
+#else
+    #include <popcntintrin.h>
+#endif
+
+int main(int argc, char **argv)
+{
+    // To make sure popcnt instructions are generated
+    // and tested against the assembler
+    unsigned long long a = *((unsigned long long*)argv[argc-1]);
+    unsigned int b = *((unsigned int*)argv[argc-2]);
+
+#if defined(_M_X64) || defined(__x86_64__)
+    a = _mm_popcnt_u64(a);
+#endif
+    b = _mm_popcnt_u32(b);
+    return (int)a + b;
+}
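The POPCNT probe only needs to make the compiler emit the instruction; what the instruction computes is easy to state in portable C. A small, purely illustrative software equivalent (not used by the checks):

#include <stdio.h>

/* Portable population count; hardware POPCNT computes the same thing in one instruction. */
static unsigned popcount64(unsigned long long x)
{
    unsigned n = 0;
    while (x) {
        x &= x - 1;   /* clear the lowest set bit */
        n++;
    }
    return n;
}

int main(void)
{
    printf("%u\n", popcount64(0xF0F0ULL)); /* prints 8 */
    return 0;
}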
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE__
+        #error "HOST/ARCH doesn't support SSE"
+    #endif
+#endif
+
+#include <xmmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_add_ps(_mm_setzero_ps(), _mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse2.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE2__
+        #error "HOST/ARCH doesn't support SSE2"
+    #endif
+#endif
+
+#include <emmintrin.h>
+
+int main(void)
+{
+    __m128i a = _mm_add_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+    return _mm_cvtsi128_si32(a);
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse3.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE3__
+        #error "HOST/ARCH doesn't support SSE3"
+    #endif
+#endif
+
+#include <pmmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_hadd_ps(_mm_setzero_ps(), _mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_sse41.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSE4_1__
+        #error "HOST/ARCH doesn't support SSE41"
+    #endif
+#endif
+
+#include <smmintrin.h>
+
+int main(void)
+{
+    __m128 a = _mm_floor_ps(_mm_setzero_ps());
+    return (int)_mm_cvtss_f32(a);
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_ssse3.c
ADDED
@@ -0,0 +1,20 @@
+#if defined(DETECT_FEATURES) && defined(__INTEL_COMPILER)
+    /*
+     * Unlike GCC and CLANG, Intel Compiler exposes all supported intrinsics,
+     * whether or not the build options for those features are specified.
+     * Therefore, we must test #definitions of CPU features when option native/host
+     * is enabled via `--cpu-baseline` or through env var `CFLAGS` otherwise
+     * the test will be broken and leads to enable all possible features.
+     */
+    #ifndef __SSSE3__
+        #error "HOST/ARCH doesn't support SSSE3"
+    #endif
+#endif
+
+#include <tmmintrin.h>
+
+int main(void)
+{
+    __m128i a = _mm_hadd_epi16(_mm_setzero_si128(), _mm_setzero_si128());
+    return (int)_mm_cvtsi128_si32(a);
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx.c
ADDED
@@ -0,0 +1,21 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+#if (defined(__GNUC__) && !defined(vec_xl)) || (defined(__clang__) && !defined(__IBMC__))
+    #define vsx_ld vec_vsx_ld
+    #define vsx_st vec_vsx_st
+#else
+    #define vsx_ld vec_xl
+    #define vsx_st vec_xst
+#endif
+
+int main(void)
+{
+    unsigned int zout[4];
+    unsigned int z4[] = {0, 0, 0, 0};
+    __vector unsigned int v_z4 = vsx_ld(0, z4);
+    vsx_st(v_z4, 0, zout);
+    return zout[0];
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx3.c
ADDED
@@ -0,0 +1,13 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned int v_uint32x4;
+
+int main(void)
+{
+    v_uint32x4 z4 = (v_uint32x4){0, 0, 0, 0};
+    z4 = vec_absd(z4, z4);
+    return (int)vec_extract(z4, 0);
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vsx4.c
ADDED
@@ -0,0 +1,14 @@
+#ifndef __VSX__
+    #error "VSX is not supported"
+#endif
+#include <altivec.h>
+
+typedef __vector unsigned int v_uint32x4;
+
+int main(void)
+{
+    v_uint32x4 v1 = (v_uint32x4){2, 4, 8, 16};
+    v_uint32x4 v2 = (v_uint32x4){2, 2, 2, 2};
+    v_uint32x4 v3 = vec_mod(v1, v2);
+    return (int)vec_extractm(v3);
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vx.c
ADDED
@@ -0,0 +1,16 @@
+#if (__VEC__ < 10301) || (__ARCH__ < 11)
+    #error VX not supported
+#endif
+
+#include <vecintrin.h>
+int main(int argc, char **argv)
+{
+    __vector double x = vec_abs(vec_xl(argc, (double*)argv));
+    __vector double y = vec_load_len((double*)argv, (unsigned int)argc);
+
+    x = vec_round(vec_ceil(x) + vec_floor(y));
+    __vector bool long long m = vec_cmpge(x, y);
+    __vector long long i = vec_signed(vec_sel(x, y, m));
+
+    return (int)vec_extract(i, 0);
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_vxe2.c
ADDED
@@ -0,0 +1,21 @@
+#if (__VEC__ < 10303) || (__ARCH__ < 13)
+    #error VXE2 not supported
+#endif
+
+#include <vecintrin.h>
+
+int main(int argc, char **argv)
+{
+    int val;
+    __vector signed short large = { 'a', 'b', 'c', 'a', 'g', 'h', 'g', 'o' };
+    __vector signed short search = { 'g', 'h', 'g', 'o' };
+    __vector unsigned char len = { 0 };
+    __vector unsigned char res = vec_search_string_cc(large, search, len, &val);
+    __vector float x = vec_xl(argc, (float*)argv);
+    __vector int i = vec_signed(x);
+
+    i = vec_srdb(vec_sldb(i, i, 2), i, 3);
+    val += (int)vec_extract(res, 1);
+    val += vec_extract(i, 0);
+    return val;
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/cpu_xop.c
ADDED
@@ -0,0 +1,12 @@
+#include <immintrin.h>
+#ifdef _MSC_VER
+    #include <ammintrin.h>
+#else
+    #include <x86intrin.h>
+#endif
+
+int main(void)
+{
+    __m128i a = _mm_comge_epu32(_mm_setzero_si128(), _mm_setzero_si128());
+    return _mm_cvtsi128_si32(a);
+}
env-llmeval/lib/python3.10/site-packages/numpy/distutils/checks/extra_avx512bw_mask.c
ADDED
@@ -0,0 +1,18 @@
+#include <immintrin.h>
+/**
+ * Test BW mask operations due to:
+ *  - MSVC has supported it since vs2019 see,
+ *    https://developercommunity.visualstudio.com/content/problem/518298/missing-avx512bw-mask-intrinsics.html
+ *  - Clang >= v8.0
+ *  - GCC >= v7.1
+ */
+int main(void)
+{
+    __mmask64 m64 = _mm512_cmpeq_epi8_mask(_mm512_set1_epi8((char)1), _mm512_set1_epi8((char)1));
+    m64 = _kor_mask64(m64, m64);
+    m64 = _kxor_mask64(m64, m64);
+    m64 = _cvtu64_mask64(_cvtmask64_u64(m64));
+    m64 = _mm512_kunpackd(m64, m64);
+    m64 = (__mmask64)_mm512_kunpackw((__mmask32)m64, (__mmask32)m64);
+    return (int)_cvtmask64_u64(m64);
+}
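The mask test above exists only to confirm that the toolchain ships the AVX512BW mask intrinsics. Conceptually, `_mm512_cmpeq_epi8_mask` compares 64 byte lanes and packs one result bit per lane into a `__mmask64`; the scalar sketch below shows that packing and is illustrative only (no AVX512 required).

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Compare 64 byte lanes for equality and pack the results into a 64-bit mask. */
static uint64_t cmpeq_epi8_mask_scalar(const uint8_t a[64], const uint8_t b[64])
{
    uint64_t mask = 0;
    for (int i = 0; i < 64; i++)
        mask |= (uint64_t)(a[i] == b[i]) << i;
    return mask;
}

int main(void)
{
    uint8_t a[64], b[64];
    memset(a, 1, sizeof a);
    memset(b, 1, sizeof b);
    b[0] = 2; /* make lane 0 differ */
    printf("%016llx\n", (unsigned long long)cmpeq_epi8_mask_scalar(a, b)); /* fffffffffffffffe */
    return 0;
}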