applied-ai-018 committed on
Commit
d07b5c9
·
verified ·
1 Parent(s): 921cd9d

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. ckpts/universal/global_step40/zero/4.mlp.dense_4h_to_h.weight/exp_avg_sq.pt +3 -0
  2. ckpts/universal/global_step40/zero/8.attention.query_key_value.weight/exp_avg_sq.pt +3 -0
  3. venv/lib/python3.10/site-packages/numpy/_typing/__init__.py +221 -0
  4. venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/__init__.cpython-310.pyc +0 -0
  5. venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_add_docstring.cpython-310.pyc +0 -0
  6. venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_array_like.cpython-310.pyc +0 -0
  7. venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_char_codes.cpython-310.pyc +0 -0
  8. venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-310.pyc +0 -0
  9. venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_extended_precision.cpython-310.pyc +0 -0
  10. venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_nbit.cpython-310.pyc +0 -0
  11. venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_nested_sequence.cpython-310.pyc +0 -0
  12. venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_scalars.cpython-310.pyc +0 -0
  13. venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_shape.cpython-310.pyc +0 -0
  14. venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/setup.cpython-310.pyc +0 -0
  15. venv/lib/python3.10/site-packages/numpy/_typing/_add_docstring.py +152 -0
  16. venv/lib/python3.10/site-packages/numpy/_typing/_array_like.py +167 -0
  17. venv/lib/python3.10/site-packages/numpy/_typing/_callable.pyi +338 -0
  18. venv/lib/python3.10/site-packages/numpy/_typing/_char_codes.py +111 -0
  19. venv/lib/python3.10/site-packages/numpy/_typing/_extended_precision.py +27 -0
  20. venv/lib/python3.10/site-packages/numpy/_typing/_nbit.py +16 -0
  21. venv/lib/python3.10/site-packages/numpy/_typing/_nested_sequence.py +86 -0
  22. venv/lib/python3.10/site-packages/numpy/_typing/_scalars.py +30 -0
  23. venv/lib/python3.10/site-packages/numpy/_typing/_shape.py +7 -0
  24. venv/lib/python3.10/site-packages/numpy/_typing/_ufunc.pyi +445 -0
  25. venv/lib/python3.10/site-packages/numpy/core/__init__.py +180 -0
  26. venv/lib/python3.10/site-packages/numpy/core/__init__.pyi +2 -0
  27. venv/lib/python3.10/site-packages/numpy/core/_add_newdocs.py +0 -0
  28. venv/lib/python3.10/site-packages/numpy/core/_add_newdocs_scalars.py +372 -0
  29. venv/lib/python3.10/site-packages/numpy/core/_asarray.py +134 -0
  30. venv/lib/python3.10/site-packages/numpy/core/_dtype.py +369 -0
  31. venv/lib/python3.10/site-packages/numpy/core/_dtype_ctypes.py +117 -0
  32. venv/lib/python3.10/site-packages/numpy/core/_exceptions.py +172 -0
  33. venv/lib/python3.10/site-packages/numpy/core/_internal.py +935 -0
  34. venv/lib/python3.10/site-packages/numpy/core/_internal.pyi +30 -0
  35. venv/lib/python3.10/site-packages/numpy/core/_machar.py +356 -0
  36. venv/lib/python3.10/site-packages/numpy/core/_multiarray_tests.cpython-310-x86_64-linux-gnu.so +0 -0
  37. venv/lib/python3.10/site-packages/numpy/core/_operand_flag_tests.cpython-310-x86_64-linux-gnu.so +0 -0
  38. venv/lib/python3.10/site-packages/numpy/core/_rational_tests.cpython-310-x86_64-linux-gnu.so +0 -0
  39. venv/lib/python3.10/site-packages/numpy/core/_string_helpers.py +100 -0
  40. venv/lib/python3.10/site-packages/numpy/core/_struct_ufunc_tests.cpython-310-x86_64-linux-gnu.so +0 -0
  41. venv/lib/python3.10/site-packages/numpy/core/_type_aliases.py +245 -0
  42. venv/lib/python3.10/site-packages/numpy/core/_ufunc_config.py +466 -0
  43. venv/lib/python3.10/site-packages/numpy/core/_ufunc_config.pyi +37 -0
  44. venv/lib/python3.10/site-packages/numpy/core/_umath_tests.cpython-310-x86_64-linux-gnu.so +0 -0
  45. venv/lib/python3.10/site-packages/numpy/core/arrayprint.pyi +142 -0
  46. venv/lib/python3.10/site-packages/numpy/core/cversions.py +13 -0
  47. venv/lib/python3.10/site-packages/numpy/core/defchararray.py +2914 -0
  48. venv/lib/python3.10/site-packages/numpy/core/einsumfunc.py +1443 -0
  49. venv/lib/python3.10/site-packages/numpy/core/einsumfunc.pyi +187 -0
  50. venv/lib/python3.10/site-packages/numpy/core/fromnumeric.py +0 -0
ckpts/universal/global_step40/zero/4.mlp.dense_4h_to_h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5552a04c793f5a78ce624e36892a0934e0511feaa448d1a316323a99018e00d8
3
+ size 33555627
ckpts/universal/global_step40/zero/8.attention.query_key_value.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1280675cdb1cb83a90d1fc2266ee8d0f0862d716d7be2a9b8d1873976dc3b8ba
3
+ size 50332843
venv/lib/python3.10/site-packages/numpy/_typing/__init__.py ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Private counterpart of ``numpy.typing``."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from .. import ufunc
6
+ from .._utils import set_module
7
+ from typing import TYPE_CHECKING, final
8
+
9
+
10
@final  # Disallow the creation of arbitrary `NBitBase` subclasses
@set_module("numpy.typing")
class NBitBase:
    """
    A type representing `numpy.number` precision during static type checking.

    Used exclusively for static type checking, `NBitBase` is the root of a
    hierarchy of subclasses in which each subclass denotes a successively
    lower level of precision, *e.g.* ``64Bit > 32Bit > 16Bit``.

    .. versionadded:: 1.20

    Examples
    --------
    Annotate a function taking a float and an integer of arbitrary
    precision, returning a float of whichever precision is largest
    (*e.g.* ``np.float16 + np.int64 -> np.float64``), with
    ``T1 = TypeVar("T1", bound=npt.NBitBase)`` and
    ``def add(a: np.floating[T1], b: np.integer[T2]) -> np.floating[T1 | T2]``.

    """

    def __init_subclass__(cls) -> None:
        # Only the fixed set of precision classes defined directly below may
        # inherit from `NBitBase`; everything else is rejected at class
        # creation time, enforcing the `@final` decoration at runtime.
        allowed_names = {
            "NBitBase", "_256Bit", "_128Bit", "_96Bit", "_80Bit",
            "_64Bit", "_32Bit", "_16Bit", "_8Bit",
        }
        if cls.__name__ not in allowed_names:
            raise TypeError('cannot inherit from final class "NBitBase"')
        super().__init_subclass__()


# One class per precision level; subclassing encodes the ordering (e.g. a
# `_32Bit` value is representable within `_64Bit`).  The `type: ignore`
# comments silence errors about subclassing a `@final`-decorated class.
class _256Bit(NBitBase):  # type: ignore[misc]
    pass


class _128Bit(_256Bit):  # type: ignore[misc]
    pass


class _96Bit(_128Bit):  # type: ignore[misc]
    pass


class _80Bit(_96Bit):  # type: ignore[misc]
    pass


class _64Bit(_80Bit):  # type: ignore[misc]
    pass


class _32Bit(_64Bit):  # type: ignore[misc]
    pass


class _16Bit(_32Bit):  # type: ignore[misc]
    pass


class _8Bit(_16Bit):  # type: ignore[misc]
    pass
90
+
91
+
92
+ from ._nested_sequence import (
93
+ _NestedSequence as _NestedSequence,
94
+ )
95
+ from ._nbit import (
96
+ _NBitByte as _NBitByte,
97
+ _NBitShort as _NBitShort,
98
+ _NBitIntC as _NBitIntC,
99
+ _NBitIntP as _NBitIntP,
100
+ _NBitInt as _NBitInt,
101
+ _NBitLongLong as _NBitLongLong,
102
+ _NBitHalf as _NBitHalf,
103
+ _NBitSingle as _NBitSingle,
104
+ _NBitDouble as _NBitDouble,
105
+ _NBitLongDouble as _NBitLongDouble,
106
+ )
107
+ from ._char_codes import (
108
+ _BoolCodes as _BoolCodes,
109
+ _UInt8Codes as _UInt8Codes,
110
+ _UInt16Codes as _UInt16Codes,
111
+ _UInt32Codes as _UInt32Codes,
112
+ _UInt64Codes as _UInt64Codes,
113
+ _Int8Codes as _Int8Codes,
114
+ _Int16Codes as _Int16Codes,
115
+ _Int32Codes as _Int32Codes,
116
+ _Int64Codes as _Int64Codes,
117
+ _Float16Codes as _Float16Codes,
118
+ _Float32Codes as _Float32Codes,
119
+ _Float64Codes as _Float64Codes,
120
+ _Complex64Codes as _Complex64Codes,
121
+ _Complex128Codes as _Complex128Codes,
122
+ _ByteCodes as _ByteCodes,
123
+ _ShortCodes as _ShortCodes,
124
+ _IntCCodes as _IntCCodes,
125
+ _IntPCodes as _IntPCodes,
126
+ _IntCodes as _IntCodes,
127
+ _LongLongCodes as _LongLongCodes,
128
+ _UByteCodes as _UByteCodes,
129
+ _UShortCodes as _UShortCodes,
130
+ _UIntCCodes as _UIntCCodes,
131
+ _UIntPCodes as _UIntPCodes,
132
+ _UIntCodes as _UIntCodes,
133
+ _ULongLongCodes as _ULongLongCodes,
134
+ _HalfCodes as _HalfCodes,
135
+ _SingleCodes as _SingleCodes,
136
+ _DoubleCodes as _DoubleCodes,
137
+ _LongDoubleCodes as _LongDoubleCodes,
138
+ _CSingleCodes as _CSingleCodes,
139
+ _CDoubleCodes as _CDoubleCodes,
140
+ _CLongDoubleCodes as _CLongDoubleCodes,
141
+ _DT64Codes as _DT64Codes,
142
+ _TD64Codes as _TD64Codes,
143
+ _StrCodes as _StrCodes,
144
+ _BytesCodes as _BytesCodes,
145
+ _VoidCodes as _VoidCodes,
146
+ _ObjectCodes as _ObjectCodes,
147
+ )
148
+ from ._scalars import (
149
+ _CharLike_co as _CharLike_co,
150
+ _BoolLike_co as _BoolLike_co,
151
+ _UIntLike_co as _UIntLike_co,
152
+ _IntLike_co as _IntLike_co,
153
+ _FloatLike_co as _FloatLike_co,
154
+ _ComplexLike_co as _ComplexLike_co,
155
+ _TD64Like_co as _TD64Like_co,
156
+ _NumberLike_co as _NumberLike_co,
157
+ _ScalarLike_co as _ScalarLike_co,
158
+ _VoidLike_co as _VoidLike_co,
159
+ )
160
+ from ._shape import (
161
+ _Shape as _Shape,
162
+ _ShapeLike as _ShapeLike,
163
+ )
164
+ from ._dtype_like import (
165
+ DTypeLike as DTypeLike,
166
+ _DTypeLike as _DTypeLike,
167
+ _SupportsDType as _SupportsDType,
168
+ _VoidDTypeLike as _VoidDTypeLike,
169
+ _DTypeLikeBool as _DTypeLikeBool,
170
+ _DTypeLikeUInt as _DTypeLikeUInt,
171
+ _DTypeLikeInt as _DTypeLikeInt,
172
+ _DTypeLikeFloat as _DTypeLikeFloat,
173
+ _DTypeLikeComplex as _DTypeLikeComplex,
174
+ _DTypeLikeTD64 as _DTypeLikeTD64,
175
+ _DTypeLikeDT64 as _DTypeLikeDT64,
176
+ _DTypeLikeObject as _DTypeLikeObject,
177
+ _DTypeLikeVoid as _DTypeLikeVoid,
178
+ _DTypeLikeStr as _DTypeLikeStr,
179
+ _DTypeLikeBytes as _DTypeLikeBytes,
180
+ _DTypeLikeComplex_co as _DTypeLikeComplex_co,
181
+ )
182
+ from ._array_like import (
183
+ NDArray as NDArray,
184
+ ArrayLike as ArrayLike,
185
+ _ArrayLike as _ArrayLike,
186
+ _FiniteNestedSequence as _FiniteNestedSequence,
187
+ _SupportsArray as _SupportsArray,
188
+ _SupportsArrayFunc as _SupportsArrayFunc,
189
+ _ArrayLikeInt as _ArrayLikeInt,
190
+ _ArrayLikeBool_co as _ArrayLikeBool_co,
191
+ _ArrayLikeUInt_co as _ArrayLikeUInt_co,
192
+ _ArrayLikeInt_co as _ArrayLikeInt_co,
193
+ _ArrayLikeFloat_co as _ArrayLikeFloat_co,
194
+ _ArrayLikeComplex_co as _ArrayLikeComplex_co,
195
+ _ArrayLikeNumber_co as _ArrayLikeNumber_co,
196
+ _ArrayLikeTD64_co as _ArrayLikeTD64_co,
197
+ _ArrayLikeDT64_co as _ArrayLikeDT64_co,
198
+ _ArrayLikeObject_co as _ArrayLikeObject_co,
199
+ _ArrayLikeVoid_co as _ArrayLikeVoid_co,
200
+ _ArrayLikeStr_co as _ArrayLikeStr_co,
201
+ _ArrayLikeBytes_co as _ArrayLikeBytes_co,
202
+ _ArrayLikeUnknown as _ArrayLikeUnknown,
203
+ _UnknownType as _UnknownType,
204
+ )
205
+
206
# The `_UFunc_*` / `_GUFunc_*` protocol classes only exist for static type
# checkers; at runtime they are plain aliases of `ufunc` itself.
if TYPE_CHECKING:
    from ._ufunc import (
        _UFunc_Nin1_Nout1 as _UFunc_Nin1_Nout1,
        _UFunc_Nin2_Nout1 as _UFunc_Nin2_Nout1,
        _UFunc_Nin1_Nout2 as _UFunc_Nin1_Nout2,
        _UFunc_Nin2_Nout2 as _UFunc_Nin2_Nout2,
        _GUFunc_Nin2_Nout1 as _GUFunc_Nin2_Nout1,
    )
else:
    # Declare the (type-check-only) ufunc subclasses as ufunc aliases during
    # runtime; this helps autocompletion tools such as Jedi (numpy/numpy#19834)
    _UFunc_Nin1_Nout1 = ufunc
    _UFunc_Nin2_Nout1 = ufunc
    _UFunc_Nin1_Nout2 = ufunc
    _UFunc_Nin2_Nout2 = ufunc
    _GUFunc_Nin2_Nout1 = ufunc
venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (6.13 kB). View file
 
venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_add_docstring.cpython-310.pyc ADDED
Binary file (3.81 kB). View file
 
venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_array_like.cpython-310.pyc ADDED
Binary file (3.35 kB). View file
 
venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_char_codes.cpython-310.pyc ADDED
Binary file (5.11 kB). View file
 
venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_dtype_like.cpython-310.pyc ADDED
Binary file (3.69 kB). View file
 
venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_extended_precision.cpython-310.pyc ADDED
Binary file (873 Bytes). View file
 
venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_nbit.cpython-310.pyc ADDED
Binary file (480 Bytes). View file
 
venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_nested_sequence.cpython-310.pyc ADDED
Binary file (3.28 kB). View file
 
venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_scalars.cpython-310.pyc ADDED
Binary file (770 Bytes). View file
 
venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/_shape.cpython-310.pyc ADDED
Binary file (352 Bytes). View file
 
venv/lib/python3.10/site-packages/numpy/_typing/__pycache__/setup.cpython-310.pyc ADDED
Binary file (562 Bytes). View file
 
venv/lib/python3.10/site-packages/numpy/_typing/_add_docstring.py ADDED
@@ -0,0 +1,152 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """A module for creating docstrings for sphinx ``data`` domains."""
2
+
3
+ import re
4
+ import textwrap
5
+
6
+ from ._array_like import NDArray
7
+
8
+ _docstrings_list = []
9
+
10
+
11
+ def add_newdoc(name: str, value: str, doc: str) -> None:
12
+ """Append ``_docstrings_list`` with a docstring for `name`.
13
+
14
+ Parameters
15
+ ----------
16
+ name : str
17
+ The name of the object.
18
+ value : str
19
+ A string-representation of the object.
20
+ doc : str
21
+ The docstring of the object.
22
+
23
+ """
24
+ _docstrings_list.append((name, value, doc))
25
+
26
+
27
+ def _parse_docstrings() -> str:
28
+ """Convert all docstrings in ``_docstrings_list`` into a single
29
+ sphinx-legible text block.
30
+
31
+ """
32
+ type_list_ret = []
33
+ for name, value, doc in _docstrings_list:
34
+ s = textwrap.dedent(doc).replace("\n", "\n ")
35
+
36
+ # Replace sections by rubrics
37
+ lines = s.split("\n")
38
+ new_lines = []
39
+ indent = ""
40
+ for line in lines:
41
+ m = re.match(r'^(\s+)[-=]+\s*$', line)
42
+ if m and new_lines:
43
+ prev = textwrap.dedent(new_lines.pop())
44
+ if prev == "Examples":
45
+ indent = ""
46
+ new_lines.append(f'{m.group(1)}.. rubric:: {prev}')
47
+ else:
48
+ indent = 4 * " "
49
+ new_lines.append(f'{m.group(1)}.. admonition:: {prev}')
50
+ new_lines.append("")
51
+ else:
52
+ new_lines.append(f"{indent}{line}")
53
+
54
+ s = "\n".join(new_lines)
55
+ s_block = f""".. data:: {name}\n :value: {value}\n {s}"""
56
+ type_list_ret.append(s_block)
57
+ return "\n".join(type_list_ret)
58
+
59
+
60
+ add_newdoc('ArrayLike', 'typing.Union[...]',
61
+ """
62
+ A `~typing.Union` representing objects that can be coerced
63
+ into an `~numpy.ndarray`.
64
+
65
+ Among others this includes the likes of:
66
+
67
+ * Scalars.
68
+ * (Nested) sequences.
69
+ * Objects implementing the `~class.__array__` protocol.
70
+
71
+ .. versionadded:: 1.20
72
+
73
+ See Also
74
+ --------
75
+ :term:`array_like`:
76
+ Any scalar or sequence that can be interpreted as an ndarray.
77
+
78
+ Examples
79
+ --------
80
+ .. code-block:: python
81
+
82
+ >>> import numpy as np
83
+ >>> import numpy.typing as npt
84
+
85
+ >>> def as_array(a: npt.ArrayLike) -> np.ndarray:
86
+ ... return np.array(a)
87
+
88
+ """)
89
+
90
+ add_newdoc('DTypeLike', 'typing.Union[...]',
91
+ """
92
+ A `~typing.Union` representing objects that can be coerced
93
+ into a `~numpy.dtype`.
94
+
95
+ Among others this includes the likes of:
96
+
97
+ * :class:`type` objects.
98
+ * Character codes or the names of :class:`type` objects.
99
+ * Objects with the ``.dtype`` attribute.
100
+
101
+ .. versionadded:: 1.20
102
+
103
+ See Also
104
+ --------
105
+ :ref:`Specifying and constructing data types <arrays.dtypes.constructing>`
106
+ A comprehensive overview of all objects that can be coerced
107
+ into data types.
108
+
109
+ Examples
110
+ --------
111
+ .. code-block:: python
112
+
113
+ >>> import numpy as np
114
+ >>> import numpy.typing as npt
115
+
116
+ >>> def as_dtype(d: npt.DTypeLike) -> np.dtype:
117
+ ... return np.dtype(d)
118
+
119
+ """)
120
+
121
+ add_newdoc('NDArray', repr(NDArray),
122
+ """
123
+ A :term:`generic <generic type>` version of
124
+ `np.ndarray[Any, np.dtype[+ScalarType]] <numpy.ndarray>`.
125
+
126
+ Can be used during runtime for typing arrays with a given dtype
127
+ and unspecified shape.
128
+
129
+ .. versionadded:: 1.21
130
+
131
+ Examples
132
+ --------
133
+ .. code-block:: python
134
+
135
+ >>> import numpy as np
136
+ >>> import numpy.typing as npt
137
+
138
+ >>> print(npt.NDArray)
139
+ numpy.ndarray[typing.Any, numpy.dtype[+ScalarType]]
140
+
141
+ >>> print(npt.NDArray[np.float64])
142
+ numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]]
143
+
144
+ >>> NDArrayInt = npt.NDArray[np.int_]
145
+ >>> a: NDArrayInt = np.arange(10)
146
+
147
+ >>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]:
148
+ ... return np.array(a)
149
+
150
+ """)
151
+
152
+ _docstrings = _parse_docstrings()
venv/lib/python3.10/site-packages/numpy/_typing/_array_like.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+ from collections.abc import Collection, Callable, Sequence
5
+ from typing import Any, Protocol, Union, TypeVar, runtime_checkable
6
+
7
+ from numpy import (
8
+ ndarray,
9
+ dtype,
10
+ generic,
11
+ bool_,
12
+ unsignedinteger,
13
+ integer,
14
+ floating,
15
+ complexfloating,
16
+ number,
17
+ timedelta64,
18
+ datetime64,
19
+ object_,
20
+ void,
21
+ str_,
22
+ bytes_,
23
+ )
24
+ from ._nested_sequence import _NestedSequence
25
+
26
# Type variables shared by the array-like aliases in this module.
_T = TypeVar("_T")
_ScalarType = TypeVar("_ScalarType", bound=generic)
_ScalarType_co = TypeVar("_ScalarType_co", bound=generic, covariant=True)
_DType = TypeVar("_DType", bound=dtype[Any])
_DType_co = TypeVar("_DType_co", covariant=True, bound=dtype[Any])

# An `ndarray` parametrized w.r.t. its scalar type, with the shape left
# unspecified.
NDArray = ndarray[Any, dtype[_ScalarType_co]]


@runtime_checkable
class _SupportsArray(Protocol[_DType_co]):
    """Protocol for objects coercible to an array via ``__array__``.

    Only the default dtype (i.e. ``dtype=None`` or no ``dtype`` parameter
    at all) of the to-be-returned array matters here; concrete
    implementations of the protocol are responsible for adding any and all
    remaining overloads.
    """

    def __array__(self) -> ndarray[Any, _DType_co]: ...


@runtime_checkable
class _SupportsArrayFunc(Protocol):
    """A protocol class representing `~class.__array_function__`."""

    def __array_function__(
        self,
        func: Callable[..., Any],
        types: Collection[type[Any]],
        args: tuple[Any, ...],
        kwargs: dict[str, Any],
    ) -> object: ...
54
+
55
+
56
# TODO: Wait until mypy supports recursive objects in combination with typevars
# Bounded-depth stand-in for a truly recursive nested sequence (max 4 levels).
_FiniteNestedSequence = Union[
    _T,
    Sequence[_T],
    Sequence[Sequence[_T]],
    Sequence[Sequence[Sequence[_T]]],
    Sequence[Sequence[Sequence[Sequence[_T]]]],
]

# A subset of `npt.ArrayLike` that can be parametrized w.r.t. `np.generic`
_ArrayLike = Union[
    _SupportsArray[dtype[_ScalarType]],
    _NestedSequence[_SupportsArray[dtype[_ScalarType]]],
]

# A union representing array-like objects; consists of two typevars:
# One representing types that can be parametrized w.r.t. `np.dtype`
# and another one for the rest
_DualArrayLike = Union[
    _SupportsArray[_DType],
    _NestedSequence[_SupportsArray[_DType]],
    _T,
    _NestedSequence[_T],
]

# On 3.12+ anything implementing the buffer protocol is also array-like.
if sys.version_info >= (3, 12):
    from collections.abc import Buffer

    ArrayLike = Buffer | _DualArrayLike[
        dtype[Any],
        Union[bool, int, float, complex, str, bytes],
    ]
else:
    ArrayLike = _DualArrayLike[
        dtype[Any],
        Union[bool, int, float, complex, str, bytes],
    ]

# `ArrayLike<X>_co`: array-like objects that can be coerced into `X`
# given the casting rules `same_kind`
_ArrayLikeBool_co = _DualArrayLike[
    dtype[bool_],
    bool,
]
_ArrayLikeUInt_co = _DualArrayLike[
    dtype[Union[bool_, unsignedinteger[Any]]],
    bool,
]
_ArrayLikeInt_co = _DualArrayLike[
    dtype[Union[bool_, integer[Any]]],
    Union[bool, int],
]
_ArrayLikeFloat_co = _DualArrayLike[
    dtype[Union[bool_, integer[Any], floating[Any]]],
    Union[bool, int, float],
]
_ArrayLikeComplex_co = _DualArrayLike[
    dtype[Union[
        bool_,
        integer[Any],
        floating[Any],
        complexfloating[Any, Any],
    ]],
    Union[bool, int, float, complex],
]
_ArrayLikeNumber_co = _DualArrayLike[
    dtype[Union[bool_, number[Any]]],
    Union[bool, int, float, complex],
]
_ArrayLikeTD64_co = _DualArrayLike[
    dtype[Union[bool_, integer[Any], timedelta64]],
    Union[bool, int],
]
# NOTE: datetime64/object_/void have no builtin-scalar counterpart, hence the
# plain `Union` form rather than `_DualArrayLike`.
_ArrayLikeDT64_co = Union[
    _SupportsArray[dtype[datetime64]],
    _NestedSequence[_SupportsArray[dtype[datetime64]]],
]
_ArrayLikeObject_co = Union[
    _SupportsArray[dtype[object_]],
    _NestedSequence[_SupportsArray[dtype[object_]]],
]

_ArrayLikeVoid_co = Union[
    _SupportsArray[dtype[void]],
    _NestedSequence[_SupportsArray[dtype[void]]],
]
_ArrayLikeStr_co = _DualArrayLike[
    dtype[str_],
    str,
]
_ArrayLikeBytes_co = _DualArrayLike[
    dtype[bytes_],
    bytes,
]

_ArrayLikeInt = _DualArrayLike[
    dtype[integer[Any]],
    int,
]

# Extra ArrayLike type so that pyright can deal with NDArray[Any]
# Used as the first overload, should only match NDArray[Any],
# not any actual types.
# https://github.com/numpy/numpy/pull/22193
class _UnknownType:
    ...


_ArrayLikeUnknown = _DualArrayLike[
    dtype[_UnknownType],
    _UnknownType,
]
venv/lib/python3.10/site-packages/numpy/_typing/_callable.pyi ADDED
@@ -0,0 +1,338 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A module with various ``typing.Protocol`` subclasses that implement
3
+ the ``__call__`` magic method.
4
+
5
+ See the `Mypy documentation`_ on protocols for more details.
6
+
7
+ .. _`Mypy documentation`: https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols
8
+
9
+ """
10
+
11
+ from __future__ import annotations
12
+
13
+ from typing import (
14
+ TypeVar,
15
+ overload,
16
+ Any,
17
+ NoReturn,
18
+ Protocol,
19
+ )
20
+
21
+ from numpy import (
22
+ ndarray,
23
+ dtype,
24
+ generic,
25
+ bool_,
26
+ timedelta64,
27
+ number,
28
+ integer,
29
+ unsignedinteger,
30
+ signedinteger,
31
+ int8,
32
+ int_,
33
+ floating,
34
+ float64,
35
+ complexfloating,
36
+ complex128,
37
+ )
38
+ from ._nbit import _NBitInt, _NBitDouble
39
+ from ._scalars import (
40
+ _BoolLike_co,
41
+ _IntLike_co,
42
+ _FloatLike_co,
43
+ _NumberLike_co,
44
+ )
45
+ from . import NBitBase
46
+ from ._array_like import NDArray
47
+ from ._nested_sequence import _NestedSequence
48
+
49
# Generic type variables used by the callback protocols below.
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2")
_T1_contra = TypeVar("_T1_contra", contravariant=True)
_T2_contra = TypeVar("_T2_contra", contravariant=True)
# Homogeneous pair, e.g. the result of `divmod`.
_2Tuple = tuple[_T1, _T1]

# Precision type variables bounded by the `NBitBase` hierarchy.
_NBit1 = TypeVar("_NBit1", bound=NBitBase)
_NBit2 = TypeVar("_NBit2", bound=NBitBase)

# Scalar-kind type variables used to express "same kind in, same kind out".
_IntType = TypeVar("_IntType", bound=integer)
_FloatType = TypeVar("_FloatType", bound=floating)
_NumberType = TypeVar("_NumberType", bound=number)
_NumberType_co = TypeVar("_NumberType_co", covariant=True, bound=number)
_GenericType_co = TypeVar("_GenericType_co", covariant=True, bound=generic)
63
+
64
# Arithmetic on `bool_`: bool operands stay `_GenericType_co`, builtin
# scalars promote to the default-precision numpy scalar of the same kind.
class _BoolOp(Protocol[_GenericType_co]):
    @overload
    def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ...
    @overload  # platform dependent
    def __call__(self, other: int, /) -> int_: ...
    @overload
    def __call__(self, other: float, /) -> float64: ...
    @overload
    def __call__(self, other: complex, /) -> complex128: ...
    @overload
    def __call__(self, other: _NumberType, /) -> _NumberType: ...


# Bitwise ops on `bool_`: only boolean and integral operands are valid.
class _BoolBitOp(Protocol[_GenericType_co]):
    @overload
    def __call__(self, other: _BoolLike_co, /) -> _GenericType_co: ...
    @overload  # platform dependent
    def __call__(self, other: int, /) -> int_: ...
    @overload
    def __call__(self, other: _IntType, /) -> _IntType: ...


# Subtraction involving `bool_`: bool - bool is disallowed by numpy, hence
# the `NoReturn` overload.
class _BoolSub(Protocol):
    # Note that `other: bool_` is absent here
    @overload
    def __call__(self, other: bool, /) -> NoReturn: ...
    @overload  # platform dependent
    def __call__(self, other: int, /) -> int_: ...
    @overload
    def __call__(self, other: float, /) -> float64: ...
    @overload
    def __call__(self, other: complex, /) -> complex128: ...
    @overload
    def __call__(self, other: _NumberType, /) -> _NumberType: ...


# True division on `bool_` always produces an inexact type.
class _BoolTrueDiv(Protocol):
    @overload
    def __call__(self, other: float | _IntLike_co, /) -> float64: ...
    @overload
    def __call__(self, other: complex, /) -> complex128: ...
    @overload
    def __call__(self, other: _NumberType, /) -> _NumberType: ...


# Modulo on `bool_`: bool % bool yields the smallest signed integer (`int8`).
class _BoolMod(Protocol):
    @overload
    def __call__(self, other: _BoolLike_co, /) -> int8: ...
    @overload  # platform dependent
    def __call__(self, other: int, /) -> int_: ...
    @overload
    def __call__(self, other: float, /) -> float64: ...
    @overload
    def __call__(self, other: _IntType, /) -> _IntType: ...
    @overload
    def __call__(self, other: _FloatType, /) -> _FloatType: ...


# `divmod` on `bool_`: same promotion as `_BoolMod`, returned as a pair.
class _BoolDivMod(Protocol):
    @overload
    def __call__(self, other: _BoolLike_co, /) -> _2Tuple[int8]: ...
    @overload  # platform dependent
    def __call__(self, other: int, /) -> _2Tuple[int_]: ...
    @overload
    def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
    @overload
    def __call__(self, other: _IntType, /) -> _2Tuple[_IntType]: ...
    @overload
    def __call__(self, other: _FloatType, /) -> _2Tuple[_FloatType]: ...
128
+
129
# Division of `timedelta64`: td/td yields a number, td/number yields a
# timedelta, and dividing by a bool is disallowed.
class _TD64Div(Protocol[_NumberType_co]):
    @overload
    def __call__(self, other: timedelta64, /) -> _NumberType_co: ...
    @overload
    def __call__(self, other: _BoolLike_co, /) -> NoReturn: ...
    @overload
    def __call__(self, other: _FloatLike_co, /) -> timedelta64: ...


# True division on integers: the result is always floating, with the
# precision given by the union of both operands' nbit.
class _IntTrueDiv(Protocol[_NBit1]):
    @overload
    def __call__(self, other: bool, /) -> floating[_NBit1]: ...
    @overload
    def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
    @overload
    def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: complex, /,
    ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
    @overload
    def __call__(self, other: integer[_NBit2], /) -> floating[_NBit1 | _NBit2]: ...
150
+
151
# Arithmetic on unsigned integers.  Mixing with signed values returns `Any`
# because the result kind depends on the concrete precisions involved.
class _UnsignedIntOp(Protocol[_NBit1]):
    # NOTE: `uint64 + signedinteger -> float64`
    @overload
    def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
    @overload
    def __call__(
        self, other: int | signedinteger[Any], /
    ) -> Any: ...
    @overload
    def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: complex, /,
    ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: unsignedinteger[_NBit2], /
    ) -> unsignedinteger[_NBit1 | _NBit2]: ...


# Bitwise ops on unsigned integers; mixing with signed yields signed.
class _UnsignedIntBitOp(Protocol[_NBit1]):
    @overload
    def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
    @overload
    def __call__(self, other: int, /) -> signedinteger[Any]: ...
    @overload
    def __call__(self, other: signedinteger[Any], /) -> signedinteger[Any]: ...
    @overload
    def __call__(
        self, other: unsignedinteger[_NBit2], /
    ) -> unsignedinteger[_NBit1 | _NBit2]: ...


# Modulo on unsigned integers; same mixed-signedness caveat as above.
class _UnsignedIntMod(Protocol[_NBit1]):
    @overload
    def __call__(self, other: bool, /) -> unsignedinteger[_NBit1]: ...
    @overload
    def __call__(
        self, other: int | signedinteger[Any], /
    ) -> Any: ...
    @overload
    def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: unsignedinteger[_NBit2], /
    ) -> unsignedinteger[_NBit1 | _NBit2]: ...


# `divmod` on unsigned integers, returning a homogeneous pair.
class _UnsignedIntDivMod(Protocol[_NBit1]):
    @overload
    def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ...
    @overload
    def __call__(
        self, other: int | signedinteger[Any], /
    ) -> _2Tuple[Any]: ...
    @overload
    def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
    @overload
    def __call__(
        self, other: unsignedinteger[_NBit2], /
    ) -> _2Tuple[unsignedinteger[_NBit1 | _NBit2]]: ...
209
+
210
# Arithmetic on signed integers; precision of the result is the union of
# both operands' nbit, promoting to floating/complex for inexact operands.
class _SignedIntOp(Protocol[_NBit1]):
    @overload
    def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
    @overload
    def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
    @overload
    def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: complex, /,
    ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: signedinteger[_NBit2], /,
    ) -> signedinteger[_NBit1 | _NBit2]: ...


# Bitwise ops on signed integers: integral operands only.
class _SignedIntBitOp(Protocol[_NBit1]):
    @overload
    def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
    @overload
    def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
    @overload
    def __call__(
        self, other: signedinteger[_NBit2], /,
    ) -> signedinteger[_NBit1 | _NBit2]: ...


# Modulo on signed integers.
class _SignedIntMod(Protocol[_NBit1]):
    @overload
    def __call__(self, other: bool, /) -> signedinteger[_NBit1]: ...
    @overload
    def __call__(self, other: int, /) -> signedinteger[_NBit1 | _NBitInt]: ...
    @overload
    def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
    @overload
    def __call__(
        self, other: signedinteger[_NBit2], /,
    ) -> signedinteger[_NBit1 | _NBit2]: ...


# `divmod` on signed integers, returning a homogeneous pair.
class _SignedIntDivMod(Protocol[_NBit1]):
    @overload
    def __call__(self, other: bool, /) -> _2Tuple[signedinteger[_NBit1]]: ...
    @overload
    def __call__(self, other: int, /) -> _2Tuple[signedinteger[_NBit1 | _NBitInt]]: ...
    @overload
    def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
    @overload
    def __call__(
        self, other: signedinteger[_NBit2], /,
    ) -> _2Tuple[signedinteger[_NBit1 | _NBit2]]: ...
259
+
260
+ class _FloatOp(Protocol[_NBit1]):
261
+ @overload
262
+ def __call__(self, other: bool, /) -> floating[_NBit1]: ...
263
+ @overload
264
+ def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
265
+ @overload
266
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
267
+ @overload
268
+ def __call__(
269
+ self, other: complex, /,
270
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
271
+ @overload
272
+ def __call__(
273
+ self, other: integer[_NBit2] | floating[_NBit2], /
274
+ ) -> floating[_NBit1 | _NBit2]: ...
275
+
276
+ class _FloatMod(Protocol[_NBit1]):
277
+ @overload
278
+ def __call__(self, other: bool, /) -> floating[_NBit1]: ...
279
+ @overload
280
+ def __call__(self, other: int, /) -> floating[_NBit1 | _NBitInt]: ...
281
+ @overload
282
+ def __call__(self, other: float, /) -> floating[_NBit1 | _NBitDouble]: ...
283
+ @overload
284
+ def __call__(
285
+ self, other: integer[_NBit2] | floating[_NBit2], /
286
+ ) -> floating[_NBit1 | _NBit2]: ...
287
+
288
+ class _FloatDivMod(Protocol[_NBit1]):
289
+ @overload
290
+ def __call__(self, other: bool, /) -> _2Tuple[floating[_NBit1]]: ...
291
+ @overload
292
+ def __call__(self, other: int, /) -> _2Tuple[floating[_NBit1 | _NBitInt]]: ...
293
+ @overload
294
+ def __call__(self, other: float, /) -> _2Tuple[floating[_NBit1 | _NBitDouble]]: ...
295
+ @overload
296
+ def __call__(
297
+ self, other: integer[_NBit2] | floating[_NBit2], /
298
+ ) -> _2Tuple[floating[_NBit1 | _NBit2]]: ...
299
+
300
+ class _ComplexOp(Protocol[_NBit1]):
301
+ @overload
302
+ def __call__(self, other: bool, /) -> complexfloating[_NBit1, _NBit1]: ...
303
+ @overload
304
+ def __call__(self, other: int, /) -> complexfloating[_NBit1 | _NBitInt, _NBit1 | _NBitInt]: ...
305
+ @overload
306
+ def __call__(
307
+ self, other: complex, /,
308
+ ) -> complexfloating[_NBit1 | _NBitDouble, _NBit1 | _NBitDouble]: ...
309
+ @overload
310
+ def __call__(
311
+ self,
312
+ other: (
313
+ integer[_NBit2]
314
+ | floating[_NBit2]
315
+ | complexfloating[_NBit2, _NBit2]
316
+ ), /,
317
+ ) -> complexfloating[_NBit1 | _NBit2, _NBit1 | _NBit2]: ...
318
+
319
+ class _NumberOp(Protocol):
320
+ def __call__(self, other: _NumberLike_co, /) -> Any: ...
321
+
322
+ class _SupportsLT(Protocol):
323
+ def __lt__(self, other: Any, /) -> object: ...
324
+
325
+ class _SupportsGT(Protocol):
326
+ def __gt__(self, other: Any, /) -> object: ...
327
+
328
+ class _ComparisonOp(Protocol[_T1_contra, _T2_contra]):
329
+ @overload
330
+ def __call__(self, other: _T1_contra, /) -> bool_: ...
331
+ @overload
332
+ def __call__(self, other: _T2_contra, /) -> NDArray[bool_]: ...
333
+ @overload
334
+ def __call__(
335
+ self,
336
+ other: _SupportsLT | _SupportsGT | _NestedSequence[_SupportsLT | _SupportsGT],
337
+ /,
338
+ ) -> Any: ...
venv/lib/python3.10/site-packages/numpy/_typing/_char_codes.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Literal
2
+
3
+ _BoolCodes = Literal["?", "=?", "<?", ">?", "bool", "bool_", "bool8"]
4
+
5
+ _UInt8Codes = Literal["uint8", "u1", "=u1", "<u1", ">u1"]
6
+ _UInt16Codes = Literal["uint16", "u2", "=u2", "<u2", ">u2"]
7
+ _UInt32Codes = Literal["uint32", "u4", "=u4", "<u4", ">u4"]
8
+ _UInt64Codes = Literal["uint64", "u8", "=u8", "<u8", ">u8"]
9
+
10
+ _Int8Codes = Literal["int8", "i1", "=i1", "<i1", ">i1"]
11
+ _Int16Codes = Literal["int16", "i2", "=i2", "<i2", ">i2"]
12
+ _Int32Codes = Literal["int32", "i4", "=i4", "<i4", ">i4"]
13
+ _Int64Codes = Literal["int64", "i8", "=i8", "<i8", ">i8"]
14
+
15
+ _Float16Codes = Literal["float16", "f2", "=f2", "<f2", ">f2"]
16
+ _Float32Codes = Literal["float32", "f4", "=f4", "<f4", ">f4"]
17
+ _Float64Codes = Literal["float64", "f8", "=f8", "<f8", ">f8"]
18
+
19
+ _Complex64Codes = Literal["complex64", "c8", "=c8", "<c8", ">c8"]
20
+ _Complex128Codes = Literal["complex128", "c16", "=c16", "<c16", ">c16"]
21
+
22
+ _ByteCodes = Literal["byte", "b", "=b", "<b", ">b"]
23
+ _ShortCodes = Literal["short", "h", "=h", "<h", ">h"]
24
+ _IntCCodes = Literal["intc", "i", "=i", "<i", ">i"]
25
+ _IntPCodes = Literal["intp", "int0", "p", "=p", "<p", ">p"]
26
+ _IntCodes = Literal["long", "int", "int_", "l", "=l", "<l", ">l"]
27
+ _LongLongCodes = Literal["longlong", "q", "=q", "<q", ">q"]
28
+
29
+ _UByteCodes = Literal["ubyte", "B", "=B", "<B", ">B"]
30
+ _UShortCodes = Literal["ushort", "H", "=H", "<H", ">H"]
31
+ _UIntCCodes = Literal["uintc", "I", "=I", "<I", ">I"]
32
+ _UIntPCodes = Literal["uintp", "uint0", "P", "=P", "<P", ">P"]
33
+ _UIntCodes = Literal["ulong", "uint", "L", "=L", "<L", ">L"]
34
+ _ULongLongCodes = Literal["ulonglong", "Q", "=Q", "<Q", ">Q"]
35
+
36
+ _HalfCodes = Literal["half", "e", "=e", "<e", ">e"]
37
+ _SingleCodes = Literal["single", "f", "=f", "<f", ">f"]
38
+ _DoubleCodes = Literal["double", "float", "float_", "d", "=d", "<d", ">d"]
39
+ _LongDoubleCodes = Literal["longdouble", "longfloat", "g", "=g", "<g", ">g"]
40
+
41
+ _CSingleCodes = Literal["csingle", "singlecomplex", "F", "=F", "<F", ">F"]
42
+ _CDoubleCodes = Literal["cdouble", "complex", "complex_", "cfloat", "D", "=D", "<D", ">D"]
43
+ _CLongDoubleCodes = Literal["clongdouble", "clongfloat", "longcomplex", "G", "=G", "<G", ">G"]
44
+
45
+ _StrCodes = Literal["str", "str_", "str0", "unicode", "unicode_", "U", "=U", "<U", ">U"]
46
+ _BytesCodes = Literal["bytes", "bytes_", "bytes0", "S", "=S", "<S", ">S"]
47
+ _VoidCodes = Literal["void", "void0", "V", "=V", "<V", ">V"]
48
+ _ObjectCodes = Literal["object", "object_", "O", "=O", "<O", ">O"]
49
+
50
+ _DT64Codes = Literal[
51
+ "datetime64", "=datetime64", "<datetime64", ">datetime64",
52
+ "datetime64[Y]", "=datetime64[Y]", "<datetime64[Y]", ">datetime64[Y]",
53
+ "datetime64[M]", "=datetime64[M]", "<datetime64[M]", ">datetime64[M]",
54
+ "datetime64[W]", "=datetime64[W]", "<datetime64[W]", ">datetime64[W]",
55
+ "datetime64[D]", "=datetime64[D]", "<datetime64[D]", ">datetime64[D]",
56
+ "datetime64[h]", "=datetime64[h]", "<datetime64[h]", ">datetime64[h]",
57
+ "datetime64[m]", "=datetime64[m]", "<datetime64[m]", ">datetime64[m]",
58
+ "datetime64[s]", "=datetime64[s]", "<datetime64[s]", ">datetime64[s]",
59
+ "datetime64[ms]", "=datetime64[ms]", "<datetime64[ms]", ">datetime64[ms]",
60
+ "datetime64[us]", "=datetime64[us]", "<datetime64[us]", ">datetime64[us]",
61
+ "datetime64[ns]", "=datetime64[ns]", "<datetime64[ns]", ">datetime64[ns]",
62
+ "datetime64[ps]", "=datetime64[ps]", "<datetime64[ps]", ">datetime64[ps]",
63
+ "datetime64[fs]", "=datetime64[fs]", "<datetime64[fs]", ">datetime64[fs]",
64
+ "datetime64[as]", "=datetime64[as]", "<datetime64[as]", ">datetime64[as]",
65
+ "M", "=M", "<M", ">M",
66
+ "M8", "=M8", "<M8", ">M8",
67
+ "M8[Y]", "=M8[Y]", "<M8[Y]", ">M8[Y]",
68
+ "M8[M]", "=M8[M]", "<M8[M]", ">M8[M]",
69
+ "M8[W]", "=M8[W]", "<M8[W]", ">M8[W]",
70
+ "M8[D]", "=M8[D]", "<M8[D]", ">M8[D]",
71
+ "M8[h]", "=M8[h]", "<M8[h]", ">M8[h]",
72
+ "M8[m]", "=M8[m]", "<M8[m]", ">M8[m]",
73
+ "M8[s]", "=M8[s]", "<M8[s]", ">M8[s]",
74
+ "M8[ms]", "=M8[ms]", "<M8[ms]", ">M8[ms]",
75
+ "M8[us]", "=M8[us]", "<M8[us]", ">M8[us]",
76
+ "M8[ns]", "=M8[ns]", "<M8[ns]", ">M8[ns]",
77
+ "M8[ps]", "=M8[ps]", "<M8[ps]", ">M8[ps]",
78
+ "M8[fs]", "=M8[fs]", "<M8[fs]", ">M8[fs]",
79
+ "M8[as]", "=M8[as]", "<M8[as]", ">M8[as]",
80
+ ]
81
+ _TD64Codes = Literal[
82
+ "timedelta64", "=timedelta64", "<timedelta64", ">timedelta64",
83
+ "timedelta64[Y]", "=timedelta64[Y]", "<timedelta64[Y]", ">timedelta64[Y]",
84
+ "timedelta64[M]", "=timedelta64[M]", "<timedelta64[M]", ">timedelta64[M]",
85
+ "timedelta64[W]", "=timedelta64[W]", "<timedelta64[W]", ">timedelta64[W]",
86
+ "timedelta64[D]", "=timedelta64[D]", "<timedelta64[D]", ">timedelta64[D]",
87
+ "timedelta64[h]", "=timedelta64[h]", "<timedelta64[h]", ">timedelta64[h]",
88
+ "timedelta64[m]", "=timedelta64[m]", "<timedelta64[m]", ">timedelta64[m]",
89
+ "timedelta64[s]", "=timedelta64[s]", "<timedelta64[s]", ">timedelta64[s]",
90
+ "timedelta64[ms]", "=timedelta64[ms]", "<timedelta64[ms]", ">timedelta64[ms]",
91
+ "timedelta64[us]", "=timedelta64[us]", "<timedelta64[us]", ">timedelta64[us]",
92
+ "timedelta64[ns]", "=timedelta64[ns]", "<timedelta64[ns]", ">timedelta64[ns]",
93
+ "timedelta64[ps]", "=timedelta64[ps]", "<timedelta64[ps]", ">timedelta64[ps]",
94
+ "timedelta64[fs]", "=timedelta64[fs]", "<timedelta64[fs]", ">timedelta64[fs]",
95
+ "timedelta64[as]", "=timedelta64[as]", "<timedelta64[as]", ">timedelta64[as]",
96
+ "m", "=m", "<m", ">m",
97
+ "m8", "=m8", "<m8", ">m8",
98
+ "m8[Y]", "=m8[Y]", "<m8[Y]", ">m8[Y]",
99
+ "m8[M]", "=m8[M]", "<m8[M]", ">m8[M]",
100
+ "m8[W]", "=m8[W]", "<m8[W]", ">m8[W]",
101
+ "m8[D]", "=m8[D]", "<m8[D]", ">m8[D]",
102
+ "m8[h]", "=m8[h]", "<m8[h]", ">m8[h]",
103
+ "m8[m]", "=m8[m]", "<m8[m]", ">m8[m]",
104
+ "m8[s]", "=m8[s]", "<m8[s]", ">m8[s]",
105
+ "m8[ms]", "=m8[ms]", "<m8[ms]", ">m8[ms]",
106
+ "m8[us]", "=m8[us]", "<m8[us]", ">m8[us]",
107
+ "m8[ns]", "=m8[ns]", "<m8[ns]", ">m8[ns]",
108
+ "m8[ps]", "=m8[ps]", "<m8[ps]", ">m8[ps]",
109
+ "m8[fs]", "=m8[fs]", "<m8[fs]", ">m8[fs]",
110
+ "m8[as]", "=m8[as]", "<m8[as]", ">m8[as]",
111
+ ]
venv/lib/python3.10/site-packages/numpy/_typing/_extended_precision.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """A module with platform-specific extended precision
2
+ `numpy.number` subclasses.
3
+
4
+ The subclasses are defined here (instead of ``__init__.pyi``) such
5
+ that they can be imported conditionally via the numpy's mypy plugin.
6
+ """
7
+
8
+ import numpy as np
9
+ from . import (
10
+ _80Bit,
11
+ _96Bit,
12
+ _128Bit,
13
+ _256Bit,
14
+ )
15
+
16
+ uint128 = np.unsignedinteger[_128Bit]
17
+ uint256 = np.unsignedinteger[_256Bit]
18
+ int128 = np.signedinteger[_128Bit]
19
+ int256 = np.signedinteger[_256Bit]
20
+ float80 = np.floating[_80Bit]
21
+ float96 = np.floating[_96Bit]
22
+ float128 = np.floating[_128Bit]
23
+ float256 = np.floating[_256Bit]
24
+ complex160 = np.complexfloating[_80Bit, _80Bit]
25
+ complex192 = np.complexfloating[_96Bit, _96Bit]
26
+ complex256 = np.complexfloating[_128Bit, _128Bit]
27
+ complex512 = np.complexfloating[_256Bit, _256Bit]
venv/lib/python3.10/site-packages/numpy/_typing/_nbit.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """A module with the precisions of platform-specific `~numpy.number`s."""
2
+
3
+ from typing import Any
4
+
5
+ # To-be replaced with a `npt.NBitBase` subclass by numpy's mypy plugin
6
+ _NBitByte = Any
7
+ _NBitShort = Any
8
+ _NBitIntC = Any
9
+ _NBitIntP = Any
10
+ _NBitInt = Any
11
+ _NBitLongLong = Any
12
+
13
+ _NBitHalf = Any
14
+ _NBitSingle = Any
15
+ _NBitDouble = Any
16
+ _NBitLongDouble = Any
venv/lib/python3.10/site-packages/numpy/_typing/_nested_sequence.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """A module containing the `_NestedSequence` protocol."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections.abc import Iterator
6
+ from typing import (
7
+ Any,
8
+ TypeVar,
9
+ Protocol,
10
+ runtime_checkable,
11
+ )
12
+
13
+ __all__ = ["_NestedSequence"]
14
+
15
+ _T_co = TypeVar("_T_co", covariant=True)
16
+
17
+
18
+ @runtime_checkable
19
+ class _NestedSequence(Protocol[_T_co]):
20
+ """A protocol for representing nested sequences.
21
+
22
+ Warning
23
+ -------
24
+ `_NestedSequence` currently does not work in combination with typevars,
25
+ *e.g.* ``def func(a: _NestedSequnce[T]) -> T: ...``.
26
+
27
+ See Also
28
+ --------
29
+ collections.abc.Sequence
30
+ ABCs for read-only and mutable :term:`sequences`.
31
+
32
+ Examples
33
+ --------
34
+ .. code-block:: python
35
+
36
+ >>> from __future__ import annotations
37
+
38
+ >>> from typing import TYPE_CHECKING
39
+ >>> import numpy as np
40
+ >>> from numpy._typing import _NestedSequence
41
+
42
+ >>> def get_dtype(seq: _NestedSequence[float]) -> np.dtype[np.float64]:
43
+ ... return np.asarray(seq).dtype
44
+
45
+ >>> a = get_dtype([1.0])
46
+ >>> b = get_dtype([[1.0]])
47
+ >>> c = get_dtype([[[1.0]]])
48
+ >>> d = get_dtype([[[[1.0]]]])
49
+
50
+ >>> if TYPE_CHECKING:
51
+ ... reveal_locals()
52
+ ... # note: Revealed local types are:
53
+ ... # note: a: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
54
+ ... # note: b: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
55
+ ... # note: c: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
56
+ ... # note: d: numpy.dtype[numpy.floating[numpy._typing._64Bit]]
57
+
58
+ """
59
+
60
+ def __len__(self, /) -> int:
61
+ """Implement ``len(self)``."""
62
+ raise NotImplementedError
63
+
64
+ def __getitem__(self, index: int, /) -> _T_co | _NestedSequence[_T_co]:
65
+ """Implement ``self[x]``."""
66
+ raise NotImplementedError
67
+
68
+ def __contains__(self, x: object, /) -> bool:
69
+ """Implement ``x in self``."""
70
+ raise NotImplementedError
71
+
72
+ def __iter__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
73
+ """Implement ``iter(self)``."""
74
+ raise NotImplementedError
75
+
76
+ def __reversed__(self, /) -> Iterator[_T_co | _NestedSequence[_T_co]]:
77
+ """Implement ``reversed(self)``."""
78
+ raise NotImplementedError
79
+
80
+ def count(self, value: Any, /) -> int:
81
+ """Return the number of occurrences of `value`."""
82
+ raise NotImplementedError
83
+
84
+ def index(self, value: Any, /) -> int:
85
+ """Return the first index of `value`."""
86
+ raise NotImplementedError
venv/lib/python3.10/site-packages/numpy/_typing/_scalars.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Union, Any
2
+
3
+ import numpy as np
4
+
5
+ # NOTE: `_StrLike_co` and `_BytesLike_co` are pointless, as `np.str_` and
6
+ # `np.bytes_` are already subclasses of their builtin counterpart
7
+
8
+ _CharLike_co = Union[str, bytes]
9
+
10
+ # The 6 `<X>Like_co` type-aliases below represent all scalars that can be
11
+ # coerced into `<X>` (with the casting rule `same_kind`)
12
+ _BoolLike_co = Union[bool, np.bool_]
13
+ _UIntLike_co = Union[_BoolLike_co, np.unsignedinteger[Any]]
14
+ _IntLike_co = Union[_BoolLike_co, int, np.integer[Any]]
15
+ _FloatLike_co = Union[_IntLike_co, float, np.floating[Any]]
16
+ _ComplexLike_co = Union[_FloatLike_co, complex, np.complexfloating[Any, Any]]
17
+ _TD64Like_co = Union[_IntLike_co, np.timedelta64]
18
+
19
+ _NumberLike_co = Union[int, float, complex, np.number[Any], np.bool_]
20
+ _ScalarLike_co = Union[
21
+ int,
22
+ float,
23
+ complex,
24
+ str,
25
+ bytes,
26
+ np.generic,
27
+ ]
28
+
29
+ # `_VoidLike_co` is technically not a scalar, but it's close enough
30
+ _VoidLike_co = Union[tuple[Any, ...], np.void]
venv/lib/python3.10/site-packages/numpy/_typing/_shape.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ from collections.abc import Sequence
2
+ from typing import Union, SupportsIndex
3
+
4
+ _Shape = tuple[int, ...]
5
+
6
+ # Anything that can be coerced to a shape tuple
7
+ _ShapeLike = Union[SupportsIndex, Sequence[SupportsIndex]]
venv/lib/python3.10/site-packages/numpy/_typing/_ufunc.pyi ADDED
@@ -0,0 +1,445 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """A module with private type-check-only `numpy.ufunc` subclasses.
2
+
3
+ The signatures of the ufuncs are too varied to reasonably type
4
+ with a single class. So instead, `ufunc` has been expanded into
5
+ four private subclasses, one for each combination of
6
+ `~ufunc.nin` and `~ufunc.nout`.
7
+
8
+ """
9
+
10
+ from typing import (
11
+ Any,
12
+ Generic,
13
+ overload,
14
+ TypeVar,
15
+ Literal,
16
+ SupportsIndex,
17
+ Protocol,
18
+ )
19
+
20
+ from numpy import ufunc, _CastingKind, _OrderKACF
21
+ from numpy.typing import NDArray
22
+
23
+ from ._shape import _ShapeLike
24
+ from ._scalars import _ScalarLike_co
25
+ from ._array_like import ArrayLike, _ArrayLikeBool_co, _ArrayLikeInt_co
26
+ from ._dtype_like import DTypeLike
27
+
28
+ _T = TypeVar("_T")
29
+ _2Tuple = tuple[_T, _T]
30
+ _3Tuple = tuple[_T, _T, _T]
31
+ _4Tuple = tuple[_T, _T, _T, _T]
32
+
33
+ _NTypes = TypeVar("_NTypes", bound=int)
34
+ _IDType = TypeVar("_IDType", bound=Any)
35
+ _NameType = TypeVar("_NameType", bound=str)
36
+
37
+
38
+ class _SupportsArrayUFunc(Protocol):
39
+ def __array_ufunc__(
40
+ self,
41
+ ufunc: ufunc,
42
+ method: Literal["__call__", "reduce", "reduceat", "accumulate", "outer", "inner"],
43
+ *inputs: Any,
44
+ **kwargs: Any,
45
+ ) -> Any: ...
46
+
47
+
48
+ # NOTE: In reality `extobj` should be a length of list 3 containing an
49
+ # int, an int, and a callable, but there's no way to properly express
50
+ # non-homogenous lists.
51
+ # Use `Any` over `Union` to avoid issues related to lists invariance.
52
+
53
+ # NOTE: `reduce`, `accumulate`, `reduceat` and `outer` raise a ValueError for
54
+ # ufuncs that don't accept two input arguments and return one output argument.
55
+ # In such cases the respective methods are simply typed as `None`.
56
+
57
+ # NOTE: Similarly, `at` won't be defined for ufuncs that return
58
+ # multiple outputs; in such cases `at` is typed as `None`
59
+
60
+ # NOTE: If 2 output types are returned then `out` must be a
61
+ # 2-tuple of arrays. Otherwise `None` or a plain array are also acceptable
62
+
63
+ class _UFunc_Nin1_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
64
+ @property
65
+ def __name__(self) -> _NameType: ...
66
+ @property
67
+ def ntypes(self) -> _NTypes: ...
68
+ @property
69
+ def identity(self) -> _IDType: ...
70
+ @property
71
+ def nin(self) -> Literal[1]: ...
72
+ @property
73
+ def nout(self) -> Literal[1]: ...
74
+ @property
75
+ def nargs(self) -> Literal[2]: ...
76
+ @property
77
+ def signature(self) -> None: ...
78
+ @property
79
+ def reduce(self) -> None: ...
80
+ @property
81
+ def accumulate(self) -> None: ...
82
+ @property
83
+ def reduceat(self) -> None: ...
84
+ @property
85
+ def outer(self) -> None: ...
86
+
87
+ @overload
88
+ def __call__(
89
+ self,
90
+ __x1: _ScalarLike_co,
91
+ out: None = ...,
92
+ *,
93
+ where: None | _ArrayLikeBool_co = ...,
94
+ casting: _CastingKind = ...,
95
+ order: _OrderKACF = ...,
96
+ dtype: DTypeLike = ...,
97
+ subok: bool = ...,
98
+ signature: str | _2Tuple[None | str] = ...,
99
+ extobj: list[Any] = ...,
100
+ ) -> Any: ...
101
+ @overload
102
+ def __call__(
103
+ self,
104
+ __x1: ArrayLike,
105
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
106
+ *,
107
+ where: None | _ArrayLikeBool_co = ...,
108
+ casting: _CastingKind = ...,
109
+ order: _OrderKACF = ...,
110
+ dtype: DTypeLike = ...,
111
+ subok: bool = ...,
112
+ signature: str | _2Tuple[None | str] = ...,
113
+ extobj: list[Any] = ...,
114
+ ) -> NDArray[Any]: ...
115
+ @overload
116
+ def __call__(
117
+ self,
118
+ __x1: _SupportsArrayUFunc,
119
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
120
+ *,
121
+ where: None | _ArrayLikeBool_co = ...,
122
+ casting: _CastingKind = ...,
123
+ order: _OrderKACF = ...,
124
+ dtype: DTypeLike = ...,
125
+ subok: bool = ...,
126
+ signature: str | _2Tuple[None | str] = ...,
127
+ extobj: list[Any] = ...,
128
+ ) -> Any: ...
129
+
130
+ def at(
131
+ self,
132
+ a: _SupportsArrayUFunc,
133
+ indices: _ArrayLikeInt_co,
134
+ /,
135
+ ) -> None: ...
136
+
137
+ class _UFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
138
+ @property
139
+ def __name__(self) -> _NameType: ...
140
+ @property
141
+ def ntypes(self) -> _NTypes: ...
142
+ @property
143
+ def identity(self) -> _IDType: ...
144
+ @property
145
+ def nin(self) -> Literal[2]: ...
146
+ @property
147
+ def nout(self) -> Literal[1]: ...
148
+ @property
149
+ def nargs(self) -> Literal[3]: ...
150
+ @property
151
+ def signature(self) -> None: ...
152
+
153
+ @overload
154
+ def __call__(
155
+ self,
156
+ __x1: _ScalarLike_co,
157
+ __x2: _ScalarLike_co,
158
+ out: None = ...,
159
+ *,
160
+ where: None | _ArrayLikeBool_co = ...,
161
+ casting: _CastingKind = ...,
162
+ order: _OrderKACF = ...,
163
+ dtype: DTypeLike = ...,
164
+ subok: bool = ...,
165
+ signature: str | _3Tuple[None | str] = ...,
166
+ extobj: list[Any] = ...,
167
+ ) -> Any: ...
168
+ @overload
169
+ def __call__(
170
+ self,
171
+ __x1: ArrayLike,
172
+ __x2: ArrayLike,
173
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
174
+ *,
175
+ where: None | _ArrayLikeBool_co = ...,
176
+ casting: _CastingKind = ...,
177
+ order: _OrderKACF = ...,
178
+ dtype: DTypeLike = ...,
179
+ subok: bool = ...,
180
+ signature: str | _3Tuple[None | str] = ...,
181
+ extobj: list[Any] = ...,
182
+ ) -> NDArray[Any]: ...
183
+
184
+ def at(
185
+ self,
186
+ a: NDArray[Any],
187
+ indices: _ArrayLikeInt_co,
188
+ b: ArrayLike,
189
+ /,
190
+ ) -> None: ...
191
+
192
+ def reduce(
193
+ self,
194
+ array: ArrayLike,
195
+ axis: None | _ShapeLike = ...,
196
+ dtype: DTypeLike = ...,
197
+ out: None | NDArray[Any] = ...,
198
+ keepdims: bool = ...,
199
+ initial: Any = ...,
200
+ where: _ArrayLikeBool_co = ...,
201
+ ) -> Any: ...
202
+
203
+ def accumulate(
204
+ self,
205
+ array: ArrayLike,
206
+ axis: SupportsIndex = ...,
207
+ dtype: DTypeLike = ...,
208
+ out: None | NDArray[Any] = ...,
209
+ ) -> NDArray[Any]: ...
210
+
211
+ def reduceat(
212
+ self,
213
+ array: ArrayLike,
214
+ indices: _ArrayLikeInt_co,
215
+ axis: SupportsIndex = ...,
216
+ dtype: DTypeLike = ...,
217
+ out: None | NDArray[Any] = ...,
218
+ ) -> NDArray[Any]: ...
219
+
220
+ # Expand `**kwargs` into explicit keyword-only arguments
221
+ @overload
222
+ def outer(
223
+ self,
224
+ A: _ScalarLike_co,
225
+ B: _ScalarLike_co,
226
+ /, *,
227
+ out: None = ...,
228
+ where: None | _ArrayLikeBool_co = ...,
229
+ casting: _CastingKind = ...,
230
+ order: _OrderKACF = ...,
231
+ dtype: DTypeLike = ...,
232
+ subok: bool = ...,
233
+ signature: str | _3Tuple[None | str] = ...,
234
+ extobj: list[Any] = ...,
235
+ ) -> Any: ...
236
+ @overload
237
+ def outer( # type: ignore[misc]
238
+ self,
239
+ A: ArrayLike,
240
+ B: ArrayLike,
241
+ /, *,
242
+ out: None | NDArray[Any] | tuple[NDArray[Any]] = ...,
243
+ where: None | _ArrayLikeBool_co = ...,
244
+ casting: _CastingKind = ...,
245
+ order: _OrderKACF = ...,
246
+ dtype: DTypeLike = ...,
247
+ subok: bool = ...,
248
+ signature: str | _3Tuple[None | str] = ...,
249
+ extobj: list[Any] = ...,
250
+ ) -> NDArray[Any]: ...
251
+
252
+ class _UFunc_Nin1_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
253
+ @property
254
+ def __name__(self) -> _NameType: ...
255
+ @property
256
+ def ntypes(self) -> _NTypes: ...
257
+ @property
258
+ def identity(self) -> _IDType: ...
259
+ @property
260
+ def nin(self) -> Literal[1]: ...
261
+ @property
262
+ def nout(self) -> Literal[2]: ...
263
+ @property
264
+ def nargs(self) -> Literal[3]: ...
265
+ @property
266
+ def signature(self) -> None: ...
267
+ @property
268
+ def at(self) -> None: ...
269
+ @property
270
+ def reduce(self) -> None: ...
271
+ @property
272
+ def accumulate(self) -> None: ...
273
+ @property
274
+ def reduceat(self) -> None: ...
275
+ @property
276
+ def outer(self) -> None: ...
277
+
278
+ @overload
279
+ def __call__(
280
+ self,
281
+ __x1: _ScalarLike_co,
282
+ __out1: None = ...,
283
+ __out2: None = ...,
284
+ *,
285
+ where: None | _ArrayLikeBool_co = ...,
286
+ casting: _CastingKind = ...,
287
+ order: _OrderKACF = ...,
288
+ dtype: DTypeLike = ...,
289
+ subok: bool = ...,
290
+ signature: str | _3Tuple[None | str] = ...,
291
+ extobj: list[Any] = ...,
292
+ ) -> _2Tuple[Any]: ...
293
+ @overload
294
+ def __call__(
295
+ self,
296
+ __x1: ArrayLike,
297
+ __out1: None | NDArray[Any] = ...,
298
+ __out2: None | NDArray[Any] = ...,
299
+ *,
300
+ out: _2Tuple[NDArray[Any]] = ...,
301
+ where: None | _ArrayLikeBool_co = ...,
302
+ casting: _CastingKind = ...,
303
+ order: _OrderKACF = ...,
304
+ dtype: DTypeLike = ...,
305
+ subok: bool = ...,
306
+ signature: str | _3Tuple[None | str] = ...,
307
+ extobj: list[Any] = ...,
308
+ ) -> _2Tuple[NDArray[Any]]: ...
309
+ @overload
310
+ def __call__(
311
+ self,
312
+ __x1: _SupportsArrayUFunc,
313
+ __out1: None | NDArray[Any] = ...,
314
+ __out2: None | NDArray[Any] = ...,
315
+ *,
316
+ out: _2Tuple[NDArray[Any]] = ...,
317
+ where: None | _ArrayLikeBool_co = ...,
318
+ casting: _CastingKind = ...,
319
+ order: _OrderKACF = ...,
320
+ dtype: DTypeLike = ...,
321
+ subok: bool = ...,
322
+ signature: str | _3Tuple[None | str] = ...,
323
+ extobj: list[Any] = ...,
324
+ ) -> _2Tuple[Any]: ...
325
+
326
+ class _UFunc_Nin2_Nout2(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
327
+ @property
328
+ def __name__(self) -> _NameType: ...
329
+ @property
330
+ def ntypes(self) -> _NTypes: ...
331
+ @property
332
+ def identity(self) -> _IDType: ...
333
+ @property
334
+ def nin(self) -> Literal[2]: ...
335
+ @property
336
+ def nout(self) -> Literal[2]: ...
337
+ @property
338
+ def nargs(self) -> Literal[4]: ...
339
+ @property
340
+ def signature(self) -> None: ...
341
+ @property
342
+ def at(self) -> None: ...
343
+ @property
344
+ def reduce(self) -> None: ...
345
+ @property
346
+ def accumulate(self) -> None: ...
347
+ @property
348
+ def reduceat(self) -> None: ...
349
+ @property
350
+ def outer(self) -> None: ...
351
+
352
+ @overload
353
+ def __call__(
354
+ self,
355
+ __x1: _ScalarLike_co,
356
+ __x2: _ScalarLike_co,
357
+ __out1: None = ...,
358
+ __out2: None = ...,
359
+ *,
360
+ where: None | _ArrayLikeBool_co = ...,
361
+ casting: _CastingKind = ...,
362
+ order: _OrderKACF = ...,
363
+ dtype: DTypeLike = ...,
364
+ subok: bool = ...,
365
+ signature: str | _4Tuple[None | str] = ...,
366
+ extobj: list[Any] = ...,
367
+ ) -> _2Tuple[Any]: ...
368
+ @overload
369
+ def __call__(
370
+ self,
371
+ __x1: ArrayLike,
372
+ __x2: ArrayLike,
373
+ __out1: None | NDArray[Any] = ...,
374
+ __out2: None | NDArray[Any] = ...,
375
+ *,
376
+ out: _2Tuple[NDArray[Any]] = ...,
377
+ where: None | _ArrayLikeBool_co = ...,
378
+ casting: _CastingKind = ...,
379
+ order: _OrderKACF = ...,
380
+ dtype: DTypeLike = ...,
381
+ subok: bool = ...,
382
+ signature: str | _4Tuple[None | str] = ...,
383
+ extobj: list[Any] = ...,
384
+ ) -> _2Tuple[NDArray[Any]]: ...
385
+
386
+ class _GUFunc_Nin2_Nout1(ufunc, Generic[_NameType, _NTypes, _IDType]): # type: ignore[misc]
387
+ @property
388
+ def __name__(self) -> _NameType: ...
389
+ @property
390
+ def ntypes(self) -> _NTypes: ...
391
+ @property
392
+ def identity(self) -> _IDType: ...
393
+ @property
394
+ def nin(self) -> Literal[2]: ...
395
+ @property
396
+ def nout(self) -> Literal[1]: ...
397
+ @property
398
+ def nargs(self) -> Literal[3]: ...
399
+
400
+ # NOTE: In practice the only gufunc in the main namespace is `matmul`,
401
+ # so we can use its signature here
402
+ @property
403
+ def signature(self) -> Literal["(n?,k),(k,m?)->(n?,m?)"]: ...
404
+ @property
405
+ def reduce(self) -> None: ...
406
+ @property
407
+ def accumulate(self) -> None: ...
408
+ @property
409
+ def reduceat(self) -> None: ...
410
+ @property
411
+ def outer(self) -> None: ...
412
+ @property
413
+ def at(self) -> None: ...
414
+
415
+ # Scalar for 1D array-likes; ndarray otherwise
416
+ @overload
417
+ def __call__(
418
+ self,
419
+ __x1: ArrayLike,
420
+ __x2: ArrayLike,
421
+ out: None = ...,
422
+ *,
423
+ casting: _CastingKind = ...,
424
+ order: _OrderKACF = ...,
425
+ dtype: DTypeLike = ...,
426
+ subok: bool = ...,
427
+ signature: str | _3Tuple[None | str] = ...,
428
+ extobj: list[Any] = ...,
429
+ axes: list[_2Tuple[SupportsIndex]] = ...,
430
+ ) -> Any: ...
431
+ @overload
432
+ def __call__(
433
+ self,
434
+ __x1: ArrayLike,
435
+ __x2: ArrayLike,
436
+ out: NDArray[Any] | tuple[NDArray[Any]],
437
+ *,
438
+ casting: _CastingKind = ...,
439
+ order: _OrderKACF = ...,
440
+ dtype: DTypeLike = ...,
441
+ subok: bool = ...,
442
+ signature: str | _3Tuple[None | str] = ...,
443
+ extobj: list[Any] = ...,
444
+ axes: list[_2Tuple[SupportsIndex]] = ...,
445
+ ) -> NDArray[Any]: ...
venv/lib/python3.10/site-packages/numpy/core/__init__.py ADDED
@@ -0,0 +1,180 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
3
+
4
+ Please note that this module is private. All functions and objects
5
+ are available in the main ``numpy`` namespace - use that instead.
6
+
7
+ """
8
+
9
+ import os
10
+ import warnings
11
+
12
+ from numpy.version import version as __version__
13
+
14
+
15
+ # disables OpenBLAS affinity setting of the main thread that limits
16
+ # python threads or processes to one core
17
+ env_added = []
18
+ for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
19
+ if envkey not in os.environ:
20
+ os.environ[envkey] = '1'
21
+ env_added.append(envkey)
22
+
23
+ try:
24
+ from . import multiarray
25
+ except ImportError as exc:
26
+ import sys
27
+ msg = """
28
+
29
+ IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
30
+
31
+ Importing the numpy C-extensions failed. This error can happen for
32
+ many reasons, often due to issues with your setup or how NumPy was
33
+ installed.
34
+
35
+ We have compiled some common reasons and troubleshooting tips at:
36
+
37
+ https://numpy.org/devdocs/user/troubleshooting-importerror.html
38
+
39
+ Please note and check the following:
40
+
41
+ * The Python version is: Python%d.%d from "%s"
42
+ * The NumPy version is: "%s"
43
+
44
+ and make sure that they are the versions you expect.
45
+ Please carefully study the documentation linked above for further help.
46
+
47
+ Original error was: %s
48
+ """ % (sys.version_info[0], sys.version_info[1], sys.executable,
49
+ __version__, exc)
50
+ raise ImportError(msg)
51
+ finally:
52
+ for envkey in env_added:
53
+ del os.environ[envkey]
54
+ del envkey
55
+ del env_added
56
+ del os
57
+
58
+ from . import umath
59
+
60
+ # Check that multiarray,umath are pure python modules wrapping
61
+ # _multiarray_umath and not either of the old c-extension modules
62
+ if not (hasattr(multiarray, '_multiarray_umath') and
63
+ hasattr(umath, '_multiarray_umath')):
64
+ import sys
65
+ path = sys.modules['numpy'].__path__
66
+ msg = ("Something is wrong with the numpy installation. "
67
+ "While importing we detected an older version of "
68
+ "numpy in {}. One method of fixing this is to repeatedly uninstall "
69
+ "numpy until none is found, then reinstall this version.")
70
+ raise ImportError(msg.format(path))
71
+
72
+ from . import numerictypes as nt
73
+ multiarray.set_typeDict(nt.sctypeDict)
74
+ from . import numeric
75
+ from .numeric import *
76
+ from . import fromnumeric
77
+ from .fromnumeric import *
78
+ from . import defchararray as char
79
+ from . import records
80
+ from . import records as rec
81
+ from .records import record, recarray, format_parser
82
+ # Note: module name memmap is overwritten by a class with same name
83
+ from .memmap import *
84
+ from .defchararray import chararray
85
+ from . import function_base
86
+ from .function_base import *
87
+ from . import _machar
88
+ from . import getlimits
89
+ from .getlimits import *
90
+ from . import shape_base
91
+ from .shape_base import *
92
+ from . import einsumfunc
93
+ from .einsumfunc import *
94
+ del nt
95
+
96
+ from .numeric import absolute as abs
97
+
98
+ # do this after everything else, to minimize the chance of this misleadingly
99
+ # appearing in an import-time traceback
100
+ from . import _add_newdocs
101
+ from . import _add_newdocs_scalars
102
+ # add these for module-freeze analysis (like PyInstaller)
103
+ from . import _dtype_ctypes
104
+ from . import _internal
105
+ from . import _dtype
106
+ from . import _methods
107
+
108
+ __all__ = ['char', 'rec', 'memmap']
109
+ __all__ += numeric.__all__
110
+ __all__ += ['record', 'recarray', 'format_parser']
111
+ __all__ += ['chararray']
112
+ __all__ += function_base.__all__
113
+ __all__ += getlimits.__all__
114
+ __all__ += shape_base.__all__
115
+ __all__ += einsumfunc.__all__
116
+
117
+ # We used to use `np.core._ufunc_reconstruct` to unpickle. This is unnecessary,
118
+ # but old pickles saved before 1.20 will be using it, and there is no reason
119
+ # to break loading them.
120
+ def _ufunc_reconstruct(module, name):
121
+ # The `fromlist` kwarg is required to ensure that `mod` points to the
122
+ # inner-most module rather than the parent package when module name is
123
+ # nested. This makes it possible to pickle non-toplevel ufuncs such as
124
+ # scipy.special.expit for instance.
125
+ mod = __import__(module, fromlist=[name])
126
+ return getattr(mod, name)
127
+
128
+
129
+ def _ufunc_reduce(func):
130
+ # Report the `__name__`. pickle will try to find the module. Note that
131
+ # pickle supports for this `__name__` to be a `__qualname__`. It may
132
+ # make sense to add a `__qualname__` to ufuncs, to allow this more
133
+ # explicitly (Numba has ufuncs as attributes).
134
+ # See also: https://github.com/dask/distributed/issues/3450
135
+ return func.__name__
136
+
137
+
138
+ def _DType_reconstruct(scalar_type):
139
+ # This is a work-around to pickle type(np.dtype(np.float64)), etc.
140
+ # and it should eventually be replaced with a better solution, e.g. when
141
+ # DTypes become HeapTypes.
142
+ return type(dtype(scalar_type))
143
+
144
+
145
+ def _DType_reduce(DType):
146
+ # As types/classes, most DTypes can simply be pickled by their name:
147
+ if not DType._legacy or DType.__module__ == "numpy.dtypes":
148
+ return DType.__name__
149
+
150
+ # However, user defined legacy dtypes (like rational) do not end up in
151
+ # `numpy.dtypes` as module and do not have a public class at all.
152
+ # For these, we pickle them by reconstructing them from the scalar type:
153
+ scalar_type = DType.type
154
+ return _DType_reconstruct, (scalar_type,)
155
+
156
+
157
+ def __getattr__(name):
158
+ # Deprecated 2022-11-22, NumPy 1.25.
159
+ if name == "MachAr":
160
+ warnings.warn(
161
+ "The `np.core.MachAr` is considered private API (NumPy 1.24)",
162
+ DeprecationWarning, stacklevel=2,
163
+ )
164
+ return _machar.MachAr
165
+ raise AttributeError(f"Module {__name__!r} has no attribute {name!r}")
166
+
167
+
168
+ import copyreg
169
+
170
+ copyreg.pickle(ufunc, _ufunc_reduce)
171
+ copyreg.pickle(type(dtype), _DType_reduce, _DType_reconstruct)
172
+
173
+ # Unclutter namespace (must keep _*_reconstruct for unpickling)
174
+ del copyreg
175
+ del _ufunc_reduce
176
+ del _DType_reduce
177
+
178
+ from numpy._pytesttester import PytestTester
179
+ test = PytestTester(__name__)
180
+ del PytestTester
venv/lib/python3.10/site-packages/numpy/core/__init__.pyi ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ # NOTE: The `np.core` namespace is deliberately kept empty due to it
2
+ # being private (despite the lack of leading underscore)
venv/lib/python3.10/site-packages/numpy/core/_add_newdocs.py ADDED
The diff for this file is too large to render. See raw diff
 
venv/lib/python3.10/site-packages/numpy/core/_add_newdocs_scalars.py ADDED
@@ -0,0 +1,372 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file is separate from ``_add_newdocs.py`` so that it can be mocked out by
3
+ our sphinx ``conf.py`` during doc builds, where we want to avoid showing
4
+ platform-dependent information.
5
+ """
6
+ import sys
7
+ import os
8
+ from numpy.core import dtype
9
+ from numpy.core import numerictypes as _numerictypes
10
+ from numpy.core.function_base import add_newdoc
11
+
12
+ ##############################################################################
13
+ #
14
+ # Documentation for concrete scalar classes
15
+ #
16
+ ##############################################################################
17
+
18
+ def numeric_type_aliases(aliases):
19
+ def type_aliases_gen():
20
+ for alias, doc in aliases:
21
+ try:
22
+ alias_type = getattr(_numerictypes, alias)
23
+ except AttributeError:
24
+ # The set of aliases that actually exist varies between platforms
25
+ pass
26
+ else:
27
+ yield (alias_type, alias, doc)
28
+ return list(type_aliases_gen())
29
+
30
+
31
+ possible_aliases = numeric_type_aliases([
32
+ ('int8', '8-bit signed integer (``-128`` to ``127``)'),
33
+ ('int16', '16-bit signed integer (``-32_768`` to ``32_767``)'),
34
+ ('int32', '32-bit signed integer (``-2_147_483_648`` to ``2_147_483_647``)'),
35
+ ('int64', '64-bit signed integer (``-9_223_372_036_854_775_808`` to ``9_223_372_036_854_775_807``)'),
36
+ ('intp', 'Signed integer large enough to fit pointer, compatible with C ``intptr_t``'),
37
+ ('uint8', '8-bit unsigned integer (``0`` to ``255``)'),
38
+ ('uint16', '16-bit unsigned integer (``0`` to ``65_535``)'),
39
+ ('uint32', '32-bit unsigned integer (``0`` to ``4_294_967_295``)'),
40
+ ('uint64', '64-bit unsigned integer (``0`` to ``18_446_744_073_709_551_615``)'),
41
+ ('uintp', 'Unsigned integer large enough to fit pointer, compatible with C ``uintptr_t``'),
42
+ ('float16', '16-bit-precision floating-point number type: sign bit, 5 bits exponent, 10 bits mantissa'),
43
+ ('float32', '32-bit-precision floating-point number type: sign bit, 8 bits exponent, 23 bits mantissa'),
44
+ ('float64', '64-bit precision floating-point number type: sign bit, 11 bits exponent, 52 bits mantissa'),
45
+ ('float96', '96-bit extended-precision floating-point number type'),
46
+ ('float128', '128-bit extended-precision floating-point number type'),
47
+ ('complex64', 'Complex number type composed of 2 32-bit-precision floating-point numbers'),
48
+ ('complex128', 'Complex number type composed of 2 64-bit-precision floating-point numbers'),
49
+ ('complex192', 'Complex number type composed of 2 96-bit extended-precision floating-point numbers'),
50
+ ('complex256', 'Complex number type composed of 2 128-bit extended-precision floating-point numbers'),
51
+ ])
52
+
53
+
54
+ def _get_platform_and_machine():
55
+ try:
56
+ system, _, _, _, machine = os.uname()
57
+ except AttributeError:
58
+ system = sys.platform
59
+ if system == 'win32':
60
+ machine = os.environ.get('PROCESSOR_ARCHITEW6432', '') \
61
+ or os.environ.get('PROCESSOR_ARCHITECTURE', '')
62
+ else:
63
+ machine = 'unknown'
64
+ return system, machine
65
+
66
+
67
+ _system, _machine = _get_platform_and_machine()
68
+ _doc_alias_string = f":Alias on this platform ({_system} {_machine}):"
69
+
70
+
71
+ def add_newdoc_for_scalar_type(obj, fixed_aliases, doc):
72
+ # note: `:field: value` is rST syntax which renders as field lists.
73
+ o = getattr(_numerictypes, obj)
74
+
75
+ character_code = dtype(o).char
76
+ canonical_name_doc = "" if obj == o.__name__ else \
77
+ f":Canonical name: `numpy.{obj}`\n "
78
+ if fixed_aliases:
79
+ alias_doc = ''.join(f":Alias: `numpy.{alias}`\n "
80
+ for alias in fixed_aliases)
81
+ else:
82
+ alias_doc = ''
83
+ alias_doc += ''.join(f"{_doc_alias_string} `numpy.{alias}`: {doc}.\n "
84
+ for (alias_type, alias, doc) in possible_aliases if alias_type is o)
85
+
86
+ docstring = f"""
87
+ {doc.strip()}
88
+
89
+ :Character code: ``'{character_code}'``
90
+ {canonical_name_doc}{alias_doc}
91
+ """
92
+
93
+ add_newdoc('numpy.core.numerictypes', obj, docstring)
94
+
95
+
96
+ add_newdoc_for_scalar_type('bool_', [],
97
+ """
98
+ Boolean type (True or False), stored as a byte.
99
+
100
+ .. warning::
101
+
102
+ The :class:`bool_` type is not a subclass of the :class:`int_` type
103
+ (the :class:`bool_` is not even a number type). This is different
104
+ than Python's default implementation of :class:`bool` as a
105
+ sub-class of :class:`int`.
106
+ """)
107
+
108
+ add_newdoc_for_scalar_type('byte', [],
109
+ """
110
+ Signed integer type, compatible with C ``char``.
111
+ """)
112
+
113
+ add_newdoc_for_scalar_type('short', [],
114
+ """
115
+ Signed integer type, compatible with C ``short``.
116
+ """)
117
+
118
+ add_newdoc_for_scalar_type('intc', [],
119
+ """
120
+ Signed integer type, compatible with C ``int``.
121
+ """)
122
+
123
+ add_newdoc_for_scalar_type('int_', [],
124
+ """
125
+ Signed integer type, compatible with Python `int` and C ``long``.
126
+ """)
127
+
128
+ add_newdoc_for_scalar_type('longlong', [],
129
+ """
130
+ Signed integer type, compatible with C ``long long``.
131
+ """)
132
+
133
+ add_newdoc_for_scalar_type('ubyte', [],
134
+ """
135
+ Unsigned integer type, compatible with C ``unsigned char``.
136
+ """)
137
+
138
+ add_newdoc_for_scalar_type('ushort', [],
139
+ """
140
+ Unsigned integer type, compatible with C ``unsigned short``.
141
+ """)
142
+
143
+ add_newdoc_for_scalar_type('uintc', [],
144
+ """
145
+ Unsigned integer type, compatible with C ``unsigned int``.
146
+ """)
147
+
148
+ add_newdoc_for_scalar_type('uint', [],
149
+ """
150
+ Unsigned integer type, compatible with C ``unsigned long``.
151
+ """)
152
+
153
+ add_newdoc_for_scalar_type('ulonglong', [],
154
+ """
155
+ Signed integer type, compatible with C ``unsigned long long``.
156
+ """)
157
+
158
+ add_newdoc_for_scalar_type('half', [],
159
+ """
160
+ Half-precision floating-point number type.
161
+ """)
162
+
163
+ add_newdoc_for_scalar_type('single', [],
164
+ """
165
+ Single-precision floating-point number type, compatible with C ``float``.
166
+ """)
167
+
168
+ add_newdoc_for_scalar_type('double', ['float_'],
169
+ """
170
+ Double-precision floating-point number type, compatible with Python `float`
171
+ and C ``double``.
172
+ """)
173
+
174
+ add_newdoc_for_scalar_type('longdouble', ['longfloat'],
175
+ """
176
+ Extended-precision floating-point number type, compatible with C
177
+ ``long double`` but not necessarily with IEEE 754 quadruple-precision.
178
+ """)
179
+
180
+ add_newdoc_for_scalar_type('csingle', ['singlecomplex'],
181
+ """
182
+ Complex number type composed of two single-precision floating-point
183
+ numbers.
184
+ """)
185
+
186
+ add_newdoc_for_scalar_type('cdouble', ['cfloat', 'complex_'],
187
+ """
188
+ Complex number type composed of two double-precision floating-point
189
+ numbers, compatible with Python `complex`.
190
+ """)
191
+
192
+ add_newdoc_for_scalar_type('clongdouble', ['clongfloat', 'longcomplex'],
193
+ """
194
+ Complex number type composed of two extended-precision floating-point
195
+ numbers.
196
+ """)
197
+
198
+ add_newdoc_for_scalar_type('object_', [],
199
+ """
200
+ Any Python object.
201
+ """)
202
+
203
+ add_newdoc_for_scalar_type('str_', ['unicode_'],
204
+ r"""
205
+ A unicode string.
206
+
207
+ This type strips trailing null codepoints.
208
+
209
+ >>> s = np.str_("abc\x00")
210
+ >>> s
211
+ 'abc'
212
+
213
+ Unlike the builtin `str`, this supports the :ref:`python:bufferobjects`, exposing its
214
+ contents as UCS4:
215
+
216
+ >>> m = memoryview(np.str_("abc"))
217
+ >>> m.format
218
+ '3w'
219
+ >>> m.tobytes()
220
+ b'a\x00\x00\x00b\x00\x00\x00c\x00\x00\x00'
221
+ """)
222
+
223
+ add_newdoc_for_scalar_type('bytes_', ['string_'],
224
+ r"""
225
+ A byte string.
226
+
227
+ When used in arrays, this type strips trailing null bytes.
228
+ """)
229
+
230
+ add_newdoc_for_scalar_type('void', [],
231
+ r"""
232
+ np.void(length_or_data, /, dtype=None)
233
+
234
+ Create a new structured or unstructured void scalar.
235
+
236
+ Parameters
237
+ ----------
238
+ length_or_data : int, array-like, bytes-like, object
239
+ One of multiple meanings (see notes). The length or
240
+ bytes data of an unstructured void. Or alternatively,
241
+ the data to be stored in the new scalar when `dtype`
242
+ is provided.
243
+ This can be an array-like, in which case an array may
244
+ be returned.
245
+ dtype : dtype, optional
246
+ If provided the dtype of the new scalar. This dtype must
247
+ be "void" dtype (i.e. a structured or unstructured void,
248
+ see also :ref:`defining-structured-types`).
249
+
250
+ ..versionadded:: 1.24
251
+
252
+ Notes
253
+ -----
254
+ For historical reasons and because void scalars can represent both
255
+ arbitrary byte data and structured dtypes, the void constructor
256
+ has three calling conventions:
257
+
258
+ 1. ``np.void(5)`` creates a ``dtype="V5"`` scalar filled with five
259
+ ``\0`` bytes. The 5 can be a Python or NumPy integer.
260
+ 2. ``np.void(b"bytes-like")`` creates a void scalar from the byte string.
261
+ The dtype itemsize will match the byte string length, here ``"V10"``.
262
+ 3. When a ``dtype=`` is passed the call is roughly the same as an
263
+ array creation. However, a void scalar rather than array is returned.
264
+
265
+ Please see the examples which show all three different conventions.
266
+
267
+ Examples
268
+ --------
269
+ >>> np.void(5)
270
+ void(b'\x00\x00\x00\x00\x00')
271
+ >>> np.void(b'abcd')
272
+ void(b'\x61\x62\x63\x64')
273
+ >>> np.void((5, 3.2, "eggs"), dtype="i,d,S5")
274
+ (5, 3.2, b'eggs') # looks like a tuple, but is `np.void`
275
+ >>> np.void(3, dtype=[('x', np.int8), ('y', np.int8)])
276
+ (3, 3) # looks like a tuple, but is `np.void`
277
+
278
+ """)
279
+
280
+ add_newdoc_for_scalar_type('datetime64', [],
281
+ """
282
+ If created from a 64-bit integer, it represents an offset from
283
+ ``1970-01-01T00:00:00``.
284
+ If created from string, the string can be in ISO 8601 date
285
+ or datetime format.
286
+
287
+ >>> np.datetime64(10, 'Y')
288
+ numpy.datetime64('1980')
289
+ >>> np.datetime64('1980', 'Y')
290
+ numpy.datetime64('1980')
291
+ >>> np.datetime64(10, 'D')
292
+ numpy.datetime64('1970-01-11')
293
+
294
+ See :ref:`arrays.datetime` for more information.
295
+ """)
296
+
297
+ add_newdoc_for_scalar_type('timedelta64', [],
298
+ """
299
+ A timedelta stored as a 64-bit integer.
300
+
301
+ See :ref:`arrays.datetime` for more information.
302
+ """)
303
+
304
+ add_newdoc('numpy.core.numerictypes', "integer", ('is_integer',
305
+ """
306
+ integer.is_integer() -> bool
307
+
308
+ Return ``True`` if the number is finite with integral value.
309
+
310
+ .. versionadded:: 1.22
311
+
312
+ Examples
313
+ --------
314
+ >>> np.int64(-2).is_integer()
315
+ True
316
+ >>> np.uint32(5).is_integer()
317
+ True
318
+ """))
319
+
320
+ # TODO: work out how to put this on the base class, np.floating
321
+ for float_name in ('half', 'single', 'double', 'longdouble'):
322
+ add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
323
+ """
324
+ {ftype}.as_integer_ratio() -> (int, int)
325
+
326
+ Return a pair of integers, whose ratio is exactly equal to the original
327
+ floating point number, and with a positive denominator.
328
+ Raise `OverflowError` on infinities and a `ValueError` on NaNs.
329
+
330
+ >>> np.{ftype}(10.0).as_integer_ratio()
331
+ (10, 1)
332
+ >>> np.{ftype}(0.0).as_integer_ratio()
333
+ (0, 1)
334
+ >>> np.{ftype}(-.25).as_integer_ratio()
335
+ (-1, 4)
336
+ """.format(ftype=float_name)))
337
+
338
+ add_newdoc('numpy.core.numerictypes', float_name, ('is_integer',
339
+ f"""
340
+ {float_name}.is_integer() -> bool
341
+
342
+ Return ``True`` if the floating point number is finite with integral
343
+ value, and ``False`` otherwise.
344
+
345
+ .. versionadded:: 1.22
346
+
347
+ Examples
348
+ --------
349
+ >>> np.{float_name}(-2.0).is_integer()
350
+ True
351
+ >>> np.{float_name}(3.2).is_integer()
352
+ False
353
+ """))
354
+
355
+ for int_name in ('int8', 'uint8', 'int16', 'uint16', 'int32', 'uint32',
356
+ 'int64', 'uint64', 'int64', 'uint64', 'int64', 'uint64'):
357
+ # Add negative examples for signed cases by checking typecode
358
+ add_newdoc('numpy.core.numerictypes', int_name, ('bit_count',
359
+ f"""
360
+ {int_name}.bit_count() -> int
361
+
362
+ Computes the number of 1-bits in the absolute value of the input.
363
+ Analogous to the builtin `int.bit_count` or ``popcount`` in C++.
364
+
365
+ Examples
366
+ --------
367
+ >>> np.{int_name}(127).bit_count()
368
+ 7""" +
369
+ (f"""
370
+ >>> np.{int_name}(-127).bit_count()
371
+ 7
372
+ """ if dtype(int_name).char.islower() else "")))
venv/lib/python3.10/site-packages/numpy/core/_asarray.py ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Functions in the ``as*array`` family that promote array-likes into arrays.
3
+
4
+ `require` fits this category despite its name not matching this pattern.
5
+ """
6
+ from .overrides import (
7
+ array_function_dispatch,
8
+ set_array_function_like_doc,
9
+ set_module,
10
+ )
11
+ from .multiarray import array, asanyarray
12
+
13
+
14
+ __all__ = ["require"]
15
+
16
+
17
+ POSSIBLE_FLAGS = {
18
+ 'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
19
+ 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
20
+ 'A': 'A', 'ALIGNED': 'A',
21
+ 'W': 'W', 'WRITEABLE': 'W',
22
+ 'O': 'O', 'OWNDATA': 'O',
23
+ 'E': 'E', 'ENSUREARRAY': 'E'
24
+ }
25
+
26
+
27
+ @set_array_function_like_doc
28
+ @set_module('numpy')
29
+ def require(a, dtype=None, requirements=None, *, like=None):
30
+ """
31
+ Return an ndarray of the provided type that satisfies requirements.
32
+
33
+ This function is useful to be sure that an array with the correct flags
34
+ is returned for passing to compiled code (perhaps through ctypes).
35
+
36
+ Parameters
37
+ ----------
38
+ a : array_like
39
+ The object to be converted to a type-and-requirement-satisfying array.
40
+ dtype : data-type
41
+ The required data-type. If None preserve the current dtype. If your
42
+ application requires the data to be in native byteorder, include
43
+ a byteorder specification as a part of the dtype specification.
44
+ requirements : str or sequence of str
45
+ The requirements list can be any of the following
46
+
47
+ * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
48
+ * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
49
+ * 'ALIGNED' ('A') - ensure a data-type aligned array
50
+ * 'WRITEABLE' ('W') - ensure a writable array
51
+ * 'OWNDATA' ('O') - ensure an array that owns its own data
52
+ * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
53
+ ${ARRAY_FUNCTION_LIKE}
54
+
55
+ .. versionadded:: 1.20.0
56
+
57
+ Returns
58
+ -------
59
+ out : ndarray
60
+ Array with specified requirements and type if given.
61
+
62
+ See Also
63
+ --------
64
+ asarray : Convert input to an ndarray.
65
+ asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
66
+ ascontiguousarray : Convert input to a contiguous array.
67
+ asfortranarray : Convert input to an ndarray with column-major
68
+ memory order.
69
+ ndarray.flags : Information about the memory layout of the array.
70
+
71
+ Notes
72
+ -----
73
+ The returned array will be guaranteed to have the listed requirements
74
+ by making a copy if needed.
75
+
76
+ Examples
77
+ --------
78
+ >>> x = np.arange(6).reshape(2,3)
79
+ >>> x.flags
80
+ C_CONTIGUOUS : True
81
+ F_CONTIGUOUS : False
82
+ OWNDATA : False
83
+ WRITEABLE : True
84
+ ALIGNED : True
85
+ WRITEBACKIFCOPY : False
86
+
87
+ >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
88
+ >>> y.flags
89
+ C_CONTIGUOUS : False
90
+ F_CONTIGUOUS : True
91
+ OWNDATA : True
92
+ WRITEABLE : True
93
+ ALIGNED : True
94
+ WRITEBACKIFCOPY : False
95
+
96
+ """
97
+ if like is not None:
98
+ return _require_with_like(
99
+ like,
100
+ a,
101
+ dtype=dtype,
102
+ requirements=requirements,
103
+ )
104
+
105
+ if not requirements:
106
+ return asanyarray(a, dtype=dtype)
107
+
108
+ requirements = {POSSIBLE_FLAGS[x.upper()] for x in requirements}
109
+
110
+ if 'E' in requirements:
111
+ requirements.remove('E')
112
+ subok = False
113
+ else:
114
+ subok = True
115
+
116
+ order = 'A'
117
+ if requirements >= {'C', 'F'}:
118
+ raise ValueError('Cannot specify both "C" and "F" order')
119
+ elif 'F' in requirements:
120
+ order = 'F'
121
+ requirements.remove('F')
122
+ elif 'C' in requirements:
123
+ order = 'C'
124
+ requirements.remove('C')
125
+
126
+ arr = array(a, dtype=dtype, order=order, copy=False, subok=subok)
127
+
128
+ for prop in requirements:
129
+ if not arr.flags[prop]:
130
+ return arr.copy(order)
131
+ return arr
132
+
133
+
134
+ _require_with_like = array_function_dispatch()(require)
venv/lib/python3.10/site-packages/numpy/core/_dtype.py ADDED
@@ -0,0 +1,369 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A place for code to be called from the implementation of np.dtype
3
+
4
+ String handling is much easier to do correctly in python.
5
+ """
6
+ import numpy as np
7
+
8
+
9
+ _kind_to_stem = {
10
+ 'u': 'uint',
11
+ 'i': 'int',
12
+ 'c': 'complex',
13
+ 'f': 'float',
14
+ 'b': 'bool',
15
+ 'V': 'void',
16
+ 'O': 'object',
17
+ 'M': 'datetime',
18
+ 'm': 'timedelta',
19
+ 'S': 'bytes',
20
+ 'U': 'str',
21
+ }
22
+
23
+
24
+ def _kind_name(dtype):
25
+ try:
26
+ return _kind_to_stem[dtype.kind]
27
+ except KeyError as e:
28
+ raise RuntimeError(
29
+ "internal dtype error, unknown kind {!r}"
30
+ .format(dtype.kind)
31
+ ) from None
32
+
33
+
34
+ def __str__(dtype):
35
+ if dtype.fields is not None:
36
+ return _struct_str(dtype, include_align=True)
37
+ elif dtype.subdtype:
38
+ return _subarray_str(dtype)
39
+ elif issubclass(dtype.type, np.flexible) or not dtype.isnative:
40
+ return dtype.str
41
+ else:
42
+ return dtype.name
43
+
44
+
45
+ def __repr__(dtype):
46
+ arg_str = _construction_repr(dtype, include_align=False)
47
+ if dtype.isalignedstruct:
48
+ arg_str = arg_str + ", align=True"
49
+ return "dtype({})".format(arg_str)
50
+
51
+
52
+ def _unpack_field(dtype, offset, title=None):
53
+ """
54
+ Helper function to normalize the items in dtype.fields.
55
+
56
+ Call as:
57
+
58
+ dtype, offset, title = _unpack_field(*dtype.fields[name])
59
+ """
60
+ return dtype, offset, title
61
+
62
+
63
+ def _isunsized(dtype):
64
+ # PyDataType_ISUNSIZED
65
+ return dtype.itemsize == 0
66
+
67
+
68
+ def _construction_repr(dtype, include_align=False, short=False):
69
+ """
70
+ Creates a string repr of the dtype, excluding the 'dtype()' part
71
+ surrounding the object. This object may be a string, a list, or
72
+ a dict depending on the nature of the dtype. This
73
+ is the object passed as the first parameter to the dtype
74
+ constructor, and if no additional constructor parameters are
75
+ given, will reproduce the exact memory layout.
76
+
77
+ Parameters
78
+ ----------
79
+ short : bool
80
+ If true, this creates a shorter repr using 'kind' and 'itemsize', instead
81
+ of the longer type name.
82
+
83
+ include_align : bool
84
+ If true, this includes the 'align=True' parameter
85
+ inside the struct dtype construction dict when needed. Use this flag
86
+ if you want a proper repr string without the 'dtype()' part around it.
87
+
88
+ If false, this does not preserve the
89
+ 'align=True' parameter or sticky NPY_ALIGNED_STRUCT flag for
90
+ struct arrays like the regular repr does, because the 'align'
91
+ flag is not part of first dtype constructor parameter. This
92
+ mode is intended for a full 'repr', where the 'align=True' is
93
+ provided as the second parameter.
94
+ """
95
+ if dtype.fields is not None:
96
+ return _struct_str(dtype, include_align=include_align)
97
+ elif dtype.subdtype:
98
+ return _subarray_str(dtype)
99
+ else:
100
+ return _scalar_str(dtype, short=short)
101
+
102
+
103
+ def _scalar_str(dtype, short):
104
+ byteorder = _byte_order_str(dtype)
105
+
106
+ if dtype.type == np.bool_:
107
+ if short:
108
+ return "'?'"
109
+ else:
110
+ return "'bool'"
111
+
112
+ elif dtype.type == np.object_:
113
+ # The object reference may be different sizes on different
114
+ # platforms, so it should never include the itemsize here.
115
+ return "'O'"
116
+
117
+ elif dtype.type == np.bytes_:
118
+ if _isunsized(dtype):
119
+ return "'S'"
120
+ else:
121
+ return "'S%d'" % dtype.itemsize
122
+
123
+ elif dtype.type == np.str_:
124
+ if _isunsized(dtype):
125
+ return "'%sU'" % byteorder
126
+ else:
127
+ return "'%sU%d'" % (byteorder, dtype.itemsize / 4)
128
+
129
+ # unlike the other types, subclasses of void are preserved - but
130
+ # historically the repr does not actually reveal the subclass
131
+ elif issubclass(dtype.type, np.void):
132
+ if _isunsized(dtype):
133
+ return "'V'"
134
+ else:
135
+ return "'V%d'" % dtype.itemsize
136
+
137
+ elif dtype.type == np.datetime64:
138
+ return "'%sM8%s'" % (byteorder, _datetime_metadata_str(dtype))
139
+
140
+ elif dtype.type == np.timedelta64:
141
+ return "'%sm8%s'" % (byteorder, _datetime_metadata_str(dtype))
142
+
143
+ elif np.issubdtype(dtype, np.number):
144
+ # Short repr with endianness, like '<f8'
145
+ if short or dtype.byteorder not in ('=', '|'):
146
+ return "'%s%c%d'" % (byteorder, dtype.kind, dtype.itemsize)
147
+
148
+ # Longer repr, like 'float64'
149
+ else:
150
+ return "'%s%d'" % (_kind_name(dtype), 8*dtype.itemsize)
151
+
152
+ elif dtype.isbuiltin == 2:
153
+ return dtype.type.__name__
154
+
155
+ else:
156
+ raise RuntimeError(
157
+ "Internal error: NumPy dtype unrecognized type number")
158
+
159
+
160
+ def _byte_order_str(dtype):
161
+ """ Normalize byteorder to '<' or '>' """
162
+ # hack to obtain the native and swapped byte order characters
163
+ swapped = np.dtype(int).newbyteorder('S')
164
+ native = swapped.newbyteorder('S')
165
+
166
+ byteorder = dtype.byteorder
167
+ if byteorder == '=':
168
+ return native.byteorder
169
+ if byteorder == 'S':
170
+ # TODO: this path can never be reached
171
+ return swapped.byteorder
172
+ elif byteorder == '|':
173
+ return ''
174
+ else:
175
+ return byteorder
176
+
177
+
178
+ def _datetime_metadata_str(dtype):
179
+ # TODO: this duplicates the C metastr_to_unicode functionality
180
+ unit, count = np.datetime_data(dtype)
181
+ if unit == 'generic':
182
+ return ''
183
+ elif count == 1:
184
+ return '[{}]'.format(unit)
185
+ else:
186
+ return '[{}{}]'.format(count, unit)
187
+
188
+
189
def _struct_dict_str(dtype, includealignedflag):
    """Dict-form repr of a structured dtype:
    "{'names': [...], 'formats': [...], 'offsets': [...], 'itemsize': n}".
    """
    # unpack the fields dictionary into parallel lists
    names = dtype.names
    fld_dtypes = []
    offsets = []
    titles = []
    for name in names:
        fld_dtype, offset, title = _unpack_field(*dtype.fields[name])
        fld_dtypes.append(fld_dtype)
        offsets.append(offset)
        titles.append(title)

    # Build up a string to make the dictionary

    # legacy (<= 1.21) print mode omits the space after ':' and ','
    if np.core.arrayprint._get_legacy_print_mode() <= 121:
        colon = ":"
        fieldsep = ","
    else:
        colon = ": "
        fieldsep = ", "

    # First, the names
    ret = "{'names'%s[" % colon
    ret += fieldsep.join(repr(name) for name in names)

    # Second, the formats
    ret += "], 'formats'%s[" % colon
    ret += fieldsep.join(
        _construction_repr(fld_dtype, short=True) for fld_dtype in fld_dtypes)

    # Third, the offsets
    ret += "], 'offsets'%s[" % colon
    ret += fieldsep.join("%d" % offset for offset in offsets)

    # Fourth, the titles -- only emitted when at least one field has one
    if any(title is not None for title in titles):
        ret += "], 'titles'%s[" % colon
        ret += fieldsep.join(repr(title) for title in titles)

    # Fifth, the itemsize
    ret += "], 'itemsize'%s%d" % (colon, dtype.itemsize)

    if (includealignedflag and dtype.isalignedstruct):
        # Finally, the aligned flag
        ret += ", 'aligned'%sTrue}" % colon
    else:
        ret += "}"

    return ret
238
+
239
+
240
+ def _aligned_offset(offset, alignment):
241
+ # round up offset:
242
+ return - (-offset // alignment) * alignment
243
+
244
+
245
def _is_packed(dtype):
    """
    Checks whether the structured data type in 'dtype'
    has a simple layout, where all the fields are in order,
    and follow each other with no alignment padding.

    When this returns true, the dtype can be reconstructed
    from a list of the field names and dtypes with no additional
    dtype parameters.

    Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
    """
    align = dtype.isalignedstruct
    max_alignment = 1
    # walk the fields in order, tracking where the next field *should* start
    total_offset = 0
    for name in dtype.names:
        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])

        if align:
            total_offset = _aligned_offset(total_offset, fld_dtype.alignment)
            max_alignment = max(max_alignment, fld_dtype.alignment)

        if fld_offset != total_offset:
            # field does not start where simple packing would place it
            return False
        total_offset += fld_dtype.itemsize

    if align:
        # aligned structs are padded at the end to a multiple of the
        # largest field alignment
        total_offset = _aligned_offset(total_offset, max_alignment)

    if total_offset != dtype.itemsize:
        # trailing padding beyond the simple layout
        return False
    return True
277
+
278
+
279
def _struct_list_str(dtype):
    # List-form repr of a structured dtype: [(name, format[, shape]), ...]
    items = []
    for name in dtype.names:
        fld_dtype, fld_offset, title = _unpack_field(*dtype.fields[name])

        item = "("
        if title is not None:
            # titled fields render as ((title, name), ...)
            item += "({!r}, {!r}), ".format(title, name)
        else:
            item += "{!r}, ".format(name)
        # Special case subarray handling here
        if fld_dtype.subdtype is not None:
            base, shape = fld_dtype.subdtype
            item += "{}, {}".format(
                _construction_repr(base, short=True),
                shape
            )
        else:
            item += _construction_repr(fld_dtype, short=True)

        item += ")"
        items.append(item)

    return "[" + ", ".join(items) + "]"
303
+
304
+
305
def _struct_str(dtype, include_align):
    """String repr of a structured dtype, as a field list or a fields dict."""
    # The list str representation can't include the 'align=' flag,
    # so if it is requested and the struct has the aligned flag set,
    # we must use the dict str instead.
    if not (include_align and dtype.isalignedstruct) and _is_packed(dtype):
        sub = _struct_list_str(dtype)

    else:
        sub = _struct_dict_str(dtype, include_align)

    # If the data type isn't the default, void, show it
    if dtype.type != np.void:
        return "({t.__module__}.{t.__name__}, {f})".format(t=dtype.type, f=sub)
    else:
        return sub
320
+
321
+
322
def _subarray_str(dtype):
    # Repr of a subarray dtype as a "(base_dtype, shape)" tuple string.
    base, shape = dtype.subdtype
    return "({}, {})".format(
        _construction_repr(base, short=True),
        shape
    )
328
+
329
+
330
+ def _name_includes_bit_suffix(dtype):
331
+ if dtype.type == np.object_:
332
+ # pointer size varies by system, best to omit it
333
+ return False
334
+ elif dtype.type == np.bool_:
335
+ # implied
336
+ return False
337
+ elif dtype.type is None:
338
+ return True
339
+ elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
340
+ # unspecified
341
+ return False
342
+ else:
343
+ return True
344
+
345
+
346
def _name_get(dtype):
    # provides dtype.name.__get__, documented as returning a "bit name"

    if dtype.isbuiltin == 2:
        # user dtypes don't promise to do anything special
        return dtype.type.__name__

    if dtype.kind == '\x00':
        # kind is unset: fall back to the dtype class's own name
        name = type(dtype).__name__
    elif issubclass(dtype.type, np.void):
        # historically, void subclasses preserve their name, eg `record64`
        name = dtype.type.__name__
    else:
        name = _kind_name(dtype)

    # append bit counts
    if _name_includes_bit_suffix(dtype):
        name += "{}".format(dtype.itemsize * 8)

    # append metadata to datetimes
    if dtype.type in (np.datetime64, np.timedelta64):
        name += _datetime_metadata_str(dtype)

    return name
venv/lib/python3.10/site-packages/numpy/core/_dtype_ctypes.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Conversion from ctypes to dtype.
3
+
4
+ In an ideal world, we could achieve this through the PEP3118 buffer protocol,
5
+ something like::
6
+
7
+ def dtype_from_ctypes_type(t):
8
+ # needed to ensure that the shape of `t` is within memoryview.format
9
+ class DummyStruct(ctypes.Structure):
10
+ _fields_ = [('a', t)]
11
+
12
+ # empty to avoid memory allocation
13
+ ctype_0 = (DummyStruct * 0)()
14
+ mv = memoryview(ctype_0)
15
+
16
+ # convert the struct, and slice back out the field
17
+ return _dtype_from_pep3118(mv.format)['a']
18
+
19
+ Unfortunately, this fails because:
20
+
21
+ * ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
22
+ * PEP3118 cannot represent unions, but both numpy and ctypes can
23
+ * ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
24
+ """
25
+
26
+ # We delay-import ctypes for distributions that do not include it.
27
+ # While this module is not used unless the user passes in ctypes
28
+ # members, it is eagerly imported from numpy/core/__init__.py.
29
+ import numpy as np
30
+
31
+
32
def _from_ctypes_array(t):
    # A ctypes array maps onto a numpy subarray dtype of the element's
    # dtype, with the array length as the (1-d) shape.
    return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,)))
34
+
35
+
36
def _from_ctypes_structure(t):
    """Convert a ctypes.Structure subclass into an equivalent structured
    dtype, honouring ``_pack_`` when present."""
    for item in t._fields_:
        if len(item) > 2:
            # a third element is a ctypes bit-width specifier
            raise TypeError(
                "ctypes bitfields have no dtype equivalent")

    if hasattr(t, "_pack_"):
        import ctypes
        formats = []
        offsets = []
        names = []
        current_offset = 0
        for fname, ftyp in t._fields_:
            names.append(fname)
            formats.append(dtype_from_ctypes_type(ftyp))
            # Each type has a default offset, this is platform dependent
            # for some types. ``_pack_`` caps the natural alignment of
            # each field; it never increases it.
            effective_pack = min(t._pack_, ctypes.alignment(ftyp))
            current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack
            offsets.append(current_offset)
            current_offset += ctypes.sizeof(ftyp)

        return np.dtype(dict(
            formats=formats,
            offsets=offsets,
            names=names,
            itemsize=ctypes.sizeof(t)))
    else:
        fields = []
        for fname, ftyp in t._fields_:
            fields.append((fname, dtype_from_ctypes_type(ftyp)))

        # by default, ctypes structs are aligned
        return np.dtype(fields, align=True)
69
+
70
+
71
+ def _from_ctypes_scalar(t):
72
+ """
73
+ Return the dtype type with endianness included if it's the case
74
+ """
75
+ if getattr(t, '__ctype_be__', None) is t:
76
+ return np.dtype('>' + t._type_)
77
+ elif getattr(t, '__ctype_le__', None) is t:
78
+ return np.dtype('<' + t._type_)
79
+ else:
80
+ return np.dtype(t._type_)
81
+
82
+
83
def _from_ctypes_union(t):
    # Convert a ctypes.Union subclass: every field overlaps at offset 0
    # and the itemsize is the size of the whole union.
    import ctypes
    formats = []
    offsets = []
    names = []
    for fname, ftyp in t._fields_:
        names.append(fname)
        formats.append(dtype_from_ctypes_type(ftyp))
        offsets.append(0)  # Union fields are offset to 0

    return np.dtype(dict(
        formats=formats,
        offsets=offsets,
        names=names,
        itemsize=ctypes.sizeof(t)))
98
+
99
+
100
def dtype_from_ctypes_type(t):
    """
    Construct a dtype object from a ctypes type
    """
    # dispatch on the private ``_ctypes`` base classes rather than the
    # public ctypes module, so user subclasses are handled uniformly
    import _ctypes
    if issubclass(t, _ctypes.Array):
        return _from_ctypes_array(t)
    elif issubclass(t, _ctypes._Pointer):
        raise TypeError("ctypes pointers have no dtype equivalent")
    elif issubclass(t, _ctypes.Structure):
        return _from_ctypes_structure(t)
    elif issubclass(t, _ctypes.Union):
        return _from_ctypes_union(t)
    elif isinstance(getattr(t, '_type_', None), str):
        # scalar types carry their single-character typecode in ``_type_``
        return _from_ctypes_scalar(t)
    else:
        raise NotImplementedError(
            "Unknown ctypes type {}".format(t.__name__))
venv/lib/python3.10/site-packages/numpy/core/_exceptions.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Various richly-typed exceptions, that also help us deal with string formatting
3
+ in python where it's easier.
4
+
5
+ By putting the formatting in `__str__`, we also avoid paying the cost for
6
+ users who silence the exceptions.
7
+ """
8
+ from .._utils import set_module
9
+
10
+ def _unpack_tuple(tup):
11
+ if len(tup) == 1:
12
+ return tup[0]
13
+ else:
14
+ return tup
15
+
16
+
17
+ def _display_as_base(cls):
18
+ """
19
+ A decorator that makes an exception class look like its base.
20
+
21
+ We use this to hide subclasses that are implementation details - the user
22
+ should catch the base type, which is what the traceback will show them.
23
+
24
+ Classes decorated with this decorator are subject to removal without a
25
+ deprecation warning.
26
+ """
27
+ assert issubclass(cls, Exception)
28
+ cls.__name__ = cls.__base__.__name__
29
+ return cls
30
+
31
+
32
class UFuncTypeError(TypeError):
    """Base class for all ufunc exceptions; records the offending ufunc."""

    def __init__(self, ufunc):
        # keep a reference so subclasses can format rich messages lazily
        self.ufunc = ufunc
36
+
37
+
38
@_display_as_base
class _UFuncNoLoopError(UFuncTypeError):
    """ Thrown when a ufunc loop cannot be found """
    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc)
        # requested input+output dtypes, in ufunc signature order
        self.dtypes = tuple(dtypes)

    def __str__(self):
        # split dtypes into inputs -> outputs for the message
        return (
            "ufunc {!r} did not contain a loop with signature matching types "
            "{!r} -> {!r}"
        ).format(
            self.ufunc.__name__,
            _unpack_tuple(self.dtypes[:self.ufunc.nin]),
            _unpack_tuple(self.dtypes[self.ufunc.nin:])
        )
54
+
55
+
56
@_display_as_base
class _UFuncBinaryResolutionError(_UFuncNoLoopError):
    """ Thrown when a binary resolution fails """
    def __init__(self, ufunc, dtypes):
        super().__init__(ufunc, dtypes)
        # binary resolution only ever involves the two operand dtypes
        assert len(self.dtypes) == 2

    def __str__(self):
        return (
            "ufunc {!r} cannot use operands with types {!r} and {!r}"
        ).format(
            self.ufunc.__name__, *self.dtypes
        )
69
+
70
+
71
@_display_as_base
class _UFuncCastingError(UFuncTypeError):
    # Common state for input/output casting failures: the casting rule
    # used and the (from, to) dtype pair that violated it.
    def __init__(self, ufunc, casting, from_, to):
        super().__init__(ufunc)
        self.casting = casting
        self.from_ = from_
        self.to = to
78
+
79
+
80
@_display_as_base
class _UFuncInputCastingError(_UFuncCastingError):
    """ Thrown when a ufunc input cannot be casted """
    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        # index of the offending input operand
        self.in_i = i

    def __str__(self):
        # only show the number if more than one input exists
        i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else ""
        return (
            "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting "
            "rule {!r}"
        ).format(
            self.ufunc.__name__, i_str, self.from_, self.to, self.casting
        )
96
+
97
+
98
@_display_as_base
class _UFuncOutputCastingError(_UFuncCastingError):
    """ Thrown when a ufunc output cannot be casted """
    def __init__(self, ufunc, casting, from_, to, i):
        super().__init__(ufunc, casting, from_, to)
        # index of the offending output operand
        self.out_i = i

    def __str__(self):
        # only show the number if more than one output exists
        i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else ""
        return (
            "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting "
            "rule {!r}"
        ).format(
            self.ufunc.__name__, i_str, self.from_, self.to, self.casting
        )
114
+
115
+
116
@_display_as_base
class _ArrayMemoryError(MemoryError):
    """ Thrown when an array cannot be allocated"""
    def __init__(self, shape, dtype):
        self.shape = shape
        self.dtype = dtype

    @property
    def _total_size(self):
        # total allocation size in bytes: itemsize * prod(shape)
        num_bytes = self.dtype.itemsize
        for dim in self.shape:
            num_bytes *= dim
        return num_bytes

    @staticmethod
    def _size_to_string(num_bytes):
        """ Convert a number of bytes into a binary size string """

        # https://en.wikipedia.org/wiki/Binary_prefix
        LOG2_STEP = 10
        STEP = 1024
        units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']

        # pick the largest unit <= num_bytes (0 bytes still maps to 'bytes')
        unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
        unit_val = 1 << (unit_i * LOG2_STEP)
        n_units = num_bytes / unit_val
        del unit_val

        # ensure we pick a unit that is correct after rounding
        if round(n_units) == STEP:
            unit_i += 1
            n_units /= STEP

        # deal with sizes so large that we don't have units for them
        if unit_i >= len(units):
            new_unit_i = len(units) - 1
            n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
            unit_i = new_unit_i

        unit_name = units[unit_i]
        # format with a sensible number of digits
        if unit_i == 0:
            # no decimal point on bytes
            return '{:.0f} {}'.format(n_units, unit_name)
        elif round(n_units) < 1000:
            # 3 significant figures, if none are dropped to the left of the .
            return '{:#.3g} {}'.format(n_units, unit_name)
        else:
            # just give all the digits otherwise
            return '{:#.0f} {}'.format(n_units, unit_name)

    def __str__(self):
        size_str = self._size_to_string(self._total_size)
        return (
            "Unable to allocate {} for an array with shape {} and data type {}"
            .format(size_str, self.shape, self.dtype)
        )
venv/lib/python3.10/site-packages/numpy/core/_internal.py ADDED
@@ -0,0 +1,935 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ A place for internal code
3
+
4
+ Some things are more easily handled Python.
5
+
6
+ """
7
+ import ast
8
+ import re
9
+ import sys
10
+ import warnings
11
+
12
+ from ..exceptions import DTypePromotionError
13
+ from .multiarray import dtype, array, ndarray, promote_types
14
+ try:
15
+ import ctypes
16
+ except ImportError:
17
+ ctypes = None
18
+
19
+ IS_PYPY = sys.implementation.name == 'pypy'
20
+
21
+ if sys.byteorder == 'little':
22
+ _nbo = '<'
23
+ else:
24
+ _nbo = '>'
25
+
26
+ def _makenames_list(adict, align):
27
+ allfields = []
28
+
29
+ for fname, obj in adict.items():
30
+ n = len(obj)
31
+ if not isinstance(obj, tuple) or n not in (2, 3):
32
+ raise ValueError("entry not a 2- or 3- tuple")
33
+ if n > 2 and obj[2] == fname:
34
+ continue
35
+ num = int(obj[1])
36
+ if num < 0:
37
+ raise ValueError("invalid offset.")
38
+ format = dtype(obj[0], align=align)
39
+ if n > 2:
40
+ title = obj[2]
41
+ else:
42
+ title = None
43
+ allfields.append((fname, format, num, title))
44
+ # sort by offsets
45
+ allfields.sort(key=lambda x: x[2])
46
+ names = [x[0] for x in allfields]
47
+ formats = [x[1] for x in allfields]
48
+ offsets = [x[2] for x in allfields]
49
+ titles = [x[3] for x in allfields]
50
+
51
+ return names, formats, offsets, titles
52
+
53
+ # Called in PyArray_DescrConverter function when
54
+ # a dictionary without "names" and "formats"
55
+ # fields is used as a data-type descriptor.
56
def _usefields(adict, align):
    # Called in PyArray_DescrConverter when a dictionary without "names"
    # and "formats" keys is used as a data-type descriptor.  An optional
    # adict[-1] entry supplies an explicit field ordering.
    try:
        names = adict[-1]
    except KeyError:
        names = None
    if names is None:
        # no explicit ordering: derive everything from the entries
        names, formats, offsets, titles = _makenames_list(adict, align)
    else:
        formats = []
        offsets = []
        titles = []
        for name in names:
            res = adict[name]
            formats.append(res[0])
            offsets.append(res[1])
            if len(res) > 2:
                titles.append(res[2])
            else:
                titles.append(None)

    return dtype({"names": names,
                  "formats": formats,
                  "offsets": offsets,
                  "titles": titles}, align)
80
+
81
+
82
+ # construct an array_protocol descriptor list
83
+ # from the fields attribute of a descriptor
84
+ # This calls itself recursively but should eventually hit
85
+ # a descriptor that has no fields and then return
86
+ # a simple typestring
87
+
88
def _array_descr(descriptor):
    """Build an array-interface descriptor list from a dtype, recursing
    into structured/subarray dtypes; plain dtypes collapse to their
    typestring (optionally paired with their metadata dict)."""
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            if descriptor.metadata is None:
                return descriptor.str
            else:
                # attach a copy of any non-empty metadata dict
                new = descriptor.metadata.copy()
                if new:
                    return (descriptor.str, new)
                else:
                    return descriptor.str
        else:
            return (_array_descr(subdtype[0]), subdtype[1])

    names = descriptor.names
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        if field[1] > offset:
            # gap before this field: represent padding as an unnamed void
            num = field[1] - offset
            result.append(('', f'|V{num}'))
            offset += num
        elif field[1] < offset:
            raise ValueError(
                "dtype.descr is not defined for types with overlapping or "
                "out-of-order fields")
        if len(field) > 3:
            # field carries a title: emit a (title, name) pair
            name = (field[2], field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)

    if descriptor.itemsize > offset:
        # trailing padding after the last field
        num = descriptor.itemsize - offset
        result.append(('', f'|V{num}'))

    return result
134
+
135
+ # Build a new array from the information in a pickle.
136
+ # Note that the name numpy.core._internal._reconstruct is embedded in
137
+ # pickles of ndarrays made with NumPy before release 1.0
138
+ # so don't remove the name here, or you'll
139
+ # break backward compatibility.
140
def _reconstruct(subtype, shape, dtype):
    # Build a new array from the information in a pickle.
    # The name numpy.core._internal._reconstruct is embedded in pickles of
    # ndarrays made with NumPy before release 1.0, so this function must
    # not be renamed or moved, or old pickles will break.
    return ndarray.__new__(subtype, shape, dtype)
142
+
143
+
144
+ # format_re was originally from numarray by J. Todd Miller
145
+
146
# Regexes for parsing comma-separated dtype strings ("commastring" format).
# format_re matches one item: optional byte order, optional repeat/shape,
# optional second byte-order position, then the type code.
format_re = re.compile(r'(?P<order1>[<>|=]?)'
                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
                       r'(?P<order2>[<>|=]?)'
                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(r'\s*,\s*')  # item separator
space_re = re.compile(r'\s+$')   # trailing whitespace padding

# astr is a string (perhaps comma separated)

# maps '=' (native order) to the machine's explicit '<' or '>' character
_convorder = {'=': _nbo}
156
+
157
def _commastring(astr):
    """Parse a comma-separated dtype string into a list of typestring or
    (typestring, shape) items suitable for building a structured dtype."""
    startindex = 0
    result = []
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            # mo is None when nothing matched at this position
            raise ValueError(
                f'format number {len(result)+1} of "{astr}" is not recognized'
            ) from None
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    raise ValueError(
                        'format number %d of "%s" is not recognized' %
                        (len(result)+1, astr))
                startindex = mo.end()

        # the byte order may legally appear before or after the repeat count
        if order2 == '':
            order = order1
        elif order1 == '':
            order = order2
        else:
            # both positions given: they must agree once '=' is normalized
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    'inconsistent byte-order specification %s and %s' %
                    (order1, order2))
            order = order1

        if order in ('|', '=', _nbo):
            # native / not-applicable orders are dropped from the typestring
            order = ''
        dtype = order + dtype
        if (repeats == ''):
            newitem = dtype
        else:
            newitem = (dtype, ast.literal_eval(repeats))
        result.append(newitem)

    return result
204
+
205
class dummy_ctype:
    # Minimal stand-in for a ctypes type object, used when the ctypes
    # module is unavailable (e.g. stripped-down distributions).
    def __init__(self, cls):
        self._cls = cls
    def __mul__(self, other):
        # mimic ctypes array construction (ctype * n); the length is ignored
        return self
    def __call__(self, *other):
        return self._cls(other)
    def __eq__(self, other):
        return self._cls == other._cls
    def __ne__(self, other):
        return self._cls != other._cls
216
+
217
+ def _getintp_ctype():
218
+ val = _getintp_ctype.cache
219
+ if val is not None:
220
+ return val
221
+ if ctypes is None:
222
+ import numpy as np
223
+ val = dummy_ctype(np.intp)
224
+ else:
225
+ char = dtype('p').char
226
+ if char == 'i':
227
+ val = ctypes.c_int
228
+ elif char == 'l':
229
+ val = ctypes.c_long
230
+ elif char == 'q':
231
+ val = ctypes.c_longlong
232
+ else:
233
+ val = ctypes.c_long
234
+ _getintp_ctype.cache = val
235
+ return val
236
+ _getintp_ctype.cache = None
237
+
238
+ # Used for .ctypes attribute of ndarray
239
+
240
+ class _missing_ctypes:
241
+ def cast(self, num, obj):
242
+ return num.value
243
+
244
+ class c_void_p:
245
+ def __init__(self, ptr):
246
+ self.value = ptr
247
+
248
+
249
class _ctypes:
    # Implements the object returned by ``ndarray.ctypes``: ctypes-friendly
    # views of the array's data pointer, shape and strides.
    def __init__(self, array, ptr=None):
        self._arr = array

        if ctypes:
            self._ctypes = ctypes
            self._data = self._ctypes.c_void_p(ptr)
        else:
            # fake a pointer-like object that holds onto the reference
            self._ctypes = _missing_ctypes()
            self._data = self._ctypes.c_void_p(ptr)
            self._data._objects = array

        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False

    def data_as(self, obj):
        """
        Return the data pointer cast to a particular c-types object.
        For example, calling ``self._as_parameter_`` is equivalent to
        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
        pointer to a ctypes array of floating-point data:
        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.

        The returned pointer will keep a reference to the array.
        """
        # _ctypes.cast function causes a circular reference of self._data in
        # self._data._objects. Attributes of self._data cannot be released
        # until gc.collect is called. Make a copy of the pointer first then let
        # it hold the array reference. This is a workaround to circumvent the
        # CPython bug https://bugs.python.org/issue12836
        ptr = self._ctypes.cast(self._data, obj)
        ptr._arr = self._arr
        return ptr

    def shape_as(self, obj):
        """
        Return the shape tuple as an array of some other c-types
        type. For example: ``self.shape_as(ctypes.c_short)``.
        """
        # 0-d arrays have no meaningful shape array
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.shape)

    def strides_as(self, obj):
        """
        Return the strides tuple as an array of some other
        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.strides)

    @property
    def data(self):
        """
        A pointer to the memory area of the array as a Python integer.
        This memory area may contain data that is not aligned, or not in correct
        byte-order. The memory area may not even be writeable. The array
        flags and data-type of this array should be respected when passing this
        attribute to arbitrary C-code to avoid trouble that can include Python
        crashing. User Beware! The value of this attribute is exactly the same
        as ``self._array_interface_['data'][0]``.

        Note that unlike ``data_as``, a reference will not be kept to the array:
        code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
        pointer to a deallocated array, and should be spelt
        ``(a + b).ctypes.data_as(ctypes.c_void_p)``
        """
        return self._data.value

    @property
    def shape(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the C-integer corresponding to ``dtype('p')`` on this
        platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
        `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
        the platform. The ctypes array contains the shape of
        the underlying array.
        """
        return self.shape_as(_getintp_ctype())

    @property
    def strides(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the same as for the shape attribute. This ctypes array
        contains the strides information from the underlying array. This strides
        information is important for showing how many bytes must be jumped to
        get to the next element in the array.
        """
        return self.strides_as(_getintp_ctype())

    @property
    def _as_parameter_(self):
        """
        Overrides the ctypes semi-magic method

        Enables `c_func(some_array.ctypes)`
        """
        return self.data_as(ctypes.c_void_p)

    # Numpy 1.21.0, 2021-05-18
    # The get_* methods below are deprecated aliases for the properties.

    def get_data(self):
        """Deprecated getter for the `_ctypes.data` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_data" is deprecated. Use "data" instead',
                      DeprecationWarning, stacklevel=2)
        return self.data

    def get_shape(self):
        """Deprecated getter for the `_ctypes.shape` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_shape" is deprecated. Use "shape" instead',
                      DeprecationWarning, stacklevel=2)
        return self.shape

    def get_strides(self):
        """Deprecated getter for the `_ctypes.strides` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_strides" is deprecated. Use "strides" instead',
                      DeprecationWarning, stacklevel=2)
        return self.strides

    def get_as_parameter(self):
        """Deprecated getter for the `_ctypes._as_parameter_` property.

        .. deprecated:: 1.21
        """
        warnings.warn(
            '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
            DeprecationWarning, stacklevel=2,
        )
        return self._as_parameter_
393
+
394
+
395
+ def _newnames(datatype, order):
396
+ """
397
+ Given a datatype and an order object, return a new names tuple, with the
398
+ order indicated
399
+ """
400
+ oldnames = datatype.names
401
+ nameslist = list(oldnames)
402
+ if isinstance(order, str):
403
+ order = [order]
404
+ seen = set()
405
+ if isinstance(order, (list, tuple)):
406
+ for name in order:
407
+ try:
408
+ nameslist.remove(name)
409
+ except ValueError:
410
+ if name in seen:
411
+ raise ValueError(f"duplicate field name: {name}") from None
412
+ else:
413
+ raise ValueError(f"unknown field name: {name}") from None
414
+ seen.add(name)
415
+ return tuple(list(order) + nameslist)
416
+ raise ValueError(f"unsupported order value: {order}")
417
+
418
+ def _copy_fields(ary):
419
+ """Return copy of structured array with padding between fields removed.
420
+
421
+ Parameters
422
+ ----------
423
+ ary : ndarray
424
+ Structured array from which to remove padding bytes
425
+
426
+ Returns
427
+ -------
428
+ ary_copy : ndarray
429
+ Copy of ary with padding bytes removed
430
+ """
431
+ dt = ary.dtype
432
+ copy_dtype = {'names': dt.names,
433
+ 'formats': [dt.fields[name][0] for name in dt.names]}
434
+ return array(ary, dtype=copy_dtype, copy=True)
435
+
436
def _promote_fields(dt1, dt2):
    """ Perform type promotion for two structured dtypes.

    Parameters
    ----------
    dt1 : structured dtype
        First dtype.
    dt2 : structured dtype
        Second dtype.

    Returns
    -------
    out : dtype
        The promoted dtype

    Notes
    -----
    If one of the inputs is aligned, the result will be.  The titles of
    both descriptors must match (point to the same field).
    """
    # Both must be structured and have the same names in the same order
    if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
        raise DTypePromotionError(
            f"field names `{dt1.names}` and `{dt2.names}` mismatch.")

    # if both are identical, we can (maybe!) just return the same dtype.
    identical = dt1 is dt2
    new_fields = []
    for name in dt1.names:
        field1 = dt1.fields[name]
        field2 = dt2.fields[name]
        new_descr = promote_types(field1[0], field2[0])
        # any field whose promoted dtype is a new object breaks identity
        identical = identical and new_descr is field1[0]

        # Check that the titles match (if given):
        if field1[2:] != field2[2:]:
            raise DTypePromotionError(
                f"field titles of field '{name}' mismatch")
        if len(field1) == 2:
            new_fields.append((name, new_descr))
        else:
            new_fields.append(((field1[2], name), new_descr))

    res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)

    # Might as well preserve identity (and metadata) if the dtype is identical
    # and the itemsize, offsets are also unmodified. This could probably be
    # sped up, but also probably just be removed entirely.
    if identical and res.itemsize == dt1.itemsize:
        for name in dt1.names:
            if dt1.fields[name][1] != res.fields[name][1]:
                return res  # the dtype changed.
        return dt1

    return res
491
+
492
+
493
+ def _getfield_is_safe(oldtype, newtype, offset):
494
+ """ Checks safety of getfield for object arrays.
495
+
496
+ As in _view_is_safe, we need to check that memory containing objects is not
497
+ reinterpreted as a non-object datatype and vice versa.
498
+
499
+ Parameters
500
+ ----------
501
+ oldtype : data-type
502
+ Data type of the original ndarray.
503
+ newtype : data-type
504
+ Data type of the field being accessed by ndarray.getfield
505
+ offset : int
506
+ Offset of the field being accessed by ndarray.getfield
507
+
508
+ Raises
509
+ ------
510
+ TypeError
511
+ If the field access is invalid
512
+
513
+ """
514
+ if newtype.hasobject or oldtype.hasobject:
515
+ if offset == 0 and newtype == oldtype:
516
+ return
517
+ if oldtype.names is not None:
518
+ for name in oldtype.names:
519
+ if (oldtype.fields[name][1] == offset and
520
+ oldtype.fields[name][0] == newtype):
521
+ return
522
+ raise TypeError("Cannot get/set field of an object array")
523
+ return
524
+
525
+ def _view_is_safe(oldtype, newtype):
526
+ """ Checks safety of a view involving object arrays, for example when
527
+ doing::
528
+
529
+ np.zeros(10, dtype=oldtype).view(newtype)
530
+
531
+ Parameters
532
+ ----------
533
+ oldtype : data-type
534
+ Data type of original ndarray
535
+ newtype : data-type
536
+ Data type of the view
537
+
538
+ Raises
539
+ ------
540
+ TypeError
541
+ If the new type is incompatible with the old type.
542
+
543
+ """
544
+
545
+ # if the types are equivalent, there is no problem.
546
+ # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
547
+ if oldtype == newtype:
548
+ return
549
+
550
+ if newtype.hasobject or oldtype.hasobject:
551
+ raise TypeError("Cannot change data-type for object array.")
552
+ return
553
+
554
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype
#
# Two code tables are needed: with a native byte order character ('@' or
# '^') the struct-module codes map to native-sized NumPy types, while any
# other byte order selects the standard (fixed-size) types.  NOTE: the
# *insertion order* of these dicts matters — the `typechars` strings are
# built from the keys and 'Z' must precede the plain letters so that the
# two-character complex codes ('Zf', 'Zd', 'Zg') are recognised.

_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
# Characters accepted in native mode (membership is tested one char at a time).
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
# Characters accepted in standard-size mode.
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())

# PEP 3118 codes that have no NumPy equivalent; maps code -> human-readable
# description used in the NotImplementedError message.
_pep3118_unsupported_map = {
    'u': 'UCS-2 strings',
    '&': 'pointers',
    't': 'bitfields',
    'X': 'function pointers',
}
615
+
616
+ class _Stream:
617
+ def __init__(self, s):
618
+ self.s = s
619
+ self.byteorder = '@'
620
+
621
+ def advance(self, n):
622
+ res = self.s[:n]
623
+ self.s = self.s[n:]
624
+ return res
625
+
626
+ def consume(self, c):
627
+ if self.s[:len(c)] == c:
628
+ self.advance(len(c))
629
+ return True
630
+ return False
631
+
632
+ def consume_until(self, c):
633
+ if callable(c):
634
+ i = 0
635
+ while i < len(self.s) and not c(self.s[i]):
636
+ i = i + 1
637
+ return self.advance(i)
638
+ else:
639
+ i = self.s.index(c)
640
+ res = self.advance(i)
641
+ self.advance(len(c))
642
+ return res
643
+
644
+ @property
645
+ def next(self):
646
+ return self.s[0]
647
+
648
+ def __bool__(self):
649
+ return bool(self.s)
650
+
651
+
652
def _dtype_from_pep3118(spec):
    """Construct a NumPy dtype from the PEP 3118 format string *spec*."""
    result, _ = __dtype_from_pep3118(_Stream(spec), is_subdtype=False)
    return result
656
+
657
def __dtype_from_pep3118(stream, is_subdtype):
    """Recursive worker for `_dtype_from_pep3118`.

    Consumes fields from `stream` until it is exhausted or a ``'}'`` closes
    the current ``T{...}`` sub-structure, and returns a
    ``(dtype, common_alignment)`` pair, where ``common_alignment`` is the
    least common multiple of the alignments of the fields seen.
    """
    # Accumulates the struct being built, in the dict form accepted by dtype().
    field_spec = dict(
        names=[],
        formats=[],
        offsets=[],
        itemsize=0
    )
    offset = 0
    common_alignment = 1
    is_padding = False

    # Parse spec
    while stream:
        value = None

        # End of structure, bail out to upper level
        if stream.consume('}'):
            break

        # Sub-arrays (1): a leading "(d1,d2,...)" shape prefix
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))

        # Byte order ('!' is an alias for big-endian '>')
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                byteorder = '>'
            stream.byteorder = byteorder

        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes: an optional decimal repeat count before the type code
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1

        # Data types
        is_padding = False

        if stream.consume('T{'):
            # Nested structure: recurse; the matching '}' ends the recursion.
            value, align = __dtype_from_pep3118(
                stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            # 'Z' prefixes the two-character complex codes (Zf/Zd/Zg).
            if stream.next == 'Z':
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)

            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                # For strings/void the repeat count is the item length,
                # not a sub-array count.
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(
                stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        elif stream.next in _pep3118_unsupported_map:
            desc = _pep3118_unsupported_map[stream.next]
            raise NotImplementedError(
                "Unrepresentable PEP 3118 data type {!r} ({})"
                .format(stream.next, desc))
        else:
            raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly implies
        # that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = _lcm(align, common_alignment)

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))

        # Sub-arrays (2): apply the "(...)" shape prefix parsed above
        if shape is not None:
            value = dtype((value, shape))

        # Field name: an optional ":name:" suffix
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None

        # Anonymous padding ('x' with no name) is folded into the offsets
        # rather than stored as a field.
        if not (is_padding and name is None):
            if name is not None and name in field_spec['names']:
                raise RuntimeError(f"Duplicate field name '{name}' in PEP3118 format")
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)

        offset += value.itemsize
        offset += extra_offset

    field_spec['itemsize'] = offset

    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment

    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)

    # Finished
    return ret, common_alignment
800
+
801
+ def _fix_names(field_spec):
802
+ """ Replace names which are None with the next unused f%d name """
803
+ names = field_spec['names']
804
+ for i, name in enumerate(names):
805
+ if name is not None:
806
+ continue
807
+
808
+ j = 0
809
+ while True:
810
+ name = f'f{j}'
811
+ if name not in names:
812
+ break
813
+ j = j + 1
814
+ names[i] = name
815
+
816
+ def _add_trailing_padding(value, padding):
817
+ """Inject the specified number of padding bytes at the end of a dtype"""
818
+ if value.fields is None:
819
+ field_spec = dict(
820
+ names=['f0'],
821
+ formats=[value],
822
+ offsets=[0],
823
+ itemsize=value.itemsize
824
+ )
825
+ else:
826
+ fields = value.fields
827
+ names = value.names
828
+ field_spec = dict(
829
+ names=names,
830
+ formats=[fields[name][0] for name in names],
831
+ offsets=[fields[name][1] for name in names],
832
+ itemsize=value.itemsize
833
+ )
834
+
835
+ field_spec['itemsize'] += padding
836
+ return dtype(field_spec)
837
+
838
+ def _prod(a):
839
+ p = 1
840
+ for x in a:
841
+ p *= x
842
+ return p
843
+
844
+ def _gcd(a, b):
845
+ """Calculate the greatest common divisor of a and b"""
846
+ while b:
847
+ a, b = b, a % b
848
+ return a
849
+
850
def _lcm(a, b):
    """Least common multiple of *a* and *b*."""
    # Divide before multiplying to keep intermediates small.
    divisor = _gcd(a, b)
    return a // divisor * b
852
+
853
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """ Format the error message for when __array_ufunc__ gives up. """
    # Render the call as it was made: positional reprs, then key=value pairs.
    pieces = [repr(arg) for arg in inputs]
    pieces += [f'{key}={val!r}' for key, val in kwargs.items()]
    args_string = ', '.join(pieces)
    # `out` operands also participated in dispatch, so list their types too.
    args = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(arg).__name__) for arg in args)
    return ('operand type(s) all returned NotImplemented from '
            f'__array_ufunc__({ufunc!r}, {method!r}, {args_string}): '
            f'{types_string}')
863
+
864
+
865
def array_function_errmsg_formatter(public_api, types):
    """ Format the error message for when __array_function__ gives up. """
    func_name = f'{public_api.__module__}.{public_api.__name__}'
    return (f"no implementation found for '{func_name}' on types that "
            f"implement __array_function__: {list(types)}")
870
+
871
+
872
+ def _ufunc_doc_signature_formatter(ufunc):
873
+ """
874
+ Builds a signature string which resembles PEP 457
875
+
876
+ This is used to construct the first line of the docstring
877
+ """
878
+
879
+ # input arguments are simple
880
+ if ufunc.nin == 1:
881
+ in_args = 'x'
882
+ else:
883
+ in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))
884
+
885
+ # output arguments are both keyword or positional
886
+ if ufunc.nout == 0:
887
+ out_args = ', /, out=()'
888
+ elif ufunc.nout == 1:
889
+ out_args = ', /, out=None'
890
+ else:
891
+ out_args = '[, {positional}], / [, out={default}]'.format(
892
+ positional=', '.join(
893
+ 'out{}'.format(i+1) for i in range(ufunc.nout)),
894
+ default=repr((None,)*ufunc.nout)
895
+ )
896
+
897
+ # keyword only args depend on whether this is a gufunc
898
+ kwargs = (
899
+ ", casting='same_kind'"
900
+ ", order='K'"
901
+ ", dtype=None"
902
+ ", subok=True"
903
+ )
904
+
905
+ # NOTE: gufuncs may or may not support the `axis` parameter
906
+ if ufunc.signature is None:
907
+ kwargs = f", where=True{kwargs}[, signature, extobj]"
908
+ else:
909
+ kwargs += "[, signature, extobj, axes, axis]"
910
+
911
+ # join all the parts together
912
+ return '{name}({in_args}{out_args}, *{kwargs})'.format(
913
+ name=ufunc.__name__,
914
+ in_args=in_args,
915
+ out_args=out_args,
916
+ kwargs=kwargs
917
+ )
918
+
919
+
920
def npy_ctypes_check(cls):
    # determine if a class comes from ctypes, in order to work around
    # a bug in the buffer protocol for those objects, bpo-10746
    try:
        # ctypes classes are new-style, so have an __mro__. This probably
        # fails for ctypes classes with multiple inheritance.
        # On PyPy the ctypes base sits one level deeper in the MRO:
        #   PyPy:    (..., _ctypes.basics._CData, Bufferable, object)
        #   CPython: (..., _ctypes._CData, object)
        base_index = -3 if IS_PYPY else -2
        ctype_base = cls.__mro__[base_index]
        # right now, ctypes base classes live in the _ctypes module
        return '_ctypes' in ctype_base.__module__
    except Exception:
        # best-effort check: anything unexpected means "not ctypes"
        return False
venv/lib/python3.10/site-packages/numpy/core/_internal.pyi ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from typing import Any, TypeVar, overload, Generic
import ctypes as ct

from numpy import ndarray
from numpy.ctypeslib import c_intp

_CastT = TypeVar("_CastT", bound=ct._CanCastTo)  # Copied from `ctypes.cast`
_CT = TypeVar("_CT", bound=ct._CData)
# Type of the `ptr` constructor argument; tracked so `data` can be typed
# accordingly.
_PT = TypeVar("_PT", bound=None | int)

# TODO: Let the likes of `shape_as` and `strides_as` return `None`
# for 0D arrays once we've got shape-support

# Type stub for the helper object returned by `ndarray.ctypes`.
class _ctypes(Generic[_PT]):
    @overload
    def __new__(cls, array: ndarray[Any, Any], ptr: None = ...) -> _ctypes[None]: ...
    @overload
    def __new__(cls, array: ndarray[Any, Any], ptr: _PT) -> _ctypes[_PT]: ...
    @property
    def data(self) -> _PT: ...
    @property
    def shape(self) -> ct.Array[c_intp]: ...
    @property
    def strides(self) -> ct.Array[c_intp]: ...
    @property
    def _as_parameter_(self) -> ct.c_void_p: ...

    def data_as(self, obj: type[_CastT]) -> _CastT: ...
    def shape_as(self, obj: type[_CT]) -> ct.Array[_CT]: ...
    def strides_as(self, obj: type[_CT]) -> ct.Array[_CT]: ...
venv/lib/python3.10/site-packages/numpy/core/_machar.py ADDED
@@ -0,0 +1,356 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Machine arithmetic - determine the parameters of the
floating-point arithmetic system

Author: Pearu Peterson, September 2003

"""
__all__ = ['MachAr']

from .fromnumeric import any
from ._ufunc_config import errstate
from .._utils import set_module

# Need to speed this up...especially for longfloat
# NOTE(review): `set_module` appears unused in this module's visible code —
# confirm against the rest of the file before removing.

# Deprecated 2021-10-20, NumPy 1.22
17
class MachAr:
    """
    Diagnosing machine parameters.

    Attributes
    ----------
    ibeta : int
        Radix in which numbers are represented.
    it : int
        Number of base-`ibeta` digits in the floating point mantissa M.
    machep : int
        Exponent of the smallest (most negative) power of `ibeta` that,
        added to 1.0, gives something different from 1.0
    eps : float
        Floating-point number ``beta**machep`` (floating point precision)
    negep : int
        Exponent of the smallest power of `ibeta` that, subtracted
        from 1.0, gives something different from 1.0.
    epsneg : float
        Floating-point number ``beta**negep``.
    iexp : int
        Number of bits in the exponent (including its sign and bias).
    minexp : int
        Smallest (most negative) power of `ibeta` consistent with there
        being no leading zeros in the mantissa.
    xmin : float
        Floating-point number ``beta**minexp`` (the smallest [in
        magnitude] positive floating point number with full precision).
    maxexp : int
        Smallest (positive) power of `ibeta` that causes overflow.
    xmax : float
        ``(1-epsneg) * beta**maxexp`` (the largest [in magnitude]
        usable floating value).
    irnd : int
        In ``range(6)``, information on what kind of rounding is done
        in addition, and on how underflow is handled.
    ngrd : int
        Number of 'guard digits' used when truncating the product
        of two mantissas to fit the representation.
    epsilon : float
        Same as `eps`.
    tiny : float
        An alias for `smallest_normal`, kept for backwards compatibility.
    huge : float
        Same as `xmax`.
    precision : float
        ``- int(-log10(eps))``
    resolution : float
        ``- 10**(-precision)``
    smallest_normal : float
        The smallest positive floating point number with 1 as leading bit in
        the mantissa following IEEE-754. Same as `xmin`.
    smallest_subnormal : float
        The smallest positive floating point number with 0 as leading bit in
        the mantissa following IEEE-754.

    Parameters
    ----------
    float_conv : function, optional
        Function that converts an integer or integer array to a float
        or float array. Default is `float`.
    int_conv : function, optional
        Function that converts a float or float array to an integer or
        integer array. Default is `int`.
    float_to_float : function, optional
        Function that converts a float array to float. Default is `float`.
        Note that this does not seem to do anything useful in the current
        implementation.
    float_to_str : function, optional
        Function that converts a single float to a string. Default is
        ``lambda v:'%24.16e' %v``.
    title : str, optional
        Title that is printed in the string representation of `MachAr`.

    See Also
    --------
    finfo : Machine limits for floating point types.
    iinfo : Machine limits for integer types.

    References
    ----------
    .. [1] Press, Teukolsky, Vetterling and Flannery,
           "Numerical Recipes in C++," 2nd ed,
           Cambridge University Press, 2002, p. 31.

    """

    def __init__(self, float_conv=float,int_conv=int,
                 float_to_float=float,
                 float_to_str=lambda v:'%24.16e' % v,
                 title='Python floating point number'):
        """

        float_conv - convert integer to float (array)
        int_conv - convert float (array) to integer
        float_to_float - convert float array to float
        float_to_str - convert array float to str
        title - description of used floating point numbers

        """
        # We ignore all errors here because we are purposely triggering
        # underflow to detect the properties of the running arch.
        with errstate(under='ignore'):
            self._do_init(float_conv, int_conv, float_to_float, float_to_str, title)

    def _do_init(self, float_conv, int_conv, float_to_float, float_to_str, title):
        """Run the probing loops and store the discovered parameters.

        Each `for ... else: raise` pattern below bails out with a
        RuntimeError if a probe fails to converge within `max_iterN`
        iterations.  `any` is numpy's elementwise-capable version
        (imported at module level), so the probes also work when the
        conversion functions produce arrays.
        """
        max_iterN = 10000
        msg = "Did not converge after %d tries with %s"
        one = float_conv(1)
        two = one + one
        zero = one - one

        # Do we really need to do this? Aren't they 2 and 2.0?
        # Determine ibeta and beta
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        b = one
        for _ in range(max_iterN):
            b = b + b
            temp = a + b
            itemp = int_conv(temp-a)
            if any(itemp != 0):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        ibeta = itemp
        beta = float_conv(ibeta)

        # Determine it and irnd
        it = -1
        b = one
        for _ in range(max_iterN):
            it = it + 1
            b = b * beta
            temp = b + one
            temp1 = temp - b
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))

        betah = beta / two
        a = one
        for _ in range(max_iterN):
            a = a + a
            temp = a + one
            temp1 = temp - a
            if any(temp1 - one != zero):
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        temp = a + betah
        irnd = 0
        if any(temp-a != zero):
            irnd = 1
        tempa = a + beta
        temp = tempa + betah
        if irnd == 0 and any(temp-tempa != zero):
            irnd = 2

        # Determine negep and epsneg
        negep = it + 3
        betain = one / beta
        a = one
        for i in range(negep):
            a = a * betain
        b = a
        for _ in range(max_iterN):
            temp = one - a
            if any(temp-one != zero):
                break
            a = a * beta
            negep = negep - 1
            # Prevent infinite loop on PPC with gcc 4.0:
            if negep < 0:
                raise RuntimeError("could not determine machine tolerance "
                                   "for 'negep', locals() -> %s" % (locals()))
        else:
            raise RuntimeError(msg % (_, one.dtype))
        negep = -negep
        epsneg = a

        # Determine machep and eps
        machep = - it - 3
        a = b

        for _ in range(max_iterN):
            temp = one + a
            if any(temp-one != zero):
                break
            a = a * beta
            machep = machep + 1
        else:
            raise RuntimeError(msg % (_, one.dtype))
        eps = a

        # Determine ngrd
        ngrd = 0
        temp = one + eps
        if irnd == 0 and any(temp*one - one != zero):
            ngrd = 1

        # Determine iexp
        i = 0
        k = 1
        z = betain
        t = one + eps
        nxres = 0
        for _ in range(max_iterN):
            y = z
            z = y*y
            a = z*one  # Check here for underflow
            temp = z*t
            if any(a+a == zero) or any(abs(z) >= y):
                break
            temp1 = temp * betain
            if any(temp1*beta == z):
                break
            i = i + 1
            k = k + k
        else:
            raise RuntimeError(msg % (_, one.dtype))
        if ibeta != 10:
            iexp = i + 1
            mx = k + k
        else:
            iexp = 2
            iz = ibeta
            while k >= iz:
                iz = iz * ibeta
                iexp = iexp + 1
            mx = iz + iz - 1

        # Determine minexp and xmin
        for _ in range(max_iterN):
            xmin = y
            y = y * betain
            a = y * one
            temp = y * t
            if any((a + a) != zero) and any(abs(y) < xmin):
                k = k + 1
                temp1 = temp * betain
                if any(temp1*beta == y) and any(temp != y):
                    nxres = 3
                    xmin = y
                    break
            else:
                break
        else:
            raise RuntimeError(msg % (_, one.dtype))
        minexp = -k

        # Determine maxexp, xmax
        if mx <= k + k - 3 and ibeta != 10:
            mx = mx + mx
            iexp = iexp + 1
        maxexp = mx + minexp
        irnd = irnd + nxres
        if irnd >= 2:
            maxexp = maxexp - 2
        i = maxexp + minexp
        if ibeta == 2 and not i:
            maxexp = maxexp - 1
        if i > 20:
            maxexp = maxexp - 1
        if any(a != y):
            maxexp = maxexp - 2
        xmax = one - epsneg
        if any(xmax*one != xmax):
            xmax = one - beta*epsneg
        xmax = xmax / (xmin*beta*beta*beta)
        i = maxexp + minexp + 3
        for j in range(i):
            if ibeta == 2:
                xmax = xmax + xmax
            else:
                xmax = xmax * beta

        smallest_subnormal = abs(xmin / beta ** (it))

        # Store the raw results plus string renderings of the float-valued
        # parameters (used by __str__ below).
        self.ibeta = ibeta
        self.it = it
        self.negep = negep
        self.epsneg = float_to_float(epsneg)
        self._str_epsneg = float_to_str(epsneg)
        self.machep = machep
        self.eps = float_to_float(eps)
        self._str_eps = float_to_str(eps)
        self.ngrd = ngrd
        self.iexp = iexp
        self.minexp = minexp
        self.xmin = float_to_float(xmin)
        self._str_xmin = float_to_str(xmin)
        self.maxexp = maxexp
        self.xmax = float_to_float(xmax)
        self._str_xmax = float_to_str(xmax)
        self.irnd = irnd

        self.title = title
        # Commonly used parameters
        self.epsilon = self.eps
        self.tiny = self.xmin
        self.huge = self.xmax
        self.smallest_normal = self.xmin
        self._str_smallest_normal = float_to_str(self.xmin)
        self.smallest_subnormal = float_to_float(smallest_subnormal)
        self._str_smallest_subnormal = float_to_str(smallest_subnormal)

        # Local import keeps module import cheap; only needed here.
        import math
        self.precision = int(-math.log10(float_to_float(self.eps)))
        ten = two + two + two + two + two
        resolution = ten ** (-self.precision)
        self.resolution = float_to_float(resolution)
        self._str_resolution = float_to_str(resolution)

    def __str__(self):
        """Return a human-readable table of the machine parameters."""
        fmt = (
            'Machine parameters for %(title)s\n'
            '---------------------------------------------------------------------\n'
            'ibeta=%(ibeta)s it=%(it)s iexp=%(iexp)s ngrd=%(ngrd)s irnd=%(irnd)s\n'
            'machep=%(machep)s eps=%(_str_eps)s (beta**machep == epsilon)\n'
            'negep =%(negep)s epsneg=%(_str_epsneg)s (beta**epsneg)\n'
            'minexp=%(minexp)s xmin=%(_str_xmin)s (beta**minexp == tiny)\n'
            'maxexp=%(maxexp)s xmax=%(_str_xmax)s ((1-epsneg)*beta**maxexp == huge)\n'
            'smallest_normal=%(smallest_normal)s '
            'smallest_subnormal=%(smallest_subnormal)s\n'
            '---------------------------------------------------------------------\n'
        )
        return fmt % self.__dict__
353
+
354
+
355
# Manual smoke test: print the parameters discovered for Python's float type.
if __name__ == '__main__':
    print(MachAr())
venv/lib/python3.10/site-packages/numpy/core/_multiarray_tests.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (176 kB). View file
 
venv/lib/python3.10/site-packages/numpy/core/_operand_flag_tests.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (16.9 kB). View file
 
venv/lib/python3.10/site-packages/numpy/core/_rational_tests.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (59.8 kB). View file
 
venv/lib/python3.10/site-packages/numpy/core/_string_helpers.py ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
String-handling utilities to avoid locale-dependence.

Used primarily to generate type name aliases.
"""
# "import string" is costly to import!
# Construct the translation tables directly
#   "A" = chr(65), "a" = chr(97)
_all_chars = tuple(map(chr, range(256)))
_ascii_upper = _all_chars[65:65+26]
_ascii_lower = _all_chars[97:97+26]
# str.translate tables that remap ONLY the 26 ASCII letters, leaving every
# other code point (including locale-sensitive letters) untouched.
LOWER_TABLE = "".join(_all_chars[:65] + _ascii_lower + _all_chars[65+26:])
UPPER_TABLE = "".join(_all_chars[:97] + _ascii_upper + _all_chars[97+26:])
14
+
15
+
16
def english_lower(s):
    """ Apply English case rules to convert ASCII strings to all lower case.

    This is an internal utility function to replace calls to str.lower() such
    that we can avoid changing behavior with changing locales. In particular,
    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
    both lowercase and uppercase. Thus, "I".lower() != "i" in a "tr" locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    lowered : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_lower
    >>> english_lower('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'abcdefghijklmnopqrstuvwxyzabcdefghijklmnopqrstuvwxyz0123456789_'
    >>> english_lower('')
    ''
    """
    # LOWER_TABLE only touches the ASCII letters, so this is locale-safe.
    return s.translate(LOWER_TABLE)
42
+
43
+
44
def english_upper(s):
    """ Apply English case rules to convert ASCII strings to all upper case.

    This is an internal utility function to replace calls to str.upper() such
    that we can avoid changing behavior with changing locales. In particular,
    Turkish has distinct dotted and dotless variants of the Latin letter "I" in
    both lowercase and uppercase. Thus, "i".upper() != "I" in a "tr" locale.

    Parameters
    ----------
    s : str

    Returns
    -------
    uppered : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_upper
    >>> english_upper('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_')
    'ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_'
    >>> english_upper('')
    ''
    """
    # UPPER_TABLE only touches the ASCII letters, so this is locale-safe.
    return s.translate(UPPER_TABLE)
70
+
71
+
72
def english_capitalize(s):
    """ Apply English case rules to convert the first character of an ASCII
    string to upper case.

    This is an internal utility function to replace calls to str.capitalize()
    such that we can avoid changing behavior with changing locales.

    Parameters
    ----------
    s : str

    Returns
    -------
    capitalized : str

    Examples
    --------
    >>> from numpy.core.numerictypes import english_capitalize
    >>> english_capitalize('int8')
    'Int8'
    >>> english_capitalize('Int8')
    'Int8'
    >>> english_capitalize('')
    ''
    """
    # Empty strings pass through unchanged; otherwise only the first
    # character is upper-cased (the rest is left exactly as given).
    if not s:
        return s
    return english_upper(s[0]) + s[1:]
venv/lib/python3.10/site-packages/numpy/core/_struct_ufunc_tests.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (17 kB). View file
 
venv/lib/python3.10/site-packages/numpy/core/_type_aliases.py ADDED
@@ -0,0 +1,245 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Due to compatibility, numpy has a very large number of different naming
3
+ conventions for the scalar types (those subclassing from `numpy.generic`).
4
+ This file produces a convoluted set of dictionaries mapping names to types,
5
+ and sometimes other mappings too.
6
+
7
+ .. data:: allTypes
8
+ A dictionary of names to types that will be exposed as attributes through
9
+ ``np.core.numerictypes.*``
10
+
11
+ .. data:: sctypeDict
12
+ Similar to `allTypes`, but maps a broader set of aliases to their types.
13
+
14
+ .. data:: sctypes
15
+ A dictionary keyed by a "type group" string, providing a list of types
16
+ under that group.
17
+
18
+ """
19
+
20
+ from numpy.compat import unicode
21
+ from numpy.core._string_helpers import english_lower
22
+ from numpy.core.multiarray import typeinfo, dtype
23
+ from numpy.core._dtype import _kind_name
24
+
25
+
26
sctypeDict = {}      # Contains all leaf-node scalar types with aliases
allTypes = {}            # Collect the types we will add to the module


# separate the actual type info from the abstract base classes
# (abstract bases arrive from `typeinfo` as plain `type` objects, concrete
# scalar types as typeinfo records)
_abstract_types = {}
_concrete_typeinfo = {}
for k, v in typeinfo.items():
    # make all the keys lowercase too
    k = english_lower(k)
    if isinstance(v, type):
        _abstract_types[k] = v
    else:
        _concrete_typeinfo[k] = v

# The set of all concrete scalar types (the leaf nodes of the hierarchy).
_concrete_types = {v.type for k, v in _concrete_typeinfo.items()}
42
+
43
+
44
def _bits_of(obj):
    """Return the bit width of the scalar type *obj*.

    Raises ValueError for abstract scalar base classes; for unknown
    (third-party) types, guesses from the dtype itemsize.
    """
    for entry in _concrete_typeinfo.values():
        if entry.type is obj:
            return entry.bits

    if obj in _abstract_types.values():
        msg = "Cannot count the bits of an abstract type"
        raise ValueError(msg) from None

    # some third-party type - make a best-guess
    return dtype(obj).itemsize * 8
56
+
57
+
58
def bitname(obj):
    """Return a bit-width name for a given type object.

    Returns ``(base, bits, char)`` where ``char`` is the kind character,
    suffixed with the byte width for sized types.
    """
    bits = _bits_of(obj)
    dt = dtype(obj)
    kind_char = dt.kind
    base = _kind_name(dt)

    # Object types carry no meaningful bit width.
    if base == 'object':
        bits = 0

    if bits != 0:
        kind_char = f"{kind_char}{bits // 8}"

    return base, bits, kind_char
72
+
73
+
74
def _add_types():
    """Populate ``allTypes``/``sctypeDict`` with the basic scalar types."""
    # Each concrete type is registered under its C-like name, its type
    # character, and its type number.
    for name, info in _concrete_typeinfo.items():
        allTypes[name] = info.type
        for alias in (name, info.char, info.num):
            sctypeDict[alias] = info.type

    # Abstract base classes only go into the module namespace.
    for name, cls in _abstract_types.items():
        allTypes[name] = cls
_add_types()
85
+
86
+ # This is the priority order used to assign the bit-sized NPY_INTxx names, which
87
+ # must match the order in npy_common.h in order for NPY_INTxx and np.intxx to be
88
+ # consistent.
89
+ # If two C types have the same size, then the earliest one in this list is used
90
+ # as the sized name.
91
+ _int_ctypes = ['long', 'longlong', 'int', 'short', 'byte']
92
+ _uint_ctypes = list('u' + t for t in _int_ctypes)
93
+
94
def _add_aliases():
    """Add bit-width names (e.g. ``float64``, ``f8``) for every concrete
    non-integer type; the C integer types are handled separately by
    ``_add_integer_aliases``."""
    for name, info in _concrete_typeinfo.items():
        # these are handled by _add_integer_aliases
        if name in _int_ctypes or name in _uint_ctypes:
            continue

        # insert bit-width version for this class (if relevant)
        base, bit, char = bitname(info.type)

        myname = "%s%d" % (base, bit)

        # ensure that (c)longdouble does not overwrite the aliases assigned to
        # (c)double
        if name in ('longdouble', 'clongdouble') and myname in allTypes:
            continue

        # Add to the main namespace if desired:
        # (bit == 0 means no meaningful width, e.g. object arrays)
        if bit != 0 and base != "bool":
            allTypes[myname] = info.type

        # add forward, reverse, and string mapping to numarray
        sctypeDict[char] = info.type

        # add mapping for both the bit name
        sctypeDict[myname] = info.type


_add_aliases()
122
+
123
def _add_integer_aliases():
    """Add sized aliases (``int8``/``i1`` ... ``uint64``/``u8``) for the C
    integer types.

    ``_int_ctypes`` is ordered by priority: when two C types share a bit
    width, the first one encountered claims the sized name.
    """
    seen_bits = set()
    for i_ctype, u_ctype in zip(_int_ctypes, _uint_ctypes):
        i_info = _concrete_typeinfo[i_ctype]
        u_info = _concrete_typeinfo[u_ctype]
        bits = i_info.bits  # same for both

        for info, charname, intname in [
                (i_info,'i%d' % (bits//8,), 'int%d' % bits),
                (u_info,'u%d' % (bits//8,), 'uint%d' % bits)]:
            if bits not in seen_bits:
                # sometimes two different types have the same number of bits
                # if so, the one iterated over first takes precedence
                allTypes[intname] = info.type
                sctypeDict[intname] = info.type
                sctypeDict[charname] = info.type

        # mark the width as claimed only after both signed and unsigned
        # variants of this C type have been processed
        seen_bits.add(bits)

_add_integer_aliases()
143
+
144
+ # We use these later
145
+ void = allTypes['void']
146
+
147
+ #
148
+ # Rework the Python names (so that float and complex and int are consistent
149
+ # with Python usage)
150
+ #
151
def _set_up_aliases():
    """Install the Python-style alias names (``float_``, ``int_``, ...)
    and then remove aliases that would shadow Python builtins or stdlib
    module names."""
    # (alias, existing-name) pairs; each alias points at the same type
    # object as the existing entry.
    type_pairs = [('complex_', 'cdouble'),
                  ('single', 'float'),
                  ('csingle', 'cfloat'),
                  ('singlecomplex', 'cfloat'),
                  ('float_', 'double'),
                  ('intc', 'int'),
                  ('uintc', 'uint'),
                  ('int_', 'long'),
                  ('uint', 'ulong'),
                  ('cfloat', 'cdouble'),
                  ('longfloat', 'longdouble'),
                  ('clongfloat', 'clongdouble'),
                  ('longcomplex', 'clongdouble'),
                  ('bool_', 'bool'),
                  ('bytes_', 'string'),
                  ('string_', 'string'),
                  ('str_', 'unicode'),
                  ('unicode_', 'unicode'),
                  ('object_', 'object')]
    for alias, t in type_pairs:
        allTypes[alias] = allTypes[t]
        sctypeDict[alias] = sctypeDict[t]
    # Remove aliases overriding python types and modules
    to_remove = ['object', 'int', 'float',
                 'complex', 'bool', 'string', 'datetime', 'timedelta',
                 'bytes', 'str']

    for t in to_remove:
        # NOTE(review): if ``t`` is missing from ``allTypes`` the
        # ``sctypeDict`` deletion on the next line is skipped as well —
        # looks intentional for paired entries, but verify.
        try:
            del allTypes[t]
            del sctypeDict[t]
        except KeyError:
            pass

    # Additional aliases in sctypeDict that should not be exposed as attributes
    attrs_to_remove = ['ulong']

    for t in attrs_to_remove:
        try:
            del allTypes[t]
        except KeyError:
            pass
_set_up_aliases()
195
+
196
+
197
# Scalar types grouped by kind; the int/uint/float/complex lists are
# populated later by ``_set_array_types``.
sctypes = {'int': [],
           'uint':[],
           'float':[],
           'complex':[],
           'others':[bool, object, bytes, unicode, void]}
202
+
203
def _add_array_type(typename, bits):
    """Append the ``<typename><bits>`` scalar type to ``sctypes[typename]``,
    silently doing nothing when no type of that width exists."""
    key = '%s%d' % (typename, bits)
    if key in allTypes:
        sctypes[typename].append(allTypes[key])
210
+
211
def _set_array_types():
    """Fill ``sctypes`` with every bit-sized int/uint/float/complex type
    available on this platform, then make sure the pointer-sized integer
    is present, keeping the lists ordered by item size."""
    ibytes = [1, 2, 4, 8, 16, 32, 64]
    fbytes = [2, 4, 8, 10, 12, 16, 32, 64]
    for bytes in ibytes:
        bits = 8*bytes
        _add_array_type('int', bits)
        _add_array_type('uint', bits)
    for bytes in fbytes:
        bits = 8*bytes
        _add_array_type('float', bits)
        # a complex type is two floats wide
        _add_array_type('complex', 2*bits)
    # 'p'/'P' are the signed/unsigned pointer-sized integer type codes
    _gi = dtype('p')
    if _gi.type not in sctypes['int']:
        indx = 0
        sz = _gi.itemsize
        _lst = sctypes['int']
        # find the insertion point that keeps the list sorted by itemsize;
        # ``_lst[indx](0)`` instantiates the scalar to query its itemsize
        while (indx < len(_lst) and sz >= _lst[indx](0).itemsize):
            indx += 1
        sctypes['int'].insert(indx, _gi.type)
        sctypes['uint'].insert(indx, dtype('P').type)
_set_array_types()
232
+
233
+
234
# Add additional strings to the sctypeDict
_toadd = ['int', 'float', 'complex', 'bool', 'object',
          'str', 'bytes', ('a', 'bytes_'),
          ('int0', 'intp'), ('uint0', 'uintp')]

for name in _toadd:
    if isinstance(name, tuple):
        # A tuple registers an extra key (first item) for an existing
        # allTypes entry (second item).
        sctypeDict[name[0]] = allTypes[name[1]]
    else:
        # A bare string maps e.g. 'int' onto the 'int_' scalar type.
        sctypeDict[name] = allTypes['%s_' % name]

del _toadd, name
venv/lib/python3.10/site-packages/numpy/core/_ufunc_config.py ADDED
@@ -0,0 +1,466 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Functions for changing global ufunc configuration
3
+
4
+ This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj`
5
+ """
6
+ import collections.abc
7
+ import contextlib
8
+ import contextvars
9
+
10
+ from .._utils import set_module
11
+ from .umath import (
12
+ UFUNC_BUFSIZE_DEFAULT,
13
+ ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, ERR_DEFAULT,
14
+ SHIFT_DIVIDEBYZERO, SHIFT_OVERFLOW, SHIFT_UNDERFLOW, SHIFT_INVALID,
15
+ )
16
+ from . import umath
17
+
18
+ __all__ = [
19
+ "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
20
+ "errstate", '_no_nep50_warning'
21
+ ]
22
+
23
# Map the user-facing error-mode strings onto the integer flags from umath.
_errdict = {"ignore": ERR_IGNORE,
            "warn": ERR_WARN,
            "raise": ERR_RAISE,
            "call": ERR_CALL,
            "print": ERR_PRINT,
            "log": ERR_LOG}

# Inverse mapping, used by ``geterr`` to decode the packed error mask.
_errdict_rev = {value: key for key, value in _errdict.items()}
31
+
32
+
33
@set_module('numpy')
def seterr(all=None, divide=None, over=None, under=None, invalid=None):
    """
    Set how floating-point errors are handled.

    Note that operations on integer scalar types (such as `int16`) are
    handled like floating point, and are affected by these settings.

    Parameters
    ----------
    all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Treatment applied to every error category at once:

        - ignore: Take no action when the exception occurs.
        - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
        - raise: Raise a `FloatingPointError`.
        - call: Call a function specified using the `seterrcall` function.
        - print: Print a warning directly to ``stdout``.
        - log: Record error in a Log object specified by `seterrcall`.

        The default is not to change the current behavior.
    divide, over, under, invalid : \
            {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
        Per-category treatment for division by zero, floating-point
        overflow, underflow and invalid operation respectively.

    Returns
    -------
    old_settings : dict
        Dictionary containing the old settings.

    See also
    --------
    seterrcall : Set a callback function for the 'call' mode.
    geterr, geterrcall, errstate

    Examples
    --------
    >>> old_settings = np.seterr(all='ignore')  #seterr to known value
    >>> np.seterr(over='raise')
    {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
    >>> np.seterr(**old_settings)  # reset to default
    {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'}

    """
    errobj = umath.geterrobj()
    old = geterr()

    # Each unspecified category falls back on ``all`` when given,
    # otherwise on the current setting.
    divide = divide if divide is not None else (all or old['divide'])
    over = over if over is not None else (all or old['over'])
    under = under if under is not None else (all or old['under'])
    invalid = invalid if invalid is not None else (all or old['invalid'])

    # Pack the four modes into one integer, one bit-field per category.
    maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
                 (_errdict[over] << SHIFT_OVERFLOW) +
                 (_errdict[under] << SHIFT_UNDERFLOW) +
                 (_errdict[invalid] << SHIFT_INVALID))

    errobj[1] = maskvalue
    umath.seterrobj(errobj)
    return old
130
+
131
+
132
@set_module('numpy')
def geterr():
    """
    Get the current way of handling floating-point errors.

    Returns
    -------
    res : dict
        A dictionary with keys "divide", "over", "under", and "invalid",
        whose values are from the strings "ignore", "print", "log", "warn",
        "raise", and "call". The keys represent possible floating-point
        exceptions, and the values define how these exceptions are handled.

    See Also
    --------
    geterrcall, seterr, seterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.
    """
    maskvalue = umath.geterrobj()[1]
    mask = 7  # each category occupies a 3-bit field in the packed mask
    return {
        'divide': _errdict_rev[(maskvalue >> SHIFT_DIVIDEBYZERO) & mask],
        'over': _errdict_rev[(maskvalue >> SHIFT_OVERFLOW) & mask],
        'under': _errdict_rev[(maskvalue >> SHIFT_UNDERFLOW) & mask],
        'invalid': _errdict_rev[(maskvalue >> SHIFT_INVALID) & mask],
    }
180
+
181
+
182
@set_module('numpy')
def setbufsize(size):
    """
    Set the size of the buffer used in ufuncs.

    Parameters
    ----------
    size : int
        Size of buffer.

    """
    # Validation checks in priority order; the first failing check wins.
    checks = (
        (size > 10e6, "Buffer size, %s, is too big." % size),
        (size < 5, "Buffer size, %s, is too small." % size),
        (size % 16 != 0, "Buffer size, %s, is not a multiple of 16." % size),
    )
    for failed, message in checks:
        if failed:
            raise ValueError(message)

    errobj = umath.geterrobj()
    previous = getbufsize()
    errobj[0] = size  # slot 0 of the shared error object is the bufsize
    umath.seterrobj(errobj)
    return previous
205
+
206
+
207
@set_module('numpy')
def getbufsize():
    """
    Return the size of the buffer used in ufuncs.

    Returns
    -------
    getbufsize : int
        Size of ufunc buffer in bytes.

    """
    # Slot 0 of the shared error-state object holds the buffer size.
    errobj = umath.geterrobj()
    return errobj[0]
219
+
220
+
221
@set_module('numpy')
def seterrcall(func):
    """
    Set the floating-point error callback function or log object.

    There are two ways to capture floating-point error messages: set the
    error handler to 'call' with `seterr` and provide a callable here, or
    set it to 'log' and provide an object with a ``write`` method.

    Parameters
    ----------
    func : callable f(err, flag) or object with write method
        Function to call upon floating-point errors ('call'-mode) or
        object whose 'write' method is used to log such message ('log'-mode).

        The call function takes two arguments: a string describing the
        type of error (such as "divide by zero", "overflow", "underflow",
        or "invalid value") and the status flag, a byte whose four
        least-significant bits indicate the error category::

            [0 0 0 0 divide over under invalid]

        In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.

        If an object is provided, its write method should take one
        argument, a string.

    Returns
    -------
    h : callable, log instance or None
        The old error handler.

    See Also
    --------
    seterr, geterr, geterrcall

    Examples
    --------
    >>> def err_handler(type, flag):
    ...     print("Floating point error (%s), with flag %s" % (type, flag))
    ...
    >>> saved_handler = np.seterrcall(err_handler)
    >>> save_err = np.seterr(all='call')
    >>> np.array([1, 2, 3]) / 0.0
    Floating point error (divide by zero), with flag 1
    array([inf, inf, inf])

    """
    if func is not None and not isinstance(func, collections.abc.Callable):
        # Not directly callable: accept a log-like object instead, i.e.
        # anything exposing a callable ``write`` method.
        write = getattr(func, 'write', None)
        if not isinstance(write, collections.abc.Callable):
            raise ValueError("Only callable can be used as callback")
    errobj = umath.geterrobj()
    previous = geterrcall()
    errobj[2] = func  # slot 2 of the shared error object is the callback
    umath.seterrobj(errobj)
    return previous
312
+
313
+
314
@set_module('numpy')
def geterrcall():
    """
    Return the current callback function used on floating-point errors.

    When the error handling for a floating-point error (one of "divide",
    "over", "under", or "invalid") is set to 'call' or 'log', this returns
    the function that is called or the log instance that is written to,
    as previously installed by `seterrcall`.

    Returns
    -------
    errobj : callable, log instance or None
        The current error handler. If no handler was set through
        `seterrcall`, ``None`` is returned.

    See Also
    --------
    seterrcall, seterr, geterr

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.
    """
    # Slot 2 of the shared error-state object holds the callback.
    errobj = umath.geterrobj()
    return errobj[2]
358
+
359
+
360
class _unspecified:
    # Sentinel class: an ``_unspecified`` instance marks "argument not
    # given" where ``None`` is itself a legal value (see ``errstate``).
    pass


# The single shared sentinel instance.
_Unspecified = _unspecified()
365
+
366
+
367
@set_module('numpy')
class errstate(contextlib.ContextDecorator):
    """
    errstate(**kwargs)

    Context manager for floating-point error handling.

    Statements executed inside the context run with the error handling
    described by the keyword arguments (installed with `seterr` and, if
    ``call=`` is given, `seterrcall`); on exit the previous handling is
    restored.

    .. versionchanged:: 1.17.0
        `errstate` is also usable as a function decorator, saving
        a level of indentation if an entire function is wrapped.
        See :py:class:`contextlib.ContextDecorator` for more information.

    Parameters
    ----------
    kwargs : {divide, over, under, invalid}
        Each keyword maps a floating-point error category onto one of
        {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.

    See Also
    --------
    seterr, geterr, seterrcall, geterrcall

    Notes
    -----
    For complete documentation of the types of floating-point exceptions and
    treatment options, see `seterr`.

    Examples
    --------
    >>> with np.errstate(invalid='raise'):
    ...     np.sqrt(-1)
    Traceback (most recent call last):
      ...
    FloatingPointError: invalid value encountered in sqrt

    """

    def __init__(self, *, call=_Unspecified, **kwargs):
        # ``call`` defaults to a sentinel so that ``call=None`` (clear the
        # handler) can be distinguished from "call not given".
        self.call = call
        self.kwargs = kwargs

    def __enter__(self):
        self.oldstate = seterr(**self.kwargs)
        if self.call is not _Unspecified:
            self.oldcall = seterrcall(self.call)

    def __exit__(self, *exc_info):
        seterr(**self.oldstate)
        if self.call is not _Unspecified:
            seterrcall(self.oldcall)
439
+
440
+
441
def _setdef():
    """Install the default error object: default buffer size, default
    error mask, no callback."""
    umath.seterrobj([UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None])


# set the default values
_setdef()
448
+
449
+
450
# Context-local flag; when True, NEP 50 promotion warnings are suppressed
# for the current context.
NO_NEP50_WARNING = contextvars.ContextVar("_no_nep50_warning", default=False)

@set_module('numpy')
@contextlib.contextmanager
def _no_nep50_warning():
    """
    Context manager to disable NEP 50 warnings. This context manager is
    only relevant if the NEP 50 warnings are enabled globally (which is not
    thread/context safe).

    This warning context manager itself is fully safe, however.
    """
    token = NO_NEP50_WARNING.set(True)
    try:
        yield
    finally:
        # Restore the previous value even if the body raised.
        NO_NEP50_WARNING.reset(token)
venv/lib/python3.10/site-packages/numpy/core/_ufunc_config.pyi ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from collections.abc import Callable
from typing import Any, Literal, TypedDict

from numpy import _SupportsWrite

# Error-mode strings accepted by ``seterr`` and friends.
_ErrKind = Literal["ignore", "warn", "raise", "call", "print", "log"]
# A 'call'-mode error handler: ``f(error_kind, status_flag)``.
_ErrFunc = Callable[[str, int], Any]

class _ErrDict(TypedDict):
    # Shape of the dict returned by ``geterr`` and ``seterr``.
    divide: _ErrKind
    over: _ErrKind
    under: _ErrKind
    invalid: _ErrKind

class _ErrDictOptional(TypedDict, total=False):
    # Optional keyword arguments accepted by ``seterr``.
    all: None | _ErrKind
    divide: None | _ErrKind
    over: None | _ErrKind
    under: None | _ErrKind
    invalid: None | _ErrKind

def seterr(
    all: None | _ErrKind = ...,
    divide: None | _ErrKind = ...,
    over: None | _ErrKind = ...,
    under: None | _ErrKind = ...,
    invalid: None | _ErrKind = ...,
) -> _ErrDict: ...
def geterr() -> _ErrDict: ...
def setbufsize(size: int) -> int: ...
def getbufsize() -> int: ...
def seterrcall(
    func: None | _ErrFunc | _SupportsWrite[str]
) -> None | _ErrFunc | _SupportsWrite[str]: ...
def geterrcall() -> None | _ErrFunc | _SupportsWrite[str]: ...

# See `numpy/__init__.pyi` for the `errstate` class and `no_nep5_warnings`
venv/lib/python3.10/site-packages/numpy/core/_umath_tests.cpython-310-x86_64-linux-gnu.so ADDED
Binary file (42.3 kB). View file
 
venv/lib/python3.10/site-packages/numpy/core/arrayprint.pyi ADDED
@@ -0,0 +1,142 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from types import TracebackType
2
+ from collections.abc import Callable
3
+ from typing import Any, Literal, TypedDict, SupportsIndex
4
+
5
+ # Using a private class is by no means ideal, but it is simply a consequence
6
+ # of a `contextlib.context` returning an instance of aforementioned class
7
+ from contextlib import _GeneratorContextManager
8
+
9
+ from numpy import (
10
+ ndarray,
11
+ generic,
12
+ bool_,
13
+ integer,
14
+ timedelta64,
15
+ datetime64,
16
+ floating,
17
+ complexfloating,
18
+ void,
19
+ str_,
20
+ bytes_,
21
+ longdouble,
22
+ clongdouble,
23
+ )
24
+ from numpy._typing import ArrayLike, _CharLike_co, _FloatLike_co
25
+
26
+ _FloatMode = Literal["fixed", "unique", "maxprec", "maxprec_equal"]
27
+
28
class _FormatDict(TypedDict, total=False):
    # Per-type custom formatter callbacks accepted via the ``formatter``
    # argument of ``set_printoptions``/``array2string``; the ``*_kind``
    # keys cover whole kinds and ``all`` covers every type.
    bool: Callable[[bool_], str]
    int: Callable[[integer[Any]], str]
    timedelta: Callable[[timedelta64], str]
    datetime: Callable[[datetime64], str]
    float: Callable[[floating[Any]], str]
    longfloat: Callable[[longdouble], str]
    complexfloat: Callable[[complexfloating[Any, Any]], str]
    longcomplexfloat: Callable[[clongdouble], str]
    void: Callable[[void], str]
    numpystr: Callable[[_CharLike_co], str]
    object: Callable[[object], str]
    all: Callable[[object], str]
    int_kind: Callable[[integer[Any]], str]
    float_kind: Callable[[floating[Any]], str]
    complex_kind: Callable[[complexfloating[Any, Any]], str]
    str_kind: Callable[[_CharLike_co], str]
45
+
46
class _FormatOptions(TypedDict):
    # Snapshot of the global printing options, as returned by
    # ``get_printoptions`` and yielded by ``printoptions``.
    precision: int
    threshold: int
    edgeitems: int
    linewidth: int
    suppress: bool
    nanstr: str
    infstr: str
    formatter: None | _FormatDict
    sign: Literal["-", "+", " "]
    floatmode: _FloatMode
    legacy: Literal[False, "1.13", "1.21"]
58
+
59
+ def set_printoptions(
60
+ precision: None | SupportsIndex = ...,
61
+ threshold: None | int = ...,
62
+ edgeitems: None | int = ...,
63
+ linewidth: None | int = ...,
64
+ suppress: None | bool = ...,
65
+ nanstr: None | str = ...,
66
+ infstr: None | str = ...,
67
+ formatter: None | _FormatDict = ...,
68
+ sign: Literal[None, "-", "+", " "] = ...,
69
+ floatmode: None | _FloatMode = ...,
70
+ *,
71
+ legacy: Literal[None, False, "1.13", "1.21"] = ...
72
+ ) -> None: ...
73
+ def get_printoptions() -> _FormatOptions: ...
74
+ def array2string(
75
+ a: ndarray[Any, Any],
76
+ max_line_width: None | int = ...,
77
+ precision: None | SupportsIndex = ...,
78
+ suppress_small: None | bool = ...,
79
+ separator: str = ...,
80
+ prefix: str = ...,
81
+ # NOTE: With the `style` argument being deprecated,
82
+ # all arguments between `formatter` and `suffix` are de facto
83
+ # keyworld-only arguments
84
+ *,
85
+ formatter: None | _FormatDict = ...,
86
+ threshold: None | int = ...,
87
+ edgeitems: None | int = ...,
88
+ sign: Literal[None, "-", "+", " "] = ...,
89
+ floatmode: None | _FloatMode = ...,
90
+ suffix: str = ...,
91
+ legacy: Literal[None, False, "1.13", "1.21"] = ...,
92
+ ) -> str: ...
93
+ def format_float_scientific(
94
+ x: _FloatLike_co,
95
+ precision: None | int = ...,
96
+ unique: bool = ...,
97
+ trim: Literal["k", ".", "0", "-"] = ...,
98
+ sign: bool = ...,
99
+ pad_left: None | int = ...,
100
+ exp_digits: None | int = ...,
101
+ min_digits: None | int = ...,
102
+ ) -> str: ...
103
+ def format_float_positional(
104
+ x: _FloatLike_co,
105
+ precision: None | int = ...,
106
+ unique: bool = ...,
107
+ fractional: bool = ...,
108
+ trim: Literal["k", ".", "0", "-"] = ...,
109
+ sign: bool = ...,
110
+ pad_left: None | int = ...,
111
+ pad_right: None | int = ...,
112
+ min_digits: None | int = ...,
113
+ ) -> str: ...
114
+ def array_repr(
115
+ arr: ndarray[Any, Any],
116
+ max_line_width: None | int = ...,
117
+ precision: None | SupportsIndex = ...,
118
+ suppress_small: None | bool = ...,
119
+ ) -> str: ...
120
+ def array_str(
121
+ a: ndarray[Any, Any],
122
+ max_line_width: None | int = ...,
123
+ precision: None | SupportsIndex = ...,
124
+ suppress_small: None | bool = ...,
125
+ ) -> str: ...
126
+ def set_string_function(
127
+ f: None | Callable[[ndarray[Any, Any]], str], repr: bool = ...
128
+ ) -> None: ...
129
+ def printoptions(
130
+ precision: None | SupportsIndex = ...,
131
+ threshold: None | int = ...,
132
+ edgeitems: None | int = ...,
133
+ linewidth: None | int = ...,
134
+ suppress: None | bool = ...,
135
+ nanstr: None | str = ...,
136
+ infstr: None | str = ...,
137
+ formatter: None | _FormatDict = ...,
138
+ sign: Literal[None, "-", "+", " "] = ...,
139
+ floatmode: None | _FloatMode = ...,
140
+ *,
141
+ legacy: Literal[None, False, "1.13", "1.21"] = ...
142
+ ) -> _GeneratorContextManager[_FormatOptions]: ...
venv/lib/python3.10/site-packages/numpy/core/cversions.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Simple script to compute the api hash of the current API.

The API hash is defined by numpy_api_order and ufunc_api_order.

"""
from code_generators.genapi import fullapi_hash
from code_generators.numpy_api import full_api

if __name__ == '__main__':
    # Print the hash of the full (array + ufunc) API definition.
    print(fullapi_hash(full_api))
venv/lib/python3.10/site-packages/numpy/core/defchararray.py ADDED
@@ -0,0 +1,2914 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module contains a set of functions for vectorized string
3
+ operations and methods.
4
+
5
+ .. note::
6
+ The `chararray` class exists for backwards compatibility with
7
+ Numarray, it is not recommended for new development. Starting from numpy
8
+ 1.4, if one needs arrays of strings, it is recommended to use arrays of
9
+ `dtype` `object_`, `bytes_` or `str_`, and use the free functions
10
+ in the `numpy.char` module for fast vectorized string operations.
11
+
12
+ Some methods will only be available if the corresponding string method is
13
+ available in your version of Python.
14
+
15
+ The preferred alias for `defchararray` is `numpy.char`.
16
+
17
+ """
18
+ import functools
19
+
20
+ from .._utils import set_module
21
+ from .numerictypes import (
22
+ bytes_, str_, integer, int_, object_, bool_, character)
23
+ from .numeric import ndarray, compare_chararrays
24
+ from .numeric import array as narray
25
+ from numpy.core.multiarray import _vec_string
26
+ from numpy.core import overrides
27
+ from numpy.compat import asbytes
28
+ import numpy
29
+
30
# Public names exported by this module; these mirror the Python ``str``
# method names, plus the comparison functions and array constructors.
__all__ = [
    'equal', 'not_equal', 'greater_equal', 'less_equal',
    'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
    'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
    'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
    'istitle', 'isupper', 'join', 'ljust', 'lower', 'lstrip', 'partition',
    'replace', 'rfind', 'rindex', 'rjust', 'rpartition', 'rsplit',
    'rstrip', 'split', 'splitlines', 'startswith', 'strip', 'swapcase',
    'title', 'translate', 'upper', 'zfill', 'isnumeric', 'isdecimal',
    'array', 'asarray'
    ]


# NOTE(review): purpose not visible in this chunk — presumably a legacy
# sentinel used by chararray construction further down; confirm before use.
_globalvar = 0

# Dispatcher factory pre-bound so that every function defined here reports
# 'numpy.char' as its public module in __array_function__ dispatch.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy.char')
47
+
48
+
49
def _is_unicode(arr):
    """Return True when *arr* is a Python ``str`` or an array whose dtype
    is a unicode string type; return False otherwise.
    """
    if isinstance(arr, str):
        return True
    # Coerce to an array and inspect the dtype's scalar type instead.
    return issubclass(numpy.asarray(arr).dtype.type, str)
58
+
59
+
60
def _to_bytes_or_str_array(result, output_dtype_like=None):
    """
    Cast *result* (typically an object array used as an intermediary)
    back to a string array whose dtype kind matches *output_dtype_like*,
    sized to the widest converted element.
    """
    converted = numpy.asarray(result.tolist())
    like_dtype = getattr(output_dtype_like, 'dtype', None)
    if like_dtype is None:
        return converted
    # Rebuild a dtype of the same kind (bytes_/str_) that is wide enough
    # to hold the converted elements, then cast without copying if possible.
    target = type(like_dtype)(_get_num_chars(converted))
    return converted.astype(target, copy=False)
71
+
72
+
73
def _clean_args(*args):
    """
    Helper for delegating optional arguments to Python string methods.

    Many ``str`` methods with optional arguments do not accept ``None``
    as "use the default", so this drops the first ``None`` and every
    argument after it.
    """
    for position, value in enumerate(args):
        if value is None:
            return list(args[:position])
    return list(args)
88
+
89
def _get_num_chars(a):
    """
    Return the number of characters per element of a string array.

    Unicode arrays store one UCS4 code point per character (4 bytes),
    so their itemsize is divided by 4; byte-string arrays map one byte
    to one character.
    """
    is_unicode = issubclass(a.dtype.type, str_)
    return a.itemsize // 4 if is_unicode else a.itemsize
98
+
99
+
100
def _binary_op_dispatcher(x1, x2):
    # Both operands are relevant for __array_function__ dispatch.
    return x1, x2
102
+
103
+
104
@array_function_dispatch(_binary_op_dispatcher)
def equal(x1, x2):
    """
    Return (x1 == x2) element-wise.

    Unlike `numpy.equal`, trailing whitespace is stripped from each
    string before comparing; this preserves backward compatibility
    with numarray.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    not_equal, greater_equal, less_equal, greater, less
    """
    # Final True enables the numarray-compatible rstrip behaviour.
    return compare_chararrays(x1, x2, '==', True)
128
+
129
+
130
@array_function_dispatch(_binary_op_dispatcher)
def not_equal(x1, x2):
    """
    Return (x1 != x2) element-wise.

    Unlike `numpy.not_equal`, trailing whitespace is stripped from each
    string before comparing; this preserves backward compatibility
    with numarray.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    equal, greater_equal, less_equal, greater, less
    """
    # Final True enables the numarray-compatible rstrip behaviour.
    return compare_chararrays(x1, x2, '!=', True)
154
+
155
+
156
@array_function_dispatch(_binary_op_dispatcher)
def greater_equal(x1, x2):
    """
    Return (x1 >= x2) element-wise.

    Unlike `numpy.greater_equal`, trailing whitespace is stripped from
    each string before comparing; this preserves backward compatibility
    with numarray.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    equal, not_equal, less_equal, greater, less
    """
    # Final True enables the numarray-compatible rstrip behaviour.
    return compare_chararrays(x1, x2, '>=', True)
181
+
182
+
183
@array_function_dispatch(_binary_op_dispatcher)
def less_equal(x1, x2):
    """
    Return (x1 <= x2) element-wise.

    Unlike `numpy.less_equal`, trailing whitespace is stripped from each
    string before comparing; this preserves backward compatibility
    with numarray.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    equal, not_equal, greater_equal, greater, less
    """
    # Final True enables the numarray-compatible rstrip behaviour.
    return compare_chararrays(x1, x2, '<=', True)
207
+
208
+
209
@array_function_dispatch(_binary_op_dispatcher)
def greater(x1, x2):
    """
    Return (x1 > x2) element-wise.

    Unlike `numpy.greater`, trailing whitespace is stripped from each
    string before comparing; this preserves backward compatibility
    with numarray.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    equal, not_equal, greater_equal, less_equal, less
    """
    # Final True enables the numarray-compatible rstrip behaviour.
    return compare_chararrays(x1, x2, '>', True)
233
+
234
+
235
@array_function_dispatch(_binary_op_dispatcher)
def less(x1, x2):
    """
    Return (x1 < x2) element-wise.

    Unlike `numpy.less`, this comparison is performed by first
    stripping whitespace characters from the end of the string. This
    behavior is provided for backward-compatibility with numarray.

    Parameters
    ----------
    x1, x2 : array_like of str or unicode
        Input arrays of the same shape.

    Returns
    -------
    out : ndarray
        Output array of bools.

    See Also
    --------
    equal, not_equal, greater_equal, less_equal, greater
    """
    # Final True enables the numarray-compatible rstrip behaviour.
    return compare_chararrays(x1, x2, '<', True)
259
+
260
+
261
def _unary_op_dispatcher(a):
    # The single array argument is all that matters for dispatch.
    return (a,)
263
+
264
+
265
@array_function_dispatch(_unary_op_dispatcher)
def str_len(a):
    """
    Return len(a) element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of integers

    See Also
    --------
    len

    Examples
    --------
    >>> a = np.array(['Grace Hopper Conference', 'Open Source Day'])
    >>> np.char.str_len(a)
    array([23, 15])
    >>> a = np.array([u'\u0420', u'\u043e'])
    >>> np.char.str_len(a)
    array([1, 1])
    >>> a = np.array([['hello', 'world'], [u'\u0420', u'\u043e']])
    >>> np.char.str_len(a)
    array([[5, 5], [1, 1]])
    """
    # __len__ yields Python ints rather than C integers; int_ is wide
    # enough here since a length can never exceed the dtype itemsize.
    return _vec_string(a, int_, '__len__')
299
+
300
+
301
@array_function_dispatch(_binary_op_dispatcher)
def add(x1, x2):
    """
    Return element-wise string concatenation for two arrays of str or unicode.

    Arrays `x1` and `x2` must have the same shape.

    Parameters
    ----------
    x1 : array_like of str or unicode
        Input array.
    x2 : array_like of str or unicode
        Input array.

    Returns
    -------
    add : ndarray
        Output array of `bytes_` or `str_`, depending on input types
        of the same shape as `x1` and `x2`.

    """
    a_arr = numpy.asarray(x1)
    b_arr = numpy.asarray(x2)
    # The result must be wide enough to hold both operands concatenated.
    out_size = _get_num_chars(a_arr) + _get_num_chars(b_arr)

    if type(a_arr.dtype) != type(b_arr.dtype):
        # Mixed dtype kinds (bytes + unicode, unicode + object, ...) never
        # behaved sensibly on Python 3, so they are rejected outright until
        # add is reimplemented as a proper ufunc.
        raise TypeError(
            "np.char.add() requires both arrays of the same dtype kind, but "
            f"got dtypes: '{a_arr.dtype}' and '{b_arr.dtype}' (the few cases "
            "where this used to work often lead to incorrect results).")

    return _vec_string(a_arr, type(a_arr.dtype)(out_size), '__add__', (b_arr,))
338
+
339
def _multiply_dispatcher(a, i):
    # Only the string array takes part in __array_function__ dispatch.
    return (a,)


@array_function_dispatch(_multiply_dispatcher)
def multiply(a, i):
    """
    Return (a * i), that is string multiple concatenation,
    element-wise.

    Values in `i` of less than 0 are treated as 0 (which yields an
    empty string).

    Parameters
    ----------
    a : array_like of str or unicode

    i : array_like of ints

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input types

    Raises
    ------
    ValueError
        If `i` is not an integer array.

    Examples
    --------
    >>> a = np.array(["a", "b", "c"])
    >>> np.char.multiply(a, 3)
    array(['aaa', 'bbb', 'ccc'], dtype='<U3')
    >>> i = np.array([1, 2, 3])
    >>> np.char.multiply(a, i)
    array(['a', 'bb', 'ccc'], dtype='<U3')
    >>> np.char.multiply(np.array(['a']), i)
    array(['a', 'aa', 'aaa'], dtype='<U3')
    >>> a = np.array(['a', 'b', 'c', 'd', 'e', 'f']).reshape((2, 3))
    >>> np.char.multiply(a, 3)
    array([['aaa', 'bbb', 'ccc'],
           ['ddd', 'eee', 'fff']], dtype='<U3')
    >>> np.char.multiply(a, i)
    array([['a', 'bb', 'ccc'],
           ['d', 'ee', 'fff']], dtype='<U3')
    """
    a_arr = numpy.asarray(a)
    i_arr = numpy.asarray(i)
    if not issubclass(i_arr.dtype.type, integer):
        raise ValueError("Can only multiply by integers")
    # Size the output for the largest repeat count; negative counts
    # produce empty strings, hence the clamp to zero.
    out_size = _get_num_chars(a_arr) * max(int(i_arr.max()), 0)
    return _vec_string(
        a_arr, type(a_arr.dtype)(out_size), '__mul__', (i_arr,))
388
+
389
+
390
def _mod_dispatcher(a, values):
    # Both the format array and the values participate in dispatch.
    return (a, values)


@array_function_dispatch(_mod_dispatcher)
def mod(a, values):
    """
    Return (a % i), that is pre-Python 2.6 string formatting
    (interpolation), element-wise for a pair of array_likes of str
    or unicode.

    Parameters
    ----------
    a : array_like of str or unicode

    values : array_like of values
        These values will be element-wise interpolated into the string.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input types

    See Also
    --------
    str.__mod__

    """
    # Interpolate via an object array first, then restore the string dtype.
    interpolated = _vec_string(a, object_, '__mod__', (values,))
    return _to_bytes_or_str_array(interpolated, a)
420
+
421
+
422
@array_function_dispatch(_unary_op_dispatcher)
def capitalize(a):
    """
    Return a copy of `a` with only the first character of each element
    capitalized.

    Calls `str.capitalize` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode
        Input array of strings to capitalize.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input
        types

    See Also
    --------
    str.capitalize

    Examples
    --------
    >>> c = np.array(['a1b2','1b2a','b2a1','2a1b'],'S4'); c
    array(['a1b2', '1b2a', 'b2a1', '2a1b'],
        dtype='|S4')
    >>> np.char.capitalize(c)
    array(['A1b2', '1b2a', 'B2a1', '2a1b'],
        dtype='|S4')

    """
    # The output dtype is unchanged: capitalizing never widens a string.
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'capitalize')
459
+
460
+
461
def _center_dispatcher(a, width, fillchar=None):
    # Only the string array participates in dispatch.
    return (a,)


@array_function_dispatch(_center_dispatcher)
def center(a, width, fillchar=' '):
    """
    Return a copy of `a` with its elements centered in a string of
    length `width`.

    Calls `str.center` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    width : int
        The length of the resulting strings
    fillchar : str or unicode, optional
        The padding character to use (default is space).

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input
        types

    See Also
    --------
    str.center

    Notes
    -----
    This function is intended to work with arrays of strings.  The
    fill character is not applied to numeric types.

    Examples
    --------
    >>> c = np.array(['a1b2','1b2a','b2a1','2a1b']); c
    array(['a1b2', '1b2a', 'b2a1', '2a1b'], dtype='<U4')
    >>> np.char.center(c, width=9)
    array(['   a1b2  ', '   1b2a  ', '   b2a1  ', '   2a1b  '], dtype='<U9')
    >>> np.char.center(c, width=9, fillchar='*')
    array(['***a1b2**', '***1b2a**', '***b2a1**', '***2a1b**'], dtype='<U9')
    >>> np.char.center(c, width=1)
    array(['a', '1', 'b', '2'], dtype='<U1')

    """
    arr = numpy.asarray(a)
    widths = numpy.asarray(width)
    # Output dtype must accommodate the widest requested width.
    out_len = int(numpy.max(widths.flat))
    if numpy.issubdtype(arr.dtype, numpy.bytes_):
        # Byte-string elements need a byte fill character.
        fillchar = asbytes(fillchar)
    return _vec_string(
        arr, type(arr.dtype)(out_len), 'center', (widths, fillchar))
516
+
517
+
518
def _count_dispatcher(a, sub, start=None, end=None):
    # Shared by count/find/index: only the array argument dispatches.
    return (a,)
520
+
521
+
522
@array_function_dispatch(_count_dispatcher)
def count(a, sub, start=0, end=None):
    """
    Returns an array with the number of non-overlapping occurrences of
    substring `sub` in the range [`start`, `end`].

    Calls `str.count` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    sub : str or unicode
        The substring to search for.

    start, end : int, optional
        Optional arguments `start` and `end` are interpreted as slice
        notation to specify the range in which to count.

    Returns
    -------
    out : ndarray
        Output array of ints.

    See Also
    --------
    str.count

    Examples
    --------
    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
    >>> c
    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
    >>> np.char.count(c, 'A')
    array([3, 1, 1])
    >>> np.char.count(c, 'aA')
    array([3, 1, 0])
    >>> np.char.count(c, 'A', start=1, end=4)
    array([2, 1, 1])
    >>> np.char.count(c, 'A', start=1, end=3)
    array([1, 0, 0])

    """
    # A trailing None `end` must be dropped: str.count rejects None there.
    call_args = [sub, start] + _clean_args(end)
    return _vec_string(a, int_, 'count', call_args)
566
+
567
+
568
def _code_dispatcher(a, encoding=None, errors=None):
    # Shared by decode/encode: only the array argument dispatches.
    return (a,)
570
+
571
+
572
@array_function_dispatch(_code_dispatcher)
def decode(a, encoding=None, errors=None):
    r"""
    Calls ``bytes.decode`` element-wise.

    The set of available codecs comes from the Python standard library,
    and may be extended at runtime.  For more information, see the
    :mod:`codecs` module.

    Parameters
    ----------
    a : array_like of str or unicode

    encoding : str, optional
       The name of an encoding

    errors : str, optional
       Specifies how to handle encoding errors

    Returns
    -------
    out : ndarray

    See Also
    --------
    :py:meth:`bytes.decode`

    Notes
    -----
    The type of the result will depend on the encoding specified.

    Examples
    --------
    >>> c = np.array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
    ...               b'\x81\x82\xc2\xc1\xc2\x82\x81'])
    >>> c
    array([b'\x81\xc1\x81\xc1\x81\xc1', b'@@\x81\xc1@@',
    ...    b'\x81\x82\xc2\xc1\xc2\x82\x81'], dtype='|S7')
    >>> np.char.decode(c, encoding='cp037')
    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')

    """
    # Trailing Nones are stripped; bytes.decode does not accept them.
    decoded = _vec_string(a, object_, 'decode', _clean_args(encoding, errors))
    return _to_bytes_or_str_array(decoded)
616
+
617
+
618
@array_function_dispatch(_code_dispatcher)
def encode(a, encoding=None, errors=None):
    """
    Calls `str.encode` element-wise.

    The set of available codecs comes from the Python standard library,
    and may be extended at runtime. For more information, see the codecs
    module.

    Parameters
    ----------
    a : array_like of str or unicode

    encoding : str, optional
       The name of an encoding

    errors : str, optional
       Specifies how to handle encoding errors

    Returns
    -------
    out : ndarray

    See Also
    --------
    str.encode

    Notes
    -----
    The type of the result will depend on the encoding specified.

    """
    # Trailing Nones are stripped; str.encode does not accept them.
    encoded = _vec_string(a, object_, 'encode', _clean_args(encoding, errors))
    return _to_bytes_or_str_array(encoded)
652
+
653
+
654
def _endswith_dispatcher(a, suffix, start=None, end=None):
    # Only the string array participates in dispatch.
    return (a,)


@array_function_dispatch(_endswith_dispatcher)
def endswith(a, suffix, start=0, end=None):
    """
    Returns a boolean array which is `True` where the string element
    in `a` ends with `suffix`, otherwise `False`.

    Calls `str.endswith` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    suffix : str

    start, end : int, optional
        With optional `start`, test beginning at that position. With
        optional `end`, stop comparing at that position.

    Returns
    -------
    out : ndarray
        Outputs an array of bools.

    See Also
    --------
    str.endswith

    Examples
    --------
    >>> s = np.array(['foo', 'bar'])
    >>> s[0] = 'foo'
    >>> s[1] = 'bar'
    >>> s
    array(['foo', 'bar'], dtype='<U3')
    >>> np.char.endswith(s, 'ar')
    array([False,  True])
    >>> np.char.endswith(s, 'a', start=1, end=2)
    array([False,  True])

    """
    # A trailing None `end` is dropped before delegating to str.endswith.
    call_args = [suffix, start] + _clean_args(end)
    return _vec_string(a, bool_, 'endswith', call_args)
700
+
701
+
702
def _expandtabs_dispatcher(a, tabsize=None):
    # Only the string array participates in dispatch.
    return (a,)


@array_function_dispatch(_expandtabs_dispatcher)
def expandtabs(a, tabsize=8):
    """
    Return a copy of each string element where all tab characters are
    replaced by one or more spaces.

    Calls `str.expandtabs` element-wise.

    Return a copy of each string element where all tab characters are
    replaced by one or more spaces, depending on the current column
    and the given `tabsize`. The column number is reset to zero after
    each newline occurring in the string. This doesn't understand other
    non-printing characters or escape sequences.

    Parameters
    ----------
    a : array_like of str or unicode
        Input array
    tabsize : int, optional
        Replace tabs with `tabsize` number of spaces.  If not given defaults
        to 8 spaces.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.expandtabs

    """
    # Expansion can widen the strings, so go through an object array and
    # rebuild the string dtype afterwards.
    expanded = _vec_string(a, object_, 'expandtabs', (tabsize,))
    return _to_bytes_or_str_array(expanded, a)
740
+
741
+
742
@array_function_dispatch(_count_dispatcher)
def find(a, sub, start=0, end=None):
    """
    For each element, return the lowest index in the string where
    substring `sub` is found.

    Calls `str.find` element-wise.

    For each element, return the lowest index in the string where
    substring `sub` is found, such that `sub` is contained in the
    range [`start`, `end`].

    Parameters
    ----------
    a : array_like of str or unicode

    sub : str or unicode

    start, end : int, optional
        Optional arguments `start` and `end` are interpreted as in
        slice notation.

    Returns
    -------
    out : ndarray or int
        Output array of ints.  Returns -1 if `sub` is not found.

    See Also
    --------
    str.find

    Examples
    --------
    >>> a = np.array(["NumPy is a Python library"])
    >>> np.char.find(a, "Python", start=0, end=None)
    array([11])

    """
    # A trailing None `end` is dropped before delegating to str.find.
    call_args = [sub, start] + _clean_args(end)
    return _vec_string(a, int_, 'find', call_args)
782
+
783
+
784
@array_function_dispatch(_count_dispatcher)
def index(a, sub, start=0, end=None):
    """
    Like `find`, but raises `ValueError` when the substring is not found.

    Calls `str.index` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    sub : str or unicode

    start, end : int, optional

    Returns
    -------
    out : ndarray
        Output array of ints.

    Raises
    ------
    ValueError
        If `sub` is not found in some element, mirroring `str.index`
        (unlike `find`, which returns -1 in that case).

    See Also
    --------
    find, str.index

    Examples
    --------
    >>> a = np.array(["Computer Science"])
    >>> np.char.index(a, "Science", start=0, end=None)
    array([9])

    """
    # A trailing None `end` is dropped before delegating to str.index.
    return _vec_string(
        a, int_, 'index', [sub, start] + _clean_args(end))
817
+
818
+
819
@array_function_dispatch(_unary_op_dispatcher)
def isalnum(a):
    """
    Returns true for each element if all characters in the string are
    alphanumeric and there is at least one character, false otherwise.

    Calls `str.isalnum` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.isalnum
    """
    return _vec_string(a, bool_, 'isalnum')
843
+
844
+
845
@array_function_dispatch(_unary_op_dispatcher)
def isalpha(a):
    """
    Returns true for each element if all characters in the string are
    alphabetic and there is at least one character, false otherwise.

    Calls `str.isalpha` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.isalpha
    """
    # Delegate to str.isalpha on each element; result is a bool array.
    return _vec_string(a, bool_, 'isalpha')
869
+
870
+
871
@array_function_dispatch(_unary_op_dispatcher)
def isdigit(a):
    """
    Returns true for each element if all characters in the string are
    digits and there is at least one character, false otherwise.

    Calls `str.isdigit` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.isdigit

    Examples
    --------
    >>> a = np.array(['a', 'b', '0'])
    >>> np.char.isdigit(a)
    array([False, False,  True])
    >>> a = np.array([['a', 'b', '0'], ['c', '1', '2']])
    >>> np.char.isdigit(a)
    array([[False, False,  True], [False,  True,  True]])
    """
    # Delegate to str.isdigit on each element; result is a bool array.
    return _vec_string(a, bool_, 'isdigit')
904
+
905
+
906
@array_function_dispatch(_unary_op_dispatcher)
def islower(a):
    """
    Returns true for each element if all cased characters in the
    string are lowercase and there is at least one cased character,
    false otherwise.

    Calls `str.islower` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.islower
    """
    # Delegate to str.islower on each element; result is a bool array.
    return _vec_string(a, bool_, 'islower')
931
+
932
+
933
@array_function_dispatch(_unary_op_dispatcher)
def isspace(a):
    """
    Returns true for each element if there are only whitespace
    characters in the string and there is at least one character,
    false otherwise.

    Calls `str.isspace` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.isspace
    """
    # Delegate to str.isspace on each element; result is a bool array.
    return _vec_string(a, bool_, 'isspace')
958
+
959
+
960
@array_function_dispatch(_unary_op_dispatcher)
def istitle(a):
    """
    Returns true for each element if the element is a titlecased
    string and there is at least one character, false otherwise.

    Call `str.istitle` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.istitle
    """
    # Delegate to str.istitle on each element; result is a bool array.
    return _vec_string(a, bool_, 'istitle')
984
+
985
+
986
@array_function_dispatch(_unary_op_dispatcher)
def isupper(a):
    """
    Return true for each element if all cased characters in the
    string are uppercase and there is at least one character, false
    otherwise.

    Call `str.isupper` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of bools

    See Also
    --------
    str.isupper

    Examples
    --------
    >>> str = "GHC"
    >>> np.char.isupper(str)
    array(True)
    >>> a = np.array(["hello", "HELLO", "Hello"])
    >>> np.char.isupper(a)
    array([False,  True, False])

    """
    # Delegate to str.isupper on each element; result is a bool array.
    return _vec_string(a, bool_, 'isupper')
1021
+
1022
+
1023
def _join_dispatcher(sep, seq):
    # Both the separator array and the sequence participate in dispatch.
    return (sep, seq)


@array_function_dispatch(_join_dispatcher)
def join(sep, seq):
    """
    Return a string which is the concatenation of the strings in the
    sequence `seq`.

    Calls `str.join` element-wise.

    Parameters
    ----------
    sep : array_like of str or unicode
    seq : array_like of str or unicode

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input types

    See Also
    --------
    str.join

    Examples
    --------
    >>> np.char.join('-', 'osd')
    array('o-s-d', dtype='<U5')

    >>> np.char.join(['-', '.'], ['ghc', 'osd'])
    array(['g-h-c', 'o.s.d'], dtype='<U5')

    """
    # Join through an object array, then restore a string dtype sized to
    # the widest result, kind-matched to `seq`.
    joined = _vec_string(sep, object_, 'join', (seq,))
    return _to_bytes_or_str_array(joined, seq)
1060
+
1061
+
1062
+
1063
def _just_dispatcher(a, width, fillchar=None):
    # Shared by the justification functions: only `a` dispatches.
    return (a,)


@array_function_dispatch(_just_dispatcher)
def ljust(a, width, fillchar=' '):
    """
    Return an array with the elements of `a` left-justified in a
    string of length `width`.

    Calls `str.ljust` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    width : int
        The length of the resulting strings
    fillchar : str or unicode, optional
        The character to use for padding

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.ljust

    """
    arr = numpy.asarray(a)
    widths = numpy.asarray(width)
    # Output dtype must accommodate the widest requested width.
    out_len = int(numpy.max(widths.flat))
    if numpy.issubdtype(arr.dtype, numpy.bytes_):
        # Byte-string elements need a byte fill character.
        fillchar = asbytes(fillchar)
    return _vec_string(
        arr, type(arr.dtype)(out_len), 'ljust', (widths, fillchar))
1101
+
1102
+
1103
@array_function_dispatch(_unary_op_dispatcher)
def lower(a):
    """
    Return an array with the elements converted to lowercase.

    Call `str.lower` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.lower

    Examples
    --------
    >>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
    array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
    >>> np.char.lower(c)
    array(['a1b c', '1bca', 'bca1'], dtype='<U5')

    """
    # Lowercasing never widens a string, so the dtype is preserved.
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'lower')
1136
+
1137
+
1138
def _strip_dispatcher(a, chars=None):
    # Only `a` is relevant for __array_function__ dispatch.
    return (a,)


@array_function_dispatch(_strip_dispatcher)
def lstrip(a, chars=None):
    """
    For each element in `a`, return a copy with the leading characters
    removed.

    Calls `str.lstrip` element-wise.

    Parameters
    ----------
    a : array-like, {str, unicode}
        Input array.

    chars : {str, unicode}, optional
        The `chars` argument is a string specifying the set of
        characters to be removed. If omitted or None, the `chars`
        argument defaults to removing whitespace. The `chars` argument
        is not a prefix; rather, all combinations of its values are
        stripped.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.lstrip

    Examples
    --------
    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
    >>> c
    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')

    The 'a' variable is unstripped from c[1] because of leading whitespace.

    >>> np.char.lstrip(c, 'a')
    array(['AaAaA', '  aA  ', 'bBABba'], dtype='<U7')


    >>> np.char.lstrip(c, 'A') # leaves c unchanged
    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()
    ... # XXX: is this a regression? This used to return True
    ... # np.char.lstrip(c,'') does not modify c at all.
    False
    >>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()
    True

    """
    a_arr = numpy.asarray(a)
    return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
1195
+
1196
+
1197
def _partition_dispatcher(a, sep):
    # Only `a` is relevant for __array_function__ dispatch.
    return (a,)


@array_function_dispatch(_partition_dispatcher)
def partition(a, sep):
    """
    Partition each element in `a` around `sep`.

    Calls `str.partition` element-wise.

    For each element in `a`, split the element as the first
    occurrence of `sep`, and return 3 strings containing the part
    before the separator, the separator itself, and the part after
    the separator. If the separator is not found, return 3 strings
    containing the string itself, followed by two empty strings.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array
    sep : {str, unicode}
        Separator to split each string element in `a`.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type.
        The output array will have an extra dimension with 3
        elements per input element.

    See Also
    --------
    str.partition

    """
    # str.partition returns a 3-tuple per element; the object-dtype
    # intermediate is converted back to a bytes/str array matching `a`.
    return _to_bytes_or_str_array(
        _vec_string(a, object_, 'partition', (sep,)), a)
1235
+
1236
+
1237
def _replace_dispatcher(a, old, new, count=None):
    # Only `a` is relevant for __array_function__ dispatch.
    return (a,)


@array_function_dispatch(_replace_dispatcher)
def replace(a, old, new, count=None):
    """
    For each element in `a`, return a copy of the string with all
    occurrences of substring `old` replaced by `new`.

    Calls `str.replace` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode

    old, new : str or unicode

    count : int, optional
        If the optional argument `count` is given, only the first
        `count` occurrences are replaced.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.replace

    Examples
    --------
    >>> a = np.array(["That is a mango", "Monkeys eat mangos"])
    >>> np.char.replace(a, 'mango', 'banana')
    array(['That is a banana', 'Monkeys eat bananas'], dtype='<U19')

    >>> a = np.array(["The dish is fresh", "This is it"])
    >>> np.char.replace(a, 'is', 'was')
    array(['The dwash was fresh', 'Thwas was it'], dtype='<U19')
    """
    # _clean_args drops a trailing None so str.replace sees the right arity.
    return _to_bytes_or_str_array(
        _vec_string(a, object_, 'replace', [old, new] + _clean_args(count)), a)
1280
+
1281
+
1282
@array_function_dispatch(_count_dispatcher)
def rfind(a, sub, start=0, end=None):
    """
    For each element in `a`, return the highest index in the string
    where substring `sub` is found, such that `sub` is contained
    within [`start`, `end`].

    Calls `str.rfind` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode

    sub : str or unicode

    start, end : int, optional
        Optional arguments `start` and `end` are interpreted as in
        slice notation.

    Returns
    -------
    out : ndarray
       Output array of ints.  Return -1 on failure.

    See Also
    --------
    str.rfind

    """
    # _clean_args drops a trailing None `end` so str.rfind sees the
    # right number of positional arguments.
    return _vec_string(
        a, int_, 'rfind', [sub, start] + _clean_args(end))
1313
+
1314
+
1315
@array_function_dispatch(_count_dispatcher)
def rindex(a, sub, start=0, end=None):
    """
    Like `rfind`, but raises `ValueError` when the substring `sub` is
    not found.

    Calls `str.rindex` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode

    sub : str or unicode

    start, end : int, optional

    Returns
    -------
    out : ndarray
       Output array of ints.

    Raises
    ------
    ValueError
        If `sub` is not found in an element (propagated from `str.rindex`).

    See Also
    --------
    rfind, str.rindex

    """
    return _vec_string(
        a, int_, 'rindex', [sub, start] + _clean_args(end))
1343
+
1344
+
1345
@array_function_dispatch(_just_dispatcher)
def rjust(a, width, fillchar=' '):
    """
    Return an array with the elements of `a` right-justified in a
    string of length `width`.

    Calls `str.rjust` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    width : int
        The length of the resulting strings
    fillchar : str or unicode, optional
        The character to use for padding

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.rjust

    """
    a_arr = numpy.asarray(a)
    width_arr = numpy.asarray(width)
    # The output itemsize must be able to hold the widest requested field.
    size = int(numpy.max(width_arr.flat))
    if numpy.issubdtype(a_arr.dtype, numpy.bytes_):
        # bytes.rjust requires a bytes fill character.
        fillchar = asbytes(fillchar)
    return _vec_string(
        a_arr, type(a_arr.dtype)(size), 'rjust', (width_arr, fillchar))
1379
+
1380
+
1381
@array_function_dispatch(_partition_dispatcher)
def rpartition(a, sep):
    """
    Partition (split) each element around the right-most separator.

    Calls `str.rpartition` element-wise.

    For each element in `a`, split the element as the last
    occurrence of `sep`, and return 3 strings containing the part
    before the separator, the separator itself, and the part after
    the separator. If the separator is not found, return 3 strings
    containing the string itself, followed by two empty strings.

    Parameters
    ----------
    a : array_like of str or unicode
        Input array
    sep : str or unicode
        Right-most separator to split each element in array.

    Returns
    -------
    out : ndarray
        Output array of string or unicode, depending on input
        type.  The output array will have an extra dimension with
        3 elements per input element.

    See Also
    --------
    str.rpartition

    """
    # str.rpartition returns a 3-tuple per element; the object-dtype
    # intermediate is converted back to a bytes/str array matching `a`.
    return _to_bytes_or_str_array(
        _vec_string(a, object_, 'rpartition', (sep,)), a)
1415
+
1416
+
1417
def _split_dispatcher(a, sep=None, maxsplit=None):
    # Only `a` is relevant for __array_function__ dispatch.
    return (a,)


@array_function_dispatch(_split_dispatcher)
def rsplit(a, sep=None, maxsplit=None):
    """
    For each element in `a`, return a list of the words in the
    string, using `sep` as the delimiter string.

    Calls `str.rsplit` element-wise.

    Except for splitting from the right, `rsplit`
    behaves like `split`.

    Parameters
    ----------
    a : array_like of str or unicode

    sep : str or unicode, optional
        If `sep` is not specified or None, any whitespace string
        is a separator.
    maxsplit : int, optional
        If `maxsplit` is given, at most `maxsplit` splits are done,
        the rightmost ones.

    Returns
    -------
    out : ndarray
       Array of list objects

    See Also
    --------
    str.rsplit, split

    """
    # This will return an array of lists of different sizes, so we
    # leave it as an object array
    return _vec_string(
        a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
1457
+
1458
+
1459
# NOTE: `rstrip` reuses the `_strip_dispatcher` defined once earlier in
# this module; the byte-identical re-definition that used to sit here was
# redundant (flagged by linters as an F811 redefinition) and is removed.
@array_function_dispatch(_strip_dispatcher)
def rstrip(a, chars=None):
    """
    For each element in `a`, return a copy with the trailing
    characters removed.

    Calls `str.rstrip` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode

    chars : str or unicode, optional
       The `chars` argument is a string specifying the set of
       characters to be removed. If omitted or None, the `chars`
       argument defaults to removing whitespace. The `chars` argument
       is not a suffix; rather, all combinations of its values are
       stripped.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.rstrip

    Examples
    --------
    >>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
    array(['aAaAaA', 'abBABba'],
        dtype='|S7')
    >>> np.char.rstrip(c, b'a')
    array(['aAaAaA', 'abBABb'],
        dtype='|S7')
    >>> np.char.rstrip(c, b'A')
    array(['aAaAa', 'abBABba'],
        dtype='|S7')

    """
    a_arr = numpy.asarray(a)
    # Stripping never grows an element, so the input dtype is reused.
    return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
1506
+
1507
+
1508
@array_function_dispatch(_split_dispatcher)
def split(a, sep=None, maxsplit=None):
    """
    For each element in `a`, return a list of the words in the
    string, using `sep` as the delimiter string.

    Calls `str.split` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    sep : str or unicode, optional
       If `sep` is not specified or None, any whitespace string is a
       separator.

    maxsplit : int, optional
        If `maxsplit` is given, at most `maxsplit` splits are done.

    Returns
    -------
    out : ndarray
       Array of list objects

    See Also
    --------
    str.split, rsplit

    """
    # Each element yields a list whose length varies, so the result
    # stays an object array rather than a fixed-width string array.
    extra = _clean_args(maxsplit)
    return _vec_string(a, object_, 'split', [sep] + extra)
1541
+
1542
+
1543
def _splitlines_dispatcher(a, keepends=None):
    # Only `a` is relevant for __array_function__ dispatch.
    return (a,)


@array_function_dispatch(_splitlines_dispatcher)
def splitlines(a, keepends=None):
    """
    For each element in `a`, return a list of the lines in the
    element, breaking at line boundaries.

    Calls `str.splitlines` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    keepends : bool, optional
        Line breaks are not included in the resulting list unless
        keepends is given and true.

    Returns
    -------
    out : ndarray
       Array of list objects

    See Also
    --------
    str.splitlines

    """
    # Lists of lines differ in length per element, so the result is an
    # object array; _clean_args drops a None `keepends`.
    return _vec_string(
        a, object_, 'splitlines', _clean_args(keepends))
1575
+
1576
+
1577
def _startswith_dispatcher(a, prefix, start=None, end=None):
    # Only `a` is relevant for __array_function__ dispatch.
    return (a,)


@array_function_dispatch(_startswith_dispatcher)
def startswith(a, prefix, start=0, end=None):
    """
    Returns a boolean array which is `True` where the string element
    in `a` starts with `prefix`, otherwise `False`.

    Calls `str.startswith` element-wise.

    Parameters
    ----------
    a : array_like of str or unicode

    prefix : str

    start, end : int, optional
        With optional `start`, test beginning at that position. With
        optional `end`, stop comparing at that position.

    Returns
    -------
    out : ndarray
        Array of booleans

    See Also
    --------
    str.startswith

    """
    # _clean_args drops a trailing None `end` so str.startswith sees
    # the right number of positional arguments.
    return _vec_string(
        a, bool_, 'startswith', [prefix, start] + _clean_args(end))
1611
+
1612
+
1613
@array_function_dispatch(_strip_dispatcher)
def strip(a, chars=None):
    """
    For each element in `a`, return a copy with the leading and
    trailing characters removed.

    Calls `str.strip` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode

    chars : str or unicode, optional
       The `chars` argument is a string specifying the set of
       characters to be removed. If omitted or None, the `chars`
       argument defaults to removing whitespace. The `chars` argument
       is not a prefix or suffix; rather, all combinations of its
       values are stripped.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.strip

    Examples
    --------
    >>> c = np.array(['aAaAaA', '  aA  ', 'abBABba'])
    >>> c
    array(['aAaAaA', '  aA  ', 'abBABba'], dtype='<U7')
    >>> np.char.strip(c)
    array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7')
    >>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads
    array(['AaAaA', '  aA  ', 'bBABb'], dtype='<U7')
    >>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails
    array(['aAaAa', '  aA  ', 'abBABba'], dtype='<U7')

    """
    a_arr = numpy.asarray(a)
    # Unlike lstrip/rstrip, a None `chars` is dropped via _clean_args
    # rather than passed through; behavior is equivalent.
    return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))
1656
+
1657
+
1658
@array_function_dispatch(_unary_op_dispatcher)
def swapcase(a):
    """
    Return element-wise a copy of the string with
    uppercase characters converted to lowercase and vice versa.

    Calls `str.swapcase` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.swapcase

    Examples
    --------
    >>> c=np.array(['a1B c','1b Ca','b Ca1','cA1b'],'S5'); c
    array(['a1B c', '1b Ca', 'b Ca1', 'cA1b'],
        dtype='|S5')
    >>> np.char.swapcase(c)
    array(['A1b C', '1B cA', 'B cA1', 'Ca1B'],
        dtype='|S5')

    """
    # Case-swapping never grows an element, so the input dtype is reused.
    a_arr = numpy.asarray(a)
    return _vec_string(a_arr, a_arr.dtype, 'swapcase')
1694
+
1695
+
1696
@array_function_dispatch(_unary_op_dispatcher)
def title(a):
    """
    Return element-wise title cased version of string or unicode.

    Title case words start with uppercase characters, all remaining cased
    characters are lowercase.

    Calls `str.title` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array.

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.title

    Examples
    --------
    >>> c=np.array(['a1b c','1b ca','b ca1','ca1b'],'S5'); c
    array(['a1b c', '1b ca', 'b ca1', 'ca1b'],
        dtype='|S5')
    >>> np.char.title(c)
    array(['A1B C', '1B Ca', 'B Ca1', 'Ca1B'],
        dtype='|S5')

    """
    # Title-casing never grows an element, so the input dtype is reused.
    a_arr = numpy.asarray(a)
    return _vec_string(a_arr, a_arr.dtype, 'title')
1734
+
1735
+
1736
def _translate_dispatcher(a, table, deletechars=None):
    # Only `a` is relevant for __array_function__ dispatch.
    return (a,)


@array_function_dispatch(_translate_dispatcher)
def translate(a, table, deletechars=None):
    """
    For each element in `a`, return a copy of the string where all
    characters occurring in the optional argument `deletechars` are
    removed, and the remaining characters have been mapped through the
    given translation table.

    Calls `str.translate` element-wise.

    Parameters
    ----------
    a : array-like of str or unicode

    table : str of length 256

    deletechars : str

    Returns
    -------
    out : ndarray
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.translate

    """
    a_arr = numpy.asarray(a)
    if issubclass(a_arr.dtype.type, str_):
        # str.translate takes only a table; `deletechars` applies only
        # to the bytes variant below.
        return _vec_string(
            a_arr, a_arr.dtype, 'translate', (table,))
    else:
        return _vec_string(
            a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
1775
+
1776
+
1777
@array_function_dispatch(_unary_op_dispatcher)
def upper(a):
    """
    Return an array with the elements converted to uppercase.

    Calls `str.upper` element-wise.

    For 8-bit strings, this method is locale-dependent.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.upper

    Examples
    --------
    >>> c = np.array(['a1b c', '1bca', 'bca1']); c
    array(['a1b c', '1bca', 'bca1'], dtype='<U5')
    >>> np.char.upper(c)
    array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')

    """
    # Uppercasing never grows an element, so the input dtype is reused.
    arr = numpy.asarray(a)
    return _vec_string(arr, arr.dtype, 'upper')
1810
+
1811
+
1812
def _zfill_dispatcher(a, width):
    # Only `a` is relevant for __array_function__ dispatch.
    return (a,)


@array_function_dispatch(_zfill_dispatcher)
def zfill(a, width):
    """
    Return the numeric string left-filled with zeros

    Calls `str.zfill` element-wise.

    Parameters
    ----------
    a : array_like, {str, unicode}
        Input array.
    width : int
        Width of string to left-fill elements in `a`.

    Returns
    -------
    out : ndarray, {str, unicode}
        Output array of str or unicode, depending on input type

    See Also
    --------
    str.zfill

    """
    a_arr = numpy.asarray(a)
    width_arr = numpy.asarray(width)
    # The output itemsize must be able to hold the widest requested field.
    size = int(numpy.max(width_arr.flat))
    return _vec_string(
        a_arr, type(a_arr.dtype)(size), 'zfill', (width_arr,))
1845
+
1846
+
1847
@array_function_dispatch(_unary_op_dispatcher)
def isnumeric(a):
    """
    For each element, return True if there are only numeric
    characters in the element.

    Calls `str.isnumeric` element-wise.

    Numeric characters include digit characters, and all characters
    that have the Unicode numeric value property, e.g. ``U+2155,
    VULGAR FRACTION ONE FIFTH``.

    Parameters
    ----------
    a : array_like, unicode
        Input array.

    Returns
    -------
    out : ndarray, bool
        Array of booleans of same shape as `a`.

    Raises
    ------
    TypeError
        If `a` is not an array of unicode strings.

    See Also
    --------
    str.isnumeric

    Examples
    --------
    >>> np.char.isnumeric(['123', '123abc', '9.0', '1/4', 'VIII'])
    array([ True, False, False, False, False])

    """
    # bytes objects have no isnumeric method, so reject them up front.
    if not _is_unicode(a):
        raise TypeError("isnumeric is only available for Unicode strings and arrays")
    return _vec_string(a, bool_, 'isnumeric')
1882
+
1883
+
1884
@array_function_dispatch(_unary_op_dispatcher)
def isdecimal(a):
    """
    For each element, return True if there are only decimal
    characters in the element.

    Calls `str.isdecimal` element-wise.

    Decimal characters include digit characters, and all characters
    that can be used to form decimal-radix numbers,
    e.g. ``U+0660, ARABIC-INDIC DIGIT ZERO``.

    Parameters
    ----------
    a : array_like, unicode
        Input array.

    Returns
    -------
    out : ndarray, bool
        Array of booleans identical in shape to `a`.

    Raises
    ------
    TypeError
        If `a` is not an array of unicode strings.

    See Also
    --------
    str.isdecimal

    Examples
    --------
    >>> np.char.isdecimal(['12345', '4.99', '123ABC', ''])
    array([ True, False, False, False])

    """
    # bytes objects have no isdecimal method, so reject them up front.
    if not _is_unicode(a):
        raise TypeError(
            "isdecimal is only available for Unicode strings and arrays")
    return _vec_string(a, bool_, 'isdecimal')
1920
+
1921
+
1922
+ @set_module('numpy')
1923
+ class chararray(ndarray):
1924
+ """
1925
+ chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
1926
+ strides=None, order=None)
1927
+
1928
+ Provides a convenient view on arrays of string and unicode values.
1929
+
1930
+ .. note::
1931
+ The `chararray` class exists for backwards compatibility with
1932
+ Numarray, it is not recommended for new development. Starting from numpy
1933
+ 1.4, if one needs arrays of strings, it is recommended to use arrays of
1934
+ `dtype` `object_`, `bytes_` or `str_`, and use the free functions
1935
+ in the `numpy.char` module for fast vectorized string operations.
1936
+
1937
+ Versus a regular NumPy array of type `str` or `unicode`, this
1938
+ class adds the following functionality:
1939
+
1940
+ 1) values automatically have whitespace removed from the end
1941
+ when indexed
1942
+
1943
+ 2) comparison operators automatically remove whitespace from the
1944
+ end when comparing values
1945
+
1946
+ 3) vectorized string operations are provided as methods
1947
+ (e.g. `.endswith`) and infix operators (e.g. ``"+", "*", "%"``)
1948
+
1949
+ chararrays should be created using `numpy.char.array` or
1950
+ `numpy.char.asarray`, rather than this constructor directly.
1951
+
1952
+ This constructor creates the array, using `buffer` (with `offset`
1953
+ and `strides`) if it is not ``None``. If `buffer` is ``None``, then
1954
+ constructs a new array with `strides` in "C order", unless both
1955
+ ``len(shape) >= 2`` and ``order='F'``, in which case `strides`
1956
+ is in "Fortran order".
1957
+
1958
+ Methods
1959
+ -------
1960
+ astype
1961
+ argsort
1962
+ copy
1963
+ count
1964
+ decode
1965
+ dump
1966
+ dumps
1967
+ encode
1968
+ endswith
1969
+ expandtabs
1970
+ fill
1971
+ find
1972
+ flatten
1973
+ getfield
1974
+ index
1975
+ isalnum
1976
+ isalpha
1977
+ isdecimal
1978
+ isdigit
1979
+ islower
1980
+ isnumeric
1981
+ isspace
1982
+ istitle
1983
+ isupper
1984
+ item
1985
+ join
1986
+ ljust
1987
+ lower
1988
+ lstrip
1989
+ nonzero
1990
+ put
1991
+ ravel
1992
+ repeat
1993
+ replace
1994
+ reshape
1995
+ resize
1996
+ rfind
1997
+ rindex
1998
+ rjust
1999
+ rsplit
2000
+ rstrip
2001
+ searchsorted
2002
+ setfield
2003
+ setflags
2004
+ sort
2005
+ split
2006
+ splitlines
2007
+ squeeze
2008
+ startswith
2009
+ strip
2010
+ swapaxes
2011
+ swapcase
2012
+ take
2013
+ title
2014
+ tofile
2015
+ tolist
2016
+ tostring
2017
+ translate
2018
+ transpose
2019
+ upper
2020
+ view
2021
+ zfill
2022
+
2023
+ Parameters
2024
+ ----------
2025
+ shape : tuple
2026
+ Shape of the array.
2027
+ itemsize : int, optional
2028
+ Length of each array element, in number of characters. Default is 1.
2029
+ unicode : bool, optional
2030
+ Are the array elements of type unicode (True) or string (False).
2031
+ Default is False.
2032
+ buffer : object exposing the buffer interface or str, optional
2033
+ Memory address of the start of the array data. Default is None,
2034
+ in which case a new array is created.
2035
+ offset : int, optional
2036
+ Fixed stride displacement from the beginning of an axis?
2037
+ Default is 0. Needs to be >=0.
2038
+ strides : array_like of ints, optional
2039
+ Strides for the array (see `ndarray.strides` for full description).
2040
+ Default is None.
2041
+ order : {'C', 'F'}, optional
2042
+ The order in which the array data is stored in memory: 'C' ->
2043
+ "row major" order (the default), 'F' -> "column major"
2044
+ (Fortran) order.
2045
+
2046
+ Examples
2047
+ --------
2048
+ >>> charar = np.chararray((3, 3))
2049
+ >>> charar[:] = 'a'
2050
+ >>> charar
2051
+ chararray([[b'a', b'a', b'a'],
2052
+ [b'a', b'a', b'a'],
2053
+ [b'a', b'a', b'a']], dtype='|S1')
2054
+
2055
+ >>> charar = np.chararray(charar.shape, itemsize=5)
2056
+ >>> charar[:] = 'abc'
2057
+ >>> charar
2058
+ chararray([[b'abc', b'abc', b'abc'],
2059
+ [b'abc', b'abc', b'abc'],
2060
+ [b'abc', b'abc', b'abc']], dtype='|S5')
2061
+
2062
+ """
2063
+ def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
2064
+ offset=0, strides=None, order='C'):
2065
+ global _globalvar
2066
+
2067
+ if unicode:
2068
+ dtype = str_
2069
+ else:
2070
+ dtype = bytes_
2071
+
2072
+ # force itemsize to be a Python int, since using NumPy integer
2073
+ # types results in itemsize.itemsize being used as the size of
2074
+ # strings in the new array.
2075
+ itemsize = int(itemsize)
2076
+
2077
+ if isinstance(buffer, str):
2078
+ # unicode objects do not have the buffer interface
2079
+ filler = buffer
2080
+ buffer = None
2081
+ else:
2082
+ filler = None
2083
+
2084
+ _globalvar = 1
2085
+ if buffer is None:
2086
+ self = ndarray.__new__(subtype, shape, (dtype, itemsize),
2087
+ order=order)
2088
+ else:
2089
+ self = ndarray.__new__(subtype, shape, (dtype, itemsize),
2090
+ buffer=buffer,
2091
+ offset=offset, strides=strides,
2092
+ order=order)
2093
+ if filler is not None:
2094
+ self[...] = filler
2095
+ _globalvar = 0
2096
+ return self
2097
+
2098
+ def __array_finalize__(self, obj):
2099
+ # The b is a special case because it is used for reconstructing.
2100
+ if not _globalvar and self.dtype.char not in 'SUbc':
2101
+ raise ValueError("Can only create a chararray from string data.")
2102
+
2103
+ def __getitem__(self, obj):
2104
+ val = ndarray.__getitem__(self, obj)
2105
+
2106
+ if isinstance(val, character):
2107
+ temp = val.rstrip()
2108
+ if len(temp) == 0:
2109
+ val = ''
2110
+ else:
2111
+ val = temp
2112
+
2113
+ return val
2114
+
2115
+ # IMPLEMENTATION NOTE: Most of the methods of this class are
2116
+ # direct delegations to the free functions in this module.
2117
+ # However, those that return an array of strings should instead
2118
+ # return a chararray, so some extra wrapping is required.
2119
+
2120
+ def __eq__(self, other):
2121
+ """
2122
+ Return (self == other) element-wise.
2123
+
2124
+ See Also
2125
+ --------
2126
+ equal
2127
+ """
2128
+ return equal(self, other)
2129
+
2130
+ def __ne__(self, other):
2131
+ """
2132
+ Return (self != other) element-wise.
2133
+
2134
+ See Also
2135
+ --------
2136
+ not_equal
2137
+ """
2138
+ return not_equal(self, other)
2139
+
2140
+ def __ge__(self, other):
2141
+ """
2142
+ Return (self >= other) element-wise.
2143
+
2144
+ See Also
2145
+ --------
2146
+ greater_equal
2147
+ """
2148
+ return greater_equal(self, other)
2149
+
2150
+ def __le__(self, other):
2151
+ """
2152
+ Return (self <= other) element-wise.
2153
+
2154
+ See Also
2155
+ --------
2156
+ less_equal
2157
+ """
2158
+ return less_equal(self, other)
2159
+
2160
+ def __gt__(self, other):
2161
+ """
2162
+ Return (self > other) element-wise.
2163
+
2164
+ See Also
2165
+ --------
2166
+ greater
2167
+ """
2168
+ return greater(self, other)
2169
+
2170
+ def __lt__(self, other):
2171
+ """
2172
+ Return (self < other) element-wise.
2173
+
2174
+ See Also
2175
+ --------
2176
+ less
2177
+ """
2178
+ return less(self, other)
2179
+
2180
+ def __add__(self, other):
2181
+ """
2182
+ Return (self + other), that is string concatenation,
2183
+ element-wise for a pair of array_likes of str or unicode.
2184
+
2185
+ See Also
2186
+ --------
2187
+ add
2188
+ """
2189
+ return asarray(add(self, other))
2190
+
2191
+ def __radd__(self, other):
2192
+ """
2193
+ Return (other + self), that is string concatenation,
2194
+ element-wise for a pair of array_likes of `bytes_` or `str_`.
2195
+
2196
+ See Also
2197
+ --------
2198
+ add
2199
+ """
2200
+ return asarray(add(numpy.asarray(other), self))
2201
+
2202
+ def __mul__(self, i):
2203
+ """
2204
+ Return (self * i), that is string multiple concatenation,
2205
+ element-wise.
2206
+
2207
+ See Also
2208
+ --------
2209
+ multiply
2210
+ """
2211
+ return asarray(multiply(self, i))
2212
+
2213
+ def __rmul__(self, i):
2214
+ """
2215
+ Return (self * i), that is string multiple concatenation,
2216
+ element-wise.
2217
+
2218
+ See Also
2219
+ --------
2220
+ multiply
2221
+ """
2222
+ return asarray(multiply(self, i))
2223
+
2224
+ def __mod__(self, i):
2225
+ """
2226
+ Return (self % i), that is pre-Python 2.6 string formatting
2227
+ (interpolation), element-wise for a pair of array_likes of `bytes_`
2228
+ or `str_`.
2229
+
2230
+ See Also
2231
+ --------
2232
+ mod
2233
+ """
2234
+ return asarray(mod(self, i))
2235
+
2236
+ def __rmod__(self, other):
2237
+ return NotImplemented
2238
+
2239
+ def argsort(self, axis=-1, kind=None, order=None):
2240
+ """
2241
+ Return the indices that sort the array lexicographically.
2242
+
2243
+ For full documentation see `numpy.argsort`, for which this method is
2244
+ in fact merely a "thin wrapper."
2245
+
2246
+ Examples
2247
+ --------
2248
+ >>> c = np.array(['a1b c', '1b ca', 'b ca1', 'Ca1b'], 'S5')
2249
+ >>> c = c.view(np.chararray); c
2250
+ chararray(['a1b c', '1b ca', 'b ca1', 'Ca1b'],
2251
+ dtype='|S5')
2252
+ >>> c[c.argsort()]
2253
+ chararray(['1b ca', 'Ca1b', 'a1b c', 'b ca1'],
2254
+ dtype='|S5')
2255
+
2256
+ """
2257
+ return self.__array__().argsort(axis, kind, order)
2258
+ argsort.__doc__ = ndarray.argsort.__doc__
2259
+
2260
+ def capitalize(self):
2261
+ """
2262
+ Return a copy of `self` with only the first character of each element
2263
+ capitalized.
2264
+
2265
+ See Also
2266
+ --------
2267
+ char.capitalize
2268
+
2269
+ """
2270
+ return asarray(capitalize(self))
2271
+
2272
+ def center(self, width, fillchar=' '):
2273
+ """
2274
+ Return a copy of `self` with its elements centered in a
2275
+ string of length `width`.
2276
+
2277
+ See Also
2278
+ --------
2279
+ center
2280
+ """
2281
+ return asarray(center(self, width, fillchar))
2282
+
2283
+ def count(self, sub, start=0, end=None):
2284
+ """
2285
+ Returns an array with the number of non-overlapping occurrences of
2286
+ substring `sub` in the range [`start`, `end`].
2287
+
2288
+ See Also
2289
+ --------
2290
+ char.count
2291
+
2292
+ """
2293
+ return count(self, sub, start, end)
2294
+
2295
+ def decode(self, encoding=None, errors=None):
2296
+ """
2297
+ Calls ``bytes.decode`` element-wise.
2298
+
2299
+ See Also
2300
+ --------
2301
+ char.decode
2302
+
2303
+ """
2304
+ return decode(self, encoding, errors)
2305
+
2306
+ def encode(self, encoding=None, errors=None):
2307
+ """
2308
+ Calls `str.encode` element-wise.
2309
+
2310
+ See Also
2311
+ --------
2312
+ char.encode
2313
+
2314
+ """
2315
+ return encode(self, encoding, errors)
2316
+
2317
+ def endswith(self, suffix, start=0, end=None):
2318
+ """
2319
+ Returns a boolean array which is `True` where the string element
2320
+ in `self` ends with `suffix`, otherwise `False`.
2321
+
2322
+ See Also
2323
+ --------
2324
+ char.endswith
2325
+
2326
+ """
2327
+ return endswith(self, suffix, start, end)
2328
+
2329
+ def expandtabs(self, tabsize=8):
2330
+ """
2331
+ Return a copy of each string element where all tab characters are
2332
+ replaced by one or more spaces.
2333
+
2334
+ See Also
2335
+ --------
2336
+ char.expandtabs
2337
+
2338
+ """
2339
+ return asarray(expandtabs(self, tabsize))
2340
+
2341
+ def find(self, sub, start=0, end=None):
2342
+ """
2343
+ For each element, return the lowest index in the string where
2344
+ substring `sub` is found.
2345
+
2346
+ See Also
2347
+ --------
2348
+ char.find
2349
+
2350
+ """
2351
+ return find(self, sub, start, end)
2352
+
2353
+ def index(self, sub, start=0, end=None):
2354
+ """
2355
+ Like `find`, but raises `ValueError` when the substring is not found.
2356
+
2357
+ See Also
2358
+ --------
2359
+ char.index
2360
+
2361
+ """
2362
+ return index(self, sub, start, end)
2363
+
2364
+ def isalnum(self):
2365
+ """
2366
+ Returns true for each element if all characters in the string
2367
+ are alphanumeric and there is at least one character, false
2368
+ otherwise.
2369
+
2370
+ See Also
2371
+ --------
2372
+ char.isalnum
2373
+
2374
+ """
2375
+ return isalnum(self)
2376
+
2377
+ def isalpha(self):
2378
+ """
2379
+ Returns true for each element if all characters in the string
2380
+ are alphabetic and there is at least one character, false
2381
+ otherwise.
2382
+
2383
+ See Also
2384
+ --------
2385
+ char.isalpha
2386
+
2387
+ """
2388
+ return isalpha(self)
2389
+
2390
+ def isdigit(self):
2391
+ """
2392
+ Returns true for each element if all characters in the string are
2393
+ digits and there is at least one character, false otherwise.
2394
+
2395
+ See Also
2396
+ --------
2397
+ char.isdigit
2398
+
2399
+ """
2400
+ return isdigit(self)
2401
+
2402
+ def islower(self):
2403
+ """
2404
+ Returns true for each element if all cased characters in the
2405
+ string are lowercase and there is at least one cased character,
2406
+ false otherwise.
2407
+
2408
+ See Also
2409
+ --------
2410
+ char.islower
2411
+
2412
+ """
2413
+ return islower(self)
2414
+
2415
+ def isspace(self):
2416
+ """
2417
+ Returns true for each element if there are only whitespace
2418
+ characters in the string and there is at least one character,
2419
+ false otherwise.
2420
+
2421
+ See Also
2422
+ --------
2423
+ char.isspace
2424
+
2425
+ """
2426
+ return isspace(self)
2427
+
2428
+ def istitle(self):
2429
+ """
2430
+ Returns true for each element if the element is a titlecased
2431
+ string and there is at least one character, false otherwise.
2432
+
2433
+ See Also
2434
+ --------
2435
+ char.istitle
2436
+
2437
+ """
2438
+ return istitle(self)
2439
+
2440
+ def isupper(self):
2441
+ """
2442
+ Returns true for each element if all cased characters in the
2443
+ string are uppercase and there is at least one character, false
2444
+ otherwise.
2445
+
2446
+ See Also
2447
+ --------
2448
+ char.isupper
2449
+
2450
+ """
2451
+ return isupper(self)
2452
+
2453
+ def join(self, seq):
2454
+ """
2455
+ Return a string which is the concatenation of the strings in the
2456
+ sequence `seq`.
2457
+
2458
+ See Also
2459
+ --------
2460
+ char.join
2461
+
2462
+ """
2463
+ return join(self, seq)
2464
+
2465
+ def ljust(self, width, fillchar=' '):
2466
+ """
2467
+ Return an array with the elements of `self` left-justified in a
2468
+ string of length `width`.
2469
+
2470
+ See Also
2471
+ --------
2472
+ char.ljust
2473
+
2474
+ """
2475
+ return asarray(ljust(self, width, fillchar))
2476
+
2477
+ def lower(self):
2478
+ """
2479
+ Return an array with the elements of `self` converted to
2480
+ lowercase.
2481
+
2482
+ See Also
2483
+ --------
2484
+ char.lower
2485
+
2486
+ """
2487
+ return asarray(lower(self))
2488
+
2489
+ def lstrip(self, chars=None):
2490
+ """
2491
+ For each element in `self`, return a copy with the leading characters
2492
+ removed.
2493
+
2494
+ See Also
2495
+ --------
2496
+ char.lstrip
2497
+
2498
+ """
2499
+ return asarray(lstrip(self, chars))
2500
+
2501
+ def partition(self, sep):
2502
+ """
2503
+ Partition each element in `self` around `sep`.
2504
+
2505
+ See Also
2506
+ --------
2507
+ partition
2508
+ """
2509
+ return asarray(partition(self, sep))
2510
+
2511
+ def replace(self, old, new, count=None):
2512
+ """
2513
+ For each element in `self`, return a copy of the string with all
2514
+ occurrences of substring `old` replaced by `new`.
2515
+
2516
+ See Also
2517
+ --------
2518
+ char.replace
2519
+
2520
+ """
2521
+ return asarray(replace(self, old, new, count))
2522
+
2523
+ def rfind(self, sub, start=0, end=None):
2524
+ """
2525
+ For each element in `self`, return the highest index in the string
2526
+ where substring `sub` is found, such that `sub` is contained
2527
+ within [`start`, `end`].
2528
+
2529
+ See Also
2530
+ --------
2531
+ char.rfind
2532
+
2533
+ """
2534
+ return rfind(self, sub, start, end)
2535
+
2536
+ def rindex(self, sub, start=0, end=None):
2537
+ """
2538
+ Like `rfind`, but raises `ValueError` when the substring `sub` is
2539
+ not found.
2540
+
2541
+ See Also
2542
+ --------
2543
+ char.rindex
2544
+
2545
+ """
2546
+ return rindex(self, sub, start, end)
2547
+
2548
+ def rjust(self, width, fillchar=' '):
2549
+ """
2550
+ Return an array with the elements of `self`
2551
+ right-justified in a string of length `width`.
2552
+
2553
+ See Also
2554
+ --------
2555
+ char.rjust
2556
+
2557
+ """
2558
+ return asarray(rjust(self, width, fillchar))
2559
+
2560
+ def rpartition(self, sep):
2561
+ """
2562
+ Partition each element in `self` around `sep`.
2563
+
2564
+ See Also
2565
+ --------
2566
+ rpartition
2567
+ """
2568
+ return asarray(rpartition(self, sep))
2569
+
2570
+ def rsplit(self, sep=None, maxsplit=None):
2571
+ """
2572
+ For each element in `self`, return a list of the words in
2573
+ the string, using `sep` as the delimiter string.
2574
+
2575
+ See Also
2576
+ --------
2577
+ char.rsplit
2578
+
2579
+ """
2580
+ return rsplit(self, sep, maxsplit)
2581
+
2582
+ def rstrip(self, chars=None):
2583
+ """
2584
+ For each element in `self`, return a copy with the trailing
2585
+ characters removed.
2586
+
2587
+ See Also
2588
+ --------
2589
+ char.rstrip
2590
+
2591
+ """
2592
+ return asarray(rstrip(self, chars))
2593
+
2594
+ def split(self, sep=None, maxsplit=None):
2595
+ """
2596
+ For each element in `self`, return a list of the words in the
2597
+ string, using `sep` as the delimiter string.
2598
+
2599
+ See Also
2600
+ --------
2601
+ char.split
2602
+
2603
+ """
2604
+ return split(self, sep, maxsplit)
2605
+
2606
+ def splitlines(self, keepends=None):
2607
+ """
2608
+ For each element in `self`, return a list of the lines in the
2609
+ element, breaking at line boundaries.
2610
+
2611
+ See Also
2612
+ --------
2613
+ char.splitlines
2614
+
2615
+ """
2616
+ return splitlines(self, keepends)
2617
+
2618
+ def startswith(self, prefix, start=0, end=None):
2619
+ """
2620
+ Returns a boolean array which is `True` where the string element
2621
+ in `self` starts with `prefix`, otherwise `False`.
2622
+
2623
+ See Also
2624
+ --------
2625
+ char.startswith
2626
+
2627
+ """
2628
+ return startswith(self, prefix, start, end)
2629
+
2630
+ def strip(self, chars=None):
2631
+ """
2632
+ For each element in `self`, return a copy with the leading and
2633
+ trailing characters removed.
2634
+
2635
+ See Also
2636
+ --------
2637
+ char.strip
2638
+
2639
+ """
2640
+ return asarray(strip(self, chars))
2641
+
2642
+ def swapcase(self):
2643
+ """
2644
+ For each element in `self`, return a copy of the string with
2645
+ uppercase characters converted to lowercase and vice versa.
2646
+
2647
+ See Also
2648
+ --------
2649
+ char.swapcase
2650
+
2651
+ """
2652
+ return asarray(swapcase(self))
2653
+
2654
+ def title(self):
2655
+ """
2656
+ For each element in `self`, return a titlecased version of the
2657
+ string: words start with uppercase characters, all remaining cased
2658
+ characters are lowercase.
2659
+
2660
+ See Also
2661
+ --------
2662
+ char.title
2663
+
2664
+ """
2665
+ return asarray(title(self))
2666
+
2667
+ def translate(self, table, deletechars=None):
2668
+ """
2669
+ For each element in `self`, return a copy of the string where
2670
+ all characters occurring in the optional argument
2671
+ `deletechars` are removed, and the remaining characters have
2672
+ been mapped through the given translation table.
2673
+
2674
+ See Also
2675
+ --------
2676
+ char.translate
2677
+
2678
+ """
2679
+ return asarray(translate(self, table, deletechars))
2680
+
2681
+ def upper(self):
2682
+ """
2683
+ Return an array with the elements of `self` converted to
2684
+ uppercase.
2685
+
2686
+ See Also
2687
+ --------
2688
+ char.upper
2689
+
2690
+ """
2691
+ return asarray(upper(self))
2692
+
2693
+ def zfill(self, width):
2694
+ """
2695
+ Return the numeric string left-filled with zeros in a string of
2696
+ length `width`.
2697
+
2698
+ See Also
2699
+ --------
2700
+ char.zfill
2701
+
2702
+ """
2703
+ return asarray(zfill(self, width))
2704
+
2705
+ def isnumeric(self):
2706
+ """
2707
+ For each element in `self`, return True if there are only
2708
+ numeric characters in the element.
2709
+
2710
+ See Also
2711
+ --------
2712
+ char.isnumeric
2713
+
2714
+ """
2715
+ return isnumeric(self)
2716
+
2717
+ def isdecimal(self):
2718
+ """
2719
+ For each element in `self`, return True if there are only
2720
+ decimal characters in the element.
2721
+
2722
+ See Also
2723
+ --------
2724
+ char.isdecimal
2725
+
2726
+ """
2727
+ return isdecimal(self)
2728
+
2729
+
2730
+ @set_module("numpy.char")
2731
+ def array(obj, itemsize=None, copy=True, unicode=None, order=None):
2732
+ """
2733
+ Create a `chararray`.
2734
+
2735
+ .. note::
2736
+ This class is provided for numarray backward-compatibility.
2737
+ New code (not concerned with numarray compatibility) should use
2738
+ arrays of type `bytes_` or `str_` and use the free functions
2739
+ in :mod:`numpy.char <numpy.core.defchararray>` for fast
2740
+ vectorized string operations instead.
2741
+
2742
+ Versus a regular NumPy array of type `str` or `unicode`, this
2743
+ class adds the following functionality:
2744
+
2745
+ 1) values automatically have whitespace removed from the end
2746
+ when indexed
2747
+
2748
+ 2) comparison operators automatically remove whitespace from the
2749
+ end when comparing values
2750
+
2751
+ 3) vectorized string operations are provided as methods
2752
+ (e.g. `str.endswith`) and infix operators (e.g. ``+, *, %``)
2753
+
2754
+ Parameters
2755
+ ----------
2756
+ obj : array of str or unicode-like
2757
+
2758
+ itemsize : int, optional
2759
+ `itemsize` is the number of characters per scalar in the
2760
+ resulting array. If `itemsize` is None, and `obj` is an
2761
+ object array or a Python list, the `itemsize` will be
2762
+ automatically determined. If `itemsize` is provided and `obj`
2763
+ is of type str or unicode, then the `obj` string will be
2764
+ chunked into `itemsize` pieces.
2765
+
2766
+ copy : bool, optional
2767
+ If true (default), then the object is copied. Otherwise, a copy
2768
+ will only be made if __array__ returns a copy, if obj is a
2769
+ nested sequence, or if a copy is needed to satisfy any of the other
2770
+ requirements (`itemsize`, unicode, `order`, etc.).
2771
+
2772
+ unicode : bool, optional
2773
+ When true, the resulting `chararray` can contain Unicode
2774
+ characters, when false only 8-bit characters. If unicode is
2775
+ None and `obj` is one of the following:
2776
+
2777
+ - a `chararray`,
2778
+ - an ndarray of type `str` or `unicode`
2779
+ - a Python str or unicode object,
2780
+
2781
+ then the unicode setting of the output array will be
2782
+ automatically determined.
2783
+
2784
+ order : {'C', 'F', 'A'}, optional
2785
+ Specify the order of the array. If order is 'C' (default), then the
2786
+ array will be in C-contiguous order (last-index varies the
2787
+ fastest). If order is 'F', then the returned array
2788
+ will be in Fortran-contiguous order (first-index varies the
2789
+ fastest). If order is 'A', then the returned array may
2790
+ be in any order (either C-, Fortran-contiguous, or even
2791
+ discontiguous).
2792
+ """
2793
+ if isinstance(obj, (bytes, str)):
2794
+ if unicode is None:
2795
+ if isinstance(obj, str):
2796
+ unicode = True
2797
+ else:
2798
+ unicode = False
2799
+
2800
+ if itemsize is None:
2801
+ itemsize = len(obj)
2802
+ shape = len(obj) // itemsize
2803
+
2804
+ return chararray(shape, itemsize=itemsize, unicode=unicode,
2805
+ buffer=obj, order=order)
2806
+
2807
+ if isinstance(obj, (list, tuple)):
2808
+ obj = numpy.asarray(obj)
2809
+
2810
+ if isinstance(obj, ndarray) and issubclass(obj.dtype.type, character):
2811
+ # If we just have a vanilla chararray, create a chararray
2812
+ # view around it.
2813
+ if not isinstance(obj, chararray):
2814
+ obj = obj.view(chararray)
2815
+
2816
+ if itemsize is None:
2817
+ itemsize = obj.itemsize
2818
+ # itemsize is in 8-bit chars, so for Unicode, we need
2819
+ # to divide by the size of a single Unicode character,
2820
+ # which for NumPy is always 4
2821
+ if issubclass(obj.dtype.type, str_):
2822
+ itemsize //= 4
2823
+
2824
+ if unicode is None:
2825
+ if issubclass(obj.dtype.type, str_):
2826
+ unicode = True
2827
+ else:
2828
+ unicode = False
2829
+
2830
+ if unicode:
2831
+ dtype = str_
2832
+ else:
2833
+ dtype = bytes_
2834
+
2835
+ if order is not None:
2836
+ obj = numpy.asarray(obj, order=order)
2837
+ if (copy or
2838
+ (itemsize != obj.itemsize) or
2839
+ (not unicode and isinstance(obj, str_)) or
2840
+ (unicode and isinstance(obj, bytes_))):
2841
+ obj = obj.astype((dtype, int(itemsize)))
2842
+ return obj
2843
+
2844
+ if isinstance(obj, ndarray) and issubclass(obj.dtype.type, object):
2845
+ if itemsize is None:
2846
+ # Since no itemsize was specified, convert the input array to
2847
+ # a list so the ndarray constructor will automatically
2848
+ # determine the itemsize for us.
2849
+ obj = obj.tolist()
2850
+ # Fall through to the default case
2851
+
2852
+ if unicode:
2853
+ dtype = str_
2854
+ else:
2855
+ dtype = bytes_
2856
+
2857
+ if itemsize is None:
2858
+ val = narray(obj, dtype=dtype, order=order, subok=True)
2859
+ else:
2860
+ val = narray(obj, dtype=(dtype, itemsize), order=order, subok=True)
2861
+ return val.view(chararray)
2862
+
2863
+
2864
+ @set_module("numpy.char")
2865
+ def asarray(obj, itemsize=None, unicode=None, order=None):
2866
+ """
2867
+ Convert the input to a `chararray`, copying the data only if
2868
+ necessary.
2869
+
2870
+ Versus a regular NumPy array of type `str` or `unicode`, this
2871
+ class adds the following functionality:
2872
+
2873
+ 1) values automatically have whitespace removed from the end
2874
+ when indexed
2875
+
2876
+ 2) comparison operators automatically remove whitespace from the
2877
+ end when comparing values
2878
+
2879
+ 3) vectorized string operations are provided as methods
2880
+ (e.g. `str.endswith`) and infix operators (e.g. ``+``, ``*``,``%``)
2881
+
2882
+ Parameters
2883
+ ----------
2884
+ obj : array of str or unicode-like
2885
+
2886
+ itemsize : int, optional
2887
+ `itemsize` is the number of characters per scalar in the
2888
+ resulting array. If `itemsize` is None, and `obj` is an
2889
+ object array or a Python list, the `itemsize` will be
2890
+ automatically determined. If `itemsize` is provided and `obj`
2891
+ is of type str or unicode, then the `obj` string will be
2892
+ chunked into `itemsize` pieces.
2893
+
2894
+ unicode : bool, optional
2895
+ When true, the resulting `chararray` can contain Unicode
2896
+ characters, when false only 8-bit characters. If unicode is
2897
+ None and `obj` is one of the following:
2898
+
2899
+ - a `chararray`,
2900
+ - an ndarray of type `str` or 'unicode`
2901
+ - a Python str or unicode object,
2902
+
2903
+ then the unicode setting of the output array will be
2904
+ automatically determined.
2905
+
2906
+ order : {'C', 'F'}, optional
2907
+ Specify the order of the array. If order is 'C' (default), then the
2908
+ array will be in C-contiguous order (last-index varies the
2909
+ fastest). If order is 'F', then the returned array
2910
+ will be in Fortran-contiguous order (first-index varies the
2911
+ fastest).
2912
+ """
2913
+ return array(obj, itemsize, copy=False,
2914
+ unicode=unicode, order=order)
venv/lib/python3.10/site-packages/numpy/core/einsumfunc.py ADDED
@@ -0,0 +1,1443 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Implementation of optimized einsum.
3
+
4
+ """
5
+ import itertools
6
+ import operator
7
+
8
+ from numpy.core.multiarray import c_einsum
9
+ from numpy.core.numeric import asanyarray, tensordot
10
+ from numpy.core.overrides import array_function_dispatch
11
+
12
+ __all__ = ['einsum', 'einsum_path']
13
+
14
+ einsum_symbols = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
15
+ einsum_symbols_set = set(einsum_symbols)
16
+
17
+
18
+ def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
19
+ """
20
+ Computes the number of FLOPS in the contraction.
21
+
22
+ Parameters
23
+ ----------
24
+ idx_contraction : iterable
25
+ The indices involved in the contraction
26
+ inner : bool
27
+ Does this contraction require an inner product?
28
+ num_terms : int
29
+ The number of terms in a contraction
30
+ size_dictionary : dict
31
+ The size of each of the indices in idx_contraction
32
+
33
+ Returns
34
+ -------
35
+ flop_count : int
36
+ The total number of FLOPS required for the contraction.
37
+
38
+ Examples
39
+ --------
40
+
41
+ >>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
42
+ 30
43
+
44
+ >>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
45
+ 60
46
+
47
+ """
48
+
49
+ overall_size = _compute_size_by_dict(idx_contraction, size_dictionary)
50
+ op_factor = max(1, num_terms - 1)
51
+ if inner:
52
+ op_factor += 1
53
+
54
+ return overall_size * op_factor
55
+
56
+ def _compute_size_by_dict(indices, idx_dict):
57
+ """
58
+ Computes the product of the elements in indices based on the dictionary
59
+ idx_dict.
60
+
61
+ Parameters
62
+ ----------
63
+ indices : iterable
64
+ Indices to base the product on.
65
+ idx_dict : dictionary
66
+ Dictionary of index sizes
67
+
68
+ Returns
69
+ -------
70
+ ret : int
71
+ The resulting product.
72
+
73
+ Examples
74
+ --------
75
+ >>> _compute_size_by_dict('abbc', {'a': 2, 'b':3, 'c':5})
76
+ 90
77
+
78
+ """
79
+ ret = 1
80
+ for i in indices:
81
+ ret *= idx_dict[i]
82
+ return ret
83
+
84
+
85
+ def _find_contraction(positions, input_sets, output_set):
86
+ """
87
+ Finds the contraction for a given set of input and output sets.
88
+
89
+ Parameters
90
+ ----------
91
+ positions : iterable
92
+ Integer positions of terms used in the contraction.
93
+ input_sets : list
94
+ List of sets that represent the lhs side of the einsum subscript
95
+ output_set : set
96
+ Set that represents the rhs side of the overall einsum subscript
97
+
98
+ Returns
99
+ -------
100
+ new_result : set
101
+ The indices of the resulting contraction
102
+ remaining : list
103
+ List of sets that have not been contracted, the new set is appended to
104
+ the end of this list
105
+ idx_removed : set
106
+ Indices removed from the entire contraction
107
+ idx_contraction : set
108
+ The indices used in the current contraction
109
+
110
+ Examples
111
+ --------
112
+
113
+ # A simple dot product test case
114
+ >>> pos = (0, 1)
115
+ >>> isets = [set('ab'), set('bc')]
116
+ >>> oset = set('ac')
117
+ >>> _find_contraction(pos, isets, oset)
118
+ ({'a', 'c'}, [{'a', 'c'}], {'b'}, {'a', 'b', 'c'})
119
+
120
+ # A more complex case with additional terms in the contraction
121
+ >>> pos = (0, 2)
122
+ >>> isets = [set('abd'), set('ac'), set('bdc')]
123
+ >>> oset = set('ac')
124
+ >>> _find_contraction(pos, isets, oset)
125
+ ({'a', 'c'}, [{'a', 'c'}, {'a', 'c'}], {'b', 'd'}, {'a', 'b', 'c', 'd'})
126
+ """
127
+
128
+ idx_contract = set()
129
+ idx_remain = output_set.copy()
130
+ remaining = []
131
+ for ind, value in enumerate(input_sets):
132
+ if ind in positions:
133
+ idx_contract |= value
134
+ else:
135
+ remaining.append(value)
136
+ idx_remain |= value
137
+
138
+ new_result = idx_remain & idx_contract
139
+ idx_removed = (idx_contract - new_result)
140
+ remaining.append(new_result)
141
+
142
+ return (new_result, remaining, idx_removed, idx_contract)
143
+
144
+
145
+ def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
146
+ """
147
+ Computes all possible pair contractions, sieves the results based
148
+ on ``memory_limit`` and returns the lowest cost path. This algorithm
149
+ scales factorial with respect to the elements in the list ``input_sets``.
150
+
151
+ Parameters
152
+ ----------
153
+ input_sets : list
154
+ List of sets that represent the lhs side of the einsum subscript
155
+ output_set : set
156
+ Set that represents the rhs side of the overall einsum subscript
157
+ idx_dict : dictionary
158
+ Dictionary of index sizes
159
+ memory_limit : int
160
+ The maximum number of elements in a temporary array
161
+
162
+ Returns
163
+ -------
164
+ path : list
165
+ The optimal contraction order within the memory limit constraint.
166
+
167
+ Examples
168
+ --------
169
+ >>> isets = [set('abd'), set('ac'), set('bdc')]
170
+ >>> oset = set()
171
+ >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
172
+ >>> _optimal_path(isets, oset, idx_sizes, 5000)
173
+ [(0, 2), (0, 1)]
174
+ """
175
+
176
+ full_results = [(0, [], input_sets)]
177
+ for iteration in range(len(input_sets) - 1):
178
+ iter_results = []
179
+
180
+ # Compute all unique pairs
181
+ for curr in full_results:
182
+ cost, positions, remaining = curr
183
+ for con in itertools.combinations(range(len(input_sets) - iteration), 2):
184
+
185
+ # Find the contraction
186
+ cont = _find_contraction(con, remaining, output_set)
187
+ new_result, new_input_sets, idx_removed, idx_contract = cont
188
+
189
+ # Sieve the results based on memory_limit
190
+ new_size = _compute_size_by_dict(new_result, idx_dict)
191
+ if new_size > memory_limit:
192
+ continue
193
+
194
+ # Build (total_cost, positions, indices_remaining)
195
+ total_cost = cost + _flop_count(idx_contract, idx_removed, len(con), idx_dict)
196
+ new_pos = positions + [con]
197
+ iter_results.append((total_cost, new_pos, new_input_sets))
198
+
199
+ # Update combinatorial list, if we did not find anything return best
200
+ # path + remaining contractions
201
+ if iter_results:
202
+ full_results = iter_results
203
+ else:
204
+ path = min(full_results, key=lambda x: x[0])[1]
205
+ path += [tuple(range(len(input_sets) - iteration))]
206
+ return path
207
+
208
+ # If we have not found anything return single einsum contraction
209
+ if len(full_results) == 0:
210
+ return [tuple(range(len(input_sets)))]
211
+
212
+ path = min(full_results, key=lambda x: x[0])[1]
213
+ return path
214
+
215
+ def _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost, naive_cost):
216
+ """Compute the cost (removed size + flops) and resultant indices for
217
+ performing the contraction specified by ``positions``.
218
+
219
+ Parameters
220
+ ----------
221
+ positions : tuple of int
222
+ The locations of the proposed tensors to contract.
223
+ input_sets : list of sets
224
+ The indices found on each tensors.
225
+ output_set : set
226
+ The output indices of the expression.
227
+ idx_dict : dict
228
+ Mapping of each index to its size.
229
+ memory_limit : int
230
+ The total allowed size for an intermediary tensor.
231
+ path_cost : int
232
+ The contraction cost so far.
233
+ naive_cost : int
234
+ The cost of the unoptimized expression.
235
+
236
+ Returns
237
+ -------
238
+ cost : (int, int)
239
+ A tuple containing the size of any indices removed, and the flop cost.
240
+ positions : tuple of int
241
+ The locations of the proposed tensors to contract.
242
+ new_input_sets : list of sets
243
+ The resulting new list of indices if this proposed contraction is performed.
244
+
245
+ """
246
+
247
+ # Find the contraction
248
+ contract = _find_contraction(positions, input_sets, output_set)
249
+ idx_result, new_input_sets, idx_removed, idx_contract = contract
250
+
251
+ # Sieve the results based on memory_limit
252
+ new_size = _compute_size_by_dict(idx_result, idx_dict)
253
+ if new_size > memory_limit:
254
+ return None
255
+
256
+ # Build sort tuple
257
+ old_sizes = (_compute_size_by_dict(input_sets[p], idx_dict) for p in positions)
258
+ removed_size = sum(old_sizes) - new_size
259
+
260
+ # NB: removed_size used to be just the size of any removed indices i.e.:
261
+ # helpers.compute_size_by_dict(idx_removed, idx_dict)
262
+ cost = _flop_count(idx_contract, idx_removed, len(positions), idx_dict)
263
+ sort = (-removed_size, cost)
264
+
265
+ # Sieve based on total cost as well
266
+ if (path_cost + cost) > naive_cost:
267
+ return None
268
+
269
+ # Add contraction to possible choices
270
+ return [sort, positions, new_input_sets]
271
+
272
+
273
+ def _update_other_results(results, best):
274
+ """Update the positions and provisional input_sets of ``results`` based on
275
+ performing the contraction result ``best``. Remove any involving the tensors
276
+ contracted.
277
+
278
+ Parameters
279
+ ----------
280
+ results : list
281
+ List of contraction results produced by ``_parse_possible_contraction``.
282
+ best : list
283
+ The best contraction of ``results`` i.e. the one that will be performed.
284
+
285
+ Returns
286
+ -------
287
+ mod_results : list
288
+ The list of modified results, updated with outcome of ``best`` contraction.
289
+ """
290
+
291
+ best_con = best[1]
292
+ bx, by = best_con
293
+ mod_results = []
294
+
295
+ for cost, (x, y), con_sets in results:
296
+
297
+ # Ignore results involving tensors just contracted
298
+ if x in best_con or y in best_con:
299
+ continue
300
+
301
+ # Update the input_sets
302
+ del con_sets[by - int(by > x) - int(by > y)]
303
+ del con_sets[bx - int(bx > x) - int(bx > y)]
304
+ con_sets.insert(-1, best[2][-1])
305
+
306
+ # Update the position indices
307
+ mod_con = x - int(x > bx) - int(x > by), y - int(y > bx) - int(y > by)
308
+ mod_results.append((cost, mod_con, con_sets))
309
+
310
+ return mod_results
311
+
312
def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
    """
    Finds the path by contracting the best pair until the input list is
    exhausted. The best pair is found by minimizing the tuple
    ``(-prod(indices_removed), cost)``. What this amounts to is prioritizing
    matrix multiplication or inner product operations, then Hadamard like
    operations, and finally outer operations. Outer products are limited by
    ``memory_limit``. This algorithm scales cubically with respect to the
    number of elements in the list ``input_sets``.

    Parameters
    ----------
    input_sets : list
        List of sets that represent the lhs side of the einsum subscript
    output_set : set
        Set that represents the rhs side of the overall einsum subscript
    idx_dict : dictionary
        Dictionary of index sizes
    memory_limit : int
        The maximum number of elements in a temporary array

    Returns
    -------
    path : list
        The greedy contraction order within the memory limit constraint.

    Examples
    --------
    >>> isets = [set('abd'), set('ac'), set('bdc')]
    >>> oset = set()
    >>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
    >>> _greedy_path(isets, oset, idx_sizes, 5000)
    [(0, 2), (0, 1)]
    """

    # Handle trivial cases that leaked through
    if len(input_sets) == 1:
        return [(0,)]
    elif len(input_sets) == 2:
        return [(0, 1)]

    # Build up a naive cost: the flop count of contracting everything at
    # once.  Candidate contractions costing more than this are rejected by
    # _parse_possible_contraction.
    contract = _find_contraction(range(len(input_sets)), input_sets, output_set)
    idx_result, new_input_sets, idx_removed, idx_contract = contract
    naive_cost = _flop_count(idx_contract, idx_removed, len(input_sets), idx_dict)

    # Initially iterate over all pairs
    comb_iter = itertools.combinations(range(len(input_sets)), 2)
    known_contractions = []

    path_cost = 0
    path = []

    # Each iteration contracts exactly one pair, so n-1 iterations at most.
    for iteration in range(len(input_sets) - 1):

        # Iterate over all pairs on first step, only previously found pairs on subsequent steps
        for positions in comb_iter:

            # Always initially ignore outer products (pairs sharing no index)
            if input_sets[positions[0]].isdisjoint(input_sets[positions[1]]):
                continue

            # result is None when the candidate breaks the memory limit or
            # cannot beat the naive cost.
            result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit, path_cost,
                                                 naive_cost)
            if result is not None:
                known_contractions.append(result)

        # If we do not have a inner contraction, rescan pairs including outer products
        if len(known_contractions) == 0:

            # Then check the outer products
            for positions in itertools.combinations(range(len(input_sets)), 2):
                result = _parse_possible_contraction(positions, input_sets, output_set, idx_dict, memory_limit,
                                                     path_cost, naive_cost)
                if result is not None:
                    known_contractions.append(result)

            # If we still did not find any remaining contractions, default back to einsum like behavior
            if len(known_contractions) == 0:
                path.append(tuple(range(len(input_sets))))
                break

        # Sort based on first index (the (-removed_size, cost) sort key)
        best = min(known_contractions, key=lambda x: x[0])

        # Now propagate as many unused contractions as possible to next iteration
        known_contractions = _update_other_results(known_contractions, best)

        # Next iteration only compute contractions with the new tensor
        # All other contractions have been accounted for
        input_sets = best[2]
        new_tensor_pos = len(input_sets) - 1
        comb_iter = ((i, new_tensor_pos) for i in range(new_tensor_pos))

        # Update path and total cost
        path.append(best[1])
        path_cost += best[0][1]

    return path
411
+
412
+
413
+ def _can_dot(inputs, result, idx_removed):
414
+ """
415
+ Checks if we can use BLAS (np.tensordot) call and its beneficial to do so.
416
+
417
+ Parameters
418
+ ----------
419
+ inputs : list of str
420
+ Specifies the subscripts for summation.
421
+ result : str
422
+ Resulting summation.
423
+ idx_removed : set
424
+ Indices that are removed in the summation
425
+
426
+
427
+ Returns
428
+ -------
429
+ type : bool
430
+ Returns true if BLAS should and can be used, else False
431
+
432
+ Notes
433
+ -----
434
+ If the operations is BLAS level 1 or 2 and is not already aligned
435
+ we default back to einsum as the memory movement to copy is more
436
+ costly than the operation itself.
437
+
438
+
439
+ Examples
440
+ --------
441
+
442
+ # Standard GEMM operation
443
+ >>> _can_dot(['ij', 'jk'], 'ik', set('j'))
444
+ True
445
+
446
+ # Can use the standard BLAS, but requires odd data movement
447
+ >>> _can_dot(['ijj', 'jk'], 'ik', set('j'))
448
+ False
449
+
450
+ # DDOT where the memory is not aligned
451
+ >>> _can_dot(['ijk', 'ikj'], '', set('ijk'))
452
+ False
453
+
454
+ """
455
+
456
+ # All `dot` calls remove indices
457
+ if len(idx_removed) == 0:
458
+ return False
459
+
460
+ # BLAS can only handle two operands
461
+ if len(inputs) != 2:
462
+ return False
463
+
464
+ input_left, input_right = inputs
465
+
466
+ for c in set(input_left + input_right):
467
+ # can't deal with repeated indices on same input or more than 2 total
468
+ nl, nr = input_left.count(c), input_right.count(c)
469
+ if (nl > 1) or (nr > 1) or (nl + nr > 2):
470
+ return False
471
+
472
+ # can't do implicit summation or dimension collapse e.g.
473
+ # "ab,bc->c" (implicitly sum over 'a')
474
+ # "ab,ca->ca" (take diagonal of 'a')
475
+ if nl + nr - 1 == int(c in result):
476
+ return False
477
+
478
+ # Build a few temporaries
479
+ set_left = set(input_left)
480
+ set_right = set(input_right)
481
+ keep_left = set_left - idx_removed
482
+ keep_right = set_right - idx_removed
483
+ rs = len(idx_removed)
484
+
485
+ # At this point we are a DOT, GEMV, or GEMM operation
486
+
487
+ # Handle inner products
488
+
489
+ # DDOT with aligned data
490
+ if input_left == input_right:
491
+ return True
492
+
493
+ # DDOT without aligned data (better to use einsum)
494
+ if set_left == set_right:
495
+ return False
496
+
497
+ # Handle the 4 possible (aligned) GEMV or GEMM cases
498
+
499
+ # GEMM or GEMV no transpose
500
+ if input_left[-rs:] == input_right[:rs]:
501
+ return True
502
+
503
+ # GEMM or GEMV transpose both
504
+ if input_left[:rs] == input_right[-rs:]:
505
+ return True
506
+
507
+ # GEMM or GEMV transpose right
508
+ if input_left[-rs:] == input_right[-rs:]:
509
+ return True
510
+
511
+ # GEMM or GEMV transpose left
512
+ if input_left[:rs] == input_right[:rs]:
513
+ return True
514
+
515
+ # Einsum is faster than GEMV if we have to copy data
516
+ if not keep_left or not keep_right:
517
+ return False
518
+
519
+ # We are a matrix-matrix product, but we need to copy data
520
+ return True
521
+
522
+
523
def _parse_einsum_input(operands):
    """
    A reproduction of einsum c side einsum parsing in python.

    Returns
    -------
    input_strings : str
        Parsed input strings
    output_string : str
        Parsed output string
    operands : list of array_like
        The operands to use in the numpy contraction

    Examples
    --------
    The operand list is simplified to reduce printing:

    >>> np.random.seed(123)
    >>> a = np.random.rand(4, 4)
    >>> b = np.random.rand(4, 4, 4)
    >>> _parse_einsum_input(('...a,...a->...', a, b))
    ('za,xza', 'xz', [a, b]) # may vary

    >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
    ('za,xza', 'xz', [a, b]) # may vary
    """

    if len(operands) == 0:
        raise ValueError("No input operands")

    # String calling convention: ('ij,jk->ik', a, b)
    if isinstance(operands[0], str):
        subscripts = operands[0].replace(" ", "")
        operands = [asanyarray(v) for v in operands[1:]]

        # Ensure all characters are valid
        for s in subscripts:
            if s in '.,->':
                continue
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)

    # Interleaved calling convention: (a, [0, 1], b, [1, 2], [0, 2]);
    # rebuild the equivalent subscript string from the integer lists.
    else:
        tmp_operands = list(operands)
        operand_list = []
        subscript_list = []
        for p in range(len(operands) // 2):
            operand_list.append(tmp_operands.pop(0))
            subscript_list.append(tmp_operands.pop(0))

        # An odd trailing item, if present, is the output subscript list.
        output_list = tmp_operands[-1] if len(tmp_operands) else None
        operands = [asanyarray(v) for v in operand_list]
        subscripts = ""
        last = len(subscript_list) - 1
        for num, sub in enumerate(subscript_list):
            for s in sub:
                if s is Ellipsis:
                    subscripts += "..."
                else:
                    try:
                        # Each entry must be an integer index into the
                        # symbol alphabet (or Ellipsis, handled above).
                        s = operator.index(s)
                    except TypeError as e:
                        raise TypeError("For this input type lists must contain "
                                        "either int or Ellipsis") from e
                    subscripts += einsum_symbols[s]
            if num != last:
                subscripts += ","

        if output_list is not None:
            subscripts += "->"
            for s in output_list:
                if s is Ellipsis:
                    subscripts += "..."
                else:
                    try:
                        s = operator.index(s)
                    except TypeError as e:
                        raise TypeError("For this input type lists must contain "
                                        "either int or Ellipsis") from e
                    subscripts += einsum_symbols[s]
    # Check for proper "->"
    if ("-" in subscripts) or (">" in subscripts):
        invalid = (subscripts.count("-") > 1) or (subscripts.count(">") > 1)
        if invalid or (subscripts.count("->") != 1):
            raise ValueError("Subscripts can only contain one '->'.")

    # Parse ellipses: replace each '...' with concrete symbols drawn from
    # the unused end of the alphabet so broadcast dimensions line up.
    if "." in subscripts:
        used = subscripts.replace(".", "").replace(",", "").replace("->", "")
        unused = list(einsum_symbols_set - set(used))
        ellipse_inds = "".join(unused)
        longest = 0

        if "->" in subscripts:
            input_tmp, output_sub = subscripts.split("->")
            split_subscripts = input_tmp.split(",")
            out_sub = True
        else:
            split_subscripts = subscripts.split(',')
            out_sub = False

        for num, sub in enumerate(split_subscripts):
            if "." in sub:
                if (sub.count(".") != 3) or (sub.count("...") != 1):
                    raise ValueError("Invalid Ellipses.")

                # Take into account numerical values (0-d operands consume
                # no broadcast dimensions).
                if operands[num].shape == ():
                    ellipse_count = 0
                else:
                    ellipse_count = max(operands[num].ndim, 1)
                ellipse_count -= (len(sub) - 3)

                if ellipse_count > longest:
                    longest = ellipse_count

                if ellipse_count < 0:
                    raise ValueError("Ellipses lengths do not match.")
                elif ellipse_count == 0:
                    split_subscripts[num] = sub.replace('...', '')
                else:
                    # Use the tail of the spare symbols so all operands
                    # share the same broadcast labels, right-aligned.
                    rep_inds = ellipse_inds[-ellipse_count:]
                    split_subscripts[num] = sub.replace('...', rep_inds)

        subscripts = ",".join(split_subscripts)
        if longest == 0:
            out_ellipse = ""
        else:
            out_ellipse = ellipse_inds[-longest:]

        if out_sub:
            subscripts += "->" + output_sub.replace("...", out_ellipse)
        else:
            # Special care for outputless ellipses: implicit output is the
            # broadcast labels followed by the sorted non-repeated labels.
            output_subscript = ""
            tmp_subscripts = subscripts.replace(",", "")
            for s in sorted(set(tmp_subscripts)):
                if s not in (einsum_symbols):
                    raise ValueError("Character %s is not a valid symbol." % s)
                if tmp_subscripts.count(s) == 1:
                    output_subscript += s
            normal_inds = ''.join(sorted(set(output_subscript) -
                                         set(out_ellipse)))

            subscripts += "->" + out_ellipse + normal_inds

    # Build output string if does not exist
    if "->" in subscripts:
        input_subscripts, output_subscript = subscripts.split("->")
    else:
        input_subscripts = subscripts
        # Build output subscripts: implicit mode keeps, in sorted order,
        # every label that appears exactly once across all inputs.
        tmp_subscripts = subscripts.replace(",", "")
        output_subscript = ""
        for s in sorted(set(tmp_subscripts)):
            if s not in einsum_symbols:
                raise ValueError("Character %s is not a valid symbol." % s)
            if tmp_subscripts.count(s) == 1:
                output_subscript += s

    # Make sure output subscripts are in the input
    for char in output_subscript:
        if char not in input_subscripts:
            raise ValueError("Output character %s did not appear in the input"
                             % char)

    # Make sure number operands is equivalent to the number of terms
    if len(input_subscripts.split(',')) != len(operands):
        raise ValueError("Number of einsum subscripts must be equal to the "
                         "number of operands.")

    return (input_subscripts, output_subscript, operands)
694
+
695
+
696
+ def _einsum_path_dispatcher(*operands, optimize=None, einsum_call=None):
697
+ # NOTE: technically, we should only dispatch on array-like arguments, not
698
+ # subscripts (given as strings). But separating operands into
699
+ # arrays/subscripts is a little tricky/slow (given einsum's two supported
700
+ # signatures), so as a practical shortcut we dispatch on everything.
701
+ # Strings will be ignored for dispatching since they don't define
702
+ # __array_function__.
703
+ return operands
704
+
705
+
706
@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
def einsum_path(*operands, optimize='greedy', einsum_call=False):
    """
    einsum_path(subscripts, *operands, optimize='greedy')

    Evaluates the lowest cost contraction order for an einsum expression by
    considering the creation of intermediate arrays.

    Parameters
    ----------
    subscripts : str
        Specifies the subscripts for summation.
    *operands : list of array_like
        These are the arrays for the operation.
    optimize : {bool, list, tuple, 'greedy', 'optimal'}
        Choose the type of path. If a tuple is provided, the second argument is
        assumed to be the maximum intermediate size created. If only a single
        argument is provided the largest input or output array size is used
        as a maximum intermediate size.

        * if a list is given that starts with ``einsum_path``, uses this as the
          contraction path
        * if False no optimization is taken
        * if True defaults to the 'greedy' algorithm
        * 'optimal' An algorithm that combinatorially explores all possible
          ways of contracting the listed tensors and chooses the least costly
          path. Scales exponentially with the number of terms in the
          contraction.
        * 'greedy' An algorithm that chooses the best pair contraction
          at each step. Effectively, this algorithm searches the largest inner,
          Hadamard, and then outer products at each step. Scales cubically with
          the number of terms in the contraction. Equivalent to the 'optimal'
          path for most contractions.

        Default is 'greedy'.

    Returns
    -------
    path : list of tuples
        A list representation of the einsum path.
    string_repr : str
        A printable representation of the einsum path.

    Notes
    -----
    The resulting path indicates which terms of the input contraction should be
    contracted first, the result of this contraction is then appended to the
    end of the contraction list. This list can then be iterated over until all
    intermediate contractions are complete.

    See Also
    --------
    einsum, linalg.multi_dot

    Examples
    --------

    We can begin with a chain dot example. In this case, it is optimal to
    contract the ``b`` and ``c`` tensors first as represented by the first
    element of the path ``(1, 2)``. The resulting tensor is added to the end
    of the contraction and the remaining contraction ``(0, 1)`` is then
    completed.

    >>> np.random.seed(123)
    >>> a = np.random.rand(2, 2)
    >>> b = np.random.rand(2, 5)
    >>> c = np.random.rand(5, 2)
    >>> path_info = np.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
    >>> print(path_info[0])
    ['einsum_path', (1, 2), (0, 1)]
    >>> print(path_info[1])
      Complete contraction:  ij,jk,kl->il # may vary
             Naive scaling:  4
         Optimized scaling:  3
          Naive FLOP count:  1.600e+02
      Optimized FLOP count:  5.600e+01
       Theoretical speedup:  2.857
      Largest intermediate:  4.000e+00 elements
    -------------------------------------------------------------------------
    scaling                  current                                remaining
    -------------------------------------------------------------------------
       3                   kl,jk->jl                                ij,jl->il
       3                   jl,ij->il                                   il->il


    A more complex index transformation example.

    >>> I = np.random.rand(10, 10, 10, 10)
    >>> C = np.random.rand(10, 10)
    >>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
    ...                            optimize='greedy')

    >>> print(path_info[0])
    ['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
    >>> print(path_info[1])
      Complete contraction:  ea,fb,abcd,gc,hd->efgh # may vary
             Naive scaling:  8
         Optimized scaling:  5
          Naive FLOP count:  8.000e+08
      Optimized FLOP count:  8.000e+05
       Theoretical speedup:  1000.000
      Largest intermediate:  1.000e+04 elements
    --------------------------------------------------------------------------
    scaling        current                                remaining
    --------------------------------------------------------------------------
       5           abcd,ea->bcde                    fb,gc,hd,bcde->efgh
       5           bcde,fb->cdef                        gc,hd,cdef->efgh
       5           cdef,gc->defg                           hd,defg->efgh
       5           defg,hd->efgh                              efgh->efgh
    """

    # Figure out what the path really is
    path_type = optimize
    if path_type is True:
        path_type = 'greedy'
    if path_type is None:
        path_type = False

    explicit_einsum_path = False
    memory_limit = None

    # No optimization or a named path algorithm
    if (path_type is False) or isinstance(path_type, str):
        pass

    # Given an explicit path
    elif len(path_type) and (path_type[0] == 'einsum_path'):
        explicit_einsum_path = True

    # Path tuple with memory limit
    elif ((len(path_type) == 2) and isinstance(path_type[0], str) and
            isinstance(path_type[1], (int, float))):
        memory_limit = int(path_type[1])
        path_type = path_type[0]

    else:
        raise TypeError("Did not understand the path: %s" % str(path_type))

    # Hidden option, only einsum should call this
    einsum_call_arg = einsum_call

    # Python side parsing
    input_subscripts, output_subscript, operands = _parse_einsum_input(operands)

    # Build a few useful list and sets
    input_list = input_subscripts.split(',')
    input_sets = [set(x) for x in input_list]
    output_set = set(output_subscript)
    indices = set(input_subscripts.replace(',', ''))

    # Get length of each unique dimension and ensure all dimensions are correct
    dimension_dict = {}
    broadcast_indices = [[] for x in range(len(input_list))]
    for tnum, term in enumerate(input_list):
        sh = operands[tnum].shape
        if len(sh) != len(term):
            # BUG FIX: report the operand's own term; previously this indexed
            # the full subscript string (input_subscripts[tnum]), which shows
            # a single unrelated character in the error message.
            raise ValueError("Einstein sum subscript %s does not contain the "
                             "correct number of indices for operand %d."
                             % (input_list[tnum], tnum))
        for cnum, char in enumerate(term):
            dim = sh[cnum]

            # Build out broadcast indices
            if dim == 1:
                broadcast_indices[tnum].append(char)

            if char in dimension_dict.keys():
                # For broadcasting cases we always want the largest dim size
                if dimension_dict[char] == 1:
                    dimension_dict[char] = dim
                elif dim not in (1, dimension_dict[char]):
                    raise ValueError("Size of label '%s' for operand %d (%d) "
                                     "does not match previous terms (%d)."
                                     % (char, tnum, dimension_dict[char], dim))
            else:
                dimension_dict[char] = dim

    # Convert broadcast inds to sets
    broadcast_indices = [set(x) for x in broadcast_indices]

    # Compute size of each input array plus the output array
    size_list = [_compute_size_by_dict(term, dimension_dict)
                 for term in input_list + [output_subscript]]
    max_size = max(size_list)

    if memory_limit is None:
        memory_arg = max_size
    else:
        memory_arg = memory_limit

    # Compute naive cost
    # This isn't quite right, need to look into exactly how einsum does this
    inner_product = (sum(len(x) for x in input_sets) - len(indices)) > 0
    naive_cost = _flop_count(indices, inner_product, len(input_list), dimension_dict)

    # Compute the path
    if explicit_einsum_path:
        path = path_type[1:]
    elif (
        (path_type is False)
        or (len(input_list) in [1, 2])
        or (indices == output_set)
    ):
        # Nothing to be optimized, leave it to einsum
        path = [tuple(range(len(input_list)))]
    elif path_type == "greedy":
        path = _greedy_path(input_sets, output_set, dimension_dict, memory_arg)
    elif path_type == "optimal":
        path = _optimal_path(input_sets, output_set, dimension_dict, memory_arg)
    else:
        # BUG FIX: format the message with '%' instead of passing the format
        # string and argument as two separate exception args, which produced
        # an unformatted tuple as the error message.
        raise KeyError("Path name %s not found" % path_type)

    cost_list, scale_list, size_list, contraction_list = [], [], [], []

    # Build contraction tuple (positions, gemm, einsum_str, remaining)
    for cnum, contract_inds in enumerate(path):
        # Make sure we remove inds from right to left
        contract_inds = tuple(sorted(list(contract_inds), reverse=True))

        contract = _find_contraction(contract_inds, input_sets, output_set)
        out_inds, input_sets, idx_removed, idx_contract = contract

        cost = _flop_count(idx_contract, idx_removed, len(contract_inds), dimension_dict)
        cost_list.append(cost)
        scale_list.append(len(idx_contract))
        size_list.append(_compute_size_by_dict(out_inds, dimension_dict))

        bcast = set()
        tmp_inputs = []
        for x in contract_inds:
            tmp_inputs.append(input_list.pop(x))
            bcast |= broadcast_indices.pop(x)

        new_bcast_inds = bcast - idx_removed

        # If we're broadcasting, nix blas
        if not len(idx_removed & bcast):
            do_blas = _can_dot(tmp_inputs, out_inds, idx_removed)
        else:
            do_blas = False

        # Last contraction
        if (cnum - len(path)) == -1:
            idx_result = output_subscript
        else:
            # Order intermediate labels by dimension size for cache locality
            sort_result = [(dimension_dict[ind], ind) for ind in out_inds]
            idx_result = "".join([x[1] for x in sorted(sort_result)])

        input_list.append(idx_result)
        broadcast_indices.append(new_bcast_inds)
        einsum_str = ",".join(tmp_inputs) + "->" + idx_result

        contraction = (contract_inds, idx_removed, einsum_str, input_list[:], do_blas)
        contraction_list.append(contraction)

    # +1 avoids a zero division in the speedup computation below
    opt_cost = sum(cost_list) + 1

    if len(input_list) != 1:
        # Explicit "einsum_path" is usually trusted, but we detect this kind of
        # mistake in order to prevent from returning an intermediate value.
        raise RuntimeError(
            "Invalid einsum_path is specified: {} more operands has to be "
            "contracted.".format(len(input_list) - 1))

    if einsum_call_arg:
        return (operands, contraction_list)

    # Return the path along with a nice string representation
    overall_contraction = input_subscripts + "->" + output_subscript
    header = ("scaling", "current", "remaining")

    speedup = naive_cost / opt_cost
    max_i = max(size_list)

    path_print = "  Complete contraction:  %s\n" % overall_contraction
    path_print += "         Naive scaling:  %d\n" % len(indices)
    path_print += "     Optimized scaling:  %d\n" % max(scale_list)
    path_print += "      Naive FLOP count:  %.3e\n" % naive_cost
    path_print += "  Optimized FLOP count:  %.3e\n" % opt_cost
    path_print += "   Theoretical speedup:  %3.3f\n" % speedup
    path_print += "  Largest intermediate:  %.3e elements\n" % max_i
    path_print += "-" * 74 + "\n"
    path_print += "%6s %24s %40s\n" % header
    path_print += "-" * 74

    for n, contraction in enumerate(contraction_list):
        inds, idx_rm, einsum_str, remaining, blas = contraction
        remaining_str = ",".join(remaining) + "->" + output_subscript
        path_run = (scale_list[n], einsum_str, remaining_str)
        path_print += "\n%4d    %24s %40s" % path_run

    path = ['einsum_path'] + path
    return (path, path_print)
999
+
1000
+
1001
+ def _einsum_dispatcher(*operands, out=None, optimize=None, **kwargs):
1002
+ # Arguably we dispatch on more arguments than we really should; see note in
1003
+ # _einsum_path_dispatcher for why.
1004
+ yield from operands
1005
+ yield out
1006
+
1007
+
1008
+ # Rewrite einsum to handle different cases
1009
+ @array_function_dispatch(_einsum_dispatcher, module='numpy')
1010
+ def einsum(*operands, out=None, optimize=False, **kwargs):
1011
+ """
1012
+ einsum(subscripts, *operands, out=None, dtype=None, order='K',
1013
+ casting='safe', optimize=False)
1014
+
1015
+ Evaluates the Einstein summation convention on the operands.
1016
+
1017
+ Using the Einstein summation convention, many common multi-dimensional,
1018
+ linear algebraic array operations can be represented in a simple fashion.
1019
+ In *implicit* mode `einsum` computes these values.
1020
+
1021
+ In *explicit* mode, `einsum` provides further flexibility to compute
1022
+ other array operations that might not be considered classical Einstein
1023
+ summation operations, by disabling, or forcing summation over specified
1024
+ subscript labels.
1025
+
1026
+ See the notes and examples for clarification.
1027
+
1028
+ Parameters
1029
+ ----------
1030
+ subscripts : str
1031
+ Specifies the subscripts for summation as comma separated list of
1032
+ subscript labels. An implicit (classical Einstein summation)
1033
+ calculation is performed unless the explicit indicator '->' is
1034
+ included as well as subscript labels of the precise output form.
1035
+ operands : list of array_like
1036
+ These are the arrays for the operation.
1037
+ out : ndarray, optional
1038
+ If provided, the calculation is done into this array.
1039
+ dtype : {data-type, None}, optional
1040
+ If provided, forces the calculation to use the data type specified.
1041
+ Note that you may have to also give a more liberal `casting`
1042
+ parameter to allow the conversions. Default is None.
1043
+ order : {'C', 'F', 'A', 'K'}, optional
1044
+ Controls the memory layout of the output. 'C' means it should
1045
+ be C contiguous. 'F' means it should be Fortran contiguous,
1046
+ 'A' means it should be 'F' if the inputs are all 'F', 'C' otherwise.
1047
+ 'K' means it should be as close to the layout as the inputs as
1048
+ is possible, including arbitrarily permuted axes.
1049
+ Default is 'K'.
1050
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
1051
+ Controls what kind of data casting may occur. Setting this to
1052
+ 'unsafe' is not recommended, as it can adversely affect accumulations.
1053
+
1054
+ * 'no' means the data types should not be cast at all.
1055
+ * 'equiv' means only byte-order changes are allowed.
1056
+ * 'safe' means only casts which can preserve values are allowed.
1057
+ * 'same_kind' means only safe casts or casts within a kind,
1058
+ like float64 to float32, are allowed.
1059
+ * 'unsafe' means any data conversions may be done.
1060
+
1061
+ Default is 'safe'.
1062
+ optimize : {False, True, 'greedy', 'optimal'}, optional
1063
+ Controls if intermediate optimization should occur. No optimization
1064
+ will occur if False and True will default to the 'greedy' algorithm.
1065
+ Also accepts an explicit contraction list from the ``np.einsum_path``
1066
+ function. See ``np.einsum_path`` for more details. Defaults to False.
1067
+
1068
+ Returns
1069
+ -------
1070
+ output : ndarray
1071
+ The calculation based on the Einstein summation convention.
1072
+
1073
+ See Also
1074
+ --------
1075
+ einsum_path, dot, inner, outer, tensordot, linalg.multi_dot
1076
+ einops :
1077
+ similar verbose interface is provided by
1078
+ `einops <https://github.com/arogozhnikov/einops>`_ package to cover
1079
+ additional operations: transpose, reshape/flatten, repeat/tile,
1080
+ squeeze/unsqueeze and reductions.
1081
+ opt_einsum :
1082
+ `opt_einsum <https://optimized-einsum.readthedocs.io/en/stable/>`_
1083
+ optimizes contraction order for einsum-like expressions
1084
+ in backend-agnostic manner.
1085
+
1086
+ Notes
1087
+ -----
1088
+ .. versionadded:: 1.6.0
1089
+
1090
+ The Einstein summation convention can be used to compute
1091
+ many multi-dimensional, linear algebraic array operations. `einsum`
1092
+ provides a succinct way of representing these.
1093
+
1094
+ A non-exhaustive list of these operations,
1095
+ which can be computed by `einsum`, is shown below along with examples:
1096
+
1097
+ * Trace of an array, :py:func:`numpy.trace`.
1098
+ * Return a diagonal, :py:func:`numpy.diag`.
1099
+ * Array axis summations, :py:func:`numpy.sum`.
1100
+ * Transpositions and permutations, :py:func:`numpy.transpose`.
1101
+ * Matrix multiplication and dot product, :py:func:`numpy.matmul` :py:func:`numpy.dot`.
1102
+ * Vector inner and outer products, :py:func:`numpy.inner` :py:func:`numpy.outer`.
1103
+ * Broadcasting, element-wise and scalar multiplication, :py:func:`numpy.multiply`.
1104
+ * Tensor contractions, :py:func:`numpy.tensordot`.
1105
+ * Chained array operations, in efficient calculation order, :py:func:`numpy.einsum_path`.
1106
+
1107
+ The subscripts string is a comma-separated list of subscript labels,
1108
+ where each label refers to a dimension of the corresponding operand.
1109
+ Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
1110
+ is equivalent to :py:func:`np.inner(a,b) <numpy.inner>`. If a label
1111
+ appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
1112
+ view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
1113
+ describes traditional matrix multiplication and is equivalent to
1114
+ :py:func:`np.matmul(a,b) <numpy.matmul>`. Repeated subscript labels in one
1115
+ operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
1116
+ to :py:func:`np.trace(a) <numpy.trace>`.
1117
+
1118
+ In *implicit mode*, the chosen subscripts are important
1119
+ since the axes of the output are reordered alphabetically. This
1120
+ means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
1121
+ ``np.einsum('ji', a)`` takes its transpose. Additionally,
1122
+ ``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
1123
+ ``np.einsum('ij,jh', a, b)`` returns the transpose of the
1124
+ multiplication since subscript 'h' precedes subscript 'i'.
1125
+
1126
+ In *explicit mode* the output can be directly controlled by
1127
+ specifying output subscript labels. This requires the
1128
+ identifier '->' as well as the list of output subscript labels.
1129
+ This feature increases the flexibility of the function since
1130
+ summing can be disabled or forced when required. The call
1131
+ ``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <numpy.sum>`,
1132
+ and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <numpy.diag>`.
1133
+ The difference is that `einsum` does not allow broadcasting by default.
1134
+ Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
1135
+ order of the output subscript labels and therefore returns matrix
1136
+ multiplication, unlike the example above in implicit mode.
1137
+
1138
+ To enable and control broadcasting, use an ellipsis. Default
1139
+ NumPy-style broadcasting is done by adding an ellipsis
1140
+ to the left of each term, like ``np.einsum('...ii->...i', a)``.
1141
+ To take the trace along the first and last axes,
1142
+ you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
1143
+ product with the left-most indices instead of rightmost, one can do
1144
+ ``np.einsum('ij...,jk...->ik...', a, b)``.
1145
+
1146
+ When there is only one operand, no axes are summed, and no output
1147
+ parameter is provided, a view into the operand is returned instead
1148
+ of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
1149
+ produces a view (changed in version 1.10.0).
1150
+
1151
+ `einsum` also provides an alternative way to provide the subscripts
1152
+ and operands as ``einsum(op0, sublist0, op1, sublist1, ..., [sublistout])``.
1153
+ If the output shape is not provided in this format `einsum` will be
1154
+ calculated in implicit mode, otherwise it will be performed explicitly.
1155
+ The examples below have corresponding `einsum` calls with the two
1156
+ parameter methods.
1157
+
1158
+ .. versionadded:: 1.10.0
1159
+
1160
+ Views returned from einsum are now writeable whenever the input array
1161
+ is writeable. For example, ``np.einsum('ijk...->kji...', a)`` will now
1162
+ have the same effect as :py:func:`np.swapaxes(a, 0, 2) <numpy.swapaxes>`
1163
+ and ``np.einsum('ii->i', a)`` will return a writeable view of the diagonal
1164
+ of a 2D array.
1165
+
1166
+ .. versionadded:: 1.12.0
1167
+
1168
+ Added the ``optimize`` argument which will optimize the contraction order
1169
+ of an einsum expression. For a contraction with three or more operands this
1170
+ can greatly increase the computational efficiency at the cost of a larger
1171
+ memory footprint during computation.
1172
+
1173
+ Typically a 'greedy' algorithm is applied which empirical tests have shown
1174
+ returns the optimal path in the majority of cases. In some cases 'optimal'
1175
+ will return the superlative path through a more expensive, exhaustive search.
1176
+ For iterative calculations it may be advisable to calculate the optimal path
1177
+ once and reuse that path by supplying it as an argument. An example is given
1178
+ below.
1179
+
1180
+ See :py:func:`numpy.einsum_path` for more details.
1181
+
1182
+ Examples
1183
+ --------
1184
+ >>> a = np.arange(25).reshape(5,5)
1185
+ >>> b = np.arange(5)
1186
+ >>> c = np.arange(6).reshape(2,3)
1187
+
1188
+ Trace of a matrix:
1189
+
1190
+ >>> np.einsum('ii', a)
1191
+ 60
1192
+ >>> np.einsum(a, [0,0])
1193
+ 60
1194
+ >>> np.trace(a)
1195
+ 60
1196
+
1197
+ Extract the diagonal (requires explicit form):
1198
+
1199
+ >>> np.einsum('ii->i', a)
1200
+ array([ 0, 6, 12, 18, 24])
1201
+ >>> np.einsum(a, [0,0], [0])
1202
+ array([ 0, 6, 12, 18, 24])
1203
+ >>> np.diag(a)
1204
+ array([ 0, 6, 12, 18, 24])
1205
+
1206
+ Sum over an axis (requires explicit form):
1207
+
1208
+ >>> np.einsum('ij->i', a)
1209
+ array([ 10, 35, 60, 85, 110])
1210
+ >>> np.einsum(a, [0,1], [0])
1211
+ array([ 10, 35, 60, 85, 110])
1212
+ >>> np.sum(a, axis=1)
1213
+ array([ 10, 35, 60, 85, 110])
1214
+
1215
+ For higher dimensional arrays summing a single axis can be done with ellipsis:
1216
+
1217
+ >>> np.einsum('...j->...', a)
1218
+ array([ 10, 35, 60, 85, 110])
1219
+ >>> np.einsum(a, [Ellipsis,1], [Ellipsis])
1220
+ array([ 10, 35, 60, 85, 110])
1221
+
1222
+ Compute a matrix transpose, or reorder any number of axes:
1223
+
1224
+ >>> np.einsum('ji', c)
1225
+ array([[0, 3],
1226
+ [1, 4],
1227
+ [2, 5]])
1228
+ >>> np.einsum('ij->ji', c)
1229
+ array([[0, 3],
1230
+ [1, 4],
1231
+ [2, 5]])
1232
+ >>> np.einsum(c, [1,0])
1233
+ array([[0, 3],
1234
+ [1, 4],
1235
+ [2, 5]])
1236
+ >>> np.transpose(c)
1237
+ array([[0, 3],
1238
+ [1, 4],
1239
+ [2, 5]])
1240
+
1241
+ Vector inner products:
1242
+
1243
+ >>> np.einsum('i,i', b, b)
1244
+ 30
1245
+ >>> np.einsum(b, [0], b, [0])
1246
+ 30
1247
+ >>> np.inner(b,b)
1248
+ 30
1249
+
1250
+ Matrix vector multiplication:
1251
+
1252
+ >>> np.einsum('ij,j', a, b)
1253
+ array([ 30, 80, 130, 180, 230])
1254
+ >>> np.einsum(a, [0,1], b, [1])
1255
+ array([ 30, 80, 130, 180, 230])
1256
+ >>> np.dot(a, b)
1257
+ array([ 30, 80, 130, 180, 230])
1258
+ >>> np.einsum('...j,j', a, b)
1259
+ array([ 30, 80, 130, 180, 230])
1260
+
1261
+ Broadcasting and scalar multiplication:
1262
+
1263
+ >>> np.einsum('..., ...', 3, c)
1264
+ array([[ 0, 3, 6],
1265
+ [ 9, 12, 15]])
1266
+ >>> np.einsum(',ij', 3, c)
1267
+ array([[ 0, 3, 6],
1268
+ [ 9, 12, 15]])
1269
+ >>> np.einsum(3, [Ellipsis], c, [Ellipsis])
1270
+ array([[ 0, 3, 6],
1271
+ [ 9, 12, 15]])
1272
+ >>> np.multiply(3, c)
1273
+ array([[ 0, 3, 6],
1274
+ [ 9, 12, 15]])
1275
+
1276
+ Vector outer product:
1277
+
1278
+ >>> np.einsum('i,j', np.arange(2)+1, b)
1279
+ array([[0, 1, 2, 3, 4],
1280
+ [0, 2, 4, 6, 8]])
1281
+ >>> np.einsum(np.arange(2)+1, [0], b, [1])
1282
+ array([[0, 1, 2, 3, 4],
1283
+ [0, 2, 4, 6, 8]])
1284
+ >>> np.outer(np.arange(2)+1, b)
1285
+ array([[0, 1, 2, 3, 4],
1286
+ [0, 2, 4, 6, 8]])
1287
+
1288
+ Tensor contraction:
1289
+
1290
+ >>> a = np.arange(60.).reshape(3,4,5)
1291
+ >>> b = np.arange(24.).reshape(4,3,2)
1292
+ >>> np.einsum('ijk,jil->kl', a, b)
1293
+ array([[4400., 4730.],
1294
+ [4532., 4874.],
1295
+ [4664., 5018.],
1296
+ [4796., 5162.],
1297
+ [4928., 5306.]])
1298
+ >>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
1299
+ array([[4400., 4730.],
1300
+ [4532., 4874.],
1301
+ [4664., 5018.],
1302
+ [4796., 5162.],
1303
+ [4928., 5306.]])
1304
+ >>> np.tensordot(a,b, axes=([1,0],[0,1]))
1305
+ array([[4400., 4730.],
1306
+ [4532., 4874.],
1307
+ [4664., 5018.],
1308
+ [4796., 5162.],
1309
+ [4928., 5306.]])
1310
+
1311
+ Writeable returned arrays (since version 1.10.0):
1312
+
1313
+ >>> a = np.zeros((3, 3))
1314
+ >>> np.einsum('ii->i', a)[:] = 1
1315
+ >>> a
1316
+ array([[1., 0., 0.],
1317
+ [0., 1., 0.],
1318
+ [0., 0., 1.]])
1319
+
1320
+ Example of ellipsis use:
1321
+
1322
+ >>> a = np.arange(6).reshape((3,2))
1323
+ >>> b = np.arange(12).reshape((4,3))
1324
+ >>> np.einsum('ki,jk->ij', a, b)
1325
+ array([[10, 28, 46, 64],
1326
+ [13, 40, 67, 94]])
1327
+ >>> np.einsum('ki,...k->i...', a, b)
1328
+ array([[10, 28, 46, 64],
1329
+ [13, 40, 67, 94]])
1330
+ >>> np.einsum('k...,jk', a, b)
1331
+ array([[10, 28, 46, 64],
1332
+ [13, 40, 67, 94]])
1333
+
1334
+ Chained array operations. For more complicated contractions, speed ups
1335
+ might be achieved by repeatedly computing a 'greedy' path or pre-computing the
1336
+ 'optimal' path and repeatedly applying it, using an
1337
+ `einsum_path` insertion (since version 1.12.0). Performance improvements can be
1338
+ particularly significant with larger arrays:
1339
+
1340
+ >>> a = np.ones(64).reshape(2,4,8)
1341
+
1342
+ Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.)
1343
+
1344
+ >>> for iteration in range(500):
1345
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
1346
+
1347
+ Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
1348
+
1349
+ >>> for iteration in range(500):
1350
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
1351
+
1352
+ Greedy `einsum` (faster optimal path approximation): ~160ms
1353
+
1354
+ >>> for iteration in range(500):
1355
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
1356
+
1357
+ Optimal `einsum` (best usage pattern in some use cases): ~110ms
1358
+
1359
+ >>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
1360
+ >>> for iteration in range(500):
1361
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
1362
+
1363
+ """
1364
+ # Special handling if out is specified
1365
+ specified_out = out is not None
1366
+
1367
+ # If no optimization, run pure einsum
1368
+ if optimize is False:
1369
+ if specified_out:
1370
+ kwargs['out'] = out
1371
+ return c_einsum(*operands, **kwargs)
1372
+
1373
+ # Check the kwargs to avoid a more cryptic error later, without having to
1374
+ # repeat default values here
1375
+ valid_einsum_kwargs = ['dtype', 'order', 'casting']
1376
+ unknown_kwargs = [k for (k, v) in kwargs.items() if
1377
+ k not in valid_einsum_kwargs]
1378
+ if len(unknown_kwargs):
1379
+ raise TypeError("Did not understand the following kwargs: %s"
1380
+ % unknown_kwargs)
1381
+
1382
+ # Build the contraction list and operand
1383
+ operands, contraction_list = einsum_path(*operands, optimize=optimize,
1384
+ einsum_call=True)
1385
+
1386
+ # Handle order kwarg for output array, c_einsum allows mixed case
1387
+ output_order = kwargs.pop('order', 'K')
1388
+ if output_order.upper() == 'A':
1389
+ if all(arr.flags.f_contiguous for arr in operands):
1390
+ output_order = 'F'
1391
+ else:
1392
+ output_order = 'C'
1393
+
1394
+ # Start contraction loop
1395
+ for num, contraction in enumerate(contraction_list):
1396
+ inds, idx_rm, einsum_str, remaining, blas = contraction
1397
+ tmp_operands = [operands.pop(x) for x in inds]
1398
+
1399
+ # Do we need to deal with the output?
1400
+ handle_out = specified_out and ((num + 1) == len(contraction_list))
1401
+
1402
+ # Call tensordot if still possible
1403
+ if blas:
1404
+ # Checks have already been handled
1405
+ input_str, results_index = einsum_str.split('->')
1406
+ input_left, input_right = input_str.split(',')
1407
+
1408
+ tensor_result = input_left + input_right
1409
+ for s in idx_rm:
1410
+ tensor_result = tensor_result.replace(s, "")
1411
+
1412
+ # Find indices to contract over
1413
+ left_pos, right_pos = [], []
1414
+ for s in sorted(idx_rm):
1415
+ left_pos.append(input_left.find(s))
1416
+ right_pos.append(input_right.find(s))
1417
+
1418
+ # Contract!
1419
+ new_view = tensordot(*tmp_operands, axes=(tuple(left_pos), tuple(right_pos)))
1420
+
1421
+ # Build a new view if needed
1422
+ if (tensor_result != results_index) or handle_out:
1423
+ if handle_out:
1424
+ kwargs["out"] = out
1425
+ new_view = c_einsum(tensor_result + '->' + results_index, new_view, **kwargs)
1426
+
1427
+ # Call einsum
1428
+ else:
1429
+ # If out was specified
1430
+ if handle_out:
1431
+ kwargs["out"] = out
1432
+
1433
+ # Do the contraction
1434
+ new_view = c_einsum(einsum_str, *tmp_operands, **kwargs)
1435
+
1436
+ # Append new items and dereference what we can
1437
+ operands.append(new_view)
1438
+ del tmp_operands, new_view
1439
+
1440
+ if specified_out:
1441
+ return out
1442
+ else:
1443
+ return asanyarray(operands[0], order=output_order)
venv/lib/python3.10/site-packages/numpy/core/einsumfunc.pyi ADDED
@@ -0,0 +1,187 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Sequence
2
+ from typing import TypeVar, Any, overload, Union, Literal
3
+
4
+ from numpy import (
5
+ ndarray,
6
+ dtype,
7
+ bool_,
8
+ number,
9
+ _OrderKACF,
10
+ )
11
+ from numpy._typing import (
12
+ _ArrayLikeBool_co,
13
+ _ArrayLikeUInt_co,
14
+ _ArrayLikeInt_co,
15
+ _ArrayLikeFloat_co,
16
+ _ArrayLikeComplex_co,
17
+ _ArrayLikeObject_co,
18
+ _DTypeLikeBool,
19
+ _DTypeLikeUInt,
20
+ _DTypeLikeInt,
21
+ _DTypeLikeFloat,
22
+ _DTypeLikeComplex,
23
+ _DTypeLikeComplex_co,
24
+ _DTypeLikeObject,
25
+ )
26
+
27
+ _ArrayType = TypeVar(
28
+ "_ArrayType",
29
+ bound=ndarray[Any, dtype[Union[bool_, number[Any]]]],
30
+ )
31
+
32
+ _OptimizeKind = None | bool | Literal["greedy", "optimal"] | Sequence[Any]
33
+ _CastingSafe = Literal["no", "equiv", "safe", "same_kind"]
34
+ _CastingUnsafe = Literal["unsafe"]
35
+
36
+ __all__: list[str]
37
+
38
+ # TODO: Properly handle the `casting`-based combinatorics
39
+ # TODO: We need to evaluate the content `__subscripts` in order
40
+ # to identify whether or an array or scalar is returned. At a cursory
41
+ # glance this seems like something that can quite easily be done with
42
+ # a mypy plugin.
43
+ # Something like `is_scalar = bool(__subscripts.partition("->")[-1])`
44
+ @overload
45
+ def einsum(
46
+ subscripts: str | _ArrayLikeInt_co,
47
+ /,
48
+ *operands: _ArrayLikeBool_co,
49
+ out: None = ...,
50
+ dtype: None | _DTypeLikeBool = ...,
51
+ order: _OrderKACF = ...,
52
+ casting: _CastingSafe = ...,
53
+ optimize: _OptimizeKind = ...,
54
+ ) -> Any: ...
55
+ @overload
56
+ def einsum(
57
+ subscripts: str | _ArrayLikeInt_co,
58
+ /,
59
+ *operands: _ArrayLikeUInt_co,
60
+ out: None = ...,
61
+ dtype: None | _DTypeLikeUInt = ...,
62
+ order: _OrderKACF = ...,
63
+ casting: _CastingSafe = ...,
64
+ optimize: _OptimizeKind = ...,
65
+ ) -> Any: ...
66
+ @overload
67
+ def einsum(
68
+ subscripts: str | _ArrayLikeInt_co,
69
+ /,
70
+ *operands: _ArrayLikeInt_co,
71
+ out: None = ...,
72
+ dtype: None | _DTypeLikeInt = ...,
73
+ order: _OrderKACF = ...,
74
+ casting: _CastingSafe = ...,
75
+ optimize: _OptimizeKind = ...,
76
+ ) -> Any: ...
77
+ @overload
78
+ def einsum(
79
+ subscripts: str | _ArrayLikeInt_co,
80
+ /,
81
+ *operands: _ArrayLikeFloat_co,
82
+ out: None = ...,
83
+ dtype: None | _DTypeLikeFloat = ...,
84
+ order: _OrderKACF = ...,
85
+ casting: _CastingSafe = ...,
86
+ optimize: _OptimizeKind = ...,
87
+ ) -> Any: ...
88
+ @overload
89
+ def einsum(
90
+ subscripts: str | _ArrayLikeInt_co,
91
+ /,
92
+ *operands: _ArrayLikeComplex_co,
93
+ out: None = ...,
94
+ dtype: None | _DTypeLikeComplex = ...,
95
+ order: _OrderKACF = ...,
96
+ casting: _CastingSafe = ...,
97
+ optimize: _OptimizeKind = ...,
98
+ ) -> Any: ...
99
+ @overload
100
+ def einsum(
101
+ subscripts: str | _ArrayLikeInt_co,
102
+ /,
103
+ *operands: Any,
104
+ casting: _CastingUnsafe,
105
+ dtype: None | _DTypeLikeComplex_co = ...,
106
+ out: None = ...,
107
+ order: _OrderKACF = ...,
108
+ optimize: _OptimizeKind = ...,
109
+ ) -> Any: ...
110
+ @overload
111
+ def einsum(
112
+ subscripts: str | _ArrayLikeInt_co,
113
+ /,
114
+ *operands: _ArrayLikeComplex_co,
115
+ out: _ArrayType,
116
+ dtype: None | _DTypeLikeComplex_co = ...,
117
+ order: _OrderKACF = ...,
118
+ casting: _CastingSafe = ...,
119
+ optimize: _OptimizeKind = ...,
120
+ ) -> _ArrayType: ...
121
+ @overload
122
+ def einsum(
123
+ subscripts: str | _ArrayLikeInt_co,
124
+ /,
125
+ *operands: Any,
126
+ out: _ArrayType,
127
+ casting: _CastingUnsafe,
128
+ dtype: None | _DTypeLikeComplex_co = ...,
129
+ order: _OrderKACF = ...,
130
+ optimize: _OptimizeKind = ...,
131
+ ) -> _ArrayType: ...
132
+
133
+ @overload
134
+ def einsum(
135
+ subscripts: str | _ArrayLikeInt_co,
136
+ /,
137
+ *operands: _ArrayLikeObject_co,
138
+ out: None = ...,
139
+ dtype: None | _DTypeLikeObject = ...,
140
+ order: _OrderKACF = ...,
141
+ casting: _CastingSafe = ...,
142
+ optimize: _OptimizeKind = ...,
143
+ ) -> Any: ...
144
+ @overload
145
+ def einsum(
146
+ subscripts: str | _ArrayLikeInt_co,
147
+ /,
148
+ *operands: Any,
149
+ casting: _CastingUnsafe,
150
+ dtype: None | _DTypeLikeObject = ...,
151
+ out: None = ...,
152
+ order: _OrderKACF = ...,
153
+ optimize: _OptimizeKind = ...,
154
+ ) -> Any: ...
155
+ @overload
156
+ def einsum(
157
+ subscripts: str | _ArrayLikeInt_co,
158
+ /,
159
+ *operands: _ArrayLikeObject_co,
160
+ out: _ArrayType,
161
+ dtype: None | _DTypeLikeObject = ...,
162
+ order: _OrderKACF = ...,
163
+ casting: _CastingSafe = ...,
164
+ optimize: _OptimizeKind = ...,
165
+ ) -> _ArrayType: ...
166
+ @overload
167
+ def einsum(
168
+ subscripts: str | _ArrayLikeInt_co,
169
+ /,
170
+ *operands: Any,
171
+ out: _ArrayType,
172
+ casting: _CastingUnsafe,
173
+ dtype: None | _DTypeLikeObject = ...,
174
+ order: _OrderKACF = ...,
175
+ optimize: _OptimizeKind = ...,
176
+ ) -> _ArrayType: ...
177
+
178
+ # NOTE: `einsum_call` is a hidden kwarg unavailable for public use.
179
+ # It is therefore excluded from the signatures below.
180
+ # NOTE: In practice the list consists of a `str` (first element)
181
+ # and a variable number of integer tuples.
182
+ def einsum_path(
183
+ subscripts: str | _ArrayLikeInt_co,
184
+ /,
185
+ *operands: _ArrayLikeComplex_co | _DTypeLikeObject,
186
+ optimize: _OptimizeKind = ...,
187
+ ) -> tuple[list[Any], str]: ...
venv/lib/python3.10/site-packages/numpy/core/fromnumeric.py ADDED
The diff for this file is too large to render. See raw diff