Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- ckpts/universal/global_step40/zero/6.post_attention_layernorm.weight/exp_avg.pt +3 -0
- ckpts/universal/global_step40/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt +3 -0
- ckpts/universal/global_step40/zero/7.attention.dense.weight/exp_avg_sq.pt +3 -0
- venv/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE +29 -0
- venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py +116 -0
- venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py +704 -0
- venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py +22 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/_internal.py +43 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__init__.py +1 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_aliases.py +536 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_helpers.py +232 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_linalg.py +158 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_typing.py +20 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py +16 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py +79 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py +46 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py +47 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py +22 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py +79 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py +46 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/linalg.py +40 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__init__.py +22 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/_aliases.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/linalg.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/_aliases.py +707 -0
- venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/linalg.py +62 -0
- venv/lib/python3.10/site-packages/scipy/constants/__init__.py +347 -0
- venv/lib/python3.10/site-packages/scipy/constants/__pycache__/__init__.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/constants/__pycache__/_codata.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/constants/__pycache__/_constants.cpython-310.pyc +0 -0
- venv/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc +0 -0
ckpts/universal/global_step40/zero/6.post_attention_layernorm.weight/exp_avg.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:cb13ebab68cba0a7509fa628d7990d44d14ef15c0b32b25714a9350d1ecd507a
|
3 |
+
size 9372
|
ckpts/universal/global_step40/zero/6.post_attention_layernorm.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:2859762b5b059963806e36692d5907386cfd22ed71d4f8e4ccaf8e5e0e8f7ca8
|
3 |
+
size 9387
|
ckpts/universal/global_step40/zero/7.attention.dense.weight/exp_avg_sq.pt
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:ad175c6c25f46ed73ac689c66406d96a30a211304a349044cc6a1fa571deadde
|
3 |
+
size 16778411
|
venv/lib/python3.10/site-packages/scipy/_lib/_uarray/LICENSE
ADDED
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
BSD 3-Clause License
|
2 |
+
|
3 |
+
Copyright (c) 2018, Quansight-Labs
|
4 |
+
All rights reserved.
|
5 |
+
|
6 |
+
Redistribution and use in source and binary forms, with or without
|
7 |
+
modification, are permitted provided that the following conditions are met:
|
8 |
+
|
9 |
+
* Redistributions of source code must retain the above copyright notice, this
|
10 |
+
list of conditions and the following disclaimer.
|
11 |
+
|
12 |
+
* Redistributions in binary form must reproduce the above copyright notice,
|
13 |
+
this list of conditions and the following disclaimer in the documentation
|
14 |
+
and/or other materials provided with the distribution.
|
15 |
+
|
16 |
+
* Neither the name of the copyright holder nor the names of its
|
17 |
+
contributors may be used to endorse or promote products derived from
|
18 |
+
this software without specific prior written permission.
|
19 |
+
|
20 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
21 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
22 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
23 |
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
24 |
+
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
25 |
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
26 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
27 |
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
28 |
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
29 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__init__.py
ADDED
@@ -0,0 +1,116 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
.. note:
|
3 |
+
If you are looking for overrides for NumPy-specific methods, see the
|
4 |
+
documentation for :obj:`unumpy`. This page explains how to write
|
5 |
+
back-ends and multimethods.
|
6 |
+
|
7 |
+
``uarray`` is built around a back-end protocol, and overridable multimethods.
|
8 |
+
It is necessary to define multimethods for back-ends to be able to override them.
|
9 |
+
See the documentation of :obj:`generate_multimethod` on how to write multimethods.
|
10 |
+
|
11 |
+
|
12 |
+
|
13 |
+
Let's start with the simplest:
|
14 |
+
|
15 |
+
``__ua_domain__`` defines the back-end *domain*. The domain consists of period-
|
16 |
+
separated string consisting of the modules you extend plus the submodule. For
|
17 |
+
example, if a submodule ``module2.submodule`` extends ``module1``
|
18 |
+
(i.e., it exposes dispatchables marked as types available in ``module1``),
|
19 |
+
then the domain string should be ``"module1.module2.submodule"``.
|
20 |
+
|
21 |
+
|
22 |
+
For the purpose of this demonstration, we'll be creating an object and setting
|
23 |
+
its attributes directly. However, note that you can use a module or your own type
|
24 |
+
as a backend as well.
|
25 |
+
|
26 |
+
>>> class Backend: pass
|
27 |
+
>>> be = Backend()
|
28 |
+
>>> be.__ua_domain__ = "ua_examples"
|
29 |
+
|
30 |
+
It might be useful at this point to sidetrack to the documentation of
|
31 |
+
:obj:`generate_multimethod` to find out how to generate a multimethod
|
32 |
+
overridable by :obj:`uarray`. Needless to say, writing a backend and
|
33 |
+
creating multimethods are mostly orthogonal activities, and knowing
|
34 |
+
one doesn't necessarily require knowledge of the other, although it
|
35 |
+
is certainly helpful. We expect core API designers/specifiers to write the
|
36 |
+
multimethods, and implementors to override them. But, as is often the case,
|
37 |
+
similar people write both.
|
38 |
+
|
39 |
+
Without further ado, here's an example multimethod:
|
40 |
+
|
41 |
+
>>> import uarray as ua
|
42 |
+
>>> from uarray import Dispatchable
|
43 |
+
>>> def override_me(a, b):
|
44 |
+
... return Dispatchable(a, int),
|
45 |
+
>>> def override_replacer(args, kwargs, dispatchables):
|
46 |
+
... return (dispatchables[0], args[1]), {}
|
47 |
+
>>> overridden_me = ua.generate_multimethod(
|
48 |
+
... override_me, override_replacer, "ua_examples"
|
49 |
+
... )
|
50 |
+
|
51 |
+
Next comes the part about overriding the multimethod. This requires
|
52 |
+
the ``__ua_function__`` protocol, and the ``__ua_convert__``
|
53 |
+
protocol. The ``__ua_function__`` protocol has the signature
|
54 |
+
``(method, args, kwargs)`` where ``method`` is the passed
|
55 |
+
multimethod, ``args``/``kwargs`` specify the arguments and ``dispatchables``
|
56 |
+
is the list of converted dispatchables passed in.
|
57 |
+
|
58 |
+
>>> def __ua_function__(method, args, kwargs):
|
59 |
+
... return method.__name__, args, kwargs
|
60 |
+
>>> be.__ua_function__ = __ua_function__
|
61 |
+
|
62 |
+
The other protocol of interest is the ``__ua_convert__`` protocol. It has the
|
63 |
+
signature ``(dispatchables, coerce)``. When ``coerce`` is ``False``, conversion
|
64 |
+
between the formats should ideally be an ``O(1)`` operation, but it means that
|
65 |
+
no memory copying should be involved, only views of the existing data.
|
66 |
+
|
67 |
+
>>> def __ua_convert__(dispatchables, coerce):
|
68 |
+
... for d in dispatchables:
|
69 |
+
... if d.type is int:
|
70 |
+
... if coerce and d.coercible:
|
71 |
+
... yield str(d.value)
|
72 |
+
... else:
|
73 |
+
... yield d.value
|
74 |
+
>>> be.__ua_convert__ = __ua_convert__
|
75 |
+
|
76 |
+
Now that we have defined the backend, the next thing to do is to call the multimethod.
|
77 |
+
|
78 |
+
>>> with ua.set_backend(be):
|
79 |
+
... overridden_me(1, "2")
|
80 |
+
('override_me', (1, '2'), {})
|
81 |
+
|
82 |
+
Note that the marked type has no effect on the actual type of the passed object.
|
83 |
+
We can also coerce the type of the input.
|
84 |
+
|
85 |
+
>>> with ua.set_backend(be, coerce=True):
|
86 |
+
... overridden_me(1, "2")
|
87 |
+
... overridden_me(1.0, "2")
|
88 |
+
('override_me', ('1', '2'), {})
|
89 |
+
('override_me', ('1.0', '2'), {})
|
90 |
+
|
91 |
+
Another feature is that if you remove ``__ua_convert__``, the arguments are not
|
92 |
+
converted at all and it's up to the backend to handle that.
|
93 |
+
|
94 |
+
>>> del be.__ua_convert__
|
95 |
+
>>> with ua.set_backend(be):
|
96 |
+
... overridden_me(1, "2")
|
97 |
+
('override_me', (1, '2'), {})
|
98 |
+
|
99 |
+
You also have the option to return ``NotImplemented``, in which case processing moves on
|
100 |
+
to the next back-end, which in this case, doesn't exist. The same applies to
|
101 |
+
``__ua_convert__``.
|
102 |
+
|
103 |
+
>>> be.__ua_function__ = lambda *a, **kw: NotImplemented
|
104 |
+
>>> with ua.set_backend(be):
|
105 |
+
... overridden_me(1, "2")
|
106 |
+
Traceback (most recent call last):
|
107 |
+
...
|
108 |
+
uarray.BackendNotImplementedError: ...
|
109 |
+
|
110 |
+
The last possibility is if we don't have ``__ua_convert__``, in which case the job is
|
111 |
+
left up to ``__ua_function__``, but putting things back into arrays after conversion
|
112 |
+
will not be possible.
|
113 |
+
"""
|
114 |
+
|
115 |
+
from ._backend import *
|
116 |
+
__version__ = '0.8.8.dev0+aa94c5a4.scipy'
|
venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (4.7 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/_uarray/__pycache__/_backend.cpython-310.pyc
ADDED
Binary file (20.4 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_backend.py
ADDED
@@ -0,0 +1,704 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import typing
|
2 |
+
import types
|
3 |
+
import inspect
|
4 |
+
import functools
|
5 |
+
from . import _uarray
|
6 |
+
import copyreg
|
7 |
+
import pickle
|
8 |
+
import contextlib
|
9 |
+
|
10 |
+
from ._uarray import ( # type: ignore
|
11 |
+
BackendNotImplementedError,
|
12 |
+
_Function,
|
13 |
+
_SkipBackendContext,
|
14 |
+
_SetBackendContext,
|
15 |
+
_BackendState,
|
16 |
+
)
|
17 |
+
|
18 |
+
__all__ = [
|
19 |
+
"set_backend",
|
20 |
+
"set_global_backend",
|
21 |
+
"skip_backend",
|
22 |
+
"register_backend",
|
23 |
+
"determine_backend",
|
24 |
+
"determine_backend_multi",
|
25 |
+
"clear_backends",
|
26 |
+
"create_multimethod",
|
27 |
+
"generate_multimethod",
|
28 |
+
"_Function",
|
29 |
+
"BackendNotImplementedError",
|
30 |
+
"Dispatchable",
|
31 |
+
"wrap_single_convertor",
|
32 |
+
"wrap_single_convertor_instance",
|
33 |
+
"all_of_type",
|
34 |
+
"mark_as",
|
35 |
+
"set_state",
|
36 |
+
"get_state",
|
37 |
+
"reset_state",
|
38 |
+
"_BackendState",
|
39 |
+
"_SkipBackendContext",
|
40 |
+
"_SetBackendContext",
|
41 |
+
]
|
42 |
+
|
43 |
+
ArgumentExtractorType = typing.Callable[..., tuple["Dispatchable", ...]]
|
44 |
+
ArgumentReplacerType = typing.Callable[
|
45 |
+
[tuple, dict, tuple], tuple[tuple, dict]
|
46 |
+
]
|
47 |
+
|
48 |
+
def unpickle_function(mod_name, qname, self_):
|
49 |
+
import importlib
|
50 |
+
|
51 |
+
try:
|
52 |
+
module = importlib.import_module(mod_name)
|
53 |
+
qname = qname.split(".")
|
54 |
+
func = module
|
55 |
+
for q in qname:
|
56 |
+
func = getattr(func, q)
|
57 |
+
|
58 |
+
if self_ is not None:
|
59 |
+
func = types.MethodType(func, self_)
|
60 |
+
|
61 |
+
return func
|
62 |
+
except (ImportError, AttributeError) as e:
|
63 |
+
from pickle import UnpicklingError
|
64 |
+
|
65 |
+
raise UnpicklingError from e
|
66 |
+
|
67 |
+
|
68 |
+
def pickle_function(func):
|
69 |
+
mod_name = getattr(func, "__module__", None)
|
70 |
+
qname = getattr(func, "__qualname__", None)
|
71 |
+
self_ = getattr(func, "__self__", None)
|
72 |
+
|
73 |
+
try:
|
74 |
+
test = unpickle_function(mod_name, qname, self_)
|
75 |
+
except pickle.UnpicklingError:
|
76 |
+
test = None
|
77 |
+
|
78 |
+
if test is not func:
|
79 |
+
raise pickle.PicklingError(
|
80 |
+
f"Can't pickle {func}: it's not the same object as {test}"
|
81 |
+
)
|
82 |
+
|
83 |
+
return unpickle_function, (mod_name, qname, self_)
|
84 |
+
|
85 |
+
|
86 |
+
def pickle_state(state):
|
87 |
+
return _uarray._BackendState._unpickle, state._pickle()
|
88 |
+
|
89 |
+
|
90 |
+
def pickle_set_backend_context(ctx):
|
91 |
+
return _SetBackendContext, ctx._pickle()
|
92 |
+
|
93 |
+
|
94 |
+
def pickle_skip_backend_context(ctx):
|
95 |
+
return _SkipBackendContext, ctx._pickle()
|
96 |
+
|
97 |
+
|
98 |
+
copyreg.pickle(_Function, pickle_function)
|
99 |
+
copyreg.pickle(_uarray._BackendState, pickle_state)
|
100 |
+
copyreg.pickle(_SetBackendContext, pickle_set_backend_context)
|
101 |
+
copyreg.pickle(_SkipBackendContext, pickle_skip_backend_context)
|
102 |
+
|
103 |
+
|
104 |
+
def get_state():
|
105 |
+
"""
|
106 |
+
Returns an opaque object containing the current state of all the backends.
|
107 |
+
|
108 |
+
Can be used for synchronization between threads/processes.
|
109 |
+
|
110 |
+
See Also
|
111 |
+
--------
|
112 |
+
set_state
|
113 |
+
Sets the state returned by this function.
|
114 |
+
"""
|
115 |
+
return _uarray.get_state()
|
116 |
+
|
117 |
+
|
118 |
+
@contextlib.contextmanager
|
119 |
+
def reset_state():
|
120 |
+
"""
|
121 |
+
Returns a context manager that resets all state once exited.
|
122 |
+
|
123 |
+
See Also
|
124 |
+
--------
|
125 |
+
set_state
|
126 |
+
Context manager that sets the backend state.
|
127 |
+
get_state
|
128 |
+
Gets a state to be set by this context manager.
|
129 |
+
"""
|
130 |
+
with set_state(get_state()):
|
131 |
+
yield
|
132 |
+
|
133 |
+
|
134 |
+
@contextlib.contextmanager
|
135 |
+
def set_state(state):
|
136 |
+
"""
|
137 |
+
A context manager that sets the state of the backends to one returned by :obj:`get_state`.
|
138 |
+
|
139 |
+
See Also
|
140 |
+
--------
|
141 |
+
get_state
|
142 |
+
Gets a state to be set by this context manager.
|
143 |
+
""" # noqa: E501
|
144 |
+
old_state = get_state()
|
145 |
+
_uarray.set_state(state)
|
146 |
+
try:
|
147 |
+
yield
|
148 |
+
finally:
|
149 |
+
_uarray.set_state(old_state, True)
|
150 |
+
|
151 |
+
|
152 |
+
def create_multimethod(*args, **kwargs):
|
153 |
+
"""
|
154 |
+
Creates a decorator for generating multimethods.
|
155 |
+
|
156 |
+
This function creates a decorator that can be used with an argument
|
157 |
+
extractor in order to generate a multimethod. Other than for the
|
158 |
+
argument extractor, all arguments are passed on to
|
159 |
+
:obj:`generate_multimethod`.
|
160 |
+
|
161 |
+
See Also
|
162 |
+
--------
|
163 |
+
generate_multimethod
|
164 |
+
Generates a multimethod.
|
165 |
+
"""
|
166 |
+
|
167 |
+
def wrapper(a):
|
168 |
+
return generate_multimethod(a, *args, **kwargs)
|
169 |
+
|
170 |
+
return wrapper
|
171 |
+
|
172 |
+
|
173 |
+
def generate_multimethod(
|
174 |
+
argument_extractor: ArgumentExtractorType,
|
175 |
+
argument_replacer: ArgumentReplacerType,
|
176 |
+
domain: str,
|
177 |
+
default: typing.Optional[typing.Callable] = None,
|
178 |
+
):
|
179 |
+
"""
|
180 |
+
Generates a multimethod.
|
181 |
+
|
182 |
+
Parameters
|
183 |
+
----------
|
184 |
+
argument_extractor : ArgumentExtractorType
|
185 |
+
A callable which extracts the dispatchable arguments. Extracted arguments
|
186 |
+
should be marked by the :obj:`Dispatchable` class. It has the same signature
|
187 |
+
as the desired multimethod.
|
188 |
+
argument_replacer : ArgumentReplacerType
|
189 |
+
A callable with the signature (args, kwargs, dispatchables), which should also
|
190 |
+
return an (args, kwargs) pair with the dispatchables replaced inside the
|
191 |
+
args/kwargs.
|
192 |
+
domain : str
|
193 |
+
A string value indicating the domain of this multimethod.
|
194 |
+
default: Optional[Callable], optional
|
195 |
+
The default implementation of this multimethod, where ``None`` (the default)
|
196 |
+
specifies there is no default implementation.
|
197 |
+
|
198 |
+
Examples
|
199 |
+
--------
|
200 |
+
In this example, ``a`` is to be dispatched over, so we return it, while marking it
|
201 |
+
as an ``int``.
|
202 |
+
The trailing comma is needed because the args have to be returned as an iterable.
|
203 |
+
|
204 |
+
>>> def override_me(a, b):
|
205 |
+
... return Dispatchable(a, int),
|
206 |
+
|
207 |
+
Next, we define the argument replacer that replaces the dispatchables inside
|
208 |
+
args/kwargs with the supplied ones.
|
209 |
+
|
210 |
+
>>> def override_replacer(args, kwargs, dispatchables):
|
211 |
+
... return (dispatchables[0], args[1]), {}
|
212 |
+
|
213 |
+
Next, we define the multimethod.
|
214 |
+
|
215 |
+
>>> overridden_me = generate_multimethod(
|
216 |
+
... override_me, override_replacer, "ua_examples"
|
217 |
+
... )
|
218 |
+
|
219 |
+
Notice that there's no default implementation, unless you supply one.
|
220 |
+
|
221 |
+
>>> overridden_me(1, "a")
|
222 |
+
Traceback (most recent call last):
|
223 |
+
...
|
224 |
+
uarray.BackendNotImplementedError: ...
|
225 |
+
|
226 |
+
>>> overridden_me2 = generate_multimethod(
|
227 |
+
... override_me, override_replacer, "ua_examples", default=lambda x, y: (x, y)
|
228 |
+
... )
|
229 |
+
>>> overridden_me2(1, "a")
|
230 |
+
(1, 'a')
|
231 |
+
|
232 |
+
See Also
|
233 |
+
--------
|
234 |
+
uarray
|
235 |
+
See the module documentation for how to override the method by creating
|
236 |
+
backends.
|
237 |
+
"""
|
238 |
+
kw_defaults, arg_defaults, opts = get_defaults(argument_extractor)
|
239 |
+
ua_func = _Function(
|
240 |
+
argument_extractor,
|
241 |
+
argument_replacer,
|
242 |
+
domain,
|
243 |
+
arg_defaults,
|
244 |
+
kw_defaults,
|
245 |
+
default,
|
246 |
+
)
|
247 |
+
|
248 |
+
return functools.update_wrapper(ua_func, argument_extractor)
|
249 |
+
|
250 |
+
|
251 |
+
def set_backend(backend, coerce=False, only=False):
|
252 |
+
"""
|
253 |
+
A context manager that sets the preferred backend.
|
254 |
+
|
255 |
+
Parameters
|
256 |
+
----------
|
257 |
+
backend
|
258 |
+
The backend to set.
|
259 |
+
coerce
|
260 |
+
Whether or not to coerce to a specific backend's types. Implies ``only``.
|
261 |
+
only
|
262 |
+
Whether or not this should be the last backend to try.
|
263 |
+
|
264 |
+
See Also
|
265 |
+
--------
|
266 |
+
skip_backend: A context manager that allows skipping of backends.
|
267 |
+
set_global_backend: Set a single, global backend for a domain.
|
268 |
+
"""
|
269 |
+
try:
|
270 |
+
return backend.__ua_cache__["set", coerce, only]
|
271 |
+
except AttributeError:
|
272 |
+
backend.__ua_cache__ = {}
|
273 |
+
except KeyError:
|
274 |
+
pass
|
275 |
+
|
276 |
+
ctx = _SetBackendContext(backend, coerce, only)
|
277 |
+
backend.__ua_cache__["set", coerce, only] = ctx
|
278 |
+
return ctx
|
279 |
+
|
280 |
+
|
281 |
+
def skip_backend(backend):
|
282 |
+
"""
|
283 |
+
A context manager that allows one to skip a given backend from processing
|
284 |
+
entirely. This allows one to use another backend's code in a library that
|
285 |
+
is also a consumer of the same backend.
|
286 |
+
|
287 |
+
Parameters
|
288 |
+
----------
|
289 |
+
backend
|
290 |
+
The backend to skip.
|
291 |
+
|
292 |
+
See Also
|
293 |
+
--------
|
294 |
+
set_backend: A context manager that allows setting of backends.
|
295 |
+
set_global_backend: Set a single, global backend for a domain.
|
296 |
+
"""
|
297 |
+
try:
|
298 |
+
return backend.__ua_cache__["skip"]
|
299 |
+
except AttributeError:
|
300 |
+
backend.__ua_cache__ = {}
|
301 |
+
except KeyError:
|
302 |
+
pass
|
303 |
+
|
304 |
+
ctx = _SkipBackendContext(backend)
|
305 |
+
backend.__ua_cache__["skip"] = ctx
|
306 |
+
return ctx
|
307 |
+
|
308 |
+
|
309 |
+
def get_defaults(f):
|
310 |
+
sig = inspect.signature(f)
|
311 |
+
kw_defaults = {}
|
312 |
+
arg_defaults = []
|
313 |
+
opts = set()
|
314 |
+
for k, v in sig.parameters.items():
|
315 |
+
if v.default is not inspect.Parameter.empty:
|
316 |
+
kw_defaults[k] = v.default
|
317 |
+
if v.kind in (
|
318 |
+
inspect.Parameter.POSITIONAL_ONLY,
|
319 |
+
inspect.Parameter.POSITIONAL_OR_KEYWORD,
|
320 |
+
):
|
321 |
+
arg_defaults.append(v.default)
|
322 |
+
opts.add(k)
|
323 |
+
|
324 |
+
return kw_defaults, tuple(arg_defaults), opts
|
325 |
+
|
326 |
+
|
327 |
+
def set_global_backend(backend, coerce=False, only=False, *, try_last=False):
|
328 |
+
"""
|
329 |
+
This utility method replaces the default backend for permanent use. It
|
330 |
+
will be tried in the list of backends automatically, unless the
|
331 |
+
``only`` flag is set on a backend. This will be the first tried
|
332 |
+
backend outside the :obj:`set_backend` context manager.
|
333 |
+
|
334 |
+
Note that this method is not thread-safe.
|
335 |
+
|
336 |
+
.. warning::
|
337 |
+
We caution library authors against using this function in
|
338 |
+
their code. We do *not* support this use-case. This function
|
339 |
+
is meant to be used only by users themselves, or by a reference
|
340 |
+
implementation, if one exists.
|
341 |
+
|
342 |
+
Parameters
|
343 |
+
----------
|
344 |
+
backend
|
345 |
+
The backend to register.
|
346 |
+
coerce : bool
|
347 |
+
Whether to coerce input types when trying this backend.
|
348 |
+
only : bool
|
349 |
+
If ``True``, no more backends will be tried if this fails.
|
350 |
+
Implied by ``coerce=True``.
|
351 |
+
try_last : bool
|
352 |
+
If ``True``, the global backend is tried after registered backends.
|
353 |
+
|
354 |
+
See Also
|
355 |
+
--------
|
356 |
+
set_backend: A context manager that allows setting of backends.
|
357 |
+
skip_backend: A context manager that allows skipping of backends.
|
358 |
+
"""
|
359 |
+
_uarray.set_global_backend(backend, coerce, only, try_last)
|
360 |
+
|
361 |
+
|
362 |
+
def register_backend(backend):
|
363 |
+
"""
|
364 |
+
This utility method sets registers backend for permanent use. It
|
365 |
+
will be tried in the list of backends automatically, unless the
|
366 |
+
``only`` flag is set on a backend.
|
367 |
+
|
368 |
+
Note that this method is not thread-safe.
|
369 |
+
|
370 |
+
Parameters
|
371 |
+
----------
|
372 |
+
backend
|
373 |
+
The backend to register.
|
374 |
+
"""
|
375 |
+
_uarray.register_backend(backend)
|
376 |
+
|
377 |
+
|
378 |
+
def clear_backends(domain, registered=True, globals=False):
|
379 |
+
"""
|
380 |
+
This utility method clears registered backends.
|
381 |
+
|
382 |
+
.. warning::
|
383 |
+
We caution library authors against using this function in
|
384 |
+
their code. We do *not* support this use-case. This function
|
385 |
+
is meant to be used only by users themselves.
|
386 |
+
|
387 |
+
.. warning::
|
388 |
+
Do NOT use this method inside a multimethod call, or the
|
389 |
+
program is likely to crash.
|
390 |
+
|
391 |
+
Parameters
|
392 |
+
----------
|
393 |
+
domain : Optional[str]
|
394 |
+
The domain for which to de-register backends. ``None`` means
|
395 |
+
de-register for all domains.
|
396 |
+
registered : bool
|
397 |
+
Whether or not to clear registered backends. See :obj:`register_backend`.
|
398 |
+
globals : bool
|
399 |
+
Whether or not to clear global backends. See :obj:`set_global_backend`.
|
400 |
+
|
401 |
+
See Also
|
402 |
+
--------
|
403 |
+
register_backend : Register a backend globally.
|
404 |
+
set_global_backend : Set a global backend.
|
405 |
+
"""
|
406 |
+
_uarray.clear_backends(domain, registered, globals)
|
407 |
+
|
408 |
+
|
409 |
+
class Dispatchable:
|
410 |
+
"""
|
411 |
+
A utility class which marks an argument with a specific dispatch type.
|
412 |
+
|
413 |
+
|
414 |
+
Attributes
|
415 |
+
----------
|
416 |
+
value
|
417 |
+
The value of the Dispatchable.
|
418 |
+
|
419 |
+
type
|
420 |
+
The type of the Dispatchable.
|
421 |
+
|
422 |
+
Examples
|
423 |
+
--------
|
424 |
+
>>> x = Dispatchable(1, str)
|
425 |
+
>>> x
|
426 |
+
<Dispatchable: type=<class 'str'>, value=1>
|
427 |
+
|
428 |
+
See Also
|
429 |
+
--------
|
430 |
+
all_of_type
|
431 |
+
Marks all unmarked parameters of a function.
|
432 |
+
|
433 |
+
mark_as
|
434 |
+
Allows one to create a utility function to mark as a given type.
|
435 |
+
"""
|
436 |
+
|
437 |
+
def __init__(self, value, dispatch_type, coercible=True):
|
438 |
+
self.value = value
|
439 |
+
self.type = dispatch_type
|
440 |
+
self.coercible = coercible
|
441 |
+
|
442 |
+
def __getitem__(self, index):
|
443 |
+
return (self.type, self.value)[index]
|
444 |
+
|
445 |
+
def __str__(self):
|
446 |
+
return f"<{type(self).__name__}: type={self.type!r}, value={self.value!r}>"
|
447 |
+
|
448 |
+
__repr__ = __str__
|
449 |
+
|
450 |
+
|
451 |
+
def mark_as(dispatch_type):
|
452 |
+
"""
|
453 |
+
Creates a utility function to mark something as a specific type.
|
454 |
+
|
455 |
+
Examples
|
456 |
+
--------
|
457 |
+
>>> mark_int = mark_as(int)
|
458 |
+
>>> mark_int(1)
|
459 |
+
<Dispatchable: type=<class 'int'>, value=1>
|
460 |
+
"""
|
461 |
+
return functools.partial(Dispatchable, dispatch_type=dispatch_type)
|
462 |
+
|
463 |
+
|
464 |
+
def all_of_type(arg_type):
|
465 |
+
"""
|
466 |
+
Marks all unmarked arguments as a given type.
|
467 |
+
|
468 |
+
Examples
|
469 |
+
--------
|
470 |
+
>>> @all_of_type(str)
|
471 |
+
... def f(a, b):
|
472 |
+
... return a, Dispatchable(b, int)
|
473 |
+
>>> f('a', 1)
|
474 |
+
(<Dispatchable: type=<class 'str'>, value='a'>,
|
475 |
+
<Dispatchable: type=<class 'int'>, value=1>)
|
476 |
+
"""
|
477 |
+
|
478 |
+
def outer(func):
|
479 |
+
@functools.wraps(func)
|
480 |
+
def inner(*args, **kwargs):
|
481 |
+
extracted_args = func(*args, **kwargs)
|
482 |
+
return tuple(
|
483 |
+
Dispatchable(arg, arg_type)
|
484 |
+
if not isinstance(arg, Dispatchable)
|
485 |
+
else arg
|
486 |
+
for arg in extracted_args
|
487 |
+
)
|
488 |
+
|
489 |
+
return inner
|
490 |
+
|
491 |
+
return outer
|
492 |
+
|
493 |
+
|
494 |
+
def wrap_single_convertor(convert_single):
|
495 |
+
"""
|
496 |
+
Wraps a ``__ua_convert__`` defined for a single element to all elements.
|
497 |
+
If any of them return ``NotImplemented``, the operation is assumed to be
|
498 |
+
undefined.
|
499 |
+
|
500 |
+
Accepts a signature of (value, type, coerce).
|
501 |
+
"""
|
502 |
+
|
503 |
+
@functools.wraps(convert_single)
|
504 |
+
def __ua_convert__(dispatchables, coerce):
|
505 |
+
converted = []
|
506 |
+
for d in dispatchables:
|
507 |
+
c = convert_single(d.value, d.type, coerce and d.coercible)
|
508 |
+
|
509 |
+
if c is NotImplemented:
|
510 |
+
return NotImplemented
|
511 |
+
|
512 |
+
converted.append(c)
|
513 |
+
|
514 |
+
return converted
|
515 |
+
|
516 |
+
return __ua_convert__
|
517 |
+
|
518 |
+
|
519 |
+
def wrap_single_convertor_instance(convert_single):
|
520 |
+
"""
|
521 |
+
Wraps a ``__ua_convert__`` defined for a single element to all elements.
|
522 |
+
If any of them return ``NotImplemented``, the operation is assumed to be
|
523 |
+
undefined.
|
524 |
+
|
525 |
+
Accepts a signature of (value, type, coerce).
|
526 |
+
"""
|
527 |
+
|
528 |
+
@functools.wraps(convert_single)
|
529 |
+
def __ua_convert__(self, dispatchables, coerce):
|
530 |
+
converted = []
|
531 |
+
for d in dispatchables:
|
532 |
+
c = convert_single(self, d.value, d.type, coerce and d.coercible)
|
533 |
+
|
534 |
+
if c is NotImplemented:
|
535 |
+
return NotImplemented
|
536 |
+
|
537 |
+
converted.append(c)
|
538 |
+
|
539 |
+
return converted
|
540 |
+
|
541 |
+
return __ua_convert__
|
542 |
+
|
543 |
+
|
544 |
+
def determine_backend(value, dispatch_type, *, domain, only=True, coerce=False):
|
545 |
+
"""Set the backend to the first active backend that supports ``value``
|
546 |
+
|
547 |
+
This is useful for functions that call multimethods without any dispatchable
|
548 |
+
arguments. You can use :func:`determine_backend` to ensure the same backend
|
549 |
+
is used everywhere in a block of multimethod calls.
|
550 |
+
|
551 |
+
Parameters
|
552 |
+
----------
|
553 |
+
value
|
554 |
+
The value being tested
|
555 |
+
dispatch_type
|
556 |
+
The dispatch type associated with ``value``, aka
|
557 |
+
":ref:`marking <MarkingGlossary>`".
|
558 |
+
domain: string
|
559 |
+
The domain to query for backends and set.
|
560 |
+
coerce: bool
|
561 |
+
Whether or not to allow coercion to the backend's types. Implies ``only``.
|
562 |
+
only: bool
|
563 |
+
Whether or not this should be the last backend to try.
|
564 |
+
|
565 |
+
See Also
|
566 |
+
--------
|
567 |
+
set_backend: For when you know which backend to set
|
568 |
+
|
569 |
+
Notes
|
570 |
+
-----
|
571 |
+
|
572 |
+
Support is determined by the ``__ua_convert__`` protocol. Backends not
|
573 |
+
supporting the type must return ``NotImplemented`` from their
|
574 |
+
``__ua_convert__`` if they don't support input of that type.
|
575 |
+
|
576 |
+
Examples
|
577 |
+
--------
|
578 |
+
|
579 |
+
Suppose we have two backends ``BackendA`` and ``BackendB`` each supporting
|
580 |
+
different types, ``TypeA`` and ``TypeB``. Neither supporting the other type:
|
581 |
+
|
582 |
+
>>> with ua.set_backend(ex.BackendA):
|
583 |
+
... ex.call_multimethod(ex.TypeB(), ex.TypeB())
|
584 |
+
Traceback (most recent call last):
|
585 |
+
...
|
586 |
+
uarray.BackendNotImplementedError: ...
|
587 |
+
|
588 |
+
Now consider a multimethod that creates a new object of ``TypeA``, or
|
589 |
+
``TypeB`` depending on the active backend.
|
590 |
+
|
591 |
+
>>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB):
|
592 |
+
... res = ex.creation_multimethod()
|
593 |
+
... ex.call_multimethod(res, ex.TypeA())
|
594 |
+
Traceback (most recent call last):
|
595 |
+
...
|
596 |
+
uarray.BackendNotImplementedError: ...
|
597 |
+
|
598 |
+
``res`` is an object of ``TypeB`` because ``BackendB`` is set in the
|
599 |
+
innermost with statement. So, ``call_multimethod`` fails since the types
|
600 |
+
don't match.
|
601 |
+
|
602 |
+
Instead, we need to first find a backend suitable for all of our objects.
|
603 |
+
|
604 |
+
>>> with ua.set_backend(ex.BackendA), ua.set_backend(ex.BackendB):
|
605 |
+
... x = ex.TypeA()
|
606 |
+
... with ua.determine_backend(x, "mark", domain="ua_examples"):
|
607 |
+
... res = ex.creation_multimethod()
|
608 |
+
... ex.call_multimethod(res, x)
|
609 |
+
TypeA
|
610 |
+
|
611 |
+
"""
|
612 |
+
dispatchables = (Dispatchable(value, dispatch_type, coerce),)
|
613 |
+
backend = _uarray.determine_backend(domain, dispatchables, coerce)
|
614 |
+
|
615 |
+
return set_backend(backend, coerce=coerce, only=only)
|
616 |
+
|
617 |
+
|
618 |
+
def determine_backend_multi(
|
619 |
+
dispatchables, *, domain, only=True, coerce=False, **kwargs
|
620 |
+
):
|
621 |
+
"""Set a backend supporting all ``dispatchables``
|
622 |
+
|
623 |
+
This is useful for functions that call multimethods without any dispatchable
|
624 |
+
arguments. You can use :func:`determine_backend_multi` to ensure the same
|
625 |
+
backend is used everywhere in a block of multimethod calls involving
|
626 |
+
multiple arrays.
|
627 |
+
|
628 |
+
Parameters
|
629 |
+
----------
|
630 |
+
dispatchables: Sequence[Union[uarray.Dispatchable, Any]]
|
631 |
+
The dispatchables that must be supported
|
632 |
+
domain: string
|
633 |
+
The domain to query for backends and set.
|
634 |
+
coerce: bool
|
635 |
+
Whether or not to allow coercion to the backend's types. Implies ``only``.
|
636 |
+
only: bool
|
637 |
+
Whether or not this should be the last backend to try.
|
638 |
+
dispatch_type: Optional[Any]
|
639 |
+
The default dispatch type associated with ``dispatchables``, aka
|
640 |
+
":ref:`marking <MarkingGlossary>`".
|
641 |
+
|
642 |
+
See Also
|
643 |
+
--------
|
644 |
+
determine_backend: For a single dispatch value
|
645 |
+
set_backend: For when you know which backend to set
|
646 |
+
|
647 |
+
Notes
|
648 |
+
-----
|
649 |
+
|
650 |
+
Support is determined by the ``__ua_convert__`` protocol. Backends not
|
651 |
+
supporting the type must return ``NotImplemented`` from their
|
652 |
+
``__ua_convert__`` if they don't support input of that type.
|
653 |
+
|
654 |
+
Examples
|
655 |
+
--------
|
656 |
+
|
657 |
+
:func:`determine_backend` allows the backend to be set from a single
|
658 |
+
object. :func:`determine_backend_multi` allows multiple objects to be
|
659 |
+
checked simultaneously for support in the backend. Suppose we have a
|
660 |
+
``BackendAB`` which supports ``TypeA`` and ``TypeB`` in the same call,
|
661 |
+
and a ``BackendBC`` that doesn't support ``TypeA``.
|
662 |
+
|
663 |
+
>>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC):
|
664 |
+
... a, b = ex.TypeA(), ex.TypeB()
|
665 |
+
... with ua.determine_backend_multi(
|
666 |
+
... [ua.Dispatchable(a, "mark"), ua.Dispatchable(b, "mark")],
|
667 |
+
... domain="ua_examples"
|
668 |
+
... ):
|
669 |
+
... res = ex.creation_multimethod()
|
670 |
+
... ex.call_multimethod(res, a, b)
|
671 |
+
TypeA
|
672 |
+
|
673 |
+
This won't call ``BackendBC`` because it doesn't support ``TypeA``.
|
674 |
+
|
675 |
+
We can also use leave out the ``ua.Dispatchable`` if we specify the
|
676 |
+
default ``dispatch_type`` for the ``dispatchables`` argument.
|
677 |
+
|
678 |
+
>>> with ua.set_backend(ex.BackendAB), ua.set_backend(ex.BackendBC):
|
679 |
+
... a, b = ex.TypeA(), ex.TypeB()
|
680 |
+
... with ua.determine_backend_multi(
|
681 |
+
... [a, b], dispatch_type="mark", domain="ua_examples"
|
682 |
+
... ):
|
683 |
+
... res = ex.creation_multimethod()
|
684 |
+
... ex.call_multimethod(res, a, b)
|
685 |
+
TypeA
|
686 |
+
|
687 |
+
"""
|
688 |
+
if "dispatch_type" in kwargs:
|
689 |
+
disp_type = kwargs.pop("dispatch_type")
|
690 |
+
dispatchables = tuple(
|
691 |
+
d if isinstance(d, Dispatchable) else Dispatchable(d, disp_type)
|
692 |
+
for d in dispatchables
|
693 |
+
)
|
694 |
+
else:
|
695 |
+
dispatchables = tuple(dispatchables)
|
696 |
+
if not all(isinstance(d, Dispatchable) for d in dispatchables):
|
697 |
+
raise TypeError("dispatchables must be instances of uarray.Dispatchable")
|
698 |
+
|
699 |
+
if len(kwargs) != 0:
|
700 |
+
raise TypeError(f"Received unexpected keyword arguments: {kwargs}")
|
701 |
+
|
702 |
+
backend = _uarray.determine_backend(domain, dispatchables, coerce)
|
703 |
+
|
704 |
+
return set_backend(backend, coerce=coerce, only=only)
|
venv/lib/python3.10/site-packages/scipy/_lib/_uarray/_uarray.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (174 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__init__.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
NumPy Array API compatibility library
|
3 |
+
|
4 |
+
This is a small wrapper around NumPy and CuPy that is compatible with the
|
5 |
+
Array API standard https://data-apis.org/array-api/latest/. See also NEP 47
|
6 |
+
https://numpy.org/neps/nep-0047-array-api-standard.html.
|
7 |
+
|
8 |
+
Unlike numpy.array_api, this is not a strict minimal implementation of the
|
9 |
+
Array API, but rather just an extension of the main NumPy namespace with
|
10 |
+
changes needed to be compliant with the Array API. See
|
11 |
+
https://numpy.org/doc/stable/reference/array_api.html for a full list of
|
12 |
+
changes. In particular, unlike numpy.array_api, this package does not use a
|
13 |
+
separate Array object, but rather just uses numpy.ndarray directly.
|
14 |
+
|
15 |
+
Library authors using the Array API may wish to test against numpy.array_api
|
16 |
+
to ensure they are not using functionality outside of the standard, but prefer
|
17 |
+
this implementation for the default when working with NumPy arrays.
|
18 |
+
|
19 |
+
"""
|
20 |
+
__version__ = '1.4.1'
|
21 |
+
|
22 |
+
from .common import *
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.16 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/__pycache__/_internal.cpython-310.pyc
ADDED
Binary file (1.51 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/_internal.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Internal helpers
|
3 |
+
"""
|
4 |
+
|
5 |
+
from functools import wraps
|
6 |
+
from inspect import signature
|
7 |
+
|
8 |
+
def get_xp(xp):
|
9 |
+
"""
|
10 |
+
Decorator to automatically replace xp with the corresponding array module.
|
11 |
+
|
12 |
+
Use like
|
13 |
+
|
14 |
+
import numpy as np
|
15 |
+
|
16 |
+
@get_xp(np)
|
17 |
+
def func(x, /, xp, kwarg=None):
|
18 |
+
return xp.func(x, kwarg=kwarg)
|
19 |
+
|
20 |
+
Note that xp must be a keyword argument and come after all non-keyword
|
21 |
+
arguments.
|
22 |
+
|
23 |
+
"""
|
24 |
+
def inner(f):
|
25 |
+
@wraps(f)
|
26 |
+
def wrapped_f(*args, **kwargs):
|
27 |
+
return f(*args, xp=xp, **kwargs)
|
28 |
+
|
29 |
+
sig = signature(f)
|
30 |
+
new_sig = sig.replace(parameters=[sig.parameters[i] for i in sig.parameters if i != 'xp'])
|
31 |
+
|
32 |
+
if wrapped_f.__doc__ is None:
|
33 |
+
wrapped_f.__doc__ = f"""\
|
34 |
+
Array API compatibility wrapper for {f.__name__}.
|
35 |
+
|
36 |
+
See the corresponding documentation in NumPy/CuPy and/or the array API
|
37 |
+
specification for more details.
|
38 |
+
|
39 |
+
"""
|
40 |
+
wrapped_f.__signature__ = new_sig
|
41 |
+
return wrapped_f
|
42 |
+
|
43 |
+
return inner
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__init__.py
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
from ._helpers import *
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (227 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_aliases.cpython-310.pyc
ADDED
Binary file (12.1 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_helpers.cpython-310.pyc
ADDED
Binary file (6.47 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_linalg.cpython-310.pyc
ADDED
Binary file (5.96 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/__pycache__/_typing.cpython-310.pyc
ADDED
Binary file (926 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_aliases.py
ADDED
@@ -0,0 +1,536 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
These are functions that are just aliases of existing functions in NumPy.
|
3 |
+
"""
|
4 |
+
|
5 |
+
from __future__ import annotations
|
6 |
+
|
7 |
+
from typing import TYPE_CHECKING
|
8 |
+
if TYPE_CHECKING:
|
9 |
+
from typing import Optional, Sequence, Tuple, Union, List
|
10 |
+
from ._typing import ndarray, Device, Dtype, NestedSequence, SupportsBufferProtocol
|
11 |
+
|
12 |
+
from typing import NamedTuple
|
13 |
+
from types import ModuleType
|
14 |
+
import inspect
|
15 |
+
|
16 |
+
from ._helpers import _check_device, _is_numpy_array, array_namespace
|
17 |
+
|
18 |
+
# These functions are modified from the NumPy versions.
|
19 |
+
|
20 |
+
def arange(
|
21 |
+
start: Union[int, float],
|
22 |
+
/,
|
23 |
+
stop: Optional[Union[int, float]] = None,
|
24 |
+
step: Union[int, float] = 1,
|
25 |
+
*,
|
26 |
+
xp,
|
27 |
+
dtype: Optional[Dtype] = None,
|
28 |
+
device: Optional[Device] = None,
|
29 |
+
**kwargs
|
30 |
+
) -> ndarray:
|
31 |
+
_check_device(xp, device)
|
32 |
+
return xp.arange(start, stop=stop, step=step, dtype=dtype, **kwargs)
|
33 |
+
|
34 |
+
def empty(
|
35 |
+
shape: Union[int, Tuple[int, ...]],
|
36 |
+
xp,
|
37 |
+
*,
|
38 |
+
dtype: Optional[Dtype] = None,
|
39 |
+
device: Optional[Device] = None,
|
40 |
+
**kwargs
|
41 |
+
) -> ndarray:
|
42 |
+
_check_device(xp, device)
|
43 |
+
return xp.empty(shape, dtype=dtype, **kwargs)
|
44 |
+
|
45 |
+
def empty_like(
|
46 |
+
x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None,
|
47 |
+
**kwargs
|
48 |
+
) -> ndarray:
|
49 |
+
_check_device(xp, device)
|
50 |
+
return xp.empty_like(x, dtype=dtype, **kwargs)
|
51 |
+
|
52 |
+
def eye(
|
53 |
+
n_rows: int,
|
54 |
+
n_cols: Optional[int] = None,
|
55 |
+
/,
|
56 |
+
*,
|
57 |
+
xp,
|
58 |
+
k: int = 0,
|
59 |
+
dtype: Optional[Dtype] = None,
|
60 |
+
device: Optional[Device] = None,
|
61 |
+
**kwargs,
|
62 |
+
) -> ndarray:
|
63 |
+
_check_device(xp, device)
|
64 |
+
return xp.eye(n_rows, M=n_cols, k=k, dtype=dtype, **kwargs)
|
65 |
+
|
66 |
+
def full(
|
67 |
+
shape: Union[int, Tuple[int, ...]],
|
68 |
+
fill_value: Union[int, float],
|
69 |
+
xp,
|
70 |
+
*,
|
71 |
+
dtype: Optional[Dtype] = None,
|
72 |
+
device: Optional[Device] = None,
|
73 |
+
**kwargs,
|
74 |
+
) -> ndarray:
|
75 |
+
_check_device(xp, device)
|
76 |
+
return xp.full(shape, fill_value, dtype=dtype, **kwargs)
|
77 |
+
|
78 |
+
def full_like(
|
79 |
+
x: ndarray,
|
80 |
+
/,
|
81 |
+
fill_value: Union[int, float],
|
82 |
+
*,
|
83 |
+
xp,
|
84 |
+
dtype: Optional[Dtype] = None,
|
85 |
+
device: Optional[Device] = None,
|
86 |
+
**kwargs,
|
87 |
+
) -> ndarray:
|
88 |
+
_check_device(xp, device)
|
89 |
+
return xp.full_like(x, fill_value, dtype=dtype, **kwargs)
|
90 |
+
|
91 |
+
def linspace(
|
92 |
+
start: Union[int, float],
|
93 |
+
stop: Union[int, float],
|
94 |
+
/,
|
95 |
+
num: int,
|
96 |
+
*,
|
97 |
+
xp,
|
98 |
+
dtype: Optional[Dtype] = None,
|
99 |
+
device: Optional[Device] = None,
|
100 |
+
endpoint: bool = True,
|
101 |
+
**kwargs,
|
102 |
+
) -> ndarray:
|
103 |
+
_check_device(xp, device)
|
104 |
+
return xp.linspace(start, stop, num, dtype=dtype, endpoint=endpoint, **kwargs)
|
105 |
+
|
106 |
+
def ones(
|
107 |
+
shape: Union[int, Tuple[int, ...]],
|
108 |
+
xp,
|
109 |
+
*,
|
110 |
+
dtype: Optional[Dtype] = None,
|
111 |
+
device: Optional[Device] = None,
|
112 |
+
**kwargs,
|
113 |
+
) -> ndarray:
|
114 |
+
_check_device(xp, device)
|
115 |
+
return xp.ones(shape, dtype=dtype, **kwargs)
|
116 |
+
|
117 |
+
def ones_like(
|
118 |
+
x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None,
|
119 |
+
**kwargs,
|
120 |
+
) -> ndarray:
|
121 |
+
_check_device(xp, device)
|
122 |
+
return xp.ones_like(x, dtype=dtype, **kwargs)
|
123 |
+
|
124 |
+
def zeros(
|
125 |
+
shape: Union[int, Tuple[int, ...]],
|
126 |
+
xp,
|
127 |
+
*,
|
128 |
+
dtype: Optional[Dtype] = None,
|
129 |
+
device: Optional[Device] = None,
|
130 |
+
**kwargs,
|
131 |
+
) -> ndarray:
|
132 |
+
_check_device(xp, device)
|
133 |
+
return xp.zeros(shape, dtype=dtype, **kwargs)
|
134 |
+
|
135 |
+
def zeros_like(
|
136 |
+
x: ndarray, /, xp, *, dtype: Optional[Dtype] = None, device: Optional[Device] = None,
|
137 |
+
**kwargs,
|
138 |
+
) -> ndarray:
|
139 |
+
_check_device(xp, device)
|
140 |
+
return xp.zeros_like(x, dtype=dtype, **kwargs)
|
141 |
+
|
142 |
+
# np.unique() is split into four functions in the array API:
|
143 |
+
# unique_all, unique_counts, unique_inverse, and unique_values (this is done
|
144 |
+
# to remove polymorphic return types).
|
145 |
+
|
146 |
+
# The functions here return namedtuples (np.unique() returns a normal
|
147 |
+
# tuple).
|
148 |
+
class UniqueAllResult(NamedTuple):
|
149 |
+
values: ndarray
|
150 |
+
indices: ndarray
|
151 |
+
inverse_indices: ndarray
|
152 |
+
counts: ndarray
|
153 |
+
|
154 |
+
|
155 |
+
class UniqueCountsResult(NamedTuple):
|
156 |
+
values: ndarray
|
157 |
+
counts: ndarray
|
158 |
+
|
159 |
+
|
160 |
+
class UniqueInverseResult(NamedTuple):
|
161 |
+
values: ndarray
|
162 |
+
inverse_indices: ndarray
|
163 |
+
|
164 |
+
|
165 |
+
def _unique_kwargs(xp):
|
166 |
+
# Older versions of NumPy and CuPy do not have equal_nan. Rather than
|
167 |
+
# trying to parse version numbers, just check if equal_nan is in the
|
168 |
+
# signature.
|
169 |
+
s = inspect.signature(xp.unique)
|
170 |
+
if 'equal_nan' in s.parameters:
|
171 |
+
return {'equal_nan': False}
|
172 |
+
return {}
|
173 |
+
|
174 |
+
def unique_all(x: ndarray, /, xp) -> UniqueAllResult:
|
175 |
+
kwargs = _unique_kwargs(xp)
|
176 |
+
values, indices, inverse_indices, counts = xp.unique(
|
177 |
+
x,
|
178 |
+
return_counts=True,
|
179 |
+
return_index=True,
|
180 |
+
return_inverse=True,
|
181 |
+
**kwargs,
|
182 |
+
)
|
183 |
+
# np.unique() flattens inverse indices, but they need to share x's shape
|
184 |
+
# See https://github.com/numpy/numpy/issues/20638
|
185 |
+
inverse_indices = inverse_indices.reshape(x.shape)
|
186 |
+
return UniqueAllResult(
|
187 |
+
values,
|
188 |
+
indices,
|
189 |
+
inverse_indices,
|
190 |
+
counts,
|
191 |
+
)
|
192 |
+
|
193 |
+
|
194 |
+
def unique_counts(x: ndarray, /, xp) -> UniqueCountsResult:
|
195 |
+
kwargs = _unique_kwargs(xp)
|
196 |
+
res = xp.unique(
|
197 |
+
x,
|
198 |
+
return_counts=True,
|
199 |
+
return_index=False,
|
200 |
+
return_inverse=False,
|
201 |
+
**kwargs
|
202 |
+
)
|
203 |
+
|
204 |
+
return UniqueCountsResult(*res)
|
205 |
+
|
206 |
+
|
207 |
+
def unique_inverse(x: ndarray, /, xp) -> UniqueInverseResult:
|
208 |
+
kwargs = _unique_kwargs(xp)
|
209 |
+
values, inverse_indices = xp.unique(
|
210 |
+
x,
|
211 |
+
return_counts=False,
|
212 |
+
return_index=False,
|
213 |
+
return_inverse=True,
|
214 |
+
**kwargs,
|
215 |
+
)
|
216 |
+
# xp.unique() flattens inverse indices, but they need to share x's shape
|
217 |
+
# See https://github.com/numpy/numpy/issues/20638
|
218 |
+
inverse_indices = inverse_indices.reshape(x.shape)
|
219 |
+
return UniqueInverseResult(values, inverse_indices)
|
220 |
+
|
221 |
+
|
222 |
+
def unique_values(x: ndarray, /, xp) -> ndarray:
|
223 |
+
kwargs = _unique_kwargs(xp)
|
224 |
+
return xp.unique(
|
225 |
+
x,
|
226 |
+
return_counts=False,
|
227 |
+
return_index=False,
|
228 |
+
return_inverse=False,
|
229 |
+
**kwargs,
|
230 |
+
)
|
231 |
+
|
232 |
+
def astype(x: ndarray, dtype: Dtype, /, *, copy: bool = True) -> ndarray:
|
233 |
+
if not copy and dtype == x.dtype:
|
234 |
+
return x
|
235 |
+
return x.astype(dtype=dtype, copy=copy)
|
236 |
+
|
237 |
+
# These functions have different keyword argument names
|
238 |
+
|
239 |
+
def std(
|
240 |
+
x: ndarray,
|
241 |
+
/,
|
242 |
+
xp,
|
243 |
+
*,
|
244 |
+
axis: Optional[Union[int, Tuple[int, ...]]] = None,
|
245 |
+
correction: Union[int, float] = 0.0, # correction instead of ddof
|
246 |
+
keepdims: bool = False,
|
247 |
+
**kwargs,
|
248 |
+
) -> ndarray:
|
249 |
+
return xp.std(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs)
|
250 |
+
|
251 |
+
def var(
|
252 |
+
x: ndarray,
|
253 |
+
/,
|
254 |
+
xp,
|
255 |
+
*,
|
256 |
+
axis: Optional[Union[int, Tuple[int, ...]]] = None,
|
257 |
+
correction: Union[int, float] = 0.0, # correction instead of ddof
|
258 |
+
keepdims: bool = False,
|
259 |
+
**kwargs,
|
260 |
+
) -> ndarray:
|
261 |
+
return xp.var(x, axis=axis, ddof=correction, keepdims=keepdims, **kwargs)
|
262 |
+
|
263 |
+
# Unlike transpose(), the axes argument to permute_dims() is required.
|
264 |
+
def permute_dims(x: ndarray, /, axes: Tuple[int, ...], xp) -> ndarray:
|
265 |
+
return xp.transpose(x, axes)
|
266 |
+
|
267 |
+
# Creation functions add the device keyword (which does nothing for NumPy)
|
268 |
+
|
269 |
+
# asarray also adds the copy keyword
|
270 |
+
def _asarray(
|
271 |
+
obj: Union[
|
272 |
+
ndarray,
|
273 |
+
bool,
|
274 |
+
int,
|
275 |
+
float,
|
276 |
+
NestedSequence[bool | int | float],
|
277 |
+
SupportsBufferProtocol,
|
278 |
+
],
|
279 |
+
/,
|
280 |
+
*,
|
281 |
+
dtype: Optional[Dtype] = None,
|
282 |
+
device: Optional[Device] = None,
|
283 |
+
copy: "Optional[Union[bool, np._CopyMode]]" = None,
|
284 |
+
namespace = None,
|
285 |
+
**kwargs,
|
286 |
+
) -> ndarray:
|
287 |
+
"""
|
288 |
+
Array API compatibility wrapper for asarray().
|
289 |
+
|
290 |
+
See the corresponding documentation in NumPy/CuPy and/or the array API
|
291 |
+
specification for more details.
|
292 |
+
|
293 |
+
"""
|
294 |
+
if namespace is None:
|
295 |
+
try:
|
296 |
+
xp = array_namespace(obj, _use_compat=False)
|
297 |
+
except ValueError:
|
298 |
+
# TODO: What about lists of arrays?
|
299 |
+
raise ValueError("A namespace must be specified for asarray() with non-array input")
|
300 |
+
elif isinstance(namespace, ModuleType):
|
301 |
+
xp = namespace
|
302 |
+
elif namespace == 'numpy':
|
303 |
+
import numpy as xp
|
304 |
+
elif namespace == 'cupy':
|
305 |
+
import cupy as xp
|
306 |
+
else:
|
307 |
+
raise ValueError("Unrecognized namespace argument to asarray()")
|
308 |
+
|
309 |
+
_check_device(xp, device)
|
310 |
+
if _is_numpy_array(obj):
|
311 |
+
import numpy as np
|
312 |
+
if hasattr(np, '_CopyMode'):
|
313 |
+
# Not present in older NumPys
|
314 |
+
COPY_FALSE = (False, np._CopyMode.IF_NEEDED)
|
315 |
+
COPY_TRUE = (True, np._CopyMode.ALWAYS)
|
316 |
+
else:
|
317 |
+
COPY_FALSE = (False,)
|
318 |
+
COPY_TRUE = (True,)
|
319 |
+
else:
|
320 |
+
COPY_FALSE = (False,)
|
321 |
+
COPY_TRUE = (True,)
|
322 |
+
if copy in COPY_FALSE:
|
323 |
+
# copy=False is not yet implemented in xp.asarray
|
324 |
+
raise NotImplementedError("copy=False is not yet implemented")
|
325 |
+
if isinstance(obj, xp.ndarray):
|
326 |
+
if dtype is not None and obj.dtype != dtype:
|
327 |
+
copy = True
|
328 |
+
if copy in COPY_TRUE:
|
329 |
+
return xp.array(obj, copy=True, dtype=dtype)
|
330 |
+
return obj
|
331 |
+
|
332 |
+
return xp.asarray(obj, dtype=dtype, **kwargs)
|
333 |
+
|
334 |
+
# np.reshape calls the keyword argument 'newshape' instead of 'shape'
|
335 |
+
def reshape(x: ndarray,
|
336 |
+
/,
|
337 |
+
shape: Tuple[int, ...],
|
338 |
+
xp, copy: Optional[bool] = None,
|
339 |
+
**kwargs) -> ndarray:
|
340 |
+
if copy is True:
|
341 |
+
x = x.copy()
|
342 |
+
elif copy is False:
|
343 |
+
y = x.view()
|
344 |
+
y.shape = shape
|
345 |
+
return y
|
346 |
+
return xp.reshape(x, shape, **kwargs)
|
347 |
+
|
348 |
+
# The descending keyword is new in sort and argsort, and 'kind' replaced with
|
349 |
+
# 'stable'
|
350 |
+
def argsort(
|
351 |
+
x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True,
|
352 |
+
**kwargs,
|
353 |
+
) -> ndarray:
|
354 |
+
# Note: this keyword argument is different, and the default is different.
|
355 |
+
# We set it in kwargs like this because numpy.sort uses kind='quicksort'
|
356 |
+
# as the default whereas cupy.sort uses kind=None.
|
357 |
+
if stable:
|
358 |
+
kwargs['kind'] = "stable"
|
359 |
+
if not descending:
|
360 |
+
res = xp.argsort(x, axis=axis, **kwargs)
|
361 |
+
else:
|
362 |
+
# As NumPy has no native descending sort, we imitate it here. Note that
|
363 |
+
# simply flipping the results of xp.argsort(x, ...) would not
|
364 |
+
# respect the relative order like it would in native descending sorts.
|
365 |
+
res = xp.flip(
|
366 |
+
xp.argsort(xp.flip(x, axis=axis), axis=axis, **kwargs),
|
367 |
+
axis=axis,
|
368 |
+
)
|
369 |
+
# Rely on flip()/argsort() to validate axis
|
370 |
+
normalised_axis = axis if axis >= 0 else x.ndim + axis
|
371 |
+
max_i = x.shape[normalised_axis] - 1
|
372 |
+
res = max_i - res
|
373 |
+
return res
|
374 |
+
|
375 |
+
def sort(
|
376 |
+
x: ndarray, /, xp, *, axis: int = -1, descending: bool = False, stable: bool = True,
|
377 |
+
**kwargs,
|
378 |
+
) -> ndarray:
|
379 |
+
# Note: this keyword argument is different, and the default is different.
|
380 |
+
# We set it in kwargs like this because numpy.sort uses kind='quicksort'
|
381 |
+
# as the default whereas cupy.sort uses kind=None.
|
382 |
+
if stable:
|
383 |
+
kwargs['kind'] = "stable"
|
384 |
+
res = xp.sort(x, axis=axis, **kwargs)
|
385 |
+
if descending:
|
386 |
+
res = xp.flip(res, axis=axis)
|
387 |
+
return res
|
388 |
+
|
389 |
+
# nonzero should error for zero-dimensional arrays
|
390 |
+
def nonzero(x: ndarray, /, xp, **kwargs) -> Tuple[ndarray, ...]:
|
391 |
+
if x.ndim == 0:
|
392 |
+
raise ValueError("nonzero() does not support zero-dimensional arrays")
|
393 |
+
return xp.nonzero(x, **kwargs)
|
394 |
+
|
395 |
+
# sum() and prod() should always upcast when dtype=None
|
396 |
+
def sum(
|
397 |
+
x: ndarray,
|
398 |
+
/,
|
399 |
+
xp,
|
400 |
+
*,
|
401 |
+
axis: Optional[Union[int, Tuple[int, ...]]] = None,
|
402 |
+
dtype: Optional[Dtype] = None,
|
403 |
+
keepdims: bool = False,
|
404 |
+
**kwargs,
|
405 |
+
) -> ndarray:
|
406 |
+
# `xp.sum` already upcasts integers, but not floats or complexes
|
407 |
+
if dtype is None:
|
408 |
+
if x.dtype == xp.float32:
|
409 |
+
dtype = xp.float64
|
410 |
+
elif x.dtype == xp.complex64:
|
411 |
+
dtype = xp.complex128
|
412 |
+
return xp.sum(x, axis=axis, dtype=dtype, keepdims=keepdims, **kwargs)
|
413 |
+
|
414 |
+
def prod(
|
415 |
+
x: ndarray,
|
416 |
+
/,
|
417 |
+
xp,
|
418 |
+
*,
|
419 |
+
axis: Optional[Union[int, Tuple[int, ...]]] = None,
|
420 |
+
dtype: Optional[Dtype] = None,
|
421 |
+
keepdims: bool = False,
|
422 |
+
**kwargs,
|
423 |
+
) -> ndarray:
|
424 |
+
if dtype is None:
|
425 |
+
if x.dtype == xp.float32:
|
426 |
+
dtype = xp.float64
|
427 |
+
elif x.dtype == xp.complex64:
|
428 |
+
dtype = xp.complex128
|
429 |
+
return xp.prod(x, dtype=dtype, axis=axis, keepdims=keepdims, **kwargs)
|
430 |
+
|
431 |
+
# ceil, floor, and trunc return integers for integer inputs
|
432 |
+
|
433 |
+
def ceil(x: ndarray, /, xp, **kwargs) -> ndarray:
|
434 |
+
if xp.issubdtype(x.dtype, xp.integer):
|
435 |
+
return x
|
436 |
+
return xp.ceil(x, **kwargs)
|
437 |
+
|
438 |
+
def floor(x: ndarray, /, xp, **kwargs) -> ndarray:
|
439 |
+
if xp.issubdtype(x.dtype, xp.integer):
|
440 |
+
return x
|
441 |
+
return xp.floor(x, **kwargs)
|
442 |
+
|
443 |
+
def trunc(x: ndarray, /, xp, **kwargs) -> ndarray:
|
444 |
+
if xp.issubdtype(x.dtype, xp.integer):
|
445 |
+
return x
|
446 |
+
return xp.trunc(x, **kwargs)
|
447 |
+
|
448 |
+
# linear algebra functions
|
449 |
+
|
450 |
+
def matmul(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray:
|
451 |
+
return xp.matmul(x1, x2, **kwargs)
|
452 |
+
|
453 |
+
# Unlike transpose, matrix_transpose only transposes the last two axes.
|
454 |
+
def matrix_transpose(x: ndarray, /, xp) -> ndarray:
|
455 |
+
if x.ndim < 2:
|
456 |
+
raise ValueError("x must be at least 2-dimensional for matrix_transpose")
|
457 |
+
return xp.swapaxes(x, -1, -2)
|
458 |
+
|
459 |
+
def tensordot(x1: ndarray,
|
460 |
+
x2: ndarray,
|
461 |
+
/,
|
462 |
+
xp,
|
463 |
+
*,
|
464 |
+
axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2,
|
465 |
+
**kwargs,
|
466 |
+
) -> ndarray:
|
467 |
+
return xp.tensordot(x1, x2, axes=axes, **kwargs)
|
468 |
+
|
469 |
+
def vecdot(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1) -> ndarray:
    """Dot product along ``axis`` with broadcasting over the other axes.

    The two inputs are broadcast against each other, the reduced axis is
    moved last, and the reduction is expressed as a batched (1, n) @ (n, 1)
    matmul so only ``xp``'s matmul is required.
    """
    rank = max(x1.ndim, x2.ndim)
    padded1 = (1,) * (rank - x1.ndim) + tuple(x1.shape)
    padded2 = (1,) * (rank - x2.ndim) + tuple(x2.shape)
    if padded1[axis] != padded2[axis]:
        raise ValueError("x1 and x2 must have the same size along the given axis")

    # torch names the broadcasting helper differently.
    broadcast = getattr(xp, 'broadcast_tensors', None) or xp.broadcast_arrays

    a, b = broadcast(x1, x2)
    a = xp.moveaxis(a, axis, -1)
    b = xp.moveaxis(b, axis, -1)

    product = a[..., None, :] @ b[..., None]
    return product[..., 0, 0]
|
487 |
+
|
488 |
+
# isdtype is a new function in the 2022.12 array API specification.


def isdtype(
    dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]], xp,
    *, _tuple=True, # Disallow nested tuples
) -> bool:
    """
    Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.

    Note that outside of this function, this compat library does not yet fully
    support complex numbers.

    See
    https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
    for more details
    """
    if _tuple and isinstance(kind, tuple):
        # A (single-level) tuple of kinds matches if any member matches.
        return any(isdtype(dtype, k, xp, _tuple=False) for k in kind)
    if isinstance(kind, str):
        if kind == 'bool':
            return dtype == xp.bool_
        # Remaining string kinds map onto the library's abstract dtype
        # categories and are answered with issubdtype.
        categories = {
            'signed integer': xp.signedinteger,
            'unsigned integer': xp.unsignedinteger,
            'integral': xp.integer,
            'real floating': xp.floating,
            'complex floating': xp.complexfloating,
            'numeric': xp.number,
        }
        category = categories.get(kind)
        if category is None:
            raise ValueError(f"Unrecognized data type kind: {kind!r}")
        return xp.issubdtype(dtype, category)
    # This will allow things that aren't required by the spec, like
    # isdtype(np.float64, float) or isdtype(np.int64, 'l'). Should we be
    # more strict here to match the type annotation? Note that the
    # numpy.array_api implementation will be very strict.
    return dtype == kind
|
529 |
+
|
530 |
+
# Names implemented here and re-exported (wrapped per backend) by each
# backend's _aliases module.
__all__ = ['arange', 'empty', 'empty_like', 'eye', 'full', 'full_like',
           'linspace', 'ones', 'ones_like', 'zeros', 'zeros_like',
           'UniqueAllResult', 'UniqueCountsResult', 'UniqueInverseResult',
           'unique_all', 'unique_counts', 'unique_inverse', 'unique_values',
           'astype', 'std', 'var', 'permute_dims', 'reshape', 'argsort',
           'sort', 'nonzero', 'sum', 'prod', 'ceil', 'floor', 'trunc',
           'matmul', 'matrix_transpose', 'tensordot', 'vecdot', 'isdtype']
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_helpers.py
ADDED
@@ -0,0 +1,232 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
Various helper functions which are not part of the spec.
|
3 |
+
|
4 |
+
Functions which start with an underscore are for internal use only but helpers
|
5 |
+
that are in __all__ are intended as additional helper functions for use by end
|
6 |
+
users of the compat library.
|
7 |
+
"""
|
8 |
+
from __future__ import annotations
|
9 |
+
|
10 |
+
import sys
|
11 |
+
import math
|
12 |
+
|
13 |
+
def _is_numpy_array(x):
    """True if *x* is a NumPy ndarray or scalar, without importing NumPy."""
    # Only look at an already-loaded numpy; never trigger the import.
    np = sys.modules.get('numpy')
    if np is None:
        return False
    # TODO: Should we reject ndarray subclasses?
    return isinstance(x, (np.ndarray, np.generic))


def _is_cupy_array(x):
    """True if *x* is a CuPy ndarray or scalar, without importing CuPy."""
    # Only look at an already-loaded cupy; never trigger the import.
    cp = sys.modules.get('cupy')
    if cp is None:
        return False
    # TODO: Should we reject ndarray subclasses?
    return isinstance(x, (cp.ndarray, cp.generic))


def _is_torch_array(x):
    """True if *x* is a torch Tensor, without importing torch."""
    # Only look at an already-loaded torch; never trigger the import.
    torch = sys.modules.get('torch')
    if torch is None:
        return False
    # TODO: Should we reject Tensor subclasses?
    return isinstance(x, torch.Tensor)


def is_array_api_obj(x):
    """
    Check if x is an array API compatible array object.
    """
    if _is_numpy_array(x) or _is_cupy_array(x) or _is_torch_array(x):
        return True
    return hasattr(x, '__array_namespace__')
|
51 |
+
|
52 |
+
def _check_api_version(api_version):
    """Reject spec revisions the wrapped APIs do not implement."""
    if api_version is not None and api_version != '2021.12':
        raise ValueError("Only the 2021.12 version of the array API specification is currently supported")


def array_namespace(*xs, api_version=None, _use_compat=True):
    """
    Get the array API compatible namespace for the arrays `xs`.

    `xs` should contain one or more arrays.

    Typical usage is

        def your_function(x, y):
            xp = array_api_compat.array_namespace(x, y)
            # Now use xp as the array library namespace
            return xp.mean(x, axis=0) + 2*xp.std(y, axis=0)

    api_version should be the newest version of the spec that you need support
    for (currently the compat library wrapped APIs only support v2021.12).
    """
    def _namespace_for(x):
        # Resolve one input to its (compat or raw) namespace module.
        if _is_numpy_array(x):
            _check_api_version(api_version)
            if _use_compat:
                from .. import numpy as numpy_namespace
                return numpy_namespace
            import numpy as np
            return np
        if _is_cupy_array(x):
            _check_api_version(api_version)
            if _use_compat:
                from .. import cupy as cupy_namespace
                return cupy_namespace
            import cupy as cp
            return cp
        if _is_torch_array(x):
            _check_api_version(api_version)
            if _use_compat:
                from .. import torch as torch_namespace
                return torch_namespace
            import torch
            return torch
        if hasattr(x, '__array_namespace__'):
            return x.__array_namespace__(api_version=api_version)
        # TODO: Support Python scalars?
        raise TypeError(f"{type(x).__name__} is not a supported array type")

    namespaces = {_namespace_for(x) for x in xs}

    if not namespaces:
        raise TypeError("Unrecognized array input")
    if len(namespaces) != 1:
        raise TypeError(f"Multiple namespaces for array inputs: {namespaces}")

    xp, = namespaces
    return xp


# backwards compatibility alias
get_namespace = array_namespace
|
116 |
+
|
117 |
+
def _check_device(xp, device):
    """Validate a requested device for the given namespace (NumPy is CPU-only)."""
    if xp != sys.modules.get('numpy'):
        return
    if device not in ("cpu", None):
        raise ValueError(f"Unsupported device for NumPy: {device!r}")
|
121 |
+
|
122 |
+
# device() is not on numpy.ndarray, and to_device() is not on numpy.ndarray
# or cupy.ndarray. They are not included in array objects of this library
# because this library just reuses the respective ndarray classes without
# wrapping or subclassing them. These helper functions can be used instead of
# the wrapper functions for libraries that need to support both NumPy/CuPy and
# other libraries that use devices.
def device(x: "Array", /) -> "Device":
    """
    Hardware device the array data resides on.

    Parameters
    ----------
    x: array
        array instance from NumPy or an array API compatible library.

    Returns
    -------
    out: device
        a ``device`` object (see the "Device Support" section of the array API specification).
    """
    # NumPy arrays always live on the host; everything else reports its own.
    return "cpu" if _is_numpy_array(x) else x.device
|
145 |
+
|
146 |
+
# Based on cupy.array_api.Array.to_device
def _cupy_to_device(x, device, /, stream=None):
    """Copy a cupy array to *device*, optionally enqueuing on *stream*."""
    import cupy as cp
    from cupy.cuda import Device as _Device
    from cupy.cuda import stream as stream_module
    from cupy_backends.cuda.api import runtime

    if device == x.device:
        # Already resident on the requested device.
        return x
    if device == "cpu":
        # allowing us to use `to_device(x, "cpu")` is useful for portable
        # test swapping between host and device backends
        return x.get()
    if not isinstance(device, _Device):
        raise ValueError(f"Unsupported device {device!r}")

    # see cupy/cupy#5985 for the reason how we handle device/stream here
    previous_device = runtime.getDevice()
    previous_stream: stream_module.Stream = None
    if stream is not None:
        previous_stream = stream_module.get_current_stream()
        # stream can be an int as specified in __dlpack__, or a CuPy stream
        if isinstance(stream, int):
            stream = cp.cuda.ExternalStream(stream)
        elif not isinstance(stream, cp.cuda.Stream):
            raise ValueError('the input stream is not recognized')
        stream.use()
    try:
        runtime.setDevice(device.id)
        copied = x.copy()
    finally:
        # Always restore the caller's device (and stream, if we switched it).
        runtime.setDevice(previous_device)
        if stream is not None:
            previous_stream.use()
    return copied
|
184 |
+
|
185 |
+
def _torch_to_device(x, device, /, stream=None):
    """Move a torch tensor to *device*; stream selection is not implemented."""
    if stream is None:
        return x.to(device)
    raise NotImplementedError
|
189 |
+
|
190 |
+
def to_device(x: "Array", device: "Device", /, *, stream: "Optional[Union[int, Any]]" = None) -> "Array":
    """
    Copy the array from the device on which it currently resides to the specified ``device``.

    Parameters
    ----------
    x: array
        array instance from NumPy or an array API compatible library.
    device: device
        a ``device`` object (see the "Device Support" section of the array API specification).
    stream: Optional[Union[int, Any]]
        stream object to use during copy. In addition to the types supported in ``array.__dlpack__``, implementations may choose to support any library-specific stream object with the caveat that any code using such an object would not be portable.

    Returns
    -------
    out: array
        an array with the same data and data type as ``x`` and located on the specified ``device``.

    .. note::
        If ``stream`` is given, the copy operation should be enqueued on the provided ``stream``; otherwise, the copy operation should be enqueued on the default stream/queue. Whether the copy is performed synchronously or asynchronously is implementation-dependent. Accordingly, if synchronization is required to guarantee data safety, this must be clearly explained in a conforming library's documentation.
    """
    if _is_numpy_array(x):
        # NumPy is host-only: the only legal target is "cpu", with no stream.
        if stream is not None:
            raise ValueError("The stream argument to to_device() is not supported")
        if device == 'cpu':
            return x
        raise ValueError(f"Unsupported device {device!r}")
    if _is_cupy_array(x):
        # cupy does not yet have to_device
        return _cupy_to_device(x, device, stream=stream)
    if _is_torch_array(x):
        return _torch_to_device(x, device, stream=stream)
    # Array-API-native objects provide to_device() themselves.
    return x.to_device(device, stream=stream)
|
223 |
+
|
224 |
+
def size(x):
    """
    Return the total number of elements of x
    """
    shape = x.shape
    # A dimension of None marks an unknown (data-dependent) size.
    if None in shape:
        return None
    return math.prod(shape)
|
231 |
+
|
232 |
+
# Public helper API of this module; underscore helpers stay internal.
__all__ = ['is_array_api_obj', 'array_namespace', 'get_namespace', 'device', 'to_device', 'size']
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_linalg.py
ADDED
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from typing import TYPE_CHECKING, NamedTuple
|
4 |
+
if TYPE_CHECKING:
|
5 |
+
from typing import Literal, Optional, Sequence, Tuple, Union
|
6 |
+
from ._typing import ndarray
|
7 |
+
|
8 |
+
import numpy as np
# numpy 2.0 moved normalize_axis_tuple out of numpy.core; pick the import
# location by the parsed major version. Comparing the parsed major (not the
# first character of the version string) stays correct for any future major
# release (e.g. "10.0" starts with "1" and would take the wrong branch under
# a first-character test).
if int(np.__version__.split(".")[0]) >= 2:
    from numpy.lib.array_utils import normalize_axis_tuple
else:
    from numpy.core.numeric import normalize_axis_tuple
|
13 |
+
|
14 |
+
from ._aliases import matmul, matrix_transpose, tensordot, vecdot, isdtype
|
15 |
+
from .._internal import get_xp
|
16 |
+
|
17 |
+
# These are in the main NumPy namespace but not in numpy.linalg
def cross(x1: ndarray, x2: ndarray, /, xp, *, axis: int = -1, **kwargs) -> ndarray:
    """Vector cross product along ``axis`` (pass-through to ``xp.cross``)."""
    result = xp.cross(x1, x2, axis=axis, **kwargs)
    return result
|
20 |
+
|
21 |
+
def outer(x1: ndarray, x2: ndarray, /, xp, **kwargs) -> ndarray:
    """Outer product of two vectors (pass-through to ``xp.outer``)."""
    result = xp.outer(x1, x2, **kwargs)
    return result
|
23 |
+
|
24 |
+
class EighResult(NamedTuple):
    """Result of ``eigh``: eigenvalues and corresponding eigenvectors."""
    eigenvalues: ndarray
    eigenvectors: ndarray


class QRResult(NamedTuple):
    """Result of ``qr``: factors Q and R."""
    Q: ndarray
    R: ndarray


class SlogdetResult(NamedTuple):
    """Result of ``slogdet``: sign and log of the absolute determinant."""
    sign: ndarray
    logabsdet: ndarray


class SVDResult(NamedTuple):
    """Result of ``svd``: U, singular values S, and Vh."""
    U: ndarray
    S: ndarray
    Vh: ndarray


# These functions are the same as their NumPy counterparts except they return
# a namedtuple.
def eigh(x: ndarray, /, xp, **kwargs) -> EighResult:
    values, vectors = xp.linalg.eigh(x, **kwargs)
    return EighResult(values, vectors)


def qr(x: ndarray, /, xp, *, mode: Literal['reduced', 'complete'] = 'reduced',
       **kwargs) -> QRResult:
    q, r = xp.linalg.qr(x, mode=mode, **kwargs)
    return QRResult(q, r)


def slogdet(x: ndarray, /, xp, **kwargs) -> SlogdetResult:
    sign, logabsdet = xp.linalg.slogdet(x, **kwargs)
    return SlogdetResult(sign, logabsdet)


def svd(x: ndarray, /, xp, *, full_matrices: bool = True, **kwargs) -> SVDResult:
    u, s, vh = xp.linalg.svd(x, full_matrices=full_matrices, **kwargs)
    return SVDResult(u, s, vh)
|
55 |
+
|
56 |
+
# These functions have additional keyword arguments

# The upper keyword argument is new from NumPy
def cholesky(x: ndarray, /, xp, *, upper: bool = False, **kwargs) -> ndarray:
    """Cholesky factorization; ``upper=True`` returns the conjugate transpose
    of the lower factor instead."""
    lower = xp.linalg.cholesky(x, **kwargs)
    if not upper:
        return lower
    factor = get_xp(xp)(matrix_transpose)(lower)
    if get_xp(xp)(isdtype)(factor.dtype, 'complex floating'):
        # Conjugate so the result is L^H, not just L^T.
        factor = xp.conj(factor)
    return factor
|
67 |
+
|
68 |
+
# The rtol keyword argument of matrix_rank() and pinv() is new from NumPy.
# Note that it has a different semantic meaning from tol and rcond.
def matrix_rank(x: ndarray,
                /,
                xp,
                *,
                rtol: Optional[Union[float, ndarray]] = None,
                **kwargs) -> ndarray:
    """Count singular values above a relative tolerance threshold."""
    # this is different from xp.linalg.matrix_rank, which supports 1
    # dimensional arrays.
    if x.ndim < 2:
        raise xp.linalg.LinAlgError("1-dimensional array given. Array must be at least two-dimensional")
    singular_values = xp.linalg.svd(x, compute_uv=False, **kwargs)
    largest = singular_values.max(axis=-1, keepdims=True)
    if rtol is None:
        tol = largest * max(x.shape[-2:]) * xp.finfo(singular_values.dtype).eps
    else:
        # this is different from xp.linalg.matrix_rank, which does not
        # multiply the tolerance by the largest singular value.
        tol = largest * xp.asarray(rtol)[..., xp.newaxis]
    return xp.count_nonzero(singular_values > tol, axis=-1)
|
88 |
+
|
89 |
+
def pinv(x: ndarray, /, xp, *, rtol: Optional[Union[float, ndarray]] = None, **kwargs) -> ndarray:
    """Moore-Penrose pseudo-inverse with a relative tolerance.

    this is different from xp.linalg.pinv, which does not multiply the
    default tolerance by max(M, N).
    """
    effective_rtol = rtol if rtol is not None else max(x.shape[-2:]) * xp.finfo(x.dtype).eps
    return xp.linalg.pinv(x, rcond=effective_rtol, **kwargs)
|
95 |
+
|
96 |
+
# These functions are new in the array API spec

def matrix_norm(x: ndarray, /, xp, *, keepdims: bool = False, ord: Optional[Union[int, float, Literal['fro', 'nuc']]] = 'fro') -> ndarray:
    """Matrix norm over the trailing two axes (delegates to ``xp.linalg.norm``)."""
    norm = xp.linalg.norm(x, axis=(-2, -1), keepdims=keepdims, ord=ord)
    return norm
|
100 |
+
|
101 |
+
# svdvals is not in NumPy (but it is in SciPy). It is equivalent to
|
102 |
+
# xp.linalg.svd(compute_uv=False).
|
103 |
+
def svdvals(x: ndarray, /, xp) -> Union[ndarray, Tuple[ndarray, ...]]:
|
104 |
+
return xp.linalg.svd(x, compute_uv=False)
|
105 |
+
|
106 |
+
def vector_norm(x: ndarray, /, xp, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ord: Optional[Union[int, float]] = 2) -> ndarray:
    """Vector norm over ``axis`` — always a vector reduction, never a matrix norm.

    ``xp.linalg.norm`` tries to do a matrix norm whenever ``axis`` is a
    2-tuple or when ``axis=None`` and the input is 2-D, so we flatten
    (``axis=None``) or permute-and-reshape (tuple ``axis``) the input so the
    reduction always runs over a single dimension.
    """
    if axis is None:
        # Note: xp.linalg.norm() doesn't handle 0-D arrays
        x = x.ravel()
        norm_axis = 0
    elif isinstance(axis, tuple):
        # Note: The axis argument supports any number of axes, whereas
        # xp.linalg.norm() only supports a single axis for vector norm, so
        # move all reduced axes to the front and collapse them into one.
        normalized_axis = normalize_axis_tuple(axis, x.ndim)
        rest = tuple(i for i in range(x.ndim) if i not in normalized_axis)
        newshape = axis + rest
        collapsed = xp.prod([x.shape[i] for i in axis], dtype=int)
        x = xp.transpose(x, newshape).reshape(
            (collapsed, *[x.shape[i] for i in rest]))
        norm_axis = 0
    else:
        norm_axis = axis

    res = xp.linalg.norm(x, axis=norm_axis, ord=ord)

    if keepdims:
        # We can't reuse xp.linalg.norm(keepdims) because of the reshape hacks
        # above to avoid matrix norm logic.
        # NOTE(review): for axis=None / tuple axis, `x` has been rebound to a
        # reshaped view above, so the restored shape reflects that view rather
        # than the caller's original shape — this matches the original code;
        # confirm against the array API spec.
        shape = list(x.shape)
        reduced_axes = normalize_axis_tuple(range(x.ndim) if axis is None else axis, x.ndim)
        for i in reduced_axes:
            shape[i] = 1
        res = xp.reshape(res, tuple(shape))

    return res
|
139 |
+
|
140 |
+
# xp.diagonal and xp.trace operate on the first two axes whereas these
# operates on the last two

def diagonal(x: ndarray, /, xp, *, offset: int = 0, **kwargs) -> ndarray:
    """Extract the diagonal of the trailing two axes, shifted by ``offset``."""
    return xp.diagonal(x, offset=offset, axis1=-2, axis2=-1, **kwargs)
|
145 |
+
|
146 |
+
def trace(x: ndarray, /, xp, *, offset: int = 0, dtype=None, **kwargs) -> ndarray:
    """Sum of the diagonal of the trailing two axes.

    When no ``dtype`` is given, float32 (complex64) inputs accumulate in
    float64 (complex128), matching the other reduction wrappers in this
    package; the result is always returned as an array.
    """
    out_dtype = dtype
    if out_dtype is None:
        # Upgrade the default accumulation dtype for narrow float inputs.
        if x.dtype == xp.float32:
            out_dtype = xp.float64
        elif x.dtype == xp.complex64:
            out_dtype = xp.complex128
    return xp.asarray(xp.trace(x, offset=offset, dtype=out_dtype, axis1=-2, axis2=-1, **kwargs))
|
153 |
+
|
154 |
+
# Public linalg extension names; includes the matmul/tensordot/vecdot and
# matrix_transpose aliases imported from ._aliases above.
__all__ = ['cross', 'matmul', 'outer', 'tensordot', 'EighResult',
           'QRResult', 'SlogdetResult', 'SVDResult', 'eigh', 'qr', 'slogdet',
           'svd', 'cholesky', 'matrix_rank', 'pinv', 'matrix_norm',
           'matrix_transpose', 'svdvals', 'vecdot', 'vector_norm', 'diagonal',
           'trace']
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/common/_typing.py
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations

__all__ = [
    "NestedSequence",
    "SupportsBufferProtocol",
]

from typing import (
    Any,
    TypeVar,
    Protocol,
)

# Covariant element type for NestedSequence.
_T_co = TypeVar("_T_co", covariant=True)

class NestedSequence(Protocol[_T_co]):
    """Structural type for sequences whose items are elements or further
    nested sequences of the same element type."""
    def __getitem__(self, key: int, /) -> _T_co | NestedSequence[_T_co]: ...
    def __len__(self, /) -> int: ...

# Placeholder alias: any object supporting the buffer protocol.
SupportsBufferProtocol = Any
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__init__.py
ADDED
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from cupy import *

# from cupy import * doesn't overwrite these builtin names
from cupy import abs, max, min, round

# These imports may overwrite names from the import * above.
from ._aliases import *

# See the comment in the numpy __init__.py
__import__(__package__ + '.linalg')

from .linalg import matrix_transpose, vecdot

from ..common._helpers import *

# Revision of the array API standard this compat namespace advertises.
__array_api_version__ = '2022.12'
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (498 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_aliases.cpython-310.pyc
ADDED
Binary file (1.93 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/_typing.cpython-310.pyc
ADDED
Binary file (738 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/__pycache__/linalg.cpython-310.pyc
ADDED
Binary file (1.05 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_aliases.py
ADDED
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations

from functools import partial

from ..common import _aliases

from .._internal import get_xp

# Bind the shared _asarray helper to the 'cupy' namespace; keep the helper's
# docstring on the bound callable.
asarray = asarray_cupy = partial(_aliases._asarray, namespace='cupy')
asarray.__doc__ = _aliases._asarray.__doc__
del partial

import cupy as cp
# The spec's `bool` name is the library's boolean dtype.
bool = cp.bool_

# Basic renames
acos = cp.arccos
acosh = cp.arccosh
asin = cp.arcsin
asinh = cp.arcsinh
atan = cp.arctan
atan2 = cp.arctan2
atanh = cp.arctanh
bitwise_left_shift = cp.left_shift
bitwise_invert = cp.invert
bitwise_right_shift = cp.right_shift
concat = cp.concatenate
pow = cp.power

# Wrap the shared implementations from common._aliases so each receives
# cupy as its `xp` argument (injected by get_xp).
arange = get_xp(cp)(_aliases.arange)
empty = get_xp(cp)(_aliases.empty)
empty_like = get_xp(cp)(_aliases.empty_like)
eye = get_xp(cp)(_aliases.eye)
full = get_xp(cp)(_aliases.full)
full_like = get_xp(cp)(_aliases.full_like)
linspace = get_xp(cp)(_aliases.linspace)
ones = get_xp(cp)(_aliases.ones)
ones_like = get_xp(cp)(_aliases.ones_like)
zeros = get_xp(cp)(_aliases.zeros)
zeros_like = get_xp(cp)(_aliases.zeros_like)
UniqueAllResult = get_xp(cp)(_aliases.UniqueAllResult)
UniqueCountsResult = get_xp(cp)(_aliases.UniqueCountsResult)
UniqueInverseResult = get_xp(cp)(_aliases.UniqueInverseResult)
unique_all = get_xp(cp)(_aliases.unique_all)
unique_counts = get_xp(cp)(_aliases.unique_counts)
unique_inverse = get_xp(cp)(_aliases.unique_inverse)
unique_values = get_xp(cp)(_aliases.unique_values)
astype = _aliases.astype
std = get_xp(cp)(_aliases.std)
var = get_xp(cp)(_aliases.var)
permute_dims = get_xp(cp)(_aliases.permute_dims)
reshape = get_xp(cp)(_aliases.reshape)
argsort = get_xp(cp)(_aliases.argsort)
sort = get_xp(cp)(_aliases.sort)
nonzero = get_xp(cp)(_aliases.nonzero)
sum = get_xp(cp)(_aliases.sum)
prod = get_xp(cp)(_aliases.prod)
ceil = get_xp(cp)(_aliases.ceil)
floor = get_xp(cp)(_aliases.floor)
trunc = get_xp(cp)(_aliases.trunc)
matmul = get_xp(cp)(_aliases.matmul)
matrix_transpose = get_xp(cp)(_aliases.matrix_transpose)
tensordot = get_xp(cp)(_aliases.tensordot)

# These functions are completely new here. If the library already has them
# (i.e., numpy 2.0), use the library version instead of our wrapper.
if hasattr(cp, 'vecdot'):
    vecdot = cp.vecdot
else:
    vecdot = get_xp(cp)(_aliases.vecdot)
if hasattr(cp, 'isdtype'):
    isdtype = cp.isdtype
else:
    isdtype = get_xp(cp)(_aliases.isdtype)

# Everything from the shared module plus the cupy-specific renames above.
__all__ = _aliases.__all__ + ['asarray', 'asarray_cupy', 'bool', 'acos',
                              'acosh', 'asin', 'asinh', 'atan', 'atan2',
                              'atanh', 'bitwise_left_shift', 'bitwise_invert',
                              'bitwise_right_shift', 'concat', 'pow']
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/_typing.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations

__all__ = [
    "ndarray",
    "Device",
    "Dtype",
]

import sys
from typing import (
    Union,
    TYPE_CHECKING,
)

from cupy import (
    ndarray,
    dtype,
    int8,
    int16,
    int32,
    int64,
    uint8,
    uint16,
    uint32,
    uint64,
    float32,
    float64,
)

from cupy.cuda.device import Device

# Subscripting dtype (dtype[...]) needs Python 3.9+ at runtime; type checkers
# always accept it, so fall back to the bare dtype class on older versions.
if TYPE_CHECKING or sys.version_info >= (3, 9):
    Dtype = dtype[Union[
        int8,
        int16,
        int32,
        int64,
        uint8,
        uint16,
        uint32,
        uint64,
        float32,
        float64,
    ]]
else:
    Dtype = dtype
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/cupy/linalg.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from cupy.linalg import *
# cupy.linalg doesn't have __all__. If it is added, replace this with
#
# from cupy.linalg import __all__ as linalg_all
# Collect the star-imported names by executing the same import into a fresh
# namespace and listing what lands there.
_n = {}
exec('from cupy.linalg import *', _n)
del _n['__builtins__']
linalg_all = list(_n)
del _n

from ..common import _linalg
from .._internal import get_xp
from ._aliases import (matmul, matrix_transpose, tensordot, vecdot)

import cupy as cp

# Bind the shared _linalg implementations to cupy via get_xp.
cross = get_xp(cp)(_linalg.cross)
outer = get_xp(cp)(_linalg.outer)
EighResult = _linalg.EighResult
QRResult = _linalg.QRResult
SlogdetResult = _linalg.SlogdetResult
SVDResult = _linalg.SVDResult
eigh = get_xp(cp)(_linalg.eigh)
qr = get_xp(cp)(_linalg.qr)
slogdet = get_xp(cp)(_linalg.slogdet)
svd = get_xp(cp)(_linalg.svd)
cholesky = get_xp(cp)(_linalg.cholesky)
matrix_rank = get_xp(cp)(_linalg.matrix_rank)
pinv = get_xp(cp)(_linalg.pinv)
matrix_norm = get_xp(cp)(_linalg.matrix_norm)
svdvals = get_xp(cp)(_linalg.svdvals)
diagonal = get_xp(cp)(_linalg.diagonal)
trace = get_xp(cp)(_linalg.trace)

# These functions are completely new here. If the library already has them
# (i.e., numpy 2.0), use the library version instead of our wrapper.
if hasattr(cp.linalg, 'vector_norm'):
    vector_norm = cp.linalg.vector_norm
else:
    vector_norm = get_xp(cp)(_linalg.vector_norm)

__all__ = linalg_all + _linalg.__all__

# Drop module-private helpers so they don't leak from this namespace.
del get_xp
del cp
del linalg_all
del _linalg
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__init__.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from numpy import *

# from numpy import * doesn't overwrite these builtin names
from numpy import abs, max, min, round

# These imports may overwrite names from the import * above.
from ._aliases import *

# Don't know why, but we have to do an absolute import to import linalg. If we
# instead do
#
# from . import linalg
#
# It doesn't overwrite np.linalg from above. The import is generated
# dynamically so that the library can be vendored.
__import__(__package__ + '.linalg')

from .linalg import matrix_transpose, vecdot

from ..common._helpers import *

# Revision of the array API standard this compat namespace advertises.
__array_api_version__ = '2022.12'
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (500 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_aliases.cpython-310.pyc
ADDED
Binary file (1.93 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/_typing.cpython-310.pyc
ADDED
Binary file (734 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/__pycache__/linalg.cpython-310.pyc
ADDED
Binary file (981 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_aliases.py
ADDED
@@ -0,0 +1,79 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""NumPy-specific bindings of the shared array API compatibility aliases."""
from __future__ import annotations

from functools import partial

from ..common import _aliases

from .._internal import get_xp

# _asarray is shared across backends; bind the numpy namespace here.
asarray = asarray_numpy = partial(_aliases._asarray, namespace='numpy')
asarray.__doc__ = _aliases._asarray.__doc__
del partial

import numpy as np
# The array API's `bool` dtype name (deliberately shadows the builtin).
bool = np.bool_

# Basic renames
acos = np.arccos
acosh = np.arccosh
asin = np.arcsin
asinh = np.arcsinh
atan = np.arctan
atan2 = np.arctan2
atanh = np.arctanh
bitwise_left_shift = np.left_shift
bitwise_invert = np.invert
bitwise_right_shift = np.right_shift
concat = np.concatenate
pow = np.power

# Shared compatibility wrappers specialized to numpy via get_xp.
arange = get_xp(np)(_aliases.arange)
empty = get_xp(np)(_aliases.empty)
empty_like = get_xp(np)(_aliases.empty_like)
eye = get_xp(np)(_aliases.eye)
full = get_xp(np)(_aliases.full)
full_like = get_xp(np)(_aliases.full_like)
linspace = get_xp(np)(_aliases.linspace)
ones = get_xp(np)(_aliases.ones)
ones_like = get_xp(np)(_aliases.ones_like)
zeros = get_xp(np)(_aliases.zeros)
zeros_like = get_xp(np)(_aliases.zeros_like)
UniqueAllResult = get_xp(np)(_aliases.UniqueAllResult)
UniqueCountsResult = get_xp(np)(_aliases.UniqueCountsResult)
UniqueInverseResult = get_xp(np)(_aliases.UniqueInverseResult)
unique_all = get_xp(np)(_aliases.unique_all)
unique_counts = get_xp(np)(_aliases.unique_counts)
unique_inverse = get_xp(np)(_aliases.unique_inverse)
unique_values = get_xp(np)(_aliases.unique_values)
astype = _aliases.astype
std = get_xp(np)(_aliases.std)
var = get_xp(np)(_aliases.var)
permute_dims = get_xp(np)(_aliases.permute_dims)
reshape = get_xp(np)(_aliases.reshape)
argsort = get_xp(np)(_aliases.argsort)
sort = get_xp(np)(_aliases.sort)
nonzero = get_xp(np)(_aliases.nonzero)
sum = get_xp(np)(_aliases.sum)
prod = get_xp(np)(_aliases.prod)
ceil = get_xp(np)(_aliases.ceil)
floor = get_xp(np)(_aliases.floor)
trunc = get_xp(np)(_aliases.trunc)
matmul = get_xp(np)(_aliases.matmul)
matrix_transpose = get_xp(np)(_aliases.matrix_transpose)
tensordot = get_xp(np)(_aliases.tensordot)

# These functions are completely new here. If the library already has them
# (i.e., numpy 2.0), use the library version instead of our wrapper.
if hasattr(np, 'vecdot'):
    vecdot = np.vecdot
else:
    vecdot = get_xp(np)(_aliases.vecdot)
if hasattr(np, 'isdtype'):
    isdtype = np.isdtype
else:
    isdtype = get_xp(np)(_aliases.isdtype)

__all__ = _aliases.__all__ + ['asarray', 'asarray_numpy', 'bool', 'acos',
                              'acosh', 'asin', 'asinh', 'atan', 'atan2',
                              'atanh', 'bitwise_left_shift', 'bitwise_invert',
                              'bitwise_right_shift', 'concat', 'pow']
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/_typing.py
ADDED
@@ -0,0 +1,46 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Typing helpers for the NumPy backend of array_api_compat."""
from __future__ import annotations

__all__ = [
    "ndarray",
    "Device",
    "Dtype",
]

import sys
from typing import (
    Literal,
    Union,
    TYPE_CHECKING,
)

from numpy import (
    ndarray,
    dtype,
    int8,
    int16,
    int32,
    int64,
    uint8,
    uint16,
    uint32,
    uint64,
    float32,
    float64,
)

# NumPy arrays only live on the CPU.
Device = Literal["cpu"]
# dtype[...] subscripting only works at runtime on Python 3.9+; under
# TYPE_CHECKING the subscripted form is always safe for type checkers.
if TYPE_CHECKING or sys.version_info >= (3, 9):
    Dtype = dtype[Union[
        int8,
        int16,
        int32,
        int64,
        uint8,
        uint16,
        uint32,
        uint64,
        float32,
        float64,
    ]]
else:
    Dtype = dtype
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/numpy/linalg.py
ADDED
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Array API compatible linalg namespace for NumPy.

Starts from numpy.linalg and overrides/extends it with the shared
compatibility wrappers from ..common._linalg.
"""
from numpy.linalg import *
from numpy.linalg import __all__ as linalg_all

from ..common import _linalg
from .._internal import get_xp
# These four are also exposed at the top level per the array API spec.
from ._aliases import (matmul, matrix_transpose, tensordot, vecdot)

import numpy as np

# Shared wrappers specialized to numpy via get_xp.
cross = get_xp(np)(_linalg.cross)
outer = get_xp(np)(_linalg.outer)
EighResult = _linalg.EighResult
QRResult = _linalg.QRResult
SlogdetResult = _linalg.SlogdetResult
SVDResult = _linalg.SVDResult
eigh = get_xp(np)(_linalg.eigh)
qr = get_xp(np)(_linalg.qr)
slogdet = get_xp(np)(_linalg.slogdet)
svd = get_xp(np)(_linalg.svd)
cholesky = get_xp(np)(_linalg.cholesky)
matrix_rank = get_xp(np)(_linalg.matrix_rank)
pinv = get_xp(np)(_linalg.pinv)
matrix_norm = get_xp(np)(_linalg.matrix_norm)
svdvals = get_xp(np)(_linalg.svdvals)
diagonal = get_xp(np)(_linalg.diagonal)
trace = get_xp(np)(_linalg.trace)

# These functions are completely new here. If the library already has them
# (i.e., numpy 2.0), use the library version instead of our wrapper.
if hasattr(np.linalg, 'vector_norm'):
    vector_norm = np.linalg.vector_norm
else:
    vector_norm = get_xp(np)(_linalg.vector_norm)

__all__ = linalg_all + _linalg.__all__

# Keep the module namespace clean of implementation details.
del get_xp
del np
del linalg_all
del _linalg
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__init__.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Array API compatibility namespace for PyTorch."""
from torch import *

# Several names are not included in the above import *
import torch
for n in dir(torch):
    # Skip private names, names with a trailing underscore (in-place
    # variants), and cuda/cpu/backward helpers, which are not part of the
    # array API surface being re-exported here.
    if (n.startswith('_')
        or n.endswith('_')
        or 'cuda' in n
        or 'cpu' in n
        or 'backward' in n):
        continue
    # Bind the remaining torch attribute as a module-level name.
    exec(n + ' = torch.' + n)

# These imports may overwrite names from the import * above.
from ._aliases import *

# See the comment in the numpy __init__.py
__import__(__package__ + '.linalg')

from ..common._helpers import *

# Version of the array API standard implemented by this namespace.
__array_api_version__ = '2022.12'
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (541 Bytes). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/_aliases.cpython-310.pyc
ADDED
Binary file (17.5 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/__pycache__/linalg.cpython-310.pyc
ADDED
Binary file (2.35 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/_aliases.py
ADDED
@@ -0,0 +1,707 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""PyTorch-specific array API compatibility wrappers.

Defines the dtype sets and the explicit two-argument promotion table used
to emulate array API type promotion on top of torch.
"""
from __future__ import annotations

from functools import wraps
from builtins import all as builtin_all, any as builtin_any

from ..common._aliases import (UniqueAllResult, UniqueCountsResult,
                               UniqueInverseResult,
                               matrix_transpose as _aliases_matrix_transpose,
                               vecdot as _aliases_vecdot)
from .._internal import get_xp

import torch

from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from typing import List, Optional, Sequence, Tuple, Union
    from ..common._typing import Device
    from torch import dtype as Dtype

# The array type for this backend.
array = torch.Tensor

# Integer dtypes recognized by the array API (torch has no uint16/32/64).
_int_dtypes = {
    torch.uint8,
    torch.int8,
    torch.int16,
    torch.int32,
    torch.int64,
}

# All dtypes in the array API standard; tensors with other dtypes are left
# alone by the promotion fix-ups below.
_array_api_dtypes = {
    torch.bool,
    *_int_dtypes,
    torch.float32,
    torch.float64,
    torch.complex64,
    torch.complex128,
}

# Explicit pairwise promotion results per the array API spec, consulted
# before falling back to torch.result_type.
_promotion_table = {
    # bool
    (torch.bool, torch.bool): torch.bool,
    # ints
    (torch.int8, torch.int8): torch.int8,
    (torch.int8, torch.int16): torch.int16,
    (torch.int8, torch.int32): torch.int32,
    (torch.int8, torch.int64): torch.int64,
    (torch.int16, torch.int8): torch.int16,
    (torch.int16, torch.int16): torch.int16,
    (torch.int16, torch.int32): torch.int32,
    (torch.int16, torch.int64): torch.int64,
    (torch.int32, torch.int8): torch.int32,
    (torch.int32, torch.int16): torch.int32,
    (torch.int32, torch.int32): torch.int32,
    (torch.int32, torch.int64): torch.int64,
    (torch.int64, torch.int8): torch.int64,
    (torch.int64, torch.int16): torch.int64,
    (torch.int64, torch.int32): torch.int64,
    (torch.int64, torch.int64): torch.int64,
    # uints
    (torch.uint8, torch.uint8): torch.uint8,
    # ints and uints (mixed sign)
    (torch.int8, torch.uint8): torch.int16,
    (torch.int16, torch.uint8): torch.int16,
    (torch.int32, torch.uint8): torch.int32,
    (torch.int64, torch.uint8): torch.int64,
    (torch.uint8, torch.int8): torch.int16,
    (torch.uint8, torch.int16): torch.int16,
    (torch.uint8, torch.int32): torch.int32,
    (torch.uint8, torch.int64): torch.int64,
    # floats
    (torch.float32, torch.float32): torch.float32,
    (torch.float32, torch.float64): torch.float64,
    (torch.float64, torch.float32): torch.float64,
    (torch.float64, torch.float64): torch.float64,
    # complexes
    (torch.complex64, torch.complex64): torch.complex64,
    (torch.complex64, torch.complex128): torch.complex128,
    (torch.complex128, torch.complex64): torch.complex128,
    (torch.complex128, torch.complex128): torch.complex128,
    # Mixed float and complex
    (torch.float32, torch.complex64): torch.complex64,
    (torch.float32, torch.complex128): torch.complex128,
    (torch.float64, torch.complex64): torch.complex128,
    (torch.float64, torch.complex128): torch.complex128,
}
|
86 |
+
|
87 |
+
|
88 |
+
def _two_arg(f):
    """Return *f* wrapped so 0-D tensor operands get array API type promotion.

    The wrapper runs both operands through _fix_promotion before delegating
    to the underlying torch function.
    """
    @wraps(f)
    def wrapper(x1, x2, /, **kwargs):
        # Work around pytorch downcasting when one operand is 0-D.
        x1, x2 = _fix_promotion(x1, x2)
        return f(x1, x2, **kwargs)
    # wraps() copied f's docstring; synthesize one only if f had none.
    if wrapper.__doc__ is None:
        wrapper.__doc__ = f"""\
Array API compatibility wrapper for torch.{f.__name__}.

See the corresponding PyTorch documentation and/or the array API specification
for more details.

"""
    return wrapper
|
102 |
+
|
103 |
+
def _fix_promotion(x1, x2, only_scalar=True):
    """Upcast operands so binary torch ops follow array API promotion rules.

    With only_scalar=True (the default) only the case where the *other*
    operand is 0-D is adjusted; with only_scalar=False both operands are
    cast to the common result type. Dtypes outside the array API set are
    left untouched.
    """
    if x1.dtype not in _array_api_dtypes:
        return x1, x2
    if x2.dtype not in _array_api_dtypes:
        return x1, x2
    # If an argument is 0-D pytorch downcasts the other argument.
    if not only_scalar or x1.shape == ():
        x2 = x2.to(result_type(x1, x2))
    if not only_scalar or x2.shape == ():
        x1 = x1.to(result_type(x1, x2))
    return x1, x2
|
114 |
+
|
115 |
+
def result_type(*arrays_and_dtypes: Union[array, Dtype]) -> Dtype:
    """Array API result_type over any mix of torch tensors and dtypes."""
    count = len(arrays_and_dtypes)
    if count == 0:
        raise TypeError("At least one array or dtype must be provided")
    if count == 1:
        only = arrays_and_dtypes[0]
        return only if isinstance(only, torch.dtype) else only.dtype
    if count > 2:
        # Reduce pairwise: combine the first operand with the result of
        # promoting all the remaining ones.
        head, *tail = arrays_and_dtypes
        return result_type(head, result_type(*tail))

    x, y = arrays_and_dtypes
    xdt = x if isinstance(x, torch.dtype) else x.dtype
    ydt = y if isinstance(y, torch.dtype) else y.dtype

    # Prefer the explicit array-API promotion table.
    if (xdt, ydt) in _promotion_table:
        return _promotion_table[xdt, ydt]

    # This doesn't result_type(dtype, dtype) for non-array API dtypes
    # because torch.result_type only accepts tensors; materialize empty
    # tensors for bare dtypes. This does however, allow cross-kind promotion.
    if isinstance(x, torch.dtype):
        x = torch.tensor([], dtype=x)
    if isinstance(y, torch.dtype):
        y = torch.tensor([], dtype=y)
    return torch.result_type(x, y)
|
139 |
+
|
140 |
+
def can_cast(from_: Union[Dtype, array], to: Dtype, /) -> bool:
    """Array API can_cast: the source may be a tensor or a bare dtype."""
    source = from_ if isinstance(from_, torch.dtype) else from_.dtype
    return torch.can_cast(source, to)
|
144 |
+
|
145 |
+
# Basic renames
|
146 |
+
# Basic renames
bitwise_invert = torch.bitwise_not
newaxis = None

# Two-arg elementwise functions
# These require a wrapper to do the correct type promotion on 0-D tensors
add = _two_arg(torch.add)
atan2 = _two_arg(torch.atan2)
bitwise_and = _two_arg(torch.bitwise_and)
bitwise_left_shift = _two_arg(torch.bitwise_left_shift)
bitwise_or = _two_arg(torch.bitwise_or)
bitwise_right_shift = _two_arg(torch.bitwise_right_shift)
bitwise_xor = _two_arg(torch.bitwise_xor)
divide = _two_arg(torch.divide)
# Also a rename. torch.equal does not broadcast
equal = _two_arg(torch.eq)
floor_divide = _two_arg(torch.floor_divide)
greater = _two_arg(torch.greater)
greater_equal = _two_arg(torch.greater_equal)
less = _two_arg(torch.less)
less_equal = _two_arg(torch.less_equal)
logaddexp = _two_arg(torch.logaddexp)
# logical functions are not included here because they only accept bool in the
# spec, so type promotion is irrelevant.
multiply = _two_arg(torch.multiply)
not_equal = _two_arg(torch.not_equal)
pow = _two_arg(torch.pow)
remainder = _two_arg(torch.remainder)
subtract = _two_arg(torch.subtract)
|
175 |
+
# These wrappers are mostly based on the fact that pytorch uses 'dim' instead
|
176 |
+
# of 'axis'.
|
177 |
+
|
178 |
+
# torch.min and torch.max return a tuple and don't support multiple axes https://github.com/pytorch/pytorch/issues/58745
|
179 |
+
def max(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:
    """Maximum reduction with array API 'axis'/'keepdims' semantics.

    Uses torch.amax because torch.max returns a (values, indices) tuple and
    doesn't support multiple axes (https://github.com/pytorch/pytorch/issues/58745).
    """
    # Reducing over no axes is an identity op, but torch rejects dim=()
    # (https://github.com/pytorch/pytorch/issues/29137).
    if axis == ():
        return torch.clone(x)
    return torch.amax(x, axis, keepdims=keepdims)
|
184 |
+
|
185 |
+
def min(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> array:
    """Minimum reduction with array API 'axis'/'keepdims' semantics (via torch.amin)."""
    # Reducing over no axes is an identity op, but torch rejects dim=()
    # (https://github.com/pytorch/pytorch/issues/29137).
    if axis == ():
        return torch.clone(x)
    return torch.amin(x, axis, keepdims=keepdims)
|
190 |
+
|
191 |
+
# torch.sort also returns a tuple
|
192 |
+
# https://github.com/pytorch/pytorch/issues/70921
|
193 |
+
def sort(x: array, /, *, axis: int = -1, descending: bool = False, stable: bool = True, **kwargs) -> array:
    """Array API sort: returns only the sorted values.

    torch.sort returns a (values, indices) namedtuple
    (https://github.com/pytorch/pytorch/issues/70921), so drop the indices.
    """
    result = torch.sort(x, dim=axis, descending=descending, stable=stable, **kwargs)
    return result.values
|
195 |
+
|
196 |
+
def _normalize_axes(axis, ndim):
|
197 |
+
axes = []
|
198 |
+
if ndim == 0 and axis:
|
199 |
+
# Better error message in this case
|
200 |
+
raise IndexError(f"Dimension out of range: {axis[0]}")
|
201 |
+
lower, upper = -ndim, ndim - 1
|
202 |
+
for a in axis:
|
203 |
+
if a < lower or a > upper:
|
204 |
+
# Match torch error message (e.g., from sum())
|
205 |
+
raise IndexError(f"Dimension out of range (expected to be in range of [{lower}, {upper}], but got {a}")
|
206 |
+
if a < 0:
|
207 |
+
a = a + ndim
|
208 |
+
if a in axes:
|
209 |
+
# Use IndexError instead of RuntimeError, and "axis" instead of "dim"
|
210 |
+
raise IndexError(f"Axis {a} appears multiple times in the list of axes")
|
211 |
+
axes.append(a)
|
212 |
+
return sorted(axes)
|
213 |
+
|
214 |
+
def _axis_none_keepdims(x, ndim, keepdims):
|
215 |
+
# Apply keepdims when axis=None
|
216 |
+
# (https://github.com/pytorch/pytorch/issues/71209)
|
217 |
+
# Note that this is only valid for the axis=None case.
|
218 |
+
if keepdims:
|
219 |
+
for i in range(ndim):
|
220 |
+
x = torch.unsqueeze(x, 0)
|
221 |
+
return x
|
222 |
+
|
223 |
+
def _reduce_multiple_axes(f, x, axis, keepdims=False, **kwargs):
    # Some reductions don't support multiple axes
    # (https://github.com/pytorch/pytorch/issues/56586).
    # Strategy: move every reduced axis to the end (iterating in reverse so
    # earlier moves don't shift later indices), flatten the moved axes into
    # one trailing dimension, reduce over that single dimension, then
    # optionally re-insert singleton dims at the reduced positions.
    axes = _normalize_axes(axis, x.ndim)
    for a in reversed(axes):
        x = torch.movedim(x, a, -1)
    x = torch.flatten(x, -len(axes))

    out = f(x, -1, **kwargs)

    if keepdims:
        for a in axes:
            out = torch.unsqueeze(out, a)
    return out
|
237 |
+
|
238 |
+
def prod(x: array,
         /,
         *,
         axis: Optional[Union[int, Tuple[int, ...]]] = None,
         dtype: Optional[Dtype] = None,
         keepdims: bool = False,
         **kwargs) -> array:
    """Array API prod on torch tensors, handling axis tuples, axis=() and keepdims."""
    x = torch.asarray(x)
    ndim = x.ndim

    # https://github.com/pytorch/pytorch/issues/29137. Separate from the
    # logic below because reducing over no axes must still upcast.
    if axis == ():
        if dtype is not None:
            return x.to(dtype)
        # We can't upcast uint8 according to the spec because there is no
        # torch.uint64, so at least upcast to int64 which is what sum does
        # when axis=None.
        if x.dtype in [torch.int8, torch.int16, torch.int32, torch.uint8]:
            return x.to(torch.int64)
        return x.clone()

    # torch.prod doesn't support multiple axes
    # (https://github.com/pytorch/pytorch/issues/56586).
    if isinstance(axis, tuple):
        return _reduce_multiple_axes(torch.prod, x, axis, keepdims=keepdims, dtype=dtype, **kwargs)

    if axis is None:
        # torch doesn't support keepdims with axis=None
        # (https://github.com/pytorch/pytorch/issues/71209)
        res = torch.prod(x, dtype=dtype, **kwargs)
        return _axis_none_keepdims(res, ndim, keepdims)

    return torch.prod(x, axis, dtype=dtype, keepdims=keepdims, **kwargs)
|
272 |
+
|
273 |
+
|
274 |
+
def sum(x: array,
        /,
        *,
        axis: Optional[Union[int, Tuple[int, ...]]] = None,
        dtype: Optional[Dtype] = None,
        keepdims: bool = False,
        **kwargs) -> array:
    """Array API sum on torch tensors, handling axis=(), axis=None and keepdims."""
    x = torch.asarray(x)
    ndim = x.ndim

    # Reducing over no axes is an identity op but must still upcast
    # (https://github.com/pytorch/pytorch/issues/29137).
    if axis == ():
        if dtype is not None:
            return x.to(dtype)
        # We can't upcast uint8 according to the spec because there is no
        # torch.uint64, so at least upcast to int64 which is what sum does
        # when axis=None.
        if x.dtype in [torch.int8, torch.int16, torch.int32, torch.uint8]:
            return x.to(torch.int64)
        return x.clone()

    if axis is None:
        # torch doesn't support keepdims with axis=None
        # (https://github.com/pytorch/pytorch/issues/71209)
        res = torch.sum(x, dtype=dtype, **kwargs)
        return _axis_none_keepdims(res, ndim, keepdims)

    return torch.sum(x, axis, dtype=dtype, keepdims=keepdims, **kwargs)
|
304 |
+
|
305 |
+
def any(x: array,
        /,
        *,
        axis: Optional[Union[int, Tuple[int, ...]]] = None,
        keepdims: bool = False,
        **kwargs) -> array:
    """Array API any; always returns a bool tensor (torch.any doesn't for uint8)."""
    x = torch.asarray(x)
    ndim = x.ndim
    # https://github.com/pytorch/pytorch/issues/29137
    if axis == ():
        return x.to(torch.bool)
    # torch.any doesn't support multiple axes
    # (https://github.com/pytorch/pytorch/issues/56586).
    if isinstance(axis, tuple):
        return _reduce_multiple_axes(torch.any, x, axis, keepdims=keepdims, **kwargs).to(torch.bool)
    if axis is None:
        # torch doesn't support keepdims with axis=None
        # (https://github.com/pytorch/pytorch/issues/71209)
        res = _axis_none_keepdims(torch.any(x, **kwargs), ndim, keepdims)
        return res.to(torch.bool)

    # torch.any doesn't return bool for uint8
    return torch.any(x, axis, keepdims=keepdims).to(torch.bool)
|
329 |
+
|
330 |
+
def all(x: array,
        /,
        *,
        axis: Optional[Union[int, Tuple[int, ...]]] = None,
        keepdims: bool = False,
        **kwargs) -> array:
    """Array API all; always returns a bool tensor (torch.all doesn't for uint8)."""
    x = torch.asarray(x)
    ndim = x.ndim
    # https://github.com/pytorch/pytorch/issues/29137
    if axis == ():
        return x.to(torch.bool)
    # torch.all doesn't support multiple axes
    # (https://github.com/pytorch/pytorch/issues/56586).
    if isinstance(axis, tuple):
        return _reduce_multiple_axes(torch.all, x, axis, keepdims=keepdims, **kwargs).to(torch.bool)
    if axis is None:
        # torch doesn't support keepdims with axis=None
        # (https://github.com/pytorch/pytorch/issues/71209)
        res = _axis_none_keepdims(torch.all(x, **kwargs), ndim, keepdims)
        return res.to(torch.bool)

    # torch.all doesn't return bool for uint8
    return torch.all(x, axis, keepdims=keepdims).to(torch.bool)
|
354 |
+
|
355 |
+
def mean(x: array,
         /,
         *,
         axis: Optional[Union[int, Tuple[int, ...]]] = None,
         keepdims: bool = False,
         **kwargs) -> array:
    """Array API mean with 'axis'/'keepdims' naming on top of torch.mean."""
    # Reducing over no axes is an identity op; torch rejects dim=()
    # (https://github.com/pytorch/pytorch/issues/29137).
    if axis == ():
        return torch.clone(x)
    if axis is None:
        # torch doesn't support keepdims with axis=None
        # (https://github.com/pytorch/pytorch/issues/71209)
        return _axis_none_keepdims(torch.mean(x, **kwargs), x.ndim, keepdims)
    return torch.mean(x, axis, keepdims=keepdims, **kwargs)
|
371 |
+
|
372 |
+
def std(x: array,
        /,
        *,
        axis: Optional[Union[int, Tuple[int, ...]]] = None,
        correction: Union[int, float] = 0.0,
        keepdims: bool = False,
        **kwargs) -> array:
    """Array API std on torch tensors.

    Maps 'axis'/'keepdims'/'correction' onto torch.std. Float correction is
    not supported (https://github.com/pytorch/pytorch/issues/61492); a float
    with an integral value is accepted and converted, any other float raises
    NotImplementedError.
    """
    if isinstance(correction, float):
        _correction = int(correction)
        if correction != _correction:
            raise NotImplementedError("float correction in torch std() is not yet supported")
    else:
        # BUG FIX: _correction was previously only assigned in the float
        # branch, so an integer correction raised NameError below.
        _correction = correction

    # Reducing over no axes yields zeros; torch rejects dim=()
    # (https://github.com/pytorch/pytorch/issues/29137).
    if axis == ():
        return torch.zeros_like(x)
    if isinstance(axis, int):
        axis = (axis,)
    if axis is None:
        # torch doesn't support keepdims with axis=None
        # (https://github.com/pytorch/pytorch/issues/71209)
        res = torch.std(x, tuple(range(x.ndim)), correction=_correction, **kwargs)
        res = _axis_none_keepdims(res, x.ndim, keepdims)
        return res
    return torch.std(x, axis, correction=_correction, keepdims=keepdims, **kwargs)
|
400 |
+
|
401 |
+
def var(x: array,
|
402 |
+
/,
|
403 |
+
*,
|
404 |
+
axis: Optional[Union[int, Tuple[int, ...]]] = None,
|
405 |
+
correction: Union[int, float] = 0.0,
|
406 |
+
keepdims: bool = False,
|
407 |
+
**kwargs) -> array:
|
408 |
+
# Note, float correction is not supported
|
409 |
+
# https://github.com/pytorch/pytorch/issues/61492. We don't try to
|
410 |
+
# implement it here for now.
|
411 |
+
|
412 |
+
# if isinstance(correction, float):
|
413 |
+
# correction = int(correction)
|
414 |
+
|
415 |
+
# https://github.com/pytorch/pytorch/issues/29137
|
416 |
+
if axis == ():
|
417 |
+
return torch.zeros_like(x)
|
418 |
+
if isinstance(axis, int):
|
419 |
+
axis = (axis,)
|
420 |
+
if axis is None:
|
421 |
+
# torch doesn't support keepdims with axis=None
|
422 |
+
# (https://github.com/pytorch/pytorch/issues/71209)
|
423 |
+
res = torch.var(x, tuple(range(x.ndim)), correction=correction, **kwargs)
|
424 |
+
res = _axis_none_keepdims(res, x.ndim, keepdims)
|
425 |
+
return res
|
426 |
+
return torch.var(x, axis, correction=correction, keepdims=keepdims, **kwargs)
|
427 |
+
|
428 |
+
# torch.concat doesn't support dim=None
|
429 |
+
# https://github.com/pytorch/pytorch/issues/70925
|
430 |
+
def concat(arrays: Union[Tuple[array, ...], List[array]],
           /,
           *,
           axis: Optional[int] = 0,
           **kwargs) -> array:
    """Array API concat.

    axis=None joins the flattened inputs, since torch.concat rejects
    dim=None (https://github.com/pytorch/pytorch/issues/70925).
    """
    if axis is None:
        arrays = tuple(t.flatten() for t in arrays)
        axis = 0
    return torch.concat(arrays, axis, **kwargs)
|
439 |
+
|
440 |
+
# torch.squeeze only accepts int dim and doesn't require it
|
441 |
+
# https://github.com/pytorch/pytorch/issues/70924. Support for tuple dim was
|
442 |
+
# added at https://github.com/pytorch/pytorch/pull/89017.
|
443 |
+
def squeeze(x: array, /, axis: Union[int, Tuple[int, ...]]) -> array:
    """Array API squeeze: every named axis must have size 1.

    torch.squeeze silently ignores non-1 dims and doesn't require an axis
    (https://github.com/pytorch/pytorch/issues/70924); tuple dims only
    landed in https://github.com/pytorch/pytorch/pull/89017.
    """
    if isinstance(axis, int):
        axis = (axis,)
    for a in axis:
        if x.shape[a] != 1:
            raise ValueError("squeezed dimensions must be equal to 1")
    axes = _normalize_axes(axis, x.ndim)
    # Remove this loop once pytorch 1.14 is released with the above PR
    # #89017: squeeze one axis at a time, offsetting each index by the
    # number of dimensions already removed before it.
    for removed, a in enumerate(axes):
        x = torch.squeeze(x, a - removed)
    return x
|
455 |
+
|
456 |
+
# torch.broadcast_to uses size instead of shape
|
457 |
+
def broadcast_to(x: array, /, shape: Tuple[int, ...], **kwargs) -> array:
    """Alias for torch.broadcast_to, which names the argument 'size' instead of 'shape'."""
    return torch.broadcast_to(x, shape, **kwargs)
|
459 |
+
|
460 |
+
# torch.permute uses dims instead of axes
|
461 |
+
def permute_dims(x: array, /, axes: Tuple[int, ...]) -> array:
    """Alias for torch.permute, which names the argument 'dims' instead of 'axes'."""
    return torch.permute(x, axes)
|
463 |
+
|
464 |
+
# The axis parameter doesn't work for flip() and roll()
|
465 |
+
# https://github.com/pytorch/pytorch/issues/71210. Also torch.flip() doesn't
|
466 |
+
# accept axis=None
|
467 |
+
def flip(x: array, /, *, axis: Optional[Union[int, Tuple[int, ...]]] = None, **kwargs) -> array:
    """Array API flip; axis=None reverses every dimension.

    torch.flip rejects axis=None (https://github.com/pytorch/pytorch/issues/71210).
    """
    if axis is None:
        axis = tuple(range(x.ndim))
    # torch.flip doesn't accept dim as an int, but the Tensor method does
    # (https://github.com/pytorch/pytorch/issues/18095), so use the method.
    return x.flip(axis, **kwargs)
|
473 |
+
|
474 |
+
def roll(x: array, /, shift: Union[int, Tuple[int, ...]], *, axis: Optional[Union[int, Tuple[int, ...]]] = None, **kwargs) -> array:
    """Thin wrapper: torch.roll already matches the array API apart from argument naming."""
    return torch.roll(x, shift, axis, **kwargs)
|
476 |
+
|
477 |
+
def nonzero(x: array, /, **kwargs) -> Tuple[array, ...]:
    """Array API nonzero: a tuple of index tensors, one per dimension.

    0-D input is rejected per the spec.
    """
    if x.ndim == 0:
        raise ValueError("nonzero() does not support zero-dimensional arrays")
    return torch.nonzero(x, as_tuple=True, **kwargs)
|
481 |
+
|
482 |
+
def where(condition: array, x1: array, x2: array, /) -> array:
    # Array API where: apply the 0-D type-promotion fix to x1/x2 before
    # delegating to torch.where.
    x1, x2 = _fix_promotion(x1, x2)
    return torch.where(condition, x1, x2)
|
485 |
+
|
486 |
+
# torch.reshape doesn't have the copy keyword
|
487 |
+
def reshape(x: array,
            /,
            shape: Tuple[int, ...],
            copy: Optional[bool] = None,
            **kwargs) -> array:
    """Array API reshape. The 'copy' keyword is not supported by torch.reshape."""
    if copy is not None:
        raise NotImplementedError("torch.reshape doesn't yet support the copy keyword")
    return torch.reshape(x, shape, **kwargs)
|
495 |
+
|
496 |
+
# torch.arange doesn't support returning empty arrays
|
497 |
+
# (https://github.com/pytorch/pytorch/issues/70915), and doesn't support some
|
498 |
+
# keyword argument combinations
|
499 |
+
# (https://github.com/pytorch/pytorch/issues/70914)
|
500 |
+
def arange(start: Union[int, float],
           /,
           stop: Optional[Union[int, float]] = None,
           step: Union[int, float] = 1,
           *,
           dtype: Optional[Dtype] = None,
           device: Optional[Device] = None,
           **kwargs) -> array:
    """Array API arange.

    Handles the one-argument form arange(stop) and empty ranges, which
    torch.arange rejects (https://github.com/pytorch/pytorch/issues/70915,
    https://github.com/pytorch/pytorch/issues/70914).
    """
    if stop is None:
        start, stop = 0, start
    is_empty = (step > 0 and stop <= start) or (step < 0 and stop >= start)
    if is_empty:
        if dtype is None:
            # All-integer arguments produce int64; otherwise fall back to
            # float32, matching the non-empty torch.arange behavior.
            if builtin_all(isinstance(i, int) for i in [start, stop, step]):
                dtype = torch.int64
            else:
                dtype = torch.float32
        return torch.empty(0, dtype=dtype, device=device, **kwargs)
    return torch.arange(start, stop, step, dtype=dtype, device=device, **kwargs)
|
518 |
+
|
519 |
+
# torch.eye requires an explicit second argument and cannot place the ones on
# an off-diagonal (https://github.com/pytorch/pytorch/issues/70910), so the
# result is assembled from zeros plus a filled diagonal.
def eye(n_rows: int,
        n_cols: Optional[int] = None,
        /,
        *,
        k: int = 0,
        dtype: Optional[Dtype] = None,
        device: Optional[Device] = None,
        **kwargs) -> array:
    """Return a 2-D array with ones on the ``k``-th diagonal, zeros elsewhere."""
    if n_cols is None:
        n_cols = n_rows
    out = torch.zeros(n_rows, n_cols, dtype=dtype, device=device, **kwargs)
    if abs(k) <= n_rows + n_cols:
        out.diagonal(k).fill_(1)
    return out
|
535 |
+
|
536 |
+
# torch.linspace lacks the array-API ``endpoint`` parameter; emulate
# endpoint=False by sampling one extra point and dropping the last.
def linspace(start: Union[int, float],
             stop: Union[int, float],
             /,
             num: int,
             *,
             dtype: Optional[Dtype] = None,
             device: Optional[Device] = None,
             endpoint: bool = True,
             **kwargs) -> array:
    """Return ``num`` evenly spaced samples from ``start`` toward ``stop``."""
    if endpoint:
        return torch.linspace(start, stop, num, dtype=dtype, device=device, **kwargs)
    return torch.linspace(start, stop, num + 1, dtype=dtype, device=device, **kwargs)[:-1]
|
549 |
+
|
550 |
+
# torch.full rejects a bare int as the shape
# (https://github.com/pytorch/pytorch/issues/70906), so normalize it to a
# 1-tuple first.
def full(shape: Union[int, Tuple[int, ...]],
         fill_value: Union[bool, int, float, complex],
         *,
         dtype: Optional[Dtype] = None,
         device: Optional[Device] = None,
         **kwargs) -> array:
    """Return an array of the given shape filled with ``fill_value``."""
    normalized = (shape,) if isinstance(shape, int) else shape
    return torch.full(normalized, fill_value, dtype=dtype, device=device, **kwargs)
|
562 |
+
|
563 |
+
# ones, zeros and empty reject ``shape`` as a keyword argument, so the shape
# is always forwarded positionally.
def ones(shape: Union[int, Tuple[int, ...]],
         *,
         dtype: Optional[Dtype] = None,
         device: Optional[Device] = None,
         **kwargs) -> array:
    """Return an array of the given shape filled with ones."""
    return torch.ones(shape, dtype=dtype, device=device, **kwargs)
|
570 |
+
|
571 |
+
def zeros(shape: Union[int, Tuple[int, ...]],
          *,
          dtype: Optional[Dtype] = None,
          device: Optional[Device] = None,
          **kwargs) -> array:
    """Return an array of the given shape filled with zeros."""
    # Shape is passed positionally; torch.zeros rejects shape= as a keyword.
    return torch.zeros(shape, dtype=dtype, device=device, **kwargs)
|
577 |
+
|
578 |
+
def empty(shape: Union[int, Tuple[int, ...]],
          *,
          dtype: Optional[Dtype] = None,
          device: Optional[Device] = None,
          **kwargs) -> array:
    """Return an uninitialized array of the given shape."""
    # Shape is passed positionally; torch.empty rejects shape= as a keyword.
    return torch.empty(shape, dtype=dtype, device=device, **kwargs)
|
584 |
+
|
585 |
+
# torch.tril and torch.triu accept the diagonal offset only positionally,
# while the array API spells it as keyword ``k``.
def tril(x: array, /, *, k: int = 0) -> array:
    """Zero out the elements above the ``k``-th diagonal of ``x``."""
    return torch.tril(x, k)
|
589 |
+
|
590 |
+
def triu(x: array, /, *, k: int = 0) -> array:
    """Zero out the elements below the ``k``-th diagonal of ``x``."""
    # Offset is forwarded positionally (torch has no ``k`` keyword).
    return torch.triu(x, k)
|
592 |
+
|
593 |
+
# Not provided by torch itself: https://github.com/pytorch/pytorch/issues/58742
def expand_dims(x: array, /, *, axis: int = 0) -> array:
    """Insert a new size-1 dimension into ``x`` at position ``axis``."""
    return torch.unsqueeze(x, axis)
|
596 |
+
|
597 |
+
def astype(x: array, dtype: Dtype, /, *, copy: bool = True) -> array:
    """Cast ``x`` to ``dtype``.

    With ``copy=False`` torch may return ``x`` itself when no cast is needed.
    """
    return x.to(dtype, copy=copy)
|
599 |
+
|
600 |
+
def broadcast_arrays(*arrays: array) -> List[array]:
    """Broadcast the inputs against one another to a common shape."""
    target = torch.broadcast_shapes(*(arr.shape for arr in arrays))
    return [torch.broadcast_to(arr, target) for arr in arrays]
|
603 |
+
|
604 |
+
# https://github.com/pytorch/pytorch/issues/70920
def unique_all(x: array) -> UniqueAllResult:
    # Would return (values, indices, inverse_indices, counts), but the
    # ``indices`` field cannot currently be computed:
    # torch.unique doesn't support returning indices.
    # https://github.com/pytorch/pytorch/issues/36748. The workaround
    # suggested in that issue doesn't actually function correctly (it relies
    # on non-deterministic behavior of scatter()).
    raise NotImplementedError("unique_all() not yet implemented for pytorch (see https://github.com/pytorch/pytorch/issues/36748)")

    # Sketch of the implementation once torch can return first-occurrence indices:
    # values, inverse_indices, counts = torch.unique(x, return_counts=True, return_inverse=True)
    # # torch.unique incorrectly gives a 0 count for nan values.
    # # https://github.com/pytorch/pytorch/issues/94106
    # counts[torch.isnan(values)] = 1
    # return UniqueAllResult(values, indices, inverse_indices, counts)
|
617 |
+
|
618 |
+
def unique_counts(x: array) -> UniqueCountsResult:
    """Return the unique elements of ``x`` and their occurrence counts."""
    values, counts = torch.unique(x, return_counts=True)

    # Work around https://github.com/pytorch/pytorch/issues/94106:
    # torch.unique reports a count of 0 for nan values.
    counts[torch.isnan(values)] = 1
    return UniqueCountsResult(values, counts)
|
625 |
+
|
626 |
+
def unique_inverse(x: array) -> UniqueInverseResult:
    """Return unique elements and the inverse indices mapping back to ``x``."""
    uniq, inv = torch.unique(x, return_inverse=True)
    return UniqueInverseResult(uniq, inv)
|
629 |
+
|
630 |
+
def unique_values(x: array) -> array:
    """Return the unique elements of ``x`` (sorted, as torch.unique does)."""
    return torch.unique(x)
|
632 |
+
|
633 |
+
def matmul(x1: array, x2: array, /, **kwargs) -> array:
    """Matrix product of ``x1`` and ``x2`` with array-API type promotion."""
    # torch.matmul does promote, but differently from _fix_promotion, so
    # promote explicitly first.
    x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
    return torch.matmul(x1, x2, **kwargs)
|
637 |
+
|
638 |
+
# Reuse the shared array-API implementations (imported above as _aliases_*),
# bound to the torch namespace via get_xp.
matrix_transpose = get_xp(torch)(_aliases_matrix_transpose)
_vecdot = get_xp(torch)(_aliases_vecdot)
|
640 |
+
|
641 |
+
def vecdot(x1: array, x2: array, /, *, axis: int = -1) -> array:
    """Vector dot product along ``axis`` with array-API type promotion."""
    x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
    return _vecdot(x1, x2, axis=axis)
|
644 |
+
|
645 |
+
# torch.tensordot spells the contraction argument ``dims`` rather than ``axes``.
def tensordot(x1: array, x2: array, /, *,
              axes: Union[int, Tuple[Sequence[int], Sequence[int]]] = 2,
              **kwargs) -> array:
    """Tensor contraction of ``x1`` and ``x2`` over ``axes``."""
    # torch.tensordot fails with integer dtypes when there is only one
    # element in the axis (https://github.com/pytorch/pytorch/issues/84530);
    # promoting first matches the rest of this module.
    x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
    return torch.tensordot(x1, x2, dims=axes, **kwargs)
|
651 |
+
|
652 |
+
|
653 |
+
def isdtype(
    dtype: Dtype, kind: Union[Dtype, str, Tuple[Union[Dtype, str], ...]],
    *, _tuple=True, # Disallow nested tuples
) -> bool:
    """
    Returns a boolean indicating whether a provided dtype is of a specified data type ``kind``.

    Note that outside of this function, this compat library does not yet fully
    support complex numbers.

    See
    https://data-apis.org/array-api/latest/API_specification/generated/array_api.isdtype.html
    for more details
    """
    if isinstance(kind, tuple) and _tuple:
        # A tuple of kinds matches if any member matches (one level deep only).
        return builtin_any(isdtype(dtype, k, _tuple=False) for k in kind)
    if not isinstance(kind, str):
        # ``kind`` is itself a dtype: match by equality.
        return dtype == kind
    if kind == 'bool':
        return dtype == torch.bool
    if kind == 'signed integer':
        return dtype in _int_dtypes and dtype.is_signed
    if kind == 'unsigned integer':
        return dtype in _int_dtypes and not dtype.is_signed
    if kind == 'integral':
        return dtype in _int_dtypes
    if kind == 'real floating':
        return dtype.is_floating_point
    if kind == 'complex floating':
        return dtype.is_complex
    if kind == 'numeric':
        return isdtype(dtype, ('integral', 'real floating', 'complex floating'))
    raise ValueError(f"Unrecognized data type kind: {kind!r}")
|
688 |
+
|
689 |
+
def take(x: array, indices: array, /, *, axis: Optional[int] = None, **kwargs) -> array:
    """Return elements of ``x`` selected by the integer array ``indices``
    along ``axis``.

    Per the array API, ``axis`` may only be omitted for one-dimensional
    input, in which case it defaults to 0.

    Raises
    ------
    ValueError
        If ``axis`` is None and ``x`` is not one-dimensional.
    """
    if axis is None:
        if x.ndim != 1:
            # Bug fix: the old message said "when ndim > 1", which was wrong
            # for 0-d input — the actual requirement is exactly one dimension.
            raise ValueError("axis must be specified when x.ndim != 1")
        axis = 0
    return torch.index_select(x, axis, indices, **kwargs)
|
695 |
+
|
696 |
+
# Public array-API surface of this torch compatibility module.
__all__ = ['result_type', 'can_cast', 'permute_dims', 'bitwise_invert', 'newaxis',
           'add', 'atan2', 'bitwise_and', 'bitwise_left_shift', 'bitwise_or',
           'bitwise_right_shift', 'bitwise_xor', 'divide', 'equal',
           'floor_divide', 'greater', 'greater_equal', 'less', 'less_equal',
           'logaddexp', 'multiply', 'not_equal', 'pow', 'remainder',
           'subtract', 'max', 'min', 'sort', 'prod', 'sum', 'any', 'all',
           'mean', 'std', 'var', 'concat', 'squeeze', 'broadcast_to', 'flip', 'roll',
           'nonzero', 'where', 'reshape', 'arange', 'eye', 'linspace', 'full',
           'ones', 'zeros', 'empty', 'tril', 'triu', 'expand_dims', 'astype',
           'broadcast_arrays', 'unique_all', 'unique_counts',
           'unique_inverse', 'unique_values', 'matmul', 'matrix_transpose',
           'vecdot', 'tensordot', 'isdtype', 'take']
|
venv/lib/python3.10/site-packages/scipy/_lib/array_api_compat/torch/linalg.py
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from typing import TYPE_CHECKING
|
4 |
+
if TYPE_CHECKING:
|
5 |
+
import torch
|
6 |
+
array = torch.Tensor
|
7 |
+
from torch import dtype as Dtype
|
8 |
+
from typing import Optional
|
9 |
+
|
10 |
+
from torch.linalg import *
|
11 |
+
|
12 |
+
# torch.linalg doesn't define __all__
|
13 |
+
# from torch.linalg import __all__ as linalg_all
|
14 |
+
from torch import linalg as torch_linalg
|
15 |
+
linalg_all = [i for i in dir(torch_linalg) if not i.startswith('_')]
|
16 |
+
|
17 |
+
# outer is implemented in torch but isn't in the linalg namespace
|
18 |
+
from torch import outer
|
19 |
+
from ._aliases import _fix_promotion, matrix_transpose, tensordot, sum
|
20 |
+
|
21 |
+
# torch.linalg.cross does not default to axis=-1 (it defaults to the first
# axis with size 3), see https://github.com/pytorch/pytorch/issues/58743
def cross(x1: array, x2: array, /, *, axis: int = -1) -> array:
    """Cross product of 3-element vectors in ``x1`` and ``x2`` along ``axis``."""
    x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
    return torch_linalg.cross(x1, x2, dim=axis)
|
26 |
+
|
27 |
+
def vecdot(x1: array, x2: array, /, *, axis: int = -1, **kwargs) -> array:
    # Vector dot product along ``axis``, with array-API type promotion.
    # Imported lazily to avoid a circular import at module load time.
    from ._aliases import isdtype

    x1, x2 = _fix_promotion(x1, x2, only_scalar=False)

    # torch.linalg.vecdot doesn't support integer dtypes
    if isdtype(x1.dtype, 'integral') or isdtype(x2.dtype, 'integral'):
        if kwargs:
            raise RuntimeError("vecdot kwargs not supported for integral dtypes")
        # Check the reduction axis explicitly: broadcasting alone would let a
        # size-1 dimension silently stretch along ``axis``, which vecdot
        # must reject.
        ndim = max(x1.ndim, x2.ndim)
        x1_shape = (1,)*(ndim - x1.ndim) + tuple(x1.shape)
        x2_shape = (1,)*(ndim - x2.ndim) + tuple(x2.shape)
        if x1_shape[axis] != x2_shape[axis]:
            raise ValueError("x1 and x2 must have the same size along the given axis")

        x1_, x2_ = torch.broadcast_tensors(x1, x2)
        x1_ = torch.moveaxis(x1_, axis, -1)
        x2_ = torch.moveaxis(x2_, axis, -1)

        # Batched (1, n) @ (n, 1) matmul computes the dot product over the
        # last axis; index away the two singleton result dimensions.
        res = x1_[..., None, :] @ x2_[..., None]
        return res[..., 0, 0]
    return torch.linalg.vecdot(x1, x2, dim=axis, **kwargs)
|
49 |
+
|
50 |
+
def solve(x1: array, x2: array, /, **kwargs) -> array:
    """Solve the linear system ``x1 @ out == x2`` for ``out``."""
    # Promote first so both operands share an array-API-compatible dtype.
    x1, x2 = _fix_promotion(x1, x2, only_scalar=False)
    return torch.linalg.solve(x1, x2, **kwargs)
|
53 |
+
|
54 |
+
# torch.trace has no ``offset`` argument and no batching support, so reduce
# the chosen diagonal of the trailing two dimensions instead.
def trace(x: array, /, *, offset: int = 0, dtype: Optional[Dtype] = None) -> array:
    """Sum of the ``offset`` diagonal over the last two dimensions of ``x``."""
    diag = torch.diagonal(x, offset=offset, dim1=-2, dim2=-1)
    # The wrapped sum (from ._aliases) applies array-API dtype upcasting.
    return sum(diag, axis=-1, dtype=dtype)
|
58 |
+
|
59 |
+
# Everything torch.linalg exports, plus the wrappers defined in this module.
__all__ = linalg_all + ['outer', 'trace', 'matrix_transpose', 'tensordot',
                        'vecdot', 'solve']

del linalg_all
|
venv/lib/python3.10/site-packages/scipy/constants/__init__.py
ADDED
@@ -0,0 +1,347 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
r"""
|
2 |
+
==================================
|
3 |
+
Constants (:mod:`scipy.constants`)
|
4 |
+
==================================
|
5 |
+
|
6 |
+
.. currentmodule:: scipy.constants
|
7 |
+
|
8 |
+
Physical and mathematical constants and units.
|
9 |
+
|
10 |
+
|
11 |
+
Mathematical constants
|
12 |
+
======================
|
13 |
+
|
14 |
+
================ =================================================================
|
15 |
+
``pi`` Pi
|
16 |
+
``golden`` Golden ratio
|
17 |
+
``golden_ratio`` Golden ratio
|
18 |
+
================ =================================================================
|
19 |
+
|
20 |
+
|
21 |
+
Physical constants
|
22 |
+
==================
|
23 |
+
|
24 |
+
=========================== =================================================================
|
25 |
+
``c`` speed of light in vacuum
|
26 |
+
``speed_of_light`` speed of light in vacuum
|
27 |
+
``mu_0`` the magnetic constant :math:`\mu_0`
|
28 |
+
``epsilon_0`` the electric constant (vacuum permittivity), :math:`\epsilon_0`
|
29 |
+
``h`` the Planck constant :math:`h`
|
30 |
+
``Planck`` the Planck constant :math:`h`
|
31 |
+
``hbar`` :math:`\hbar = h/(2\pi)`
|
32 |
+
``G`` Newtonian constant of gravitation
|
33 |
+
``gravitational_constant`` Newtonian constant of gravitation
|
34 |
+
``g`` standard acceleration of gravity
|
35 |
+
``e`` elementary charge
|
36 |
+
``elementary_charge`` elementary charge
|
37 |
+
``R`` molar gas constant
|
38 |
+
``gas_constant`` molar gas constant
|
39 |
+
``alpha`` fine-structure constant
|
40 |
+
``fine_structure`` fine-structure constant
|
41 |
+
``N_A`` Avogadro constant
|
42 |
+
``Avogadro`` Avogadro constant
|
43 |
+
``k`` Boltzmann constant
|
44 |
+
``Boltzmann`` Boltzmann constant
|
45 |
+
``sigma`` Stefan-Boltzmann constant :math:`\sigma`
|
46 |
+
``Stefan_Boltzmann`` Stefan-Boltzmann constant :math:`\sigma`
|
47 |
+
``Wien`` Wien displacement law constant
|
48 |
+
``Rydberg`` Rydberg constant
|
49 |
+
``m_e`` electron mass
|
50 |
+
``electron_mass`` electron mass
|
51 |
+
``m_p`` proton mass
|
52 |
+
``proton_mass`` proton mass
|
53 |
+
``m_n`` neutron mass
|
54 |
+
``neutron_mass`` neutron mass
|
55 |
+
=========================== =================================================================
|
56 |
+
|
57 |
+
|
58 |
+
Constants database
|
59 |
+
------------------
|
60 |
+
|
61 |
+
In addition to the above variables, :mod:`scipy.constants` also contains the
|
62 |
+
2018 CODATA recommended values [CODATA2018]_ database containing more physical
|
63 |
+
constants.
|
64 |
+
|
65 |
+
.. autosummary::
|
66 |
+
:toctree: generated/
|
67 |
+
|
68 |
+
value -- Value in physical_constants indexed by key
|
69 |
+
unit -- Unit in physical_constants indexed by key
|
70 |
+
precision -- Relative precision in physical_constants indexed by key
|
71 |
+
find -- Return list of physical_constant keys with a given string
|
72 |
+
ConstantWarning -- Constant sought not in newest CODATA data set
|
73 |
+
|
74 |
+
.. data:: physical_constants
|
75 |
+
|
76 |
+
Dictionary of physical constants, of the format
|
77 |
+
``physical_constants[name] = (value, unit, uncertainty)``.
|
78 |
+
|
79 |
+
Available constants:
|
80 |
+
|
81 |
+
====================================================================== ====
|
82 |
+
%(constant_names)s
|
83 |
+
====================================================================== ====
|
84 |
+
|
85 |
+
|
86 |
+
Units
|
87 |
+
=====
|
88 |
+
|
89 |
+
SI prefixes
|
90 |
+
-----------
|
91 |
+
|
92 |
+
============ =================================================================
|
93 |
+
``quetta`` :math:`10^{30}`
|
94 |
+
``ronna`` :math:`10^{27}`
|
95 |
+
``yotta`` :math:`10^{24}`
|
96 |
+
``zetta`` :math:`10^{21}`
|
97 |
+
``exa`` :math:`10^{18}`
|
98 |
+
``peta`` :math:`10^{15}`
|
99 |
+
``tera`` :math:`10^{12}`
|
100 |
+
``giga`` :math:`10^{9}`
|
101 |
+
``mega`` :math:`10^{6}`
|
102 |
+
``kilo`` :math:`10^{3}`
|
103 |
+
``hecto`` :math:`10^{2}`
|
104 |
+
``deka`` :math:`10^{1}`
|
105 |
+
``deci`` :math:`10^{-1}`
|
106 |
+
``centi`` :math:`10^{-2}`
|
107 |
+
``milli`` :math:`10^{-3}`
|
108 |
+
``micro`` :math:`10^{-6}`
|
109 |
+
``nano`` :math:`10^{-9}`
|
110 |
+
``pico`` :math:`10^{-12}`
|
111 |
+
``femto`` :math:`10^{-15}`
|
112 |
+
``atto`` :math:`10^{-18}`
|
113 |
+
``zepto`` :math:`10^{-21}`
|
114 |
+
``yocto`` :math:`10^{-24}`
|
115 |
+
``ronto`` :math:`10^{-27}`
|
116 |
+
``quecto`` :math:`10^{-30}`
|
117 |
+
============ =================================================================
|
118 |
+
|
119 |
+
Binary prefixes
|
120 |
+
---------------
|
121 |
+
|
122 |
+
============ =================================================================
|
123 |
+
``kibi`` :math:`2^{10}`
|
124 |
+
``mebi`` :math:`2^{20}`
|
125 |
+
``gibi`` :math:`2^{30}`
|
126 |
+
``tebi`` :math:`2^{40}`
|
127 |
+
``pebi`` :math:`2^{50}`
|
128 |
+
``exbi`` :math:`2^{60}`
|
129 |
+
``zebi`` :math:`2^{70}`
|
130 |
+
``yobi`` :math:`2^{80}`
|
131 |
+
============ =================================================================
|
132 |
+
|
133 |
+
Mass
|
134 |
+
----
|
135 |
+
|
136 |
+
================= ============================================================
|
137 |
+
``gram`` :math:`10^{-3}` kg
|
138 |
+
``metric_ton`` :math:`10^{3}` kg
|
139 |
+
``grain`` one grain in kg
|
140 |
+
``lb``             one pound (avoirdupois) in kg
``pound``          one pound (avoirdupois) in kg
|
142 |
+
``blob`` one inch version of a slug in kg (added in 1.0.0)
|
143 |
+
``slinch`` one inch version of a slug in kg (added in 1.0.0)
|
144 |
+
``slug`` one slug in kg (added in 1.0.0)
|
145 |
+
``oz`` one ounce in kg
|
146 |
+
``ounce`` one ounce in kg
|
147 |
+
``stone`` one stone in kg
|
148 |
+
``grain`` one grain in kg
|
149 |
+
``long_ton`` one long ton in kg
|
150 |
+
``short_ton`` one short ton in kg
|
151 |
+
``troy_ounce`` one Troy ounce in kg
|
152 |
+
``troy_pound`` one Troy pound in kg
|
153 |
+
``carat`` one carat in kg
|
154 |
+
``m_u`` atomic mass constant (in kg)
|
155 |
+
``u`` atomic mass constant (in kg)
|
156 |
+
``atomic_mass`` atomic mass constant (in kg)
|
157 |
+
================= ============================================================
|
158 |
+
|
159 |
+
Angle
|
160 |
+
-----
|
161 |
+
|
162 |
+
================= ============================================================
|
163 |
+
``degree`` degree in radians
|
164 |
+
``arcmin`` arc minute in radians
|
165 |
+
``arcminute`` arc minute in radians
|
166 |
+
``arcsec`` arc second in radians
|
167 |
+
``arcsecond`` arc second in radians
|
168 |
+
================= ============================================================
|
169 |
+
|
170 |
+
|
171 |
+
Time
|
172 |
+
----
|
173 |
+
|
174 |
+
================= ============================================================
|
175 |
+
``minute`` one minute in seconds
|
176 |
+
``hour`` one hour in seconds
|
177 |
+
``day`` one day in seconds
|
178 |
+
``week`` one week in seconds
|
179 |
+
``year`` one year (365 days) in seconds
|
180 |
+
``Julian_year`` one Julian year (365.25 days) in seconds
|
181 |
+
================= ============================================================
|
182 |
+
|
183 |
+
|
184 |
+
Length
|
185 |
+
------
|
186 |
+
|
187 |
+
===================== ============================================================
|
188 |
+
``inch`` one inch in meters
|
189 |
+
``foot`` one foot in meters
|
190 |
+
``yard`` one yard in meters
|
191 |
+
``mile`` one mile in meters
|
192 |
+
``mil`` one mil in meters
|
193 |
+
``pt`` one point in meters
|
194 |
+
``point`` one point in meters
|
195 |
+
``survey_foot`` one survey foot in meters
|
196 |
+
``survey_mile`` one survey mile in meters
|
197 |
+
``nautical_mile`` one nautical mile in meters
|
198 |
+
``fermi`` one Fermi in meters
|
199 |
+
``angstrom`` one Angstrom in meters
|
200 |
+
``micron`` one micron in meters
|
201 |
+
``au`` one astronomical unit in meters
|
202 |
+
``astronomical_unit`` one astronomical unit in meters
|
203 |
+
``light_year`` one light year in meters
|
204 |
+
``parsec`` one parsec in meters
|
205 |
+
===================== ============================================================
|
206 |
+
|
207 |
+
Pressure
|
208 |
+
--------
|
209 |
+
|
210 |
+
================= ============================================================
|
211 |
+
``atm`` standard atmosphere in pascals
|
212 |
+
``atmosphere`` standard atmosphere in pascals
|
213 |
+
``bar`` one bar in pascals
|
214 |
+
``torr`` one torr (mmHg) in pascals
|
215 |
+
``mmHg`` one torr (mmHg) in pascals
|
216 |
+
``psi`` one psi in pascals
|
217 |
+
================= ============================================================
|
218 |
+
|
219 |
+
Area
|
220 |
+
----
|
221 |
+
|
222 |
+
================= ============================================================
|
223 |
+
``hectare`` one hectare in square meters
|
224 |
+
``acre`` one acre in square meters
|
225 |
+
================= ============================================================
|
226 |
+
|
227 |
+
|
228 |
+
Volume
|
229 |
+
------
|
230 |
+
|
231 |
+
=================== ========================================================
|
232 |
+
``liter`` one liter in cubic meters
|
233 |
+
``litre`` one liter in cubic meters
|
234 |
+
``gallon`` one gallon (US) in cubic meters
|
235 |
+
``gallon_US`` one gallon (US) in cubic meters
|
236 |
+
``gallon_imp`` one gallon (UK) in cubic meters
|
237 |
+
``fluid_ounce`` one fluid ounce (US) in cubic meters
|
238 |
+
``fluid_ounce_US`` one fluid ounce (US) in cubic meters
|
239 |
+
``fluid_ounce_imp`` one fluid ounce (UK) in cubic meters
|
240 |
+
``bbl`` one barrel in cubic meters
|
241 |
+
``barrel`` one barrel in cubic meters
|
242 |
+
=================== ========================================================
|
243 |
+
|
244 |
+
Speed
|
245 |
+
-----
|
246 |
+
|
247 |
+
================== ==========================================================
|
248 |
+
``kmh`` kilometers per hour in meters per second
|
249 |
+
``mph`` miles per hour in meters per second
|
250 |
+
``mach`` one Mach (approx., at 15 C, 1 atm) in meters per second
|
251 |
+
``speed_of_sound`` one Mach (approx., at 15 C, 1 atm) in meters per second
|
252 |
+
``knot`` one knot in meters per second
|
253 |
+
================== ==========================================================
|
254 |
+
|
255 |
+
|
256 |
+
Temperature
|
257 |
+
-----------
|
258 |
+
|
259 |
+
===================== =======================================================
|
260 |
+
``zero_Celsius`` zero of Celsius scale in Kelvin
|
261 |
+
``degree_Fahrenheit`` one Fahrenheit (only differences) in Kelvins
|
262 |
+
===================== =======================================================
|
263 |
+
|
264 |
+
.. autosummary::
|
265 |
+
:toctree: generated/
|
266 |
+
|
267 |
+
convert_temperature
|
268 |
+
|
269 |
+
Energy
|
270 |
+
------
|
271 |
+
|
272 |
+
==================== =======================================================
|
273 |
+
``eV`` one electron volt in Joules
|
274 |
+
``electron_volt`` one electron volt in Joules
|
275 |
+
``calorie`` one calorie (thermochemical) in Joules
|
276 |
+
``calorie_th`` one calorie (thermochemical) in Joules
|
277 |
+
``calorie_IT`` one calorie (International Steam Table calorie, 1956) in Joules
|
278 |
+
``erg`` one erg in Joules
|
279 |
+
``Btu`` one British thermal unit (International Steam Table) in Joules
|
280 |
+
``Btu_IT`` one British thermal unit (International Steam Table) in Joules
|
281 |
+
``Btu_th`` one British thermal unit (thermochemical) in Joules
|
282 |
+
``ton_TNT`` one ton of TNT in Joules
|
283 |
+
==================== =======================================================
|
284 |
+
|
285 |
+
Power
|
286 |
+
-----
|
287 |
+
|
288 |
+
==================== =======================================================
|
289 |
+
``hp`` one horsepower in watts
|
290 |
+
``horsepower`` one horsepower in watts
|
291 |
+
==================== =======================================================
|
292 |
+
|
293 |
+
Force
|
294 |
+
-----
|
295 |
+
|
296 |
+
==================== =======================================================
|
297 |
+
``dyn`` one dyne in newtons
|
298 |
+
``dyne`` one dyne in newtons
|
299 |
+
``lbf`` one pound force in newtons
|
300 |
+
``pound_force`` one pound force in newtons
|
301 |
+
``kgf`` one kilogram force in newtons
|
302 |
+
``kilogram_force`` one kilogram force in newtons
|
303 |
+
==================== =======================================================
|
304 |
+
|
305 |
+
Optics
|
306 |
+
------
|
307 |
+
|
308 |
+
.. autosummary::
|
309 |
+
:toctree: generated/
|
310 |
+
|
311 |
+
lambda2nu
|
312 |
+
nu2lambda
|
313 |
+
|
314 |
+
References
|
315 |
+
==========
|
316 |
+
|
317 |
+
.. [CODATA2018] CODATA Recommended Values of the Fundamental
|
318 |
+
Physical Constants 2018.
|
319 |
+
|
320 |
+
https://physics.nist.gov/cuu/Constants/
|
321 |
+
|
322 |
+
""" # noqa: E501
|
323 |
+
# Modules contributed by BasSw ([email protected])
|
324 |
+
from ._codata import *
|
325 |
+
from ._constants import *
|
326 |
+
from ._codata import _obsolete_constants, physical_constants
|
327 |
+
|
328 |
+
# Deprecated namespaces, to be removed in v2.0.0
|
329 |
+
from . import codata, constants
|
330 |
+
|
331 |
+
# Build the "Available constants" table interpolated into the module
# docstring above; entries sort case-insensitively via the lowercased key.
_constant_names_list = [(_k.lower(), _k, _v)
                        for _k, _v in physical_constants.items()
                        if _k not in _obsolete_constants]
_constant_names = "\n".join(["``{}``{} {} {}".format(_x[1], " "*(66-len(_x[1])),
                                                     _x[2][0], _x[2][1])
                             for _x in sorted(_constant_names_list)])
# __doc__ can be None (e.g. when docstrings are stripped), so guard the
# interpolation.
if __doc__:
    __doc__ = __doc__ % dict(constant_names=_constant_names)

del _constant_names
del _constant_names_list

# Public API: every non-underscored name imported or defined above.
__all__ = [s for s in dir() if not s.startswith('_')]

from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester
|
venv/lib/python3.10/site-packages/scipy/constants/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (12.7 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/constants/__pycache__/_codata.cpython-310.pyc
ADDED
Binary file (154 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/constants/__pycache__/_constants.cpython-310.pyc
ADDED
Binary file (8.71 kB). View file
|
|
venv/lib/python3.10/site-packages/scipy/constants/__pycache__/codata.cpython-310.pyc
ADDED
Binary file (814 Bytes). View file
|
|