Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- env-llmeval/lib/python3.10/site-packages/certifi/cacert.pem +0 -0
- env-llmeval/lib/python3.10/site-packages/certifi/core.py +114 -0
- env-llmeval/lib/python3.10/site-packages/certifi/py.typed +0 -0
- env-llmeval/lib/python3.10/site-packages/pip-22.0.2.dist-info/LICENSE.txt +20 -0
- env-llmeval/lib/python3.10/site-packages/pip-22.0.2.dist-info/WHEEL +5 -0
- env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/INSTALLER +1 -0
- env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/LICENSE.txt +933 -0
- env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/METADATA +1074 -0
- env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/RECORD +0 -0
- env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/WHEEL +6 -0
- env-llmeval/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_VF.py +30 -0
- env-llmeval/lib/python3.10/site-packages/torch/_VF.pyi +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/__config__.py +22 -0
- env-llmeval/lib/python3.10/site-packages/torch/__future__.py +21 -0
- env-llmeval/lib/python3.10/site-packages/torch/__init__.py +1995 -0
- env-llmeval/lib/python3.10/site-packages/torch/_appdirs.py +666 -0
- env-llmeval/lib/python3.10/site-packages/torch/_awaits/__init__.py +54 -0
- env-llmeval/lib/python3.10/site-packages/torch/_awaits/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_classes.py +55 -0
- env-llmeval/lib/python3.10/site-packages/torch/_compile.py +30 -0
- env-llmeval/lib/python3.10/site-packages/torch/_custom_ops.py +322 -0
- env-llmeval/lib/python3.10/site-packages/torch/_deploy.py +105 -0
- env-llmeval/lib/python3.10/site-packages/torch/_guards.py +833 -0
- env-llmeval/lib/python3.10/site-packages/torch/_jit_internal.py +1510 -0
- env-llmeval/lib/python3.10/site-packages/torch/_linalg_utils.py +164 -0
- env-llmeval/lib/python3.10/site-packages/torch/_lobpcg.py +1167 -0
- env-llmeval/lib/python3.10/site-packages/torch/_lowrank.py +298 -0
- env-llmeval/lib/python3.10/site-packages/torch/_meta_registrations.py +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_ops.py +938 -0
- env-llmeval/lib/python3.10/site-packages/torch/_sources.py +137 -0
- env-llmeval/lib/python3.10/site-packages/torch/_storage_docs.py +43 -0
- env-llmeval/lib/python3.10/site-packages/torch/_streambase.py +45 -0
- env-llmeval/lib/python3.10/site-packages/torch/_tensor.py +1518 -0
- env-llmeval/lib/python3.10/site-packages/torch/_tensor_docs.py +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_tensor_str.py +677 -0
- env-llmeval/lib/python3.10/site-packages/torch/_torch_docs.py +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/_utils.py +918 -0
- env-llmeval/lib/python3.10/site-packages/torch/_vmap_internals.py +237 -0
- env-llmeval/lib/python3.10/site-packages/torch/_weights_only_unpickler.py +304 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/__init__.py +68 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/mkldnn/__init__.py +91 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/mkldnn/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/mps/__init__.py +47 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/mps/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/openmp/__init__.py +6 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/openmp/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/opt_einsum/__init__.py +110 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/opt_einsum/__pycache__/__init__.cpython-310.pyc +0 -0
- env-llmeval/lib/python3.10/site-packages/torch/backends/xnnpack/__init__.py +28 -0
env-llmeval/lib/python3.10/site-packages/certifi/cacert.pem
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/certifi/core.py
ADDED
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
certifi.py
|
3 |
+
~~~~~~~~~~
|
4 |
+
|
5 |
+
This module returns the installation location of cacert.pem or its contents.
|
6 |
+
"""
|
7 |
+
import sys
|
8 |
+
import atexit
|
9 |
+
|
10 |
+
def exit_cacert_ctx() -> None:
|
11 |
+
_CACERT_CTX.__exit__(None, None, None) # type: ignore[union-attr]
|
12 |
+
|
13 |
+
|
14 |
+
if sys.version_info >= (3, 11):
|
15 |
+
|
16 |
+
from importlib.resources import as_file, files
|
17 |
+
|
18 |
+
_CACERT_CTX = None
|
19 |
+
_CACERT_PATH = None
|
20 |
+
|
21 |
+
def where() -> str:
|
22 |
+
# This is slightly terrible, but we want to delay extracting the file
|
23 |
+
# in cases where we're inside of a zipimport situation until someone
|
24 |
+
# actually calls where(), but we don't want to re-extract the file
|
25 |
+
# on every call of where(), so we'll do it once then store it in a
|
26 |
+
# global variable.
|
27 |
+
global _CACERT_CTX
|
28 |
+
global _CACERT_PATH
|
29 |
+
if _CACERT_PATH is None:
|
30 |
+
# This is slightly janky, the importlib.resources API wants you to
|
31 |
+
# manage the cleanup of this file, so it doesn't actually return a
|
32 |
+
# path, it returns a context manager that will give you the path
|
33 |
+
# when you enter it and will do any cleanup when you leave it. In
|
34 |
+
# the common case of not needing a temporary file, it will just
|
35 |
+
# return the file system location and the __exit__() is a no-op.
|
36 |
+
#
|
37 |
+
# We also have to hold onto the actual context manager, because
|
38 |
+
# it will do the cleanup whenever it gets garbage collected, so
|
39 |
+
# we will also store that at the global level as well.
|
40 |
+
_CACERT_CTX = as_file(files("certifi").joinpath("cacert.pem"))
|
41 |
+
_CACERT_PATH = str(_CACERT_CTX.__enter__())
|
42 |
+
atexit.register(exit_cacert_ctx)
|
43 |
+
|
44 |
+
return _CACERT_PATH
|
45 |
+
|
46 |
+
def contents() -> str:
|
47 |
+
return files("certifi").joinpath("cacert.pem").read_text(encoding="ascii")
|
48 |
+
|
49 |
+
elif sys.version_info >= (3, 7):
|
50 |
+
|
51 |
+
from importlib.resources import path as get_path, read_text
|
52 |
+
|
53 |
+
_CACERT_CTX = None
|
54 |
+
_CACERT_PATH = None
|
55 |
+
|
56 |
+
def where() -> str:
|
57 |
+
# This is slightly terrible, but we want to delay extracting the
|
58 |
+
# file in cases where we're inside of a zipimport situation until
|
59 |
+
# someone actually calls where(), but we don't want to re-extract
|
60 |
+
# the file on every call of where(), so we'll do it once then store
|
61 |
+
# it in a global variable.
|
62 |
+
global _CACERT_CTX
|
63 |
+
global _CACERT_PATH
|
64 |
+
if _CACERT_PATH is None:
|
65 |
+
# This is slightly janky, the importlib.resources API wants you
|
66 |
+
# to manage the cleanup of this file, so it doesn't actually
|
67 |
+
# return a path, it returns a context manager that will give
|
68 |
+
# you the path when you enter it and will do any cleanup when
|
69 |
+
# you leave it. In the common case of not needing a temporary
|
70 |
+
# file, it will just return the file system location and the
|
71 |
+
# __exit__() is a no-op.
|
72 |
+
#
|
73 |
+
# We also have to hold onto the actual context manager, because
|
74 |
+
# it will do the cleanup whenever it gets garbage collected, so
|
75 |
+
# we will also store that at the global level as well.
|
76 |
+
_CACERT_CTX = get_path("certifi", "cacert.pem")
|
77 |
+
_CACERT_PATH = str(_CACERT_CTX.__enter__())
|
78 |
+
atexit.register(exit_cacert_ctx)
|
79 |
+
|
80 |
+
return _CACERT_PATH
|
81 |
+
|
82 |
+
def contents() -> str:
|
83 |
+
return read_text("certifi", "cacert.pem", encoding="ascii")
|
84 |
+
|
85 |
+
else:
|
86 |
+
import os
|
87 |
+
import types
|
88 |
+
from typing import Union
|
89 |
+
|
90 |
+
Package = Union[types.ModuleType, str]
|
91 |
+
Resource = Union[str, "os.PathLike"]
|
92 |
+
|
93 |
+
# This fallback will work for Python versions prior to 3.7 that lack the
|
94 |
+
# importlib.resources module but relies on the existing `where` function
|
95 |
+
# so won't address issues with environments like PyOxidizer that don't set
|
96 |
+
# __file__ on modules.
|
97 |
+
def read_text(
|
98 |
+
package: Package,
|
99 |
+
resource: Resource,
|
100 |
+
encoding: str = 'utf-8',
|
101 |
+
errors: str = 'strict'
|
102 |
+
) -> str:
|
103 |
+
with open(where(), encoding=encoding) as data:
|
104 |
+
return data.read()
|
105 |
+
|
106 |
+
# If we don't have importlib.resources, then we will just do the old logic
|
107 |
+
# of assuming we're on the filesystem and munge the path directly.
|
108 |
+
def where() -> str:
|
109 |
+
f = os.path.dirname(__file__)
|
110 |
+
|
111 |
+
return os.path.join(f, "cacert.pem")
|
112 |
+
|
113 |
+
def contents() -> str:
|
114 |
+
return read_text("certifi", "cacert.pem", encoding="ascii")
|
env-llmeval/lib/python3.10/site-packages/certifi/py.typed
ADDED
File without changes
|
env-llmeval/lib/python3.10/site-packages/pip-22.0.2.dist-info/LICENSE.txt
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright (c) 2008-present The pip developers (see AUTHORS.txt file)
|
2 |
+
|
3 |
+
Permission is hereby granted, free of charge, to any person obtaining
|
4 |
+
a copy of this software and associated documentation files (the
|
5 |
+
"Software"), to deal in the Software without restriction, including
|
6 |
+
without limitation the rights to use, copy, modify, merge, publish,
|
7 |
+
distribute, sublicense, and/or sell copies of the Software, and to
|
8 |
+
permit persons to whom the Software is furnished to do so, subject to
|
9 |
+
the following conditions:
|
10 |
+
|
11 |
+
The above copyright notice and this permission notice shall be
|
12 |
+
included in all copies or substantial portions of the Software.
|
13 |
+
|
14 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
15 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
16 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
17 |
+
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
18 |
+
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
19 |
+
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
20 |
+
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
env-llmeval/lib/python3.10/site-packages/pip-22.0.2.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Wheel-Version: 1.0
|
2 |
+
Generator: bdist_wheel (0.37.1)
|
3 |
+
Root-Is-Purelib: true
|
4 |
+
Tag: py3-none-any
|
5 |
+
|
env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
pip
|
env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/LICENSE.txt
ADDED
@@ -0,0 +1,933 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Copyright (c) 2001-2002 Enthought, Inc. 2003-2024, SciPy Developers.
|
2 |
+
All rights reserved.
|
3 |
+
|
4 |
+
Redistribution and use in source and binary forms, with or without
|
5 |
+
modification, are permitted provided that the following conditions
|
6 |
+
are met:
|
7 |
+
|
8 |
+
1. Redistributions of source code must retain the above copyright
|
9 |
+
notice, this list of conditions and the following disclaimer.
|
10 |
+
|
11 |
+
2. Redistributions in binary form must reproduce the above
|
12 |
+
copyright notice, this list of conditions and the following
|
13 |
+
disclaimer in the documentation and/or other materials provided
|
14 |
+
with the distribution.
|
15 |
+
|
16 |
+
3. Neither the name of the copyright holder nor the names of its
|
17 |
+
contributors may be used to endorse or promote products derived
|
18 |
+
from this software without specific prior written permission.
|
19 |
+
|
20 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
21 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
22 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
23 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
24 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
25 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
26 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
27 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
28 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
29 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
30 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
31 |
+
|
32 |
+
----
|
33 |
+
|
34 |
+
This binary distribution of SciPy also bundles the following software:
|
35 |
+
|
36 |
+
|
37 |
+
Name: OpenBLAS
|
38 |
+
Files: scipy.libs/libopenblas*.so
|
39 |
+
Description: bundled as a dynamically linked library
|
40 |
+
Availability: https://github.com/OpenMathLib/OpenBLAS/
|
41 |
+
License: BSD-3-Clause-Attribution
|
42 |
+
Copyright (c) 2011-2014, The OpenBLAS Project
|
43 |
+
All rights reserved.
|
44 |
+
|
45 |
+
Redistribution and use in source and binary forms, with or without
|
46 |
+
modification, are permitted provided that the following conditions are
|
47 |
+
met:
|
48 |
+
|
49 |
+
1. Redistributions of source code must retain the above copyright
|
50 |
+
notice, this list of conditions and the following disclaimer.
|
51 |
+
|
52 |
+
2. Redistributions in binary form must reproduce the above copyright
|
53 |
+
notice, this list of conditions and the following disclaimer in
|
54 |
+
the documentation and/or other materials provided with the
|
55 |
+
distribution.
|
56 |
+
3. Neither the name of the OpenBLAS project nor the names of
|
57 |
+
its contributors may be used to endorse or promote products
|
58 |
+
derived from this software without specific prior written
|
59 |
+
permission.
|
60 |
+
|
61 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
62 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
63 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
64 |
+
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
65 |
+
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
66 |
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
67 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
68 |
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
69 |
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
70 |
+
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
71 |
+
|
72 |
+
|
73 |
+
Name: LAPACK
|
74 |
+
Files: scipy.libs/libopenblas*.so
|
75 |
+
Description: bundled in OpenBLAS
|
76 |
+
Availability: https://github.com/OpenMathLib/OpenBLAS/
|
77 |
+
License: BSD-3-Clause-Attribution
|
78 |
+
Copyright (c) 1992-2013 The University of Tennessee and The University
|
79 |
+
of Tennessee Research Foundation. All rights
|
80 |
+
reserved.
|
81 |
+
Copyright (c) 2000-2013 The University of California Berkeley. All
|
82 |
+
rights reserved.
|
83 |
+
Copyright (c) 2006-2013 The University of Colorado Denver. All rights
|
84 |
+
reserved.
|
85 |
+
|
86 |
+
$COPYRIGHT$
|
87 |
+
|
88 |
+
Additional copyrights may follow
|
89 |
+
|
90 |
+
$HEADER$
|
91 |
+
|
92 |
+
Redistribution and use in source and binary forms, with or without
|
93 |
+
modification, are permitted provided that the following conditions are
|
94 |
+
met:
|
95 |
+
|
96 |
+
- Redistributions of source code must retain the above copyright
|
97 |
+
notice, this list of conditions and the following disclaimer.
|
98 |
+
|
99 |
+
- Redistributions in binary form must reproduce the above copyright
|
100 |
+
notice, this list of conditions and the following disclaimer listed
|
101 |
+
in this license in the documentation and/or other materials
|
102 |
+
provided with the distribution.
|
103 |
+
|
104 |
+
- Neither the name of the copyright holders nor the names of its
|
105 |
+
contributors may be used to endorse or promote products derived from
|
106 |
+
this software without specific prior written permission.
|
107 |
+
|
108 |
+
The copyright holders provide no reassurances that the source code
|
109 |
+
provided does not infringe any patent, copyright, or any other
|
110 |
+
intellectual property rights of third parties. The copyright holders
|
111 |
+
disclaim any liability to any recipient for claims brought against
|
112 |
+
recipient by any third party for infringement of that parties
|
113 |
+
intellectual property rights.
|
114 |
+
|
115 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
116 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
117 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
118 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
119 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
120 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
121 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
122 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
123 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
124 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
125 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
126 |
+
|
127 |
+
|
128 |
+
Name: GCC runtime library
|
129 |
+
Files: scipy.libs/libgfortran*.so
|
130 |
+
Description: dynamically linked to files compiled with gcc
|
131 |
+
Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
|
132 |
+
License: GPL-3.0-with-GCC-exception
|
133 |
+
Copyright (C) 2002-2017 Free Software Foundation, Inc.
|
134 |
+
|
135 |
+
Libgfortran is free software; you can redistribute it and/or modify
|
136 |
+
it under the terms of the GNU General Public License as published by
|
137 |
+
the Free Software Foundation; either version 3, or (at your option)
|
138 |
+
any later version.
|
139 |
+
|
140 |
+
Libgfortran is distributed in the hope that it will be useful,
|
141 |
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
142 |
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
143 |
+
GNU General Public License for more details.
|
144 |
+
|
145 |
+
Under Section 7 of GPL version 3, you are granted additional
|
146 |
+
permissions described in the GCC Runtime Library Exception, version
|
147 |
+
3.1, as published by the Free Software Foundation.
|
148 |
+
|
149 |
+
You should have received a copy of the GNU General Public License and
|
150 |
+
a copy of the GCC Runtime Library Exception along with this program;
|
151 |
+
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
|
152 |
+
<http://www.gnu.org/licenses/>.
|
153 |
+
|
154 |
+
----
|
155 |
+
|
156 |
+
Full text of license texts referred to above follows (that they are
|
157 |
+
listed below does not necessarily imply the conditions apply to the
|
158 |
+
present binary release):
|
159 |
+
|
160 |
+
----
|
161 |
+
|
162 |
+
GCC RUNTIME LIBRARY EXCEPTION
|
163 |
+
|
164 |
+
Version 3.1, 31 March 2009
|
165 |
+
|
166 |
+
Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
|
167 |
+
|
168 |
+
Everyone is permitted to copy and distribute verbatim copies of this
|
169 |
+
license document, but changing it is not allowed.
|
170 |
+
|
171 |
+
This GCC Runtime Library Exception ("Exception") is an additional
|
172 |
+
permission under section 7 of the GNU General Public License, version
|
173 |
+
3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
|
174 |
+
bears a notice placed by the copyright holder of the file stating that
|
175 |
+
the file is governed by GPLv3 along with this Exception.
|
176 |
+
|
177 |
+
When you use GCC to compile a program, GCC may combine portions of
|
178 |
+
certain GCC header files and runtime libraries with the compiled
|
179 |
+
program. The purpose of this Exception is to allow compilation of
|
180 |
+
non-GPL (including proprietary) programs to use, in this way, the
|
181 |
+
header files and runtime libraries covered by this Exception.
|
182 |
+
|
183 |
+
0. Definitions.
|
184 |
+
|
185 |
+
A file is an "Independent Module" if it either requires the Runtime
|
186 |
+
Library for execution after a Compilation Process, or makes use of an
|
187 |
+
interface provided by the Runtime Library, but is not otherwise based
|
188 |
+
on the Runtime Library.
|
189 |
+
|
190 |
+
"GCC" means a version of the GNU Compiler Collection, with or without
|
191 |
+
modifications, governed by version 3 (or a specified later version) of
|
192 |
+
the GNU General Public License (GPL) with the option of using any
|
193 |
+
subsequent versions published by the FSF.
|
194 |
+
|
195 |
+
"GPL-compatible Software" is software whose conditions of propagation,
|
196 |
+
modification and use would permit combination with GCC in accord with
|
197 |
+
the license of GCC.
|
198 |
+
|
199 |
+
"Target Code" refers to output from any compiler for a real or virtual
|
200 |
+
target processor architecture, in executable form or suitable for
|
201 |
+
input to an assembler, loader, linker and/or execution
|
202 |
+
phase. Notwithstanding that, Target Code does not include data in any
|
203 |
+
format that is used as a compiler intermediate representation, or used
|
204 |
+
for producing a compiler intermediate representation.
|
205 |
+
|
206 |
+
The "Compilation Process" transforms code entirely represented in
|
207 |
+
non-intermediate languages designed for human-written code, and/or in
|
208 |
+
Java Virtual Machine byte code, into Target Code. Thus, for example,
|
209 |
+
use of source code generators and preprocessors need not be considered
|
210 |
+
part of the Compilation Process, since the Compilation Process can be
|
211 |
+
understood as starting with the output of the generators or
|
212 |
+
preprocessors.
|
213 |
+
|
214 |
+
A Compilation Process is "Eligible" if it is done using GCC, alone or
|
215 |
+
with other GPL-compatible software, or if it is done without using any
|
216 |
+
work based on GCC. For example, using non-GPL-compatible Software to
|
217 |
+
optimize any GCC intermediate representations would not qualify as an
|
218 |
+
Eligible Compilation Process.
|
219 |
+
|
220 |
+
1. Grant of Additional Permission.
|
221 |
+
|
222 |
+
You have permission to propagate a work of Target Code formed by
|
223 |
+
combining the Runtime Library with Independent Modules, even if such
|
224 |
+
propagation would otherwise violate the terms of GPLv3, provided that
|
225 |
+
all Target Code was generated by Eligible Compilation Processes. You
|
226 |
+
may then convey such a combination under terms of your choice,
|
227 |
+
consistent with the licensing of the Independent Modules.
|
228 |
+
|
229 |
+
2. No Weakening of GCC Copyleft.
|
230 |
+
|
231 |
+
The availability of this Exception does not imply any general
|
232 |
+
presumption that third-party software is unaffected by the copyleft
|
233 |
+
requirements of the license of GCC.
|
234 |
+
|
235 |
+
----
|
236 |
+
|
237 |
+
GNU GENERAL PUBLIC LICENSE
|
238 |
+
Version 3, 29 June 2007
|
239 |
+
|
240 |
+
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
241 |
+
Everyone is permitted to copy and distribute verbatim copies
|
242 |
+
of this license document, but changing it is not allowed.
|
243 |
+
|
244 |
+
Preamble
|
245 |
+
|
246 |
+
The GNU General Public License is a free, copyleft license for
|
247 |
+
software and other kinds of works.
|
248 |
+
|
249 |
+
The licenses for most software and other practical works are designed
|
250 |
+
to take away your freedom to share and change the works. By contrast,
|
251 |
+
the GNU General Public License is intended to guarantee your freedom to
|
252 |
+
share and change all versions of a program--to make sure it remains free
|
253 |
+
software for all its users. We, the Free Software Foundation, use the
|
254 |
+
GNU General Public License for most of our software; it applies also to
|
255 |
+
any other work released this way by its authors. You can apply it to
|
256 |
+
your programs, too.
|
257 |
+
|
258 |
+
When we speak of free software, we are referring to freedom, not
|
259 |
+
price. Our General Public Licenses are designed to make sure that you
|
260 |
+
have the freedom to distribute copies of free software (and charge for
|
261 |
+
them if you wish), that you receive source code or can get it if you
|
262 |
+
want it, that you can change the software or use pieces of it in new
|
263 |
+
free programs, and that you know you can do these things.
|
264 |
+
|
265 |
+
To protect your rights, we need to prevent others from denying you
|
266 |
+
these rights or asking you to surrender the rights. Therefore, you have
|
267 |
+
certain responsibilities if you distribute copies of the software, or if
|
268 |
+
you modify it: responsibilities to respect the freedom of others.
|
269 |
+
|
270 |
+
For example, if you distribute copies of such a program, whether
|
271 |
+
gratis or for a fee, you must pass on to the recipients the same
|
272 |
+
freedoms that you received. You must make sure that they, too, receive
|
273 |
+
or can get the source code. And you must show them these terms so they
|
274 |
+
know their rights.
|
275 |
+
|
276 |
+
Developers that use the GNU GPL protect your rights with two steps:
|
277 |
+
(1) assert copyright on the software, and (2) offer you this License
|
278 |
+
giving you legal permission to copy, distribute and/or modify it.
|
279 |
+
|
280 |
+
For the developers' and authors' protection, the GPL clearly explains
|
281 |
+
that there is no warranty for this free software. For both users' and
|
282 |
+
authors' sake, the GPL requires that modified versions be marked as
|
283 |
+
changed, so that their problems will not be attributed erroneously to
|
284 |
+
authors of previous versions.
|
285 |
+
|
286 |
+
Some devices are designed to deny users access to install or run
|
287 |
+
modified versions of the software inside them, although the manufacturer
|
288 |
+
can do so. This is fundamentally incompatible with the aim of
|
289 |
+
protecting users' freedom to change the software. The systematic
|
290 |
+
pattern of such abuse occurs in the area of products for individuals to
|
291 |
+
use, which is precisely where it is most unacceptable. Therefore, we
|
292 |
+
have designed this version of the GPL to prohibit the practice for those
|
293 |
+
products. If such problems arise substantially in other domains, we
|
294 |
+
stand ready to extend this provision to those domains in future versions
|
295 |
+
of the GPL, as needed to protect the freedom of users.
|
296 |
+
|
297 |
+
Finally, every program is threatened constantly by software patents.
|
298 |
+
States should not allow patents to restrict development and use of
|
299 |
+
software on general-purpose computers, but in those that do, we wish to
|
300 |
+
avoid the special danger that patents applied to a free program could
|
301 |
+
make it effectively proprietary. To prevent this, the GPL assures that
|
302 |
+
patents cannot be used to render the program non-free.
|
303 |
+
|
304 |
+
The precise terms and conditions for copying, distribution and
|
305 |
+
modification follow.
|
306 |
+
|
307 |
+
TERMS AND CONDITIONS
|
308 |
+
|
309 |
+
0. Definitions.
|
310 |
+
|
311 |
+
"This License" refers to version 3 of the GNU General Public License.
|
312 |
+
|
313 |
+
"Copyright" also means copyright-like laws that apply to other kinds of
|
314 |
+
works, such as semiconductor masks.
|
315 |
+
|
316 |
+
"The Program" refers to any copyrightable work licensed under this
|
317 |
+
License. Each licensee is addressed as "you". "Licensees" and
|
318 |
+
"recipients" may be individuals or organizations.
|
319 |
+
|
320 |
+
To "modify" a work means to copy from or adapt all or part of the work
|
321 |
+
in a fashion requiring copyright permission, other than the making of an
|
322 |
+
exact copy. The resulting work is called a "modified version" of the
|
323 |
+
earlier work or a work "based on" the earlier work.
|
324 |
+
|
325 |
+
A "covered work" means either the unmodified Program or a work based
|
326 |
+
on the Program.
|
327 |
+
|
328 |
+
To "propagate" a work means to do anything with it that, without
|
329 |
+
permission, would make you directly or secondarily liable for
|
330 |
+
infringement under applicable copyright law, except executing it on a
|
331 |
+
computer or modifying a private copy. Propagation includes copying,
|
332 |
+
distribution (with or without modification), making available to the
|
333 |
+
public, and in some countries other activities as well.
|
334 |
+
|
335 |
+
To "convey" a work means any kind of propagation that enables other
|
336 |
+
parties to make or receive copies. Mere interaction with a user through
|
337 |
+
a computer network, with no transfer of a copy, is not conveying.
|
338 |
+
|
339 |
+
An interactive user interface displays "Appropriate Legal Notices"
|
340 |
+
to the extent that it includes a convenient and prominently visible
|
341 |
+
feature that (1) displays an appropriate copyright notice, and (2)
|
342 |
+
tells the user that there is no warranty for the work (except to the
|
343 |
+
extent that warranties are provided), that licensees may convey the
|
344 |
+
work under this License, and how to view a copy of this License. If
|
345 |
+
the interface presents a list of user commands or options, such as a
|
346 |
+
menu, a prominent item in the list meets this criterion.
|
347 |
+
|
348 |
+
1. Source Code.
|
349 |
+
|
350 |
+
The "source code" for a work means the preferred form of the work
|
351 |
+
for making modifications to it. "Object code" means any non-source
|
352 |
+
form of a work.
|
353 |
+
|
354 |
+
A "Standard Interface" means an interface that either is an official
|
355 |
+
standard defined by a recognized standards body, or, in the case of
|
356 |
+
interfaces specified for a particular programming language, one that
|
357 |
+
is widely used among developers working in that language.
|
358 |
+
|
359 |
+
The "System Libraries" of an executable work include anything, other
|
360 |
+
than the work as a whole, that (a) is included in the normal form of
|
361 |
+
packaging a Major Component, but which is not part of that Major
|
362 |
+
Component, and (b) serves only to enable use of the work with that
|
363 |
+
Major Component, or to implement a Standard Interface for which an
|
364 |
+
implementation is available to the public in source code form. A
|
365 |
+
"Major Component", in this context, means a major essential component
|
366 |
+
(kernel, window system, and so on) of the specific operating system
|
367 |
+
(if any) on which the executable work runs, or a compiler used to
|
368 |
+
produce the work, or an object code interpreter used to run it.
|
369 |
+
|
370 |
+
The "Corresponding Source" for a work in object code form means all
|
371 |
+
the source code needed to generate, install, and (for an executable
|
372 |
+
work) run the object code and to modify the work, including scripts to
|
373 |
+
control those activities. However, it does not include the work's
|
374 |
+
System Libraries, or general-purpose tools or generally available free
|
375 |
+
programs which are used unmodified in performing those activities but
|
376 |
+
which are not part of the work. For example, Corresponding Source
|
377 |
+
includes interface definition files associated with source files for
|
378 |
+
the work, and the source code for shared libraries and dynamically
|
379 |
+
linked subprograms that the work is specifically designed to require,
|
380 |
+
such as by intimate data communication or control flow between those
|
381 |
+
subprograms and other parts of the work.
|
382 |
+
|
383 |
+
The Corresponding Source need not include anything that users
|
384 |
+
can regenerate automatically from other parts of the Corresponding
|
385 |
+
Source.
|
386 |
+
|
387 |
+
The Corresponding Source for a work in source code form is that
|
388 |
+
same work.
|
389 |
+
|
390 |
+
2. Basic Permissions.
|
391 |
+
|
392 |
+
All rights granted under this License are granted for the term of
|
393 |
+
copyright on the Program, and are irrevocable provided the stated
|
394 |
+
conditions are met. This License explicitly affirms your unlimited
|
395 |
+
permission to run the unmodified Program. The output from running a
|
396 |
+
covered work is covered by this License only if the output, given its
|
397 |
+
content, constitutes a covered work. This License acknowledges your
|
398 |
+
rights of fair use or other equivalent, as provided by copyright law.
|
399 |
+
|
400 |
+
You may make, run and propagate covered works that you do not
|
401 |
+
convey, without conditions so long as your license otherwise remains
|
402 |
+
in force. You may convey covered works to others for the sole purpose
|
403 |
+
of having them make modifications exclusively for you, or provide you
|
404 |
+
with facilities for running those works, provided that you comply with
|
405 |
+
the terms of this License in conveying all material for which you do
|
406 |
+
not control copyright. Those thus making or running the covered works
|
407 |
+
for you must do so exclusively on your behalf, under your direction
|
408 |
+
and control, on terms that prohibit them from making any copies of
|
409 |
+
your copyrighted material outside their relationship with you.
|
410 |
+
|
411 |
+
Conveying under any other circumstances is permitted solely under
|
412 |
+
the conditions stated below. Sublicensing is not allowed; section 10
|
413 |
+
makes it unnecessary.
|
414 |
+
|
415 |
+
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
416 |
+
|
417 |
+
No covered work shall be deemed part of an effective technological
|
418 |
+
measure under any applicable law fulfilling obligations under article
|
419 |
+
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
420 |
+
similar laws prohibiting or restricting circumvention of such
|
421 |
+
measures.
|
422 |
+
|
423 |
+
When you convey a covered work, you waive any legal power to forbid
|
424 |
+
circumvention of technological measures to the extent such circumvention
|
425 |
+
is effected by exercising rights under this License with respect to
|
426 |
+
the covered work, and you disclaim any intention to limit operation or
|
427 |
+
modification of the work as a means of enforcing, against the work's
|
428 |
+
users, your or third parties' legal rights to forbid circumvention of
|
429 |
+
technological measures.
|
430 |
+
|
431 |
+
4. Conveying Verbatim Copies.
|
432 |
+
|
433 |
+
You may convey verbatim copies of the Program's source code as you
|
434 |
+
receive it, in any medium, provided that you conspicuously and
|
435 |
+
appropriately publish on each copy an appropriate copyright notice;
|
436 |
+
keep intact all notices stating that this License and any
|
437 |
+
non-permissive terms added in accord with section 7 apply to the code;
|
438 |
+
keep intact all notices of the absence of any warranty; and give all
|
439 |
+
recipients a copy of this License along with the Program.
|
440 |
+
|
441 |
+
You may charge any price or no price for each copy that you convey,
|
442 |
+
and you may offer support or warranty protection for a fee.
|
443 |
+
|
444 |
+
5. Conveying Modified Source Versions.
|
445 |
+
|
446 |
+
You may convey a work based on the Program, or the modifications to
|
447 |
+
produce it from the Program, in the form of source code under the
|
448 |
+
terms of section 4, provided that you also meet all of these conditions:
|
449 |
+
|
450 |
+
a) The work must carry prominent notices stating that you modified
|
451 |
+
it, and giving a relevant date.
|
452 |
+
|
453 |
+
b) The work must carry prominent notices stating that it is
|
454 |
+
released under this License and any conditions added under section
|
455 |
+
7. This requirement modifies the requirement in section 4 to
|
456 |
+
"keep intact all notices".
|
457 |
+
|
458 |
+
c) You must license the entire work, as a whole, under this
|
459 |
+
License to anyone who comes into possession of a copy. This
|
460 |
+
License will therefore apply, along with any applicable section 7
|
461 |
+
additional terms, to the whole of the work, and all its parts,
|
462 |
+
regardless of how they are packaged. This License gives no
|
463 |
+
permission to license the work in any other way, but it does not
|
464 |
+
invalidate such permission if you have separately received it.
|
465 |
+
|
466 |
+
d) If the work has interactive user interfaces, each must display
|
467 |
+
Appropriate Legal Notices; however, if the Program has interactive
|
468 |
+
interfaces that do not display Appropriate Legal Notices, your
|
469 |
+
work need not make them do so.
|
470 |
+
|
471 |
+
A compilation of a covered work with other separate and independent
|
472 |
+
works, which are not by their nature extensions of the covered work,
|
473 |
+
and which are not combined with it such as to form a larger program,
|
474 |
+
in or on a volume of a storage or distribution medium, is called an
|
475 |
+
"aggregate" if the compilation and its resulting copyright are not
|
476 |
+
used to limit the access or legal rights of the compilation's users
|
477 |
+
beyond what the individual works permit. Inclusion of a covered work
|
478 |
+
in an aggregate does not cause this License to apply to the other
|
479 |
+
parts of the aggregate.
|
480 |
+
|
481 |
+
6. Conveying Non-Source Forms.
|
482 |
+
|
483 |
+
You may convey a covered work in object code form under the terms
|
484 |
+
of sections 4 and 5, provided that you also convey the
|
485 |
+
machine-readable Corresponding Source under the terms of this License,
|
486 |
+
in one of these ways:
|
487 |
+
|
488 |
+
a) Convey the object code in, or embodied in, a physical product
|
489 |
+
(including a physical distribution medium), accompanied by the
|
490 |
+
Corresponding Source fixed on a durable physical medium
|
491 |
+
customarily used for software interchange.
|
492 |
+
|
493 |
+
b) Convey the object code in, or embodied in, a physical product
|
494 |
+
(including a physical distribution medium), accompanied by a
|
495 |
+
written offer, valid for at least three years and valid for as
|
496 |
+
long as you offer spare parts or customer support for that product
|
497 |
+
model, to give anyone who possesses the object code either (1) a
|
498 |
+
copy of the Corresponding Source for all the software in the
|
499 |
+
product that is covered by this License, on a durable physical
|
500 |
+
medium customarily used for software interchange, for a price no
|
501 |
+
more than your reasonable cost of physically performing this
|
502 |
+
conveying of source, or (2) access to copy the
|
503 |
+
Corresponding Source from a network server at no charge.
|
504 |
+
|
505 |
+
c) Convey individual copies of the object code with a copy of the
|
506 |
+
written offer to provide the Corresponding Source. This
|
507 |
+
alternative is allowed only occasionally and noncommercially, and
|
508 |
+
only if you received the object code with such an offer, in accord
|
509 |
+
with subsection 6b.
|
510 |
+
|
511 |
+
d) Convey the object code by offering access from a designated
|
512 |
+
place (gratis or for a charge), and offer equivalent access to the
|
513 |
+
Corresponding Source in the same way through the same place at no
|
514 |
+
further charge. You need not require recipients to copy the
|
515 |
+
Corresponding Source along with the object code. If the place to
|
516 |
+
copy the object code is a network server, the Corresponding Source
|
517 |
+
may be on a different server (operated by you or a third party)
|
518 |
+
that supports equivalent copying facilities, provided you maintain
|
519 |
+
clear directions next to the object code saying where to find the
|
520 |
+
Corresponding Source. Regardless of what server hosts the
|
521 |
+
Corresponding Source, you remain obligated to ensure that it is
|
522 |
+
available for as long as needed to satisfy these requirements.
|
523 |
+
|
524 |
+
e) Convey the object code using peer-to-peer transmission, provided
|
525 |
+
you inform other peers where the object code and Corresponding
|
526 |
+
Source of the work are being offered to the general public at no
|
527 |
+
charge under subsection 6d.
|
528 |
+
|
529 |
+
A separable portion of the object code, whose source code is excluded
|
530 |
+
from the Corresponding Source as a System Library, need not be
|
531 |
+
included in conveying the object code work.
|
532 |
+
|
533 |
+
A "User Product" is either (1) a "consumer product", which means any
|
534 |
+
tangible personal property which is normally used for personal, family,
|
535 |
+
or household purposes, or (2) anything designed or sold for incorporation
|
536 |
+
into a dwelling. In determining whether a product is a consumer product,
|
537 |
+
doubtful cases shall be resolved in favor of coverage. For a particular
|
538 |
+
product received by a particular user, "normally used" refers to a
|
539 |
+
typical or common use of that class of product, regardless of the status
|
540 |
+
of the particular user or of the way in which the particular user
|
541 |
+
actually uses, or expects or is expected to use, the product. A product
|
542 |
+
is a consumer product regardless of whether the product has substantial
|
543 |
+
commercial, industrial or non-consumer uses, unless such uses represent
|
544 |
+
the only significant mode of use of the product.
|
545 |
+
|
546 |
+
"Installation Information" for a User Product means any methods,
|
547 |
+
procedures, authorization keys, or other information required to install
|
548 |
+
and execute modified versions of a covered work in that User Product from
|
549 |
+
a modified version of its Corresponding Source. The information must
|
550 |
+
suffice to ensure that the continued functioning of the modified object
|
551 |
+
code is in no case prevented or interfered with solely because
|
552 |
+
modification has been made.
|
553 |
+
|
554 |
+
If you convey an object code work under this section in, or with, or
|
555 |
+
specifically for use in, a User Product, and the conveying occurs as
|
556 |
+
part of a transaction in which the right of possession and use of the
|
557 |
+
User Product is transferred to the recipient in perpetuity or for a
|
558 |
+
fixed term (regardless of how the transaction is characterized), the
|
559 |
+
Corresponding Source conveyed under this section must be accompanied
|
560 |
+
by the Installation Information. But this requirement does not apply
|
561 |
+
if neither you nor any third party retains the ability to install
|
562 |
+
modified object code on the User Product (for example, the work has
|
563 |
+
been installed in ROM).
|
564 |
+
|
565 |
+
The requirement to provide Installation Information does not include a
|
566 |
+
requirement to continue to provide support service, warranty, or updates
|
567 |
+
for a work that has been modified or installed by the recipient, or for
|
568 |
+
the User Product in which it has been modified or installed. Access to a
|
569 |
+
network may be denied when the modification itself materially and
|
570 |
+
adversely affects the operation of the network or violates the rules and
|
571 |
+
protocols for communication across the network.
|
572 |
+
|
573 |
+
Corresponding Source conveyed, and Installation Information provided,
|
574 |
+
in accord with this section must be in a format that is publicly
|
575 |
+
documented (and with an implementation available to the public in
|
576 |
+
source code form), and must require no special password or key for
|
577 |
+
unpacking, reading or copying.
|
578 |
+
|
579 |
+
7. Additional Terms.
|
580 |
+
|
581 |
+
"Additional permissions" are terms that supplement the terms of this
|
582 |
+
License by making exceptions from one or more of its conditions.
|
583 |
+
Additional permissions that are applicable to the entire Program shall
|
584 |
+
be treated as though they were included in this License, to the extent
|
585 |
+
that they are valid under applicable law. If additional permissions
|
586 |
+
apply only to part of the Program, that part may be used separately
|
587 |
+
under those permissions, but the entire Program remains governed by
|
588 |
+
this License without regard to the additional permissions.
|
589 |
+
|
590 |
+
When you convey a copy of a covered work, you may at your option
|
591 |
+
remove any additional permissions from that copy, or from any part of
|
592 |
+
it. (Additional permissions may be written to require their own
|
593 |
+
removal in certain cases when you modify the work.) You may place
|
594 |
+
additional permissions on material, added by you to a covered work,
|
595 |
+
for which you have or can give appropriate copyright permission.
|
596 |
+
|
597 |
+
Notwithstanding any other provision of this License, for material you
|
598 |
+
add to a covered work, you may (if authorized by the copyright holders of
|
599 |
+
that material) supplement the terms of this License with terms:
|
600 |
+
|
601 |
+
a) Disclaiming warranty or limiting liability differently from the
|
602 |
+
terms of sections 15 and 16 of this License; or
|
603 |
+
|
604 |
+
b) Requiring preservation of specified reasonable legal notices or
|
605 |
+
author attributions in that material or in the Appropriate Legal
|
606 |
+
Notices displayed by works containing it; or
|
607 |
+
|
608 |
+
c) Prohibiting misrepresentation of the origin of that material, or
|
609 |
+
requiring that modified versions of such material be marked in
|
610 |
+
reasonable ways as different from the original version; or
|
611 |
+
|
612 |
+
d) Limiting the use for publicity purposes of names of licensors or
|
613 |
+
authors of the material; or
|
614 |
+
|
615 |
+
e) Declining to grant rights under trademark law for use of some
|
616 |
+
trade names, trademarks, or service marks; or
|
617 |
+
|
618 |
+
f) Requiring indemnification of licensors and authors of that
|
619 |
+
material by anyone who conveys the material (or modified versions of
|
620 |
+
it) with contractual assumptions of liability to the recipient, for
|
621 |
+
any liability that these contractual assumptions directly impose on
|
622 |
+
those licensors and authors.
|
623 |
+
|
624 |
+
All other non-permissive additional terms are considered "further
|
625 |
+
restrictions" within the meaning of section 10. If the Program as you
|
626 |
+
received it, or any part of it, contains a notice stating that it is
|
627 |
+
governed by this License along with a term that is a further
|
628 |
+
restriction, you may remove that term. If a license document contains
|
629 |
+
a further restriction but permits relicensing or conveying under this
|
630 |
+
License, you may add to a covered work material governed by the terms
|
631 |
+
of that license document, provided that the further restriction does
|
632 |
+
not survive such relicensing or conveying.
|
633 |
+
|
634 |
+
If you add terms to a covered work in accord with this section, you
|
635 |
+
must place, in the relevant source files, a statement of the
|
636 |
+
additional terms that apply to those files, or a notice indicating
|
637 |
+
where to find the applicable terms.
|
638 |
+
|
639 |
+
Additional terms, permissive or non-permissive, may be stated in the
|
640 |
+
form of a separately written license, or stated as exceptions;
|
641 |
+
the above requirements apply either way.
|
642 |
+
|
643 |
+
8. Termination.
|
644 |
+
|
645 |
+
You may not propagate or modify a covered work except as expressly
|
646 |
+
provided under this License. Any attempt otherwise to propagate or
|
647 |
+
modify it is void, and will automatically terminate your rights under
|
648 |
+
this License (including any patent licenses granted under the third
|
649 |
+
paragraph of section 11).
|
650 |
+
|
651 |
+
However, if you cease all violation of this License, then your
|
652 |
+
license from a particular copyright holder is reinstated (a)
|
653 |
+
provisionally, unless and until the copyright holder explicitly and
|
654 |
+
finally terminates your license, and (b) permanently, if the copyright
|
655 |
+
holder fails to notify you of the violation by some reasonable means
|
656 |
+
prior to 60 days after the cessation.
|
657 |
+
|
658 |
+
Moreover, your license from a particular copyright holder is
|
659 |
+
reinstated permanently if the copyright holder notifies you of the
|
660 |
+
violation by some reasonable means, this is the first time you have
|
661 |
+
received notice of violation of this License (for any work) from that
|
662 |
+
copyright holder, and you cure the violation prior to 30 days after
|
663 |
+
your receipt of the notice.
|
664 |
+
|
665 |
+
Termination of your rights under this section does not terminate the
|
666 |
+
licenses of parties who have received copies or rights from you under
|
667 |
+
this License. If your rights have been terminated and not permanently
|
668 |
+
reinstated, you do not qualify to receive new licenses for the same
|
669 |
+
material under section 10.
|
670 |
+
|
671 |
+
9. Acceptance Not Required for Having Copies.
|
672 |
+
|
673 |
+
You are not required to accept this License in order to receive or
|
674 |
+
run a copy of the Program. Ancillary propagation of a covered work
|
675 |
+
occurring solely as a consequence of using peer-to-peer transmission
|
676 |
+
to receive a copy likewise does not require acceptance. However,
|
677 |
+
nothing other than this License grants you permission to propagate or
|
678 |
+
modify any covered work. These actions infringe copyright if you do
|
679 |
+
not accept this License. Therefore, by modifying or propagating a
|
680 |
+
covered work, you indicate your acceptance of this License to do so.
|
681 |
+
|
682 |
+
10. Automatic Licensing of Downstream Recipients.
|
683 |
+
|
684 |
+
Each time you convey a covered work, the recipient automatically
|
685 |
+
receives a license from the original licensors, to run, modify and
|
686 |
+
propagate that work, subject to this License. You are not responsible
|
687 |
+
for enforcing compliance by third parties with this License.
|
688 |
+
|
689 |
+
An "entity transaction" is a transaction transferring control of an
|
690 |
+
organization, or substantially all assets of one, or subdividing an
|
691 |
+
organization, or merging organizations. If propagation of a covered
|
692 |
+
work results from an entity transaction, each party to that
|
693 |
+
transaction who receives a copy of the work also receives whatever
|
694 |
+
licenses to the work the party's predecessor in interest had or could
|
695 |
+
give under the previous paragraph, plus a right to possession of the
|
696 |
+
Corresponding Source of the work from the predecessor in interest, if
|
697 |
+
the predecessor has it or can get it with reasonable efforts.
|
698 |
+
|
699 |
+
You may not impose any further restrictions on the exercise of the
|
700 |
+
rights granted or affirmed under this License. For example, you may
|
701 |
+
not impose a license fee, royalty, or other charge for exercise of
|
702 |
+
rights granted under this License, and you may not initiate litigation
|
703 |
+
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
704 |
+
any patent claim is infringed by making, using, selling, offering for
|
705 |
+
sale, or importing the Program or any portion of it.
|
706 |
+
|
707 |
+
11. Patents.
|
708 |
+
|
709 |
+
A "contributor" is a copyright holder who authorizes use under this
|
710 |
+
License of the Program or a work on which the Program is based. The
|
711 |
+
work thus licensed is called the contributor's "contributor version".
|
712 |
+
|
713 |
+
A contributor's "essential patent claims" are all patent claims
|
714 |
+
owned or controlled by the contributor, whether already acquired or
|
715 |
+
hereafter acquired, that would be infringed by some manner, permitted
|
716 |
+
by this License, of making, using, or selling its contributor version,
|
717 |
+
but do not include claims that would be infringed only as a
|
718 |
+
consequence of further modification of the contributor version. For
|
719 |
+
purposes of this definition, "control" includes the right to grant
|
720 |
+
patent sublicenses in a manner consistent with the requirements of
|
721 |
+
this License.
|
722 |
+
|
723 |
+
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
724 |
+
patent license under the contributor's essential patent claims, to
|
725 |
+
make, use, sell, offer for sale, import and otherwise run, modify and
|
726 |
+
propagate the contents of its contributor version.
|
727 |
+
|
728 |
+
In the following three paragraphs, a "patent license" is any express
|
729 |
+
agreement or commitment, however denominated, not to enforce a patent
|
730 |
+
(such as an express permission to practice a patent or covenant not to
|
731 |
+
sue for patent infringement). To "grant" such a patent license to a
|
732 |
+
party means to make such an agreement or commitment not to enforce a
|
733 |
+
patent against the party.
|
734 |
+
|
735 |
+
If you convey a covered work, knowingly relying on a patent license,
|
736 |
+
and the Corresponding Source of the work is not available for anyone
|
737 |
+
to copy, free of charge and under the terms of this License, through a
|
738 |
+
publicly available network server or other readily accessible means,
|
739 |
+
then you must either (1) cause the Corresponding Source to be so
|
740 |
+
available, or (2) arrange to deprive yourself of the benefit of the
|
741 |
+
patent license for this particular work, or (3) arrange, in a manner
|
742 |
+
consistent with the requirements of this License, to extend the patent
|
743 |
+
license to downstream recipients. "Knowingly relying" means you have
|
744 |
+
actual knowledge that, but for the patent license, your conveying the
|
745 |
+
covered work in a country, or your recipient's use of the covered work
|
746 |
+
in a country, would infringe one or more identifiable patents in that
|
747 |
+
country that you have reason to believe are valid.
|
748 |
+
|
749 |
+
If, pursuant to or in connection with a single transaction or
|
750 |
+
arrangement, you convey, or propagate by procuring conveyance of, a
|
751 |
+
covered work, and grant a patent license to some of the parties
|
752 |
+
receiving the covered work authorizing them to use, propagate, modify
|
753 |
+
or convey a specific copy of the covered work, then the patent license
|
754 |
+
you grant is automatically extended to all recipients of the covered
|
755 |
+
work and works based on it.
|
756 |
+
|
757 |
+
A patent license is "discriminatory" if it does not include within
|
758 |
+
the scope of its coverage, prohibits the exercise of, or is
|
759 |
+
conditioned on the non-exercise of one or more of the rights that are
|
760 |
+
specifically granted under this License. You may not convey a covered
|
761 |
+
work if you are a party to an arrangement with a third party that is
|
762 |
+
in the business of distributing software, under which you make payment
|
763 |
+
to the third party based on the extent of your activity of conveying
|
764 |
+
the work, and under which the third party grants, to any of the
|
765 |
+
parties who would receive the covered work from you, a discriminatory
|
766 |
+
patent license (a) in connection with copies of the covered work
|
767 |
+
conveyed by you (or copies made from those copies), or (b) primarily
|
768 |
+
for and in connection with specific products or compilations that
|
769 |
+
contain the covered work, unless you entered into that arrangement,
|
770 |
+
or that patent license was granted, prior to 28 March 2007.
|
771 |
+
|
772 |
+
Nothing in this License shall be construed as excluding or limiting
|
773 |
+
any implied license or other defenses to infringement that may
|
774 |
+
otherwise be available to you under applicable patent law.
|
775 |
+
|
776 |
+
12. No Surrender of Others' Freedom.
|
777 |
+
|
778 |
+
If conditions are imposed on you (whether by court order, agreement or
|
779 |
+
otherwise) that contradict the conditions of this License, they do not
|
780 |
+
excuse you from the conditions of this License. If you cannot convey a
|
781 |
+
covered work so as to satisfy simultaneously your obligations under this
|
782 |
+
License and any other pertinent obligations, then as a consequence you may
|
783 |
+
not convey it at all. For example, if you agree to terms that obligate you
|
784 |
+
to collect a royalty for further conveying from those to whom you convey
|
785 |
+
the Program, the only way you could satisfy both those terms and this
|
786 |
+
License would be to refrain entirely from conveying the Program.
|
787 |
+
|
788 |
+
13. Use with the GNU Affero General Public License.
|
789 |
+
|
790 |
+
Notwithstanding any other provision of this License, you have
|
791 |
+
permission to link or combine any covered work with a work licensed
|
792 |
+
under version 3 of the GNU Affero General Public License into a single
|
793 |
+
combined work, and to convey the resulting work. The terms of this
|
794 |
+
License will continue to apply to the part which is the covered work,
|
795 |
+
but the special requirements of the GNU Affero General Public License,
|
796 |
+
section 13, concerning interaction through a network will apply to the
|
797 |
+
combination as such.
|
798 |
+
|
799 |
+
14. Revised Versions of this License.
|
800 |
+
|
801 |
+
The Free Software Foundation may publish revised and/or new versions of
|
802 |
+
the GNU General Public License from time to time. Such new versions will
|
803 |
+
be similar in spirit to the present version, but may differ in detail to
|
804 |
+
address new problems or concerns.
|
805 |
+
|
806 |
+
Each version is given a distinguishing version number. If the
|
807 |
+
Program specifies that a certain numbered version of the GNU General
|
808 |
+
Public License "or any later version" applies to it, you have the
|
809 |
+
option of following the terms and conditions either of that numbered
|
810 |
+
version or of any later version published by the Free Software
|
811 |
+
Foundation. If the Program does not specify a version number of the
|
812 |
+
GNU General Public License, you may choose any version ever published
|
813 |
+
by the Free Software Foundation.
|
814 |
+
|
815 |
+
If the Program specifies that a proxy can decide which future
|
816 |
+
versions of the GNU General Public License can be used, that proxy's
|
817 |
+
public statement of acceptance of a version permanently authorizes you
|
818 |
+
to choose that version for the Program.
|
819 |
+
|
820 |
+
Later license versions may give you additional or different
|
821 |
+
permissions. However, no additional obligations are imposed on any
|
822 |
+
author or copyright holder as a result of your choosing to follow a
|
823 |
+
later version.
|
824 |
+
|
825 |
+
15. Disclaimer of Warranty.
|
826 |
+
|
827 |
+
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
828 |
+
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
829 |
+
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
830 |
+
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
831 |
+
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
832 |
+
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
833 |
+
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
834 |
+
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
835 |
+
|
836 |
+
16. Limitation of Liability.
|
837 |
+
|
838 |
+
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
839 |
+
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
840 |
+
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
841 |
+
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
842 |
+
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
843 |
+
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
844 |
+
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
845 |
+
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
846 |
+
SUCH DAMAGES.
|
847 |
+
|
848 |
+
17. Interpretation of Sections 15 and 16.
|
849 |
+
|
850 |
+
If the disclaimer of warranty and limitation of liability provided
|
851 |
+
above cannot be given local legal effect according to their terms,
|
852 |
+
reviewing courts shall apply local law that most closely approximates
|
853 |
+
an absolute waiver of all civil liability in connection with the
|
854 |
+
Program, unless a warranty or assumption of liability accompanies a
|
855 |
+
copy of the Program in return for a fee.
|
856 |
+
|
857 |
+
END OF TERMS AND CONDITIONS
|
858 |
+
|
859 |
+
How to Apply These Terms to Your New Programs
|
860 |
+
|
861 |
+
If you develop a new program, and you want it to be of the greatest
|
862 |
+
possible use to the public, the best way to achieve this is to make it
|
863 |
+
free software which everyone can redistribute and change under these terms.
|
864 |
+
|
865 |
+
To do so, attach the following notices to the program. It is safest
|
866 |
+
to attach them to the start of each source file to most effectively
|
867 |
+
state the exclusion of warranty; and each file should have at least
|
868 |
+
the "copyright" line and a pointer to where the full notice is found.
|
869 |
+
|
870 |
+
<one line to give the program's name and a brief idea of what it does.>
|
871 |
+
Copyright (C) <year> <name of author>
|
872 |
+
|
873 |
+
This program is free software: you can redistribute it and/or modify
|
874 |
+
it under the terms of the GNU General Public License as published by
|
875 |
+
the Free Software Foundation, either version 3 of the License, or
|
876 |
+
(at your option) any later version.
|
877 |
+
|
878 |
+
This program is distributed in the hope that it will be useful,
|
879 |
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
880 |
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
881 |
+
GNU General Public License for more details.
|
882 |
+
|
883 |
+
You should have received a copy of the GNU General Public License
|
884 |
+
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
885 |
+
|
886 |
+
Also add information on how to contact you by electronic and paper mail.
|
887 |
+
|
888 |
+
If the program does terminal interaction, make it output a short
|
889 |
+
notice like this when it starts in an interactive mode:
|
890 |
+
|
891 |
+
<program> Copyright (C) <year> <name of author>
|
892 |
+
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
893 |
+
This is free software, and you are welcome to redistribute it
|
894 |
+
under certain conditions; type `show c' for details.
|
895 |
+
|
896 |
+
The hypothetical commands `show w' and `show c' should show the appropriate
|
897 |
+
parts of the General Public License. Of course, your program's commands
|
898 |
+
might be different; for a GUI interface, you would use an "about box".
|
899 |
+
|
900 |
+
You should also get your employer (if you work as a programmer) or school,
|
901 |
+
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
902 |
+
For more information on this, and how to apply and follow the GNU GPL, see
|
903 |
+
<http://www.gnu.org/licenses/>.
|
904 |
+
|
905 |
+
The GNU General Public License does not permit incorporating your program
|
906 |
+
into proprietary programs. If your program is a subroutine library, you
|
907 |
+
may consider it more useful to permit linking proprietary applications with
|
908 |
+
the library. If this is what you want to do, use the GNU Lesser General
|
909 |
+
Public License instead of this License. But first, please read
|
910 |
+
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
911 |
+
|
912 |
+
|
913 |
+
Name: libquadmath
|
914 |
+
Files: scipy.libs/libquadmath*.so
|
915 |
+
Description: dynamically linked to files compiled with gcc
|
916 |
+
Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
|
917 |
+
License: LGPL-2.1-or-later
|
918 |
+
|
919 |
+
GCC Quad-Precision Math Library
|
920 |
+
Copyright (C) 2010-2019 Free Software Foundation, Inc.
|
921 |
+
Written by Francois-Xavier Coudert <[email protected]>
|
922 |
+
|
923 |
+
This file is part of the libquadmath library.
|
924 |
+
Libquadmath is free software; you can redistribute it and/or
|
925 |
+
modify it under the terms of the GNU Library General Public
|
926 |
+
License as published by the Free Software Foundation; either
|
927 |
+
version 2.1 of the License, or (at your option) any later version.
|
928 |
+
|
929 |
+
Libquadmath is distributed in the hope that it will be useful,
|
930 |
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
931 |
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
932 |
+
Lesser General Public License for more details.
|
933 |
+
https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
|
env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/METADATA
ADDED
@@ -0,0 +1,1074 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Metadata-Version: 2.1
|
2 |
+
Name: scipy
|
3 |
+
Version: 1.13.0
|
4 |
+
Summary: Fundamental algorithms for scientific computing in Python
|
5 |
+
Home-page: https://scipy.org/
|
6 |
+
Maintainer-Email: SciPy Developers <[email protected]>
|
7 |
+
License: Copyright (c) 2001-2002 Enthought, Inc. 2003-2024, SciPy Developers.
|
8 |
+
All rights reserved.
|
9 |
+
|
10 |
+
Redistribution and use in source and binary forms, with or without
|
11 |
+
modification, are permitted provided that the following conditions
|
12 |
+
are met:
|
13 |
+
|
14 |
+
1. Redistributions of source code must retain the above copyright
|
15 |
+
notice, this list of conditions and the following disclaimer.
|
16 |
+
|
17 |
+
2. Redistributions in binary form must reproduce the above
|
18 |
+
copyright notice, this list of conditions and the following
|
19 |
+
disclaimer in the documentation and/or other materials provided
|
20 |
+
with the distribution.
|
21 |
+
|
22 |
+
3. Neither the name of the copyright holder nor the names of its
|
23 |
+
contributors may be used to endorse or promote products derived
|
24 |
+
from this software without specific prior written permission.
|
25 |
+
|
26 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
27 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
28 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
29 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
30 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
31 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
32 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
33 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
34 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
35 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
36 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
37 |
+
|
38 |
+
----
|
39 |
+
|
40 |
+
This binary distribution of SciPy also bundles the following software:
|
41 |
+
|
42 |
+
|
43 |
+
Name: OpenBLAS
|
44 |
+
Files: scipy.libs/libopenblas*.so
|
45 |
+
Description: bundled as a dynamically linked library
|
46 |
+
Availability: https://github.com/OpenMathLib/OpenBLAS/
|
47 |
+
License: BSD-3-Clause-Attribution
|
48 |
+
Copyright (c) 2011-2014, The OpenBLAS Project
|
49 |
+
All rights reserved.
|
50 |
+
|
51 |
+
Redistribution and use in source and binary forms, with or without
|
52 |
+
modification, are permitted provided that the following conditions are
|
53 |
+
met:
|
54 |
+
|
55 |
+
1. Redistributions of source code must retain the above copyright
|
56 |
+
notice, this list of conditions and the following disclaimer.
|
57 |
+
|
58 |
+
2. Redistributions in binary form must reproduce the above copyright
|
59 |
+
notice, this list of conditions and the following disclaimer in
|
60 |
+
the documentation and/or other materials provided with the
|
61 |
+
distribution.
|
62 |
+
3. Neither the name of the OpenBLAS project nor the names of
|
63 |
+
its contributors may be used to endorse or promote products
|
64 |
+
derived from this software without specific prior written
|
65 |
+
permission.
|
66 |
+
|
67 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
68 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
69 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
70 |
+
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
|
71 |
+
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
72 |
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
73 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
74 |
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
75 |
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
76 |
+
USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
77 |
+
|
78 |
+
|
79 |
+
Name: LAPACK
|
80 |
+
Files: scipy.libs/libopenblas*.so
|
81 |
+
Description: bundled in OpenBLAS
|
82 |
+
Availability: https://github.com/OpenMathLib/OpenBLAS/
|
83 |
+
License: BSD-3-Clause-Attribution
|
84 |
+
Copyright (c) 1992-2013 The University of Tennessee and The University
|
85 |
+
of Tennessee Research Foundation. All rights
|
86 |
+
reserved.
|
87 |
+
Copyright (c) 2000-2013 The University of California Berkeley. All
|
88 |
+
rights reserved.
|
89 |
+
Copyright (c) 2006-2013 The University of Colorado Denver. All rights
|
90 |
+
reserved.
|
91 |
+
|
92 |
+
$COPYRIGHT$
|
93 |
+
|
94 |
+
Additional copyrights may follow
|
95 |
+
|
96 |
+
$HEADER$
|
97 |
+
|
98 |
+
Redistribution and use in source and binary forms, with or without
|
99 |
+
modification, are permitted provided that the following conditions are
|
100 |
+
met:
|
101 |
+
|
102 |
+
- Redistributions of source code must retain the above copyright
|
103 |
+
notice, this list of conditions and the following disclaimer.
|
104 |
+
|
105 |
+
- Redistributions in binary form must reproduce the above copyright
|
106 |
+
notice, this list of conditions and the following disclaimer listed
|
107 |
+
in this license in the documentation and/or other materials
|
108 |
+
provided with the distribution.
|
109 |
+
|
110 |
+
- Neither the name of the copyright holders nor the names of its
|
111 |
+
contributors may be used to endorse or promote products derived from
|
112 |
+
this software without specific prior written permission.
|
113 |
+
|
114 |
+
The copyright holders provide no reassurances that the source code
|
115 |
+
provided does not infringe any patent, copyright, or any other
|
116 |
+
intellectual property rights of third parties. The copyright holders
|
117 |
+
disclaim any liability to any recipient for claims brought against
|
118 |
+
recipient by any third party for infringement of that parties
|
119 |
+
intellectual property rights.
|
120 |
+
|
121 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
122 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
123 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
124 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
125 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
126 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
127 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
128 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
129 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
130 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
131 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
132 |
+
|
133 |
+
|
134 |
+
Name: GCC runtime library
|
135 |
+
Files: scipy.libs/libgfortran*.so
|
136 |
+
Description: dynamically linked to files compiled with gcc
|
137 |
+
Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libgfortran
|
138 |
+
License: GPL-3.0-with-GCC-exception
|
139 |
+
Copyright (C) 2002-2017 Free Software Foundation, Inc.
|
140 |
+
|
141 |
+
Libgfortran is free software; you can redistribute it and/or modify
|
142 |
+
it under the terms of the GNU General Public License as published by
|
143 |
+
the Free Software Foundation; either version 3, or (at your option)
|
144 |
+
any later version.
|
145 |
+
|
146 |
+
Libgfortran is distributed in the hope that it will be useful,
|
147 |
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
148 |
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
149 |
+
GNU General Public License for more details.
|
150 |
+
|
151 |
+
Under Section 7 of GPL version 3, you are granted additional
|
152 |
+
permissions described in the GCC Runtime Library Exception, version
|
153 |
+
3.1, as published by the Free Software Foundation.
|
154 |
+
|
155 |
+
You should have received a copy of the GNU General Public License and
|
156 |
+
a copy of the GCC Runtime Library Exception along with this program;
|
157 |
+
see the files COPYING3 and COPYING.RUNTIME respectively. If not, see
|
158 |
+
<http://www.gnu.org/licenses/>.
|
159 |
+
|
160 |
+
----
|
161 |
+
|
162 |
+
Full text of license texts referred to above follows (that they are
|
163 |
+
listed below does not necessarily imply the conditions apply to the
|
164 |
+
present binary release):
|
165 |
+
|
166 |
+
----
|
167 |
+
|
168 |
+
GCC RUNTIME LIBRARY EXCEPTION
|
169 |
+
|
170 |
+
Version 3.1, 31 March 2009
|
171 |
+
|
172 |
+
Copyright (C) 2009 Free Software Foundation, Inc. <http://fsf.org/>
|
173 |
+
|
174 |
+
Everyone is permitted to copy and distribute verbatim copies of this
|
175 |
+
license document, but changing it is not allowed.
|
176 |
+
|
177 |
+
This GCC Runtime Library Exception ("Exception") is an additional
|
178 |
+
permission under section 7 of the GNU General Public License, version
|
179 |
+
3 ("GPLv3"). It applies to a given file (the "Runtime Library") that
|
180 |
+
bears a notice placed by the copyright holder of the file stating that
|
181 |
+
the file is governed by GPLv3 along with this Exception.
|
182 |
+
|
183 |
+
When you use GCC to compile a program, GCC may combine portions of
|
184 |
+
certain GCC header files and runtime libraries with the compiled
|
185 |
+
program. The purpose of this Exception is to allow compilation of
|
186 |
+
non-GPL (including proprietary) programs to use, in this way, the
|
187 |
+
header files and runtime libraries covered by this Exception.
|
188 |
+
|
189 |
+
0. Definitions.
|
190 |
+
|
191 |
+
A file is an "Independent Module" if it either requires the Runtime
|
192 |
+
Library for execution after a Compilation Process, or makes use of an
|
193 |
+
interface provided by the Runtime Library, but is not otherwise based
|
194 |
+
on the Runtime Library.
|
195 |
+
|
196 |
+
"GCC" means a version of the GNU Compiler Collection, with or without
|
197 |
+
modifications, governed by version 3 (or a specified later version) of
|
198 |
+
the GNU General Public License (GPL) with the option of using any
|
199 |
+
subsequent versions published by the FSF.
|
200 |
+
|
201 |
+
"GPL-compatible Software" is software whose conditions of propagation,
|
202 |
+
modification and use would permit combination with GCC in accord with
|
203 |
+
the license of GCC.
|
204 |
+
|
205 |
+
"Target Code" refers to output from any compiler for a real or virtual
|
206 |
+
target processor architecture, in executable form or suitable for
|
207 |
+
input to an assembler, loader, linker and/or execution
|
208 |
+
phase. Notwithstanding that, Target Code does not include data in any
|
209 |
+
format that is used as a compiler intermediate representation, or used
|
210 |
+
for producing a compiler intermediate representation.
|
211 |
+
|
212 |
+
The "Compilation Process" transforms code entirely represented in
|
213 |
+
non-intermediate languages designed for human-written code, and/or in
|
214 |
+
Java Virtual Machine byte code, into Target Code. Thus, for example,
|
215 |
+
use of source code generators and preprocessors need not be considered
|
216 |
+
part of the Compilation Process, since the Compilation Process can be
|
217 |
+
understood as starting with the output of the generators or
|
218 |
+
preprocessors.
|
219 |
+
|
220 |
+
A Compilation Process is "Eligible" if it is done using GCC, alone or
|
221 |
+
with other GPL-compatible software, or if it is done without using any
|
222 |
+
work based on GCC. For example, using non-GPL-compatible Software to
|
223 |
+
optimize any GCC intermediate representations would not qualify as an
|
224 |
+
Eligible Compilation Process.
|
225 |
+
|
226 |
+
1. Grant of Additional Permission.
|
227 |
+
|
228 |
+
You have permission to propagate a work of Target Code formed by
|
229 |
+
combining the Runtime Library with Independent Modules, even if such
|
230 |
+
propagation would otherwise violate the terms of GPLv3, provided that
|
231 |
+
all Target Code was generated by Eligible Compilation Processes. You
|
232 |
+
may then convey such a combination under terms of your choice,
|
233 |
+
consistent with the licensing of the Independent Modules.
|
234 |
+
|
235 |
+
2. No Weakening of GCC Copyleft.
|
236 |
+
|
237 |
+
The availability of this Exception does not imply any general
|
238 |
+
presumption that third-party software is unaffected by the copyleft
|
239 |
+
requirements of the license of GCC.
|
240 |
+
|
241 |
+
----
|
242 |
+
|
243 |
+
GNU GENERAL PUBLIC LICENSE
|
244 |
+
Version 3, 29 June 2007
|
245 |
+
|
246 |
+
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
247 |
+
Everyone is permitted to copy and distribute verbatim copies
|
248 |
+
of this license document, but changing it is not allowed.
|
249 |
+
|
250 |
+
Preamble
|
251 |
+
|
252 |
+
The GNU General Public License is a free, copyleft license for
|
253 |
+
software and other kinds of works.
|
254 |
+
|
255 |
+
The licenses for most software and other practical works are designed
|
256 |
+
to take away your freedom to share and change the works. By contrast,
|
257 |
+
the GNU General Public License is intended to guarantee your freedom to
|
258 |
+
share and change all versions of a program--to make sure it remains free
|
259 |
+
software for all its users. We, the Free Software Foundation, use the
|
260 |
+
GNU General Public License for most of our software; it applies also to
|
261 |
+
any other work released this way by its authors. You can apply it to
|
262 |
+
your programs, too.
|
263 |
+
|
264 |
+
When we speak of free software, we are referring to freedom, not
|
265 |
+
price. Our General Public Licenses are designed to make sure that you
|
266 |
+
have the freedom to distribute copies of free software (and charge for
|
267 |
+
them if you wish), that you receive source code or can get it if you
|
268 |
+
want it, that you can change the software or use pieces of it in new
|
269 |
+
free programs, and that you know you can do these things.
|
270 |
+
|
271 |
+
To protect your rights, we need to prevent others from denying you
|
272 |
+
these rights or asking you to surrender the rights. Therefore, you have
|
273 |
+
certain responsibilities if you distribute copies of the software, or if
|
274 |
+
you modify it: responsibilities to respect the freedom of others.
|
275 |
+
|
276 |
+
For example, if you distribute copies of such a program, whether
|
277 |
+
gratis or for a fee, you must pass on to the recipients the same
|
278 |
+
freedoms that you received. You must make sure that they, too, receive
|
279 |
+
or can get the source code. And you must show them these terms so they
|
280 |
+
know their rights.
|
281 |
+
|
282 |
+
Developers that use the GNU GPL protect your rights with two steps:
|
283 |
+
(1) assert copyright on the software, and (2) offer you this License
|
284 |
+
giving you legal permission to copy, distribute and/or modify it.
|
285 |
+
|
286 |
+
For the developers' and authors' protection, the GPL clearly explains
|
287 |
+
that there is no warranty for this free software. For both users' and
|
288 |
+
authors' sake, the GPL requires that modified versions be marked as
|
289 |
+
changed, so that their problems will not be attributed erroneously to
|
290 |
+
authors of previous versions.
|
291 |
+
|
292 |
+
Some devices are designed to deny users access to install or run
|
293 |
+
modified versions of the software inside them, although the manufacturer
|
294 |
+
can do so. This is fundamentally incompatible with the aim of
|
295 |
+
protecting users' freedom to change the software. The systematic
|
296 |
+
pattern of such abuse occurs in the area of products for individuals to
|
297 |
+
use, which is precisely where it is most unacceptable. Therefore, we
|
298 |
+
have designed this version of the GPL to prohibit the practice for those
|
299 |
+
products. If such problems arise substantially in other domains, we
|
300 |
+
stand ready to extend this provision to those domains in future versions
|
301 |
+
of the GPL, as needed to protect the freedom of users.
|
302 |
+
|
303 |
+
Finally, every program is threatened constantly by software patents.
|
304 |
+
States should not allow patents to restrict development and use of
|
305 |
+
software on general-purpose computers, but in those that do, we wish to
|
306 |
+
avoid the special danger that patents applied to a free program could
|
307 |
+
make it effectively proprietary. To prevent this, the GPL assures that
|
308 |
+
patents cannot be used to render the program non-free.
|
309 |
+
|
310 |
+
The precise terms and conditions for copying, distribution and
|
311 |
+
modification follow.
|
312 |
+
|
313 |
+
TERMS AND CONDITIONS
|
314 |
+
|
315 |
+
0. Definitions.
|
316 |
+
|
317 |
+
"This License" refers to version 3 of the GNU General Public License.
|
318 |
+
|
319 |
+
"Copyright" also means copyright-like laws that apply to other kinds of
|
320 |
+
works, such as semiconductor masks.
|
321 |
+
|
322 |
+
"The Program" refers to any copyrightable work licensed under this
|
323 |
+
License. Each licensee is addressed as "you". "Licensees" and
|
324 |
+
"recipients" may be individuals or organizations.
|
325 |
+
|
326 |
+
To "modify" a work means to copy from or adapt all or part of the work
|
327 |
+
in a fashion requiring copyright permission, other than the making of an
|
328 |
+
exact copy. The resulting work is called a "modified version" of the
|
329 |
+
earlier work or a work "based on" the earlier work.
|
330 |
+
|
331 |
+
A "covered work" means either the unmodified Program or a work based
|
332 |
+
on the Program.
|
333 |
+
|
334 |
+
To "propagate" a work means to do anything with it that, without
|
335 |
+
permission, would make you directly or secondarily liable for
|
336 |
+
infringement under applicable copyright law, except executing it on a
|
337 |
+
computer or modifying a private copy. Propagation includes copying,
|
338 |
+
distribution (with or without modification), making available to the
|
339 |
+
public, and in some countries other activities as well.
|
340 |
+
|
341 |
+
To "convey" a work means any kind of propagation that enables other
|
342 |
+
parties to make or receive copies. Mere interaction with a user through
|
343 |
+
a computer network, with no transfer of a copy, is not conveying.
|
344 |
+
|
345 |
+
An interactive user interface displays "Appropriate Legal Notices"
|
346 |
+
to the extent that it includes a convenient and prominently visible
|
347 |
+
feature that (1) displays an appropriate copyright notice, and (2)
|
348 |
+
tells the user that there is no warranty for the work (except to the
|
349 |
+
extent that warranties are provided), that licensees may convey the
|
350 |
+
work under this License, and how to view a copy of this License. If
|
351 |
+
the interface presents a list of user commands or options, such as a
|
352 |
+
menu, a prominent item in the list meets this criterion.
|
353 |
+
|
354 |
+
1. Source Code.
|
355 |
+
|
356 |
+
The "source code" for a work means the preferred form of the work
|
357 |
+
for making modifications to it. "Object code" means any non-source
|
358 |
+
form of a work.
|
359 |
+
|
360 |
+
A "Standard Interface" means an interface that either is an official
|
361 |
+
standard defined by a recognized standards body, or, in the case of
|
362 |
+
interfaces specified for a particular programming language, one that
|
363 |
+
is widely used among developers working in that language.
|
364 |
+
|
365 |
+
The "System Libraries" of an executable work include anything, other
|
366 |
+
than the work as a whole, that (a) is included in the normal form of
|
367 |
+
packaging a Major Component, but which is not part of that Major
|
368 |
+
Component, and (b) serves only to enable use of the work with that
|
369 |
+
Major Component, or to implement a Standard Interface for which an
|
370 |
+
implementation is available to the public in source code form. A
|
371 |
+
"Major Component", in this context, means a major essential component
|
372 |
+
(kernel, window system, and so on) of the specific operating system
|
373 |
+
(if any) on which the executable work runs, or a compiler used to
|
374 |
+
produce the work, or an object code interpreter used to run it.
|
375 |
+
|
376 |
+
The "Corresponding Source" for a work in object code form means all
|
377 |
+
the source code needed to generate, install, and (for an executable
|
378 |
+
work) run the object code and to modify the work, including scripts to
|
379 |
+
control those activities. However, it does not include the work's
|
380 |
+
System Libraries, or general-purpose tools or generally available free
|
381 |
+
programs which are used unmodified in performing those activities but
|
382 |
+
which are not part of the work. For example, Corresponding Source
|
383 |
+
includes interface definition files associated with source files for
|
384 |
+
the work, and the source code for shared libraries and dynamically
|
385 |
+
linked subprograms that the work is specifically designed to require,
|
386 |
+
such as by intimate data communication or control flow between those
|
387 |
+
subprograms and other parts of the work.
|
388 |
+
|
389 |
+
The Corresponding Source need not include anything that users
|
390 |
+
can regenerate automatically from other parts of the Corresponding
|
391 |
+
Source.
|
392 |
+
|
393 |
+
The Corresponding Source for a work in source code form is that
|
394 |
+
same work.
|
395 |
+
|
396 |
+
2. Basic Permissions.
|
397 |
+
|
398 |
+
All rights granted under this License are granted for the term of
|
399 |
+
copyright on the Program, and are irrevocable provided the stated
|
400 |
+
conditions are met. This License explicitly affirms your unlimited
|
401 |
+
permission to run the unmodified Program. The output from running a
|
402 |
+
covered work is covered by this License only if the output, given its
|
403 |
+
content, constitutes a covered work. This License acknowledges your
|
404 |
+
rights of fair use or other equivalent, as provided by copyright law.
|
405 |
+
|
406 |
+
You may make, run and propagate covered works that you do not
|
407 |
+
convey, without conditions so long as your license otherwise remains
|
408 |
+
in force. You may convey covered works to others for the sole purpose
|
409 |
+
of having them make modifications exclusively for you, or provide you
|
410 |
+
with facilities for running those works, provided that you comply with
|
411 |
+
the terms of this License in conveying all material for which you do
|
412 |
+
not control copyright. Those thus making or running the covered works
|
413 |
+
for you must do so exclusively on your behalf, under your direction
|
414 |
+
and control, on terms that prohibit them from making any copies of
|
415 |
+
your copyrighted material outside their relationship with you.
|
416 |
+
|
417 |
+
Conveying under any other circumstances is permitted solely under
|
418 |
+
the conditions stated below. Sublicensing is not allowed; section 10
|
419 |
+
makes it unnecessary.
|
420 |
+
|
421 |
+
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
422 |
+
|
423 |
+
No covered work shall be deemed part of an effective technological
|
424 |
+
measure under any applicable law fulfilling obligations under article
|
425 |
+
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
426 |
+
similar laws prohibiting or restricting circumvention of such
|
427 |
+
measures.
|
428 |
+
|
429 |
+
When you convey a covered work, you waive any legal power to forbid
|
430 |
+
circumvention of technological measures to the extent such circumvention
|
431 |
+
is effected by exercising rights under this License with respect to
|
432 |
+
the covered work, and you disclaim any intention to limit operation or
|
433 |
+
modification of the work as a means of enforcing, against the work's
|
434 |
+
users, your or third parties' legal rights to forbid circumvention of
|
435 |
+
technological measures.
|
436 |
+
|
437 |
+
4. Conveying Verbatim Copies.
|
438 |
+
|
439 |
+
You may convey verbatim copies of the Program's source code as you
|
440 |
+
receive it, in any medium, provided that you conspicuously and
|
441 |
+
appropriately publish on each copy an appropriate copyright notice;
|
442 |
+
keep intact all notices stating that this License and any
|
443 |
+
non-permissive terms added in accord with section 7 apply to the code;
|
444 |
+
keep intact all notices of the absence of any warranty; and give all
|
445 |
+
recipients a copy of this License along with the Program.
|
446 |
+
|
447 |
+
You may charge any price or no price for each copy that you convey,
|
448 |
+
and you may offer support or warranty protection for a fee.
|
449 |
+
|
450 |
+
5. Conveying Modified Source Versions.
|
451 |
+
|
452 |
+
You may convey a work based on the Program, or the modifications to
|
453 |
+
produce it from the Program, in the form of source code under the
|
454 |
+
terms of section 4, provided that you also meet all of these conditions:
|
455 |
+
|
456 |
+
a) The work must carry prominent notices stating that you modified
|
457 |
+
it, and giving a relevant date.
|
458 |
+
|
459 |
+
b) The work must carry prominent notices stating that it is
|
460 |
+
released under this License and any conditions added under section
|
461 |
+
7. This requirement modifies the requirement in section 4 to
|
462 |
+
"keep intact all notices".
|
463 |
+
|
464 |
+
c) You must license the entire work, as a whole, under this
|
465 |
+
License to anyone who comes into possession of a copy. This
|
466 |
+
License will therefore apply, along with any applicable section 7
|
467 |
+
additional terms, to the whole of the work, and all its parts,
|
468 |
+
regardless of how they are packaged. This License gives no
|
469 |
+
permission to license the work in any other way, but it does not
|
470 |
+
invalidate such permission if you have separately received it.
|
471 |
+
|
472 |
+
d) If the work has interactive user interfaces, each must display
|
473 |
+
Appropriate Legal Notices; however, if the Program has interactive
|
474 |
+
interfaces that do not display Appropriate Legal Notices, your
|
475 |
+
work need not make them do so.
|
476 |
+
|
477 |
+
A compilation of a covered work with other separate and independent
|
478 |
+
works, which are not by their nature extensions of the covered work,
|
479 |
+
and which are not combined with it such as to form a larger program,
|
480 |
+
in or on a volume of a storage or distribution medium, is called an
|
481 |
+
"aggregate" if the compilation and its resulting copyright are not
|
482 |
+
used to limit the access or legal rights of the compilation's users
|
483 |
+
beyond what the individual works permit. Inclusion of a covered work
|
484 |
+
in an aggregate does not cause this License to apply to the other
|
485 |
+
parts of the aggregate.
|
486 |
+
|
487 |
+
6. Conveying Non-Source Forms.
|
488 |
+
|
489 |
+
You may convey a covered work in object code form under the terms
|
490 |
+
of sections 4 and 5, provided that you also convey the
|
491 |
+
machine-readable Corresponding Source under the terms of this License,
|
492 |
+
in one of these ways:
|
493 |
+
|
494 |
+
a) Convey the object code in, or embodied in, a physical product
|
495 |
+
(including a physical distribution medium), accompanied by the
|
496 |
+
Corresponding Source fixed on a durable physical medium
|
497 |
+
customarily used for software interchange.
|
498 |
+
|
499 |
+
b) Convey the object code in, or embodied in, a physical product
|
500 |
+
(including a physical distribution medium), accompanied by a
|
501 |
+
written offer, valid for at least three years and valid for as
|
502 |
+
long as you offer spare parts or customer support for that product
|
503 |
+
model, to give anyone who possesses the object code either (1) a
|
504 |
+
copy of the Corresponding Source for all the software in the
|
505 |
+
product that is covered by this License, on a durable physical
|
506 |
+
medium customarily used for software interchange, for a price no
|
507 |
+
more than your reasonable cost of physically performing this
|
508 |
+
conveying of source, or (2) access to copy the
|
509 |
+
Corresponding Source from a network server at no charge.
|
510 |
+
|
511 |
+
c) Convey individual copies of the object code with a copy of the
|
512 |
+
written offer to provide the Corresponding Source. This
|
513 |
+
alternative is allowed only occasionally and noncommercially, and
|
514 |
+
only if you received the object code with such an offer, in accord
|
515 |
+
with subsection 6b.
|
516 |
+
|
517 |
+
d) Convey the object code by offering access from a designated
|
518 |
+
place (gratis or for a charge), and offer equivalent access to the
|
519 |
+
Corresponding Source in the same way through the same place at no
|
520 |
+
further charge. You need not require recipients to copy the
|
521 |
+
Corresponding Source along with the object code. If the place to
|
522 |
+
copy the object code is a network server, the Corresponding Source
|
523 |
+
may be on a different server (operated by you or a third party)
|
524 |
+
that supports equivalent copying facilities, provided you maintain
|
525 |
+
clear directions next to the object code saying where to find the
|
526 |
+
Corresponding Source. Regardless of what server hosts the
|
527 |
+
Corresponding Source, you remain obligated to ensure that it is
|
528 |
+
available for as long as needed to satisfy these requirements.
|
529 |
+
|
530 |
+
e) Convey the object code using peer-to-peer transmission, provided
|
531 |
+
you inform other peers where the object code and Corresponding
|
532 |
+
Source of the work are being offered to the general public at no
|
533 |
+
charge under subsection 6d.
|
534 |
+
|
535 |
+
A separable portion of the object code, whose source code is excluded
|
536 |
+
from the Corresponding Source as a System Library, need not be
|
537 |
+
included in conveying the object code work.
|
538 |
+
|
539 |
+
A "User Product" is either (1) a "consumer product", which means any
|
540 |
+
tangible personal property which is normally used for personal, family,
|
541 |
+
or household purposes, or (2) anything designed or sold for incorporation
|
542 |
+
into a dwelling. In determining whether a product is a consumer product,
|
543 |
+
doubtful cases shall be resolved in favor of coverage. For a particular
|
544 |
+
product received by a particular user, "normally used" refers to a
|
545 |
+
typical or common use of that class of product, regardless of the status
|
546 |
+
of the particular user or of the way in which the particular user
|
547 |
+
actually uses, or expects or is expected to use, the product. A product
|
548 |
+
is a consumer product regardless of whether the product has substantial
|
549 |
+
commercial, industrial or non-consumer uses, unless such uses represent
|
550 |
+
the only significant mode of use of the product.
|
551 |
+
|
552 |
+
"Installation Information" for a User Product means any methods,
|
553 |
+
procedures, authorization keys, or other information required to install
|
554 |
+
and execute modified versions of a covered work in that User Product from
|
555 |
+
a modified version of its Corresponding Source. The information must
|
556 |
+
suffice to ensure that the continued functioning of the modified object
|
557 |
+
code is in no case prevented or interfered with solely because
|
558 |
+
modification has been made.
|
559 |
+
|
560 |
+
If you convey an object code work under this section in, or with, or
|
561 |
+
specifically for use in, a User Product, and the conveying occurs as
|
562 |
+
part of a transaction in which the right of possession and use of the
|
563 |
+
User Product is transferred to the recipient in perpetuity or for a
|
564 |
+
fixed term (regardless of how the transaction is characterized), the
|
565 |
+
Corresponding Source conveyed under this section must be accompanied
|
566 |
+
by the Installation Information. But this requirement does not apply
|
567 |
+
if neither you nor any third party retains the ability to install
|
568 |
+
modified object code on the User Product (for example, the work has
|
569 |
+
been installed in ROM).
|
570 |
+
|
571 |
+
The requirement to provide Installation Information does not include a
|
572 |
+
requirement to continue to provide support service, warranty, or updates
|
573 |
+
for a work that has been modified or installed by the recipient, or for
|
574 |
+
the User Product in which it has been modified or installed. Access to a
|
575 |
+
network may be denied when the modification itself materially and
|
576 |
+
adversely affects the operation of the network or violates the rules and
|
577 |
+
protocols for communication across the network.
|
578 |
+
|
579 |
+
Corresponding Source conveyed, and Installation Information provided,
|
580 |
+
in accord with this section must be in a format that is publicly
|
581 |
+
documented (and with an implementation available to the public in
|
582 |
+
source code form), and must require no special password or key for
|
583 |
+
unpacking, reading or copying.
|
584 |
+
|
585 |
+
7. Additional Terms.
|
586 |
+
|
587 |
+
"Additional permissions" are terms that supplement the terms of this
|
588 |
+
License by making exceptions from one or more of its conditions.
|
589 |
+
Additional permissions that are applicable to the entire Program shall
|
590 |
+
be treated as though they were included in this License, to the extent
|
591 |
+
that they are valid under applicable law. If additional permissions
|
592 |
+
apply only to part of the Program, that part may be used separately
|
593 |
+
under those permissions, but the entire Program remains governed by
|
594 |
+
this License without regard to the additional permissions.
|
595 |
+
|
596 |
+
When you convey a copy of a covered work, you may at your option
|
597 |
+
remove any additional permissions from that copy, or from any part of
|
598 |
+
it. (Additional permissions may be written to require their own
|
599 |
+
removal in certain cases when you modify the work.) You may place
|
600 |
+
additional permissions on material, added by you to a covered work,
|
601 |
+
for which you have or can give appropriate copyright permission.
|
602 |
+
|
603 |
+
Notwithstanding any other provision of this License, for material you
|
604 |
+
add to a covered work, you may (if authorized by the copyright holders of
|
605 |
+
that material) supplement the terms of this License with terms:
|
606 |
+
|
607 |
+
a) Disclaiming warranty or limiting liability differently from the
|
608 |
+
terms of sections 15 and 16 of this License; or
|
609 |
+
|
610 |
+
b) Requiring preservation of specified reasonable legal notices or
|
611 |
+
author attributions in that material or in the Appropriate Legal
|
612 |
+
Notices displayed by works containing it; or
|
613 |
+
|
614 |
+
c) Prohibiting misrepresentation of the origin of that material, or
|
615 |
+
requiring that modified versions of such material be marked in
|
616 |
+
reasonable ways as different from the original version; or
|
617 |
+
|
618 |
+
d) Limiting the use for publicity purposes of names of licensors or
|
619 |
+
authors of the material; or
|
620 |
+
|
621 |
+
e) Declining to grant rights under trademark law for use of some
|
622 |
+
trade names, trademarks, or service marks; or
|
623 |
+
|
624 |
+
f) Requiring indemnification of licensors and authors of that
|
625 |
+
material by anyone who conveys the material (or modified versions of
|
626 |
+
it) with contractual assumptions of liability to the recipient, for
|
627 |
+
any liability that these contractual assumptions directly impose on
|
628 |
+
those licensors and authors.
|
629 |
+
|
630 |
+
All other non-permissive additional terms are considered "further
|
631 |
+
restrictions" within the meaning of section 10. If the Program as you
|
632 |
+
received it, or any part of it, contains a notice stating that it is
|
633 |
+
governed by this License along with a term that is a further
|
634 |
+
restriction, you may remove that term. If a license document contains
|
635 |
+
a further restriction but permits relicensing or conveying under this
|
636 |
+
License, you may add to a covered work material governed by the terms
|
637 |
+
of that license document, provided that the further restriction does
|
638 |
+
not survive such relicensing or conveying.
|
639 |
+
|
640 |
+
If you add terms to a covered work in accord with this section, you
|
641 |
+
must place, in the relevant source files, a statement of the
|
642 |
+
additional terms that apply to those files, or a notice indicating
|
643 |
+
where to find the applicable terms.
|
644 |
+
|
645 |
+
Additional terms, permissive or non-permissive, may be stated in the
|
646 |
+
form of a separately written license, or stated as exceptions;
|
647 |
+
the above requirements apply either way.
|
648 |
+
|
649 |
+
8. Termination.
|
650 |
+
|
651 |
+
You may not propagate or modify a covered work except as expressly
|
652 |
+
provided under this License. Any attempt otherwise to propagate or
|
653 |
+
modify it is void, and will automatically terminate your rights under
|
654 |
+
this License (including any patent licenses granted under the third
|
655 |
+
paragraph of section 11).
|
656 |
+
|
657 |
+
However, if you cease all violation of this License, then your
|
658 |
+
license from a particular copyright holder is reinstated (a)
|
659 |
+
provisionally, unless and until the copyright holder explicitly and
|
660 |
+
finally terminates your license, and (b) permanently, if the copyright
|
661 |
+
holder fails to notify you of the violation by some reasonable means
|
662 |
+
prior to 60 days after the cessation.
|
663 |
+
|
664 |
+
Moreover, your license from a particular copyright holder is
|
665 |
+
reinstated permanently if the copyright holder notifies you of the
|
666 |
+
violation by some reasonable means, this is the first time you have
|
667 |
+
received notice of violation of this License (for any work) from that
|
668 |
+
copyright holder, and you cure the violation prior to 30 days after
|
669 |
+
your receipt of the notice.
|
670 |
+
|
671 |
+
Termination of your rights under this section does not terminate the
|
672 |
+
licenses of parties who have received copies or rights from you under
|
673 |
+
this License. If your rights have been terminated and not permanently
|
674 |
+
reinstated, you do not qualify to receive new licenses for the same
|
675 |
+
material under section 10.
|
676 |
+
|
677 |
+
9. Acceptance Not Required for Having Copies.
|
678 |
+
|
679 |
+
You are not required to accept this License in order to receive or
|
680 |
+
run a copy of the Program. Ancillary propagation of a covered work
|
681 |
+
occurring solely as a consequence of using peer-to-peer transmission
|
682 |
+
to receive a copy likewise does not require acceptance. However,
|
683 |
+
nothing other than this License grants you permission to propagate or
|
684 |
+
modify any covered work. These actions infringe copyright if you do
|
685 |
+
not accept this License. Therefore, by modifying or propagating a
|
686 |
+
covered work, you indicate your acceptance of this License to do so.
|
687 |
+
|
688 |
+
10. Automatic Licensing of Downstream Recipients.
|
689 |
+
|
690 |
+
Each time you convey a covered work, the recipient automatically
|
691 |
+
receives a license from the original licensors, to run, modify and
|
692 |
+
propagate that work, subject to this License. You are not responsible
|
693 |
+
for enforcing compliance by third parties with this License.
|
694 |
+
|
695 |
+
An "entity transaction" is a transaction transferring control of an
|
696 |
+
organization, or substantially all assets of one, or subdividing an
|
697 |
+
organization, or merging organizations. If propagation of a covered
|
698 |
+
work results from an entity transaction, each party to that
|
699 |
+
transaction who receives a copy of the work also receives whatever
|
700 |
+
licenses to the work the party's predecessor in interest had or could
|
701 |
+
give under the previous paragraph, plus a right to possession of the
|
702 |
+
Corresponding Source of the work from the predecessor in interest, if
|
703 |
+
the predecessor has it or can get it with reasonable efforts.
|
704 |
+
|
705 |
+
You may not impose any further restrictions on the exercise of the
|
706 |
+
rights granted or affirmed under this License. For example, you may
|
707 |
+
not impose a license fee, royalty, or other charge for exercise of
|
708 |
+
rights granted under this License, and you may not initiate litigation
|
709 |
+
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
710 |
+
any patent claim is infringed by making, using, selling, offering for
|
711 |
+
sale, or importing the Program or any portion of it.
|
712 |
+
|
713 |
+
11. Patents.
|
714 |
+
|
715 |
+
A "contributor" is a copyright holder who authorizes use under this
|
716 |
+
License of the Program or a work on which the Program is based. The
|
717 |
+
work thus licensed is called the contributor's "contributor version".
|
718 |
+
|
719 |
+
A contributor's "essential patent claims" are all patent claims
|
720 |
+
owned or controlled by the contributor, whether already acquired or
|
721 |
+
hereafter acquired, that would be infringed by some manner, permitted
|
722 |
+
by this License, of making, using, or selling its contributor version,
|
723 |
+
but do not include claims that would be infringed only as a
|
724 |
+
consequence of further modification of the contributor version. For
|
725 |
+
purposes of this definition, "control" includes the right to grant
|
726 |
+
patent sublicenses in a manner consistent with the requirements of
|
727 |
+
this License.
|
728 |
+
|
729 |
+
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
730 |
+
patent license under the contributor's essential patent claims, to
|
731 |
+
make, use, sell, offer for sale, import and otherwise run, modify and
|
732 |
+
propagate the contents of its contributor version.
|
733 |
+
|
734 |
+
In the following three paragraphs, a "patent license" is any express
|
735 |
+
agreement or commitment, however denominated, not to enforce a patent
|
736 |
+
(such as an express permission to practice a patent or covenant not to
|
737 |
+
sue for patent infringement). To "grant" such a patent license to a
|
738 |
+
party means to make such an agreement or commitment not to enforce a
|
739 |
+
patent against the party.
|
740 |
+
|
741 |
+
If you convey a covered work, knowingly relying on a patent license,
|
742 |
+
and the Corresponding Source of the work is not available for anyone
|
743 |
+
to copy, free of charge and under the terms of this License, through a
|
744 |
+
publicly available network server or other readily accessible means,
|
745 |
+
then you must either (1) cause the Corresponding Source to be so
|
746 |
+
available, or (2) arrange to deprive yourself of the benefit of the
|
747 |
+
patent license for this particular work, or (3) arrange, in a manner
|
748 |
+
consistent with the requirements of this License, to extend the patent
|
749 |
+
license to downstream recipients. "Knowingly relying" means you have
|
750 |
+
actual knowledge that, but for the patent license, your conveying the
|
751 |
+
covered work in a country, or your recipient's use of the covered work
|
752 |
+
in a country, would infringe one or more identifiable patents in that
|
753 |
+
country that you have reason to believe are valid.
|
754 |
+
|
755 |
+
If, pursuant to or in connection with a single transaction or
|
756 |
+
arrangement, you convey, or propagate by procuring conveyance of, a
|
757 |
+
covered work, and grant a patent license to some of the parties
|
758 |
+
receiving the covered work authorizing them to use, propagate, modify
|
759 |
+
or convey a specific copy of the covered work, then the patent license
|
760 |
+
you grant is automatically extended to all recipients of the covered
|
761 |
+
work and works based on it.
|
762 |
+
|
763 |
+
A patent license is "discriminatory" if it does not include within
|
764 |
+
the scope of its coverage, prohibits the exercise of, or is
|
765 |
+
conditioned on the non-exercise of one or more of the rights that are
|
766 |
+
specifically granted under this License. You may not convey a covered
|
767 |
+
work if you are a party to an arrangement with a third party that is
|
768 |
+
in the business of distributing software, under which you make payment
|
769 |
+
to the third party based on the extent of your activity of conveying
|
770 |
+
the work, and under which the third party grants, to any of the
|
771 |
+
parties who would receive the covered work from you, a discriminatory
|
772 |
+
patent license (a) in connection with copies of the covered work
|
773 |
+
conveyed by you (or copies made from those copies), or (b) primarily
|
774 |
+
for and in connection with specific products or compilations that
|
775 |
+
contain the covered work, unless you entered into that arrangement,
|
776 |
+
or that patent license was granted, prior to 28 March 2007.
|
777 |
+
|
778 |
+
Nothing in this License shall be construed as excluding or limiting
|
779 |
+
any implied license or other defenses to infringement that may
|
780 |
+
otherwise be available to you under applicable patent law.
|
781 |
+
|
782 |
+
12. No Surrender of Others' Freedom.
|
783 |
+
|
784 |
+
If conditions are imposed on you (whether by court order, agreement or
|
785 |
+
otherwise) that contradict the conditions of this License, they do not
|
786 |
+
excuse you from the conditions of this License. If you cannot convey a
|
787 |
+
covered work so as to satisfy simultaneously your obligations under this
|
788 |
+
License and any other pertinent obligations, then as a consequence you may
|
789 |
+
not convey it at all. For example, if you agree to terms that obligate you
|
790 |
+
to collect a royalty for further conveying from those to whom you convey
|
791 |
+
the Program, the only way you could satisfy both those terms and this
|
792 |
+
License would be to refrain entirely from conveying the Program.
|
793 |
+
|
794 |
+
13. Use with the GNU Affero General Public License.
|
795 |
+
|
796 |
+
Notwithstanding any other provision of this License, you have
|
797 |
+
permission to link or combine any covered work with a work licensed
|
798 |
+
under version 3 of the GNU Affero General Public License into a single
|
799 |
+
combined work, and to convey the resulting work. The terms of this
|
800 |
+
License will continue to apply to the part which is the covered work,
|
801 |
+
but the special requirements of the GNU Affero General Public License,
|
802 |
+
section 13, concerning interaction through a network will apply to the
|
803 |
+
combination as such.
|
804 |
+
|
805 |
+
14. Revised Versions of this License.
|
806 |
+
|
807 |
+
The Free Software Foundation may publish revised and/or new versions of
|
808 |
+
the GNU General Public License from time to time. Such new versions will
|
809 |
+
be similar in spirit to the present version, but may differ in detail to
|
810 |
+
address new problems or concerns.
|
811 |
+
|
812 |
+
Each version is given a distinguishing version number. If the
|
813 |
+
Program specifies that a certain numbered version of the GNU General
|
814 |
+
Public License "or any later version" applies to it, you have the
|
815 |
+
option of following the terms and conditions either of that numbered
|
816 |
+
version or of any later version published by the Free Software
|
817 |
+
Foundation. If the Program does not specify a version number of the
|
818 |
+
GNU General Public License, you may choose any version ever published
|
819 |
+
by the Free Software Foundation.
|
820 |
+
|
821 |
+
If the Program specifies that a proxy can decide which future
|
822 |
+
versions of the GNU General Public License can be used, that proxy's
|
823 |
+
public statement of acceptance of a version permanently authorizes you
|
824 |
+
to choose that version for the Program.
|
825 |
+
|
826 |
+
Later license versions may give you additional or different
|
827 |
+
permissions. However, no additional obligations are imposed on any
|
828 |
+
author or copyright holder as a result of your choosing to follow a
|
829 |
+
later version.
|
830 |
+
|
831 |
+
15. Disclaimer of Warranty.
|
832 |
+
|
833 |
+
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
834 |
+
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
835 |
+
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
836 |
+
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
837 |
+
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
838 |
+
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
839 |
+
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
840 |
+
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
841 |
+
|
842 |
+
16. Limitation of Liability.
|
843 |
+
|
844 |
+
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
845 |
+
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
846 |
+
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
847 |
+
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
848 |
+
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
849 |
+
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
850 |
+
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
851 |
+
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
852 |
+
SUCH DAMAGES.
|
853 |
+
|
854 |
+
17. Interpretation of Sections 15 and 16.
|
855 |
+
|
856 |
+
If the disclaimer of warranty and limitation of liability provided
|
857 |
+
above cannot be given local legal effect according to their terms,
|
858 |
+
reviewing courts shall apply local law that most closely approximates
|
859 |
+
an absolute waiver of all civil liability in connection with the
|
860 |
+
Program, unless a warranty or assumption of liability accompanies a
|
861 |
+
copy of the Program in return for a fee.
|
862 |
+
|
863 |
+
END OF TERMS AND CONDITIONS
|
864 |
+
|
865 |
+
How to Apply These Terms to Your New Programs
|
866 |
+
|
867 |
+
If you develop a new program, and you want it to be of the greatest
|
868 |
+
possible use to the public, the best way to achieve this is to make it
|
869 |
+
free software which everyone can redistribute and change under these terms.
|
870 |
+
|
871 |
+
To do so, attach the following notices to the program. It is safest
|
872 |
+
to attach them to the start of each source file to most effectively
|
873 |
+
state the exclusion of warranty; and each file should have at least
|
874 |
+
the "copyright" line and a pointer to where the full notice is found.
|
875 |
+
|
876 |
+
<one line to give the program's name and a brief idea of what it does.>
|
877 |
+
Copyright (C) <year> <name of author>
|
878 |
+
|
879 |
+
This program is free software: you can redistribute it and/or modify
|
880 |
+
it under the terms of the GNU General Public License as published by
|
881 |
+
the Free Software Foundation, either version 3 of the License, or
|
882 |
+
(at your option) any later version.
|
883 |
+
|
884 |
+
This program is distributed in the hope that it will be useful,
|
885 |
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
886 |
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
887 |
+
GNU General Public License for more details.
|
888 |
+
|
889 |
+
You should have received a copy of the GNU General Public License
|
890 |
+
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
891 |
+
|
892 |
+
Also add information on how to contact you by electronic and paper mail.
|
893 |
+
|
894 |
+
If the program does terminal interaction, make it output a short
|
895 |
+
notice like this when it starts in an interactive mode:
|
896 |
+
|
897 |
+
<program> Copyright (C) <year> <name of author>
|
898 |
+
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
899 |
+
This is free software, and you are welcome to redistribute it
|
900 |
+
under certain conditions; type `show c' for details.
|
901 |
+
|
902 |
+
The hypothetical commands `show w' and `show c' should show the appropriate
|
903 |
+
parts of the General Public License. Of course, your program's commands
|
904 |
+
might be different; for a GUI interface, you would use an "about box".
|
905 |
+
|
906 |
+
You should also get your employer (if you work as a programmer) or school,
|
907 |
+
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
908 |
+
For more information on this, and how to apply and follow the GNU GPL, see
|
909 |
+
<http://www.gnu.org/licenses/>.
|
910 |
+
|
911 |
+
The GNU General Public License does not permit incorporating your program
|
912 |
+
into proprietary programs. If your program is a subroutine library, you
|
913 |
+
may consider it more useful to permit linking proprietary applications with
|
914 |
+
the library. If this is what you want to do, use the GNU Lesser General
|
915 |
+
Public License instead of this License. But first, please read
|
916 |
+
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
917 |
+
|
918 |
+
|
919 |
+
Name: libquadmath
|
920 |
+
Files: scipy.libs/libquadmath*.so
|
921 |
+
Description: dynamically linked to files compiled with gcc
|
922 |
+
Availability: https://gcc.gnu.org/git/?p=gcc.git;a=tree;f=libquadmath
|
923 |
+
License: LGPL-2.1-or-later
|
924 |
+
|
925 |
+
GCC Quad-Precision Math Library
|
926 |
+
Copyright (C) 2010-2019 Free Software Foundation, Inc.
|
927 |
+
Written by Francois-Xavier Coudert <[email protected]>
|
928 |
+
|
929 |
+
This file is part of the libquadmath library.
|
930 |
+
Libquadmath is free software; you can redistribute it and/or
|
931 |
+
modify it under the terms of the GNU Library General Public
|
932 |
+
License as published by the Free Software Foundation; either
|
933 |
+
version 2.1 of the License, or (at your option) any later version.
|
934 |
+
|
935 |
+
Libquadmath is distributed in the hope that it will be useful,
|
936 |
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
937 |
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
938 |
+
Lesser General Public License for more details.
|
939 |
+
https://www.gnu.org/licenses/old-licenses/lgpl-2.1.html
|
940 |
+
Classifier: Development Status :: 5 - Production/Stable
|
941 |
+
Classifier: Intended Audience :: Science/Research
|
942 |
+
Classifier: Intended Audience :: Developers
|
943 |
+
Classifier: License :: OSI Approved :: BSD License
|
944 |
+
Classifier: Programming Language :: C
|
945 |
+
Classifier: Programming Language :: Python
|
946 |
+
Classifier: Programming Language :: Python :: 3
|
947 |
+
Classifier: Programming Language :: Python :: 3.9
|
948 |
+
Classifier: Programming Language :: Python :: 3.10
|
949 |
+
Classifier: Programming Language :: Python :: 3.11
|
950 |
+
Classifier: Programming Language :: Python :: 3.12
|
951 |
+
Classifier: Topic :: Software Development :: Libraries
|
952 |
+
Classifier: Topic :: Scientific/Engineering
|
953 |
+
Classifier: Operating System :: Microsoft :: Windows
|
954 |
+
Classifier: Operating System :: POSIX :: Linux
|
955 |
+
Classifier: Operating System :: POSIX
|
956 |
+
Classifier: Operating System :: Unix
|
957 |
+
Classifier: Operating System :: MacOS
|
958 |
+
Project-URL: Homepage, https://scipy.org/
|
959 |
+
Project-URL: Documentation, https://docs.scipy.org/doc/scipy/
|
960 |
+
Project-URL: Source, https://github.com/scipy/scipy
|
961 |
+
Project-URL: Download, https://github.com/scipy/scipy/releases
|
962 |
+
Project-URL: Tracker, https://github.com/scipy/scipy/issues
|
963 |
+
Requires-Python: >=3.9
|
964 |
+
Requires-Dist: numpy<2.3,>=1.22.4
|
965 |
+
Requires-Dist: pytest; extra == "test"
|
966 |
+
Requires-Dist: pytest-cov; extra == "test"
|
967 |
+
Requires-Dist: pytest-timeout; extra == "test"
|
968 |
+
Requires-Dist: pytest-xdist; extra == "test"
|
969 |
+
Requires-Dist: asv; extra == "test"
|
970 |
+
Requires-Dist: mpmath; extra == "test"
|
971 |
+
Requires-Dist: gmpy2; extra == "test"
|
972 |
+
Requires-Dist: threadpoolctl; extra == "test"
|
973 |
+
Requires-Dist: scikit-umfpack; extra == "test"
|
974 |
+
Requires-Dist: pooch; extra == "test"
|
975 |
+
Requires-Dist: hypothesis>=6.30; extra == "test"
|
976 |
+
Requires-Dist: array-api-strict; extra == "test"
|
977 |
+
Requires-Dist: sphinx>=5.0.0; extra == "doc"
|
978 |
+
Requires-Dist: pydata-sphinx-theme>=0.15.2; extra == "doc"
|
979 |
+
Requires-Dist: sphinx-design>=0.4.0; extra == "doc"
|
980 |
+
Requires-Dist: matplotlib>=3.5; extra == "doc"
|
981 |
+
Requires-Dist: numpydoc; extra == "doc"
|
982 |
+
Requires-Dist: jupytext; extra == "doc"
|
983 |
+
Requires-Dist: myst-nb; extra == "doc"
|
984 |
+
Requires-Dist: pooch; extra == "doc"
|
985 |
+
Requires-Dist: jupyterlite-sphinx>=0.12.0; extra == "doc"
|
986 |
+
Requires-Dist: jupyterlite-pyodide-kernel; extra == "doc"
|
987 |
+
Requires-Dist: mypy; extra == "dev"
|
988 |
+
Requires-Dist: typing_extensions; extra == "dev"
|
989 |
+
Requires-Dist: types-psutil; extra == "dev"
|
990 |
+
Requires-Dist: pycodestyle; extra == "dev"
|
991 |
+
Requires-Dist: ruff; extra == "dev"
|
992 |
+
Requires-Dist: cython-lint>=0.12.2; extra == "dev"
|
993 |
+
Requires-Dist: rich-click; extra == "dev"
|
994 |
+
Requires-Dist: doit>=0.36.0; extra == "dev"
|
995 |
+
Requires-Dist: pydevtool; extra == "dev"
|
996 |
+
Provides-Extra: test
|
997 |
+
Provides-Extra: doc
|
998 |
+
Provides-Extra: dev
|
999 |
+
Description-Content-Type: text/x-rst
|
1000 |
+
|
1001 |
+
.. image:: https://raw.githubusercontent.com/scipy/scipy/main/doc/source/_static/logo.svg
|
1002 |
+
:target: https://scipy.org
|
1003 |
+
:width: 110
|
1004 |
+
:height: 110
|
1005 |
+
:align: left
|
1006 |
+
|
1007 |
+
.. image:: https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A
|
1008 |
+
:target: https://numfocus.org
|
1009 |
+
|
1010 |
+
.. image:: https://img.shields.io/pypi/dm/scipy.svg?label=Pypi%20downloads
|
1011 |
+
:target: https://pypi.org/project/scipy/
|
1012 |
+
|
1013 |
+
.. image:: https://img.shields.io/conda/dn/conda-forge/scipy.svg?label=Conda%20downloads
|
1014 |
+
:target: https://anaconda.org/conda-forge/scipy
|
1015 |
+
|
1016 |
+
.. image:: https://img.shields.io/badge/stackoverflow-Ask%20questions-blue.svg
|
1017 |
+
:target: https://stackoverflow.com/questions/tagged/scipy
|
1018 |
+
|
1019 |
+
.. image:: https://img.shields.io/badge/DOI-10.1038%2Fs41592--019--0686--2-blue
|
1020 |
+
:target: https://www.nature.com/articles/s41592-019-0686-2
|
1021 |
+
|
1022 |
+
SciPy (pronounced "Sigh Pie") is an open-source software for mathematics,
|
1023 |
+
science, and engineering. It includes modules for statistics, optimization,
|
1024 |
+
integration, linear algebra, Fourier transforms, signal and image processing,
|
1025 |
+
ODE solvers, and more.
|
1026 |
+
|
1027 |
+
- **Website:** https://scipy.org
|
1028 |
+
- **Documentation:** https://docs.scipy.org/doc/scipy/
|
1029 |
+
- **Development version of the documentation:** https://scipy.github.io/devdocs
|
1030 |
+
- **Mailing list:** https://mail.python.org/mailman3/lists/scipy-dev.python.org/
|
1031 |
+
- **Source code:** https://github.com/scipy/scipy
|
1032 |
+
- **Contributing:** https://scipy.github.io/devdocs/dev/index.html
|
1033 |
+
- **Bug reports:** https://github.com/scipy/scipy/issues
|
1034 |
+
- **Code of Conduct:** https://docs.scipy.org/doc/scipy/dev/conduct/code_of_conduct.html
|
1035 |
+
- **Report a security vulnerability:** https://tidelift.com/docs/security
|
1036 |
+
- **Citing in your work:** https://www.scipy.org/citing-scipy/
|
1037 |
+
|
1038 |
+
SciPy is built to work with
|
1039 |
+
NumPy arrays, and provides many user-friendly and efficient numerical routines,
|
1040 |
+
such as routines for numerical integration and optimization. Together, they
|
1041 |
+
run on all popular operating systems, are quick to install, and are free of
|
1042 |
+
charge. NumPy and SciPy are easy to use, but powerful enough to be depended
|
1043 |
+
upon by some of the world's leading scientists and engineers. If you need to
|
1044 |
+
manipulate numbers on a computer and display or publish the results, give
|
1045 |
+
SciPy a try!
|
1046 |
+
|
1047 |
+
For the installation instructions, see `our install
|
1048 |
+
guide <https://scipy.org/install/>`__.
|
1049 |
+
|
1050 |
+
|
1051 |
+
Call for Contributions
|
1052 |
+
----------------------
|
1053 |
+
|
1054 |
+
We appreciate and welcome contributions. Small improvements or fixes are always appreciated; issues labeled as "good
|
1055 |
+
first issue" may be a good starting point. Have a look at `our contributing
|
1056 |
+
guide <https://scipy.github.io/devdocs/dev/index.html>`__.
|
1057 |
+
|
1058 |
+
Writing code isn’t the only way to contribute to SciPy. You can also:
|
1059 |
+
|
1060 |
+
- review pull requests
|
1061 |
+
- triage issues
|
1062 |
+
- develop tutorials, presentations, and other educational materials
|
1063 |
+
- maintain and improve `our website <https://github.com/scipy/scipy.org>`__
|
1064 |
+
- develop graphic design for our brand assets and promotional materials
|
1065 |
+
- help with outreach and onboard new contributors
|
1066 |
+
- write grant proposals and help with other fundraising efforts
|
1067 |
+
|
1068 |
+
If you’re unsure where to start or how your skills fit in, reach out! You can
|
1069 |
+
ask on the mailing list or here, on GitHub, by leaving a
|
1070 |
+
comment on a relevant issue that is already open.
|
1071 |
+
|
1072 |
+
If you are new to contributing to open source, `this
|
1073 |
+
guide <https://opensource.guide/how-to-contribute/>`__ helps explain why, what,
|
1074 |
+
and how to get involved.
|
env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/RECORD
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/scipy-1.13.0.dist-info/WHEEL
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Wheel-Version: 1.0
|
2 |
+
Generator: meson
|
3 |
+
Root-Is-Purelib: false
|
4 |
+
Tag: cp310-cp310-manylinux_2_17_x86_64
|
5 |
+
Tag: cp310-cp310-manylinux2014_x86_64
|
6 |
+
|
env-llmeval/lib/python3.10/site-packages/torch/_C.cpython-310-x86_64-linux-gnu.so
ADDED
Binary file (37.9 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_VF.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This makes the functions in torch._C._VariableFunctions available as
|
3 |
+
torch._VF.<funcname>
|
4 |
+
without mypy being able to find them.
|
5 |
+
|
6 |
+
A subset of those functions are mapped to ATen functions in
|
7 |
+
torch/jit/_builtins.py
|
8 |
+
|
9 |
+
See https://github.com/pytorch/pytorch/issues/21478 for the reason for
|
10 |
+
introducing torch._VF
|
11 |
+
|
12 |
+
"""
|
13 |
+
import sys
|
14 |
+
import types
|
15 |
+
|
16 |
+
import torch
|
17 |
+
|
18 |
+
|
19 |
+
class VFModule(types.ModuleType):
|
20 |
+
vf: types.ModuleType
|
21 |
+
|
22 |
+
def __init__(self, name):
|
23 |
+
super().__init__(name)
|
24 |
+
self.vf = torch._C._VariableFunctions
|
25 |
+
|
26 |
+
def __getattr__(self, attr):
|
27 |
+
return getattr(self.vf, attr)
|
28 |
+
|
29 |
+
|
30 |
+
sys.modules[__name__] = VFModule(__name__)
|
env-llmeval/lib/python3.10/site-packages/torch/_VF.pyi
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/torch/__config__.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
|
3 |
+
|
4 |
+
def show():
|
5 |
+
"""
|
6 |
+
Return a human-readable string with descriptions of the
|
7 |
+
configuration of PyTorch.
|
8 |
+
"""
|
9 |
+
return torch._C._show_config()
|
10 |
+
|
11 |
+
|
12 |
+
# TODO: In principle, we could provide more structured version/config
|
13 |
+
# information here. For now only CXX_FLAGS is exposed, as Timer
|
14 |
+
# uses them.
|
15 |
+
def _cxx_flags():
|
16 |
+
"""Returns the CXX_FLAGS used when building PyTorch."""
|
17 |
+
return torch._C._cxx_flags()
|
18 |
+
|
19 |
+
|
20 |
+
def parallel_info():
|
21 |
+
r"""Returns detailed string with parallelization settings"""
|
22 |
+
return torch._C._parallel_info()
|
env-llmeval/lib/python3.10/site-packages/torch/__future__.py
ADDED
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
This global flag controls whether to assign new tensors to the parameters
|
3 |
+
instead of changing the existing parameters in-place when converting an `nn.Module`
|
4 |
+
using the following methods:
|
5 |
+
1. `module.cuda()` / `.cpu()` (for moving `module` between devices)
|
6 |
+
2. `module.float()` / `.double()` / `.half()` (for converting `module` to a different dtype)
|
7 |
+
3. `module.to()` / `.type()` (for changing `module`'s device or dtype)
|
8 |
+
4. `module._apply(fn)` (for generic functions applied to `module`)
|
9 |
+
|
10 |
+
Default: False
|
11 |
+
"""
|
12 |
+
_overwrite_module_params_on_conversion = False
|
13 |
+
|
14 |
+
|
15 |
+
def set_overwrite_module_params_on_conversion(value):
|
16 |
+
global _overwrite_module_params_on_conversion
|
17 |
+
_overwrite_module_params_on_conversion = value
|
18 |
+
|
19 |
+
|
20 |
+
def get_overwrite_module_params_on_conversion():
|
21 |
+
return _overwrite_module_params_on_conversion
|
env-llmeval/lib/python3.10/site-packages/torch/__init__.py
ADDED
@@ -0,0 +1,1995 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
r"""
|
3 |
+
The torch package contains data structures for multi-dimensional
|
4 |
+
tensors and defines mathematical operations over these tensors.
|
5 |
+
Additionally, it provides many utilities for efficient serialization of
|
6 |
+
Tensors and arbitrary types, and other useful utilities.
|
7 |
+
|
8 |
+
It has a CUDA counterpart, that enables you to run your tensor computations
|
9 |
+
on an NVIDIA GPU with compute capability >= 3.0.
|
10 |
+
"""
|
11 |
+
|
12 |
+
import math
|
13 |
+
import os
|
14 |
+
import sys
|
15 |
+
import platform
|
16 |
+
import textwrap
|
17 |
+
import ctypes
|
18 |
+
import inspect
|
19 |
+
|
20 |
+
# multipy/deploy is setting this import before importing torch, this is the most
|
21 |
+
# reliable way we have to detect if we're running within deploy.
|
22 |
+
# https://github.com/pytorch/multipy/blob/d60f34ad38c371e441fe7ffdb77a3c3dda5a5d19/multipy/runtime/interpreter/interpreter_impl.cpp#L134-L137
|
23 |
+
def _running_with_deploy():
|
24 |
+
return sys.modules.get("torch._meta_registrations", None) is object
|
25 |
+
|
26 |
+
from ._utils import _import_dotted_name, classproperty
|
27 |
+
from ._utils import _functionalize_sync as _sync
|
28 |
+
from ._utils_internal import get_file_path, prepare_multiprocessing_environment, \
|
29 |
+
USE_RTLD_GLOBAL_WITH_LIBTORCH, USE_GLOBAL_DEPS
|
30 |
+
|
31 |
+
# TODO(torch_deploy) figure out how to freeze version.py in fbcode build
if _running_with_deploy():
    # Inside deploy there is no generated version.py to read from, so a
    # fixed marker string is used instead.
    __version__ = "torch-deploy-1.8"
else:
    from .torch_version import __version__ as __version__

from typing import Any, Callable, Dict, Optional, Set, Tuple, Type, TYPE_CHECKING, Union, List
import builtins

# The public API of this module.  Extended further below with the public
# names pulled in from torch._C.
__all__ = [
    'typename', 'is_tensor', 'is_storage',
    'set_default_tensor_type', 'set_default_device',
    'set_rng_state', 'get_rng_state', 'manual_seed', 'initial_seed', 'seed',
    'save', 'load', 'set_printoptions', 'chunk', 'split', 'stack', 'matmul',
    'no_grad', 'enable_grad', 'rand', 'randn', 'inference_mode',
    'DoubleStorage', 'FloatStorage', 'LongStorage', 'IntStorage',
    'ShortStorage', 'CharStorage', 'ByteStorage', 'BoolStorage',
    'TypedStorage', 'UntypedStorage',
    'DoubleTensor', 'FloatTensor', 'LongTensor', 'IntTensor',
    'ShortTensor', 'CharTensor', 'ByteTensor', 'BoolTensor', 'Tensor',
    'lobpcg', 'use_deterministic_algorithms',
    'are_deterministic_algorithms_enabled',
    'is_deterministic_algorithms_warn_only_enabled',
    'set_deterministic_debug_mode', 'get_deterministic_debug_mode',
    'set_float32_matmul_precision', 'get_float32_matmul_precision',
    'set_warn_always', 'is_warn_always_enabled', 'SymInt', 'SymFloat',
    'SymBool', 'sym_not', 'unravel_index',
    'sym_int', 'sym_float', 'sym_max', 'sym_min', 'sym_ite', 'compile', 'vmap',
    'sym_sqrt',
    'export', 'autocast', 'cond',
]
|
62 |
+
|
63 |
+
################################################################################
# Load the extension module
################################################################################

if sys.platform == 'win32':
    # Collect every directory that may contain the DLLs torch depends on.
    pfiles_path = os.getenv('ProgramFiles', 'C:\\Program Files')
    py_dll_path = os.path.join(sys.exec_prefix, 'Library', 'bin')
    th_dll_path = os.path.join(os.path.dirname(__file__), 'lib')

    # When users create a virtualenv that inherits the base environment,
    # we will need to add the corresponding library directory into
    # DLL search directories. Otherwise, it will rely on `PATH` which
    # is dependent on user settings.
    if sys.exec_prefix != sys.base_exec_prefix:
        base_py_dll_path = os.path.join(sys.base_exec_prefix, 'Library', 'bin')
    else:
        base_py_dll_path = ''

    dll_paths = list(filter(os.path.exists, [th_dll_path, py_dll_path, base_py_dll_path]))

    # Fall back to the NVTOOLSEXT_PATH install location when nvToolsExt is
    # not found next to the other DLLs.
    if all(not os.path.exists(os.path.join(p, 'nvToolsExt64_1.dll')) for p in dll_paths):
        nvtoolsext_dll_path = os.path.join(
            os.getenv('NVTOOLSEXT_PATH', os.path.join(pfiles_path, 'NVIDIA Corporation', 'NvToolsExt')), 'bin', 'x64')
    else:
        nvtoolsext_dll_path = ''

    from .version import cuda as cuda_version
    import glob
    # Likewise, locate the CUDA runtime via CUDA_PATH_V<major>_<minor> (or
    # the default toolkit install path) when cudart is not already findable.
    if cuda_version and all(not glob.glob(os.path.join(p, 'cudart64*.dll')) for p in dll_paths):
        cuda_version_1 = cuda_version.replace('.', '_')
        cuda_path_var = 'CUDA_PATH_V' + cuda_version_1
        default_path = os.path.join(pfiles_path, 'NVIDIA GPU Computing Toolkit', 'CUDA', 'v' + cuda_version)
        cuda_path = os.path.join(os.getenv(cuda_path_var, default_path), 'bin')
    else:
        cuda_path = ''

    dll_paths.extend(filter(os.path.exists, [nvtoolsext_dll_path, cuda_path]))

    kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True)
    with_load_library_flags = hasattr(kernel32, 'AddDllDirectory')
    # 0x0001 is SEM_FAILCRITICALERRORS: suppress the system error dialog
    # while we probe for DLLs; the previous mode is restored below.
    prev_error_mode = kernel32.SetErrorMode(0x0001)

    kernel32.LoadLibraryW.restype = ctypes.c_void_p
    if with_load_library_flags:
        kernel32.LoadLibraryExW.restype = ctypes.c_void_p

    for dll_path in dll_paths:
        os.add_dll_directory(dll_path)

    # Loading torch DLLs fails confusingly when the MSVC runtime is absent,
    # so check for it explicitly and point the user at the installer.
    try:
        ctypes.CDLL('vcruntime140.dll')
        ctypes.CDLL('msvcp140.dll')
        ctypes.CDLL('vcruntime140_1.dll')
    except OSError:
        print('''Microsoft Visual C++ Redistributable is not installed, this may lead to the DLL load failure.
                 It can be downloaded at https://aka.ms/vs/16/release/vc_redist.x64.exe''')

    dlls = glob.glob(os.path.join(th_dll_path, '*.dll'))
    path_patched = False
    for dll in dlls:
        is_loaded = False
        if with_load_library_flags:
            # 0x00001100 combines LOAD_LIBRARY_SEARCH_DEFAULT_DIRS and
            # LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR (see Win32 LoadLibraryExW docs).
            res = kernel32.LoadLibraryExW(dll, None, 0x00001100)
            last_error = ctypes.get_last_error()
            # Error 126 (ERROR_MOD_NOT_FOUND) is retried below with a
            # patched PATH; any other failure is fatal.
            if res is None and last_error != 126:
                err = ctypes.WinError(last_error)
                err.strerror += f' Error loading "{dll}" or one of its dependencies.'
                raise err
            elif res is not None:
                is_loaded = True
        if not is_loaded:
            if not path_patched:
                # Prepend our DLL directories to PATH once, then retry with
                # the plain search order.
                os.environ['PATH'] = ';'.join(dll_paths + [os.environ['PATH']])
                path_patched = True
            res = kernel32.LoadLibraryW(dll)
            if res is None:
                err = ctypes.WinError(ctypes.get_last_error())
                err.strerror += f' Error loading "{dll}" or one of its dependencies.'
                raise err

    kernel32.SetErrorMode(prev_error_mode)
|
144 |
+
|
145 |
+
|
146 |
+
def _preload_cuda_deps(lib_folder, lib_name):
|
147 |
+
"""Preloads cuda deps if they could not be found otherwise."""
|
148 |
+
# Should only be called on Linux if default path resolution have failed
|
149 |
+
assert platform.system() == 'Linux', 'Should only be called on Linux'
|
150 |
+
import glob
|
151 |
+
lib_path = None
|
152 |
+
for path in sys.path:
|
153 |
+
nvidia_path = os.path.join(path, 'nvidia')
|
154 |
+
if not os.path.exists(nvidia_path):
|
155 |
+
continue
|
156 |
+
candidate_lib_paths = glob.glob(os.path.join(nvidia_path, lib_folder, 'lib', lib_name))
|
157 |
+
if candidate_lib_paths and not lib_path:
|
158 |
+
lib_path = candidate_lib_paths[0]
|
159 |
+
if lib_path:
|
160 |
+
break
|
161 |
+
if not lib_path:
|
162 |
+
raise ValueError(f"{lib_name} not found in the system path {sys.path}")
|
163 |
+
ctypes.CDLL(lib_path)
|
164 |
+
|
165 |
+
|
166 |
+
# See Note [Global dependencies]
def _load_global_deps() -> None:
    """Load libtorch_global_deps with RTLD_GLOBAL, preloading pip-installed
    CUDA wheel libraries first if the initial load fails on one of them."""
    # deploy links everything statically, and Windows handles this via the
    # DLL logic above, so there is nothing to do on those platforms.
    if _running_with_deploy() or platform.system() == 'Windows':
        return

    lib_name = 'libtorch_global_deps' + ('.dylib' if platform.system() == 'Darwin' else '.so')
    here = os.path.abspath(__file__)
    lib_path = os.path.join(os.path.dirname(here), 'lib', lib_name)

    try:
        ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
    except OSError as err:
        # Can only happen for wheel with cuda libs as PYPI deps
        # As PyTorch is not purelib, but nvidia-*-cu12 is
        # Maps nvidia wheel folder name -> glob pattern of the library inside it.
        cuda_libs: Dict[str, str] = {
            'cublas': 'libcublas.so.*[0-9]',
            'cudnn': 'libcudnn.so.*[0-9]',
            'cuda_nvrtc': 'libnvrtc.so.*[0-9]',
            'cuda_runtime': 'libcudart.so.*[0-9]',
            'cuda_cupti': 'libcupti.so.*[0-9]',
            'cufft': 'libcufft.so.*[0-9]',
            'curand': 'libcurand.so.*[0-9]',
            'cusolver': 'libcusolver.so.*[0-9]',
            'cusparse': 'libcusparse.so.*[0-9]',
            'nccl': 'libnccl.so.*[0-9]',
            'nvtx': 'libnvToolsExt.so.*[0-9]',
        }
        # Only retry when the failure message mentions one of the CUDA libs
        # above; any other OSError is re-raised unchanged.
        is_cuda_lib_err = [lib for lib in cuda_libs.values() if(lib.split('.')[0] in err.args[0])]
        if not is_cuda_lib_err:
            raise err
        # Preload every CUDA dep from the nvidia-* wheels, then retry once.
        for lib_folder, lib_name in cuda_libs.items():
            _preload_cuda_deps(lib_folder, lib_name)
        ctypes.CDLL(lib_path, mode=ctypes.RTLD_GLOBAL)
|
199 |
+
|
200 |
+
|
201 |
+
if (USE_RTLD_GLOBAL_WITH_LIBTORCH or os.getenv('TORCH_USE_RTLD_GLOBAL')) and \
        (_running_with_deploy() or platform.system() != 'Windows'):
    # Do it the hard way. You might want to load libtorch with RTLD_GLOBAL in a
    # few circumstances:
    #
    # 1. You're in a build environment (e.g., fbcode) where
    # libtorch_global_deps is not available, but you still need
    # to get mkl to link in with RTLD_GLOBAL or it will just
    # not work.
    #
    # 2. You're trying to run PyTorch under UBSAN and you need
    # to ensure that only one copy of libtorch is loaded, so
    # vptr checks work properly
    #
    # If you're using this setting, you must verify that all the libraries
    # you load consistently use the same libstdc++, or you may have
    # mysterious segfaults.
    #
    # Temporarily widen the interpreter's dlopen flags so that importing
    # torch._C loads its shared libraries with RTLD_GLOBAL, then restore.
    old_flags = sys.getdlopenflags()
    sys.setdlopenflags(os.RTLD_GLOBAL | os.RTLD_LAZY)
    from torch._C import *  # noqa: F403
    sys.setdlopenflags(old_flags)
    del old_flags

else:
    # Easy way. You want this most of the time, because it will prevent
    # C++ symbols from libtorch clobbering C++ symbols from other
    # libraries, leading to mysterious segfaults.
    #
    # If building in an environment where libtorch_global_deps isn't available
    # like parts of fbsource, but where RTLD_GLOBAL causes segfaults, you will
    # want USE_RTLD_GLOBAL_WITH_LIBTORCH = False and USE_GLOBAL_DEPS = False
    #
    # See Note [Global dependencies]
    if USE_GLOBAL_DEPS:
        _load_global_deps()
    from torch._C import *  # noqa: F403
|
238 |
+
|
239 |
+
# Appease the type checker; ordinarily this binding is inserted by the
# torch._C module initialization code in C
# (the import below is only evaluated by static type checkers, never at runtime).
if TYPE_CHECKING:
    from . import _C as _C
|
243 |
+
|
244 |
+
class SymInt:
    """
    An int-like object (magic methods included) that forwards every operation
    to the SymNode it wraps.  This is used in particular to symbolically
    record operations in the symbolic shape workflow.
    """

    def __init__(self, node):
        # This field MUST be named node; C++ binding code assumes that this
        # class has a field named node that stores SymNode
        self.node = node

    def __bool__(self):
        return builtins.bool(self != 0)

    def __int__(self):
        return self.node.int_()

    def __index__(self):
        return self.node.int_()

    # The comparison/arithmetic stubs below are installed for real by
    # torch.fx.experimental.sym_node; reaching one of them means that
    # patching has not happened.

    def __eq__(self, other: object) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __lt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __gt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __le__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __ge__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __sym_max__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_min__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_float__(self):
        raise AssertionError("type stub not overridden")

    def __neg__(self):
        raise AssertionError("type stub not overridden")

    def __repr__(self):
        return str(self.node)

    def __hash__(self) -> builtins.int:
        singleton = self.node.singleton_int()
        if singleton is None:
            # We could support constant SymInts as well, but not doing it for now
            raise TypeError("unhashable type: non-singleton SymInt")
        return hash(singleton)
|
304 |
+
|
305 |
+
class SymFloat:
    """
    A float-like object (magic methods included) that forwards every operation
    to the SymNode it wraps.  This is used in particular to symbolically
    record operations in the symbolic shape workflow.
    """

    def __init__(self, node):
        # This field MUST be named node; C++ binding code assumes that this
        # class has a field named node that stores SymNode
        self.node = node

    def __bool__(self):
        return self.node.bool_()

    # The stubs below are installed for real by
    # torch.fx.experimental.sym_node; reaching one of them means that
    # patching has not happened.

    def __eq__(self, other: object) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __lt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __gt__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __le__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __ge__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __sym_max__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_min__(self, other):
        raise AssertionError("type stub not overridden")

    def __sym_int__(self):
        raise AssertionError("type stub not overridden")

    def __repr__(self):
        return self.node.str()
|
348 |
+
|
349 |
+
class SymBool:
    """
    A bool-like object (magic methods included) that forwards every operation
    to the SymNode it wraps.  This is used in particular to symbolically
    record operations in the symbolic shape workflow.

    Unlike regular bools, regular boolean operators will force extra guards instead
    of symbolically evaluate. Use the bitwise operators instead to handle this.
    """

    def __init__(self, node):
        # This field MUST be named node; C++ binding code assumes that this
        # class has a field named node that stores SymNode
        self.node = node

    def __bool__(self):
        return self.node.bool_()

    def __int__(self):
        return builtins.int(self.node.bool_())

    # Magic methods installed by torch.fx.experimental.sym_node
    def __and__(self, other) -> "SymBool":
        raise AssertionError("type stub not overridden")

    def __or__(self, other) -> "SymBool":
        raise AssertionError("type stub not overridden")

    # Why __sym_not__ rather than the plausible alternatives:
    #
    # - __not__ is not a real magic method: the meaning of Python's builtin
    #   `not` cannot be overridden.  The name 'sym_not' makes clear that user
    #   code cannot reach this hook via `not`, operator.not_, or
    #   operator.__not__ — only via our custom sym_not operator.
    #
    # - __invert__ is deliberately left alone because SymBool is meant to be
    #   usable wherever a bool is expected, and bitwise negation misbehaves
    #   on booleans (bool subclasses int, so ~1 == -2, which is not
    #   falseish).  That would be a giant footgun, so we define our own
    #   operator instead.  Bitwise and/or do the right thing, so those keep
    #   the conventional operators for readability.
    #
    def __sym_not__(self) -> "SymBool":
        raise AssertionError("type stub not overridden")

    def __sym_ite__(self, then_val, else_val):
        raise AssertionError("type stub not overridden")

    def __eq__(self, other) -> builtins.bool:
        raise AssertionError("type stub not overridden")

    def __repr__(self):
        return str(self.node)

    def __hash__(self):
        if not self.node.is_constant():
            raise TypeError("unhashable type: SymBool")
        return hash(self.node.bool_())
|
411 |
+
|
412 |
+
def sym_not(a):
    r""" SymInt-aware utility for logical negation.

    Args:
        a (SymBool or bool): Object to negate
    """
    from .overrides import has_torch_function_unary, handle_torch_function

    if has_torch_function_unary(a):
        return handle_torch_function(sym_not, (a,), a)
    if hasattr(a, '__sym_not__'):
        return a.__sym_not__()
    # Only consult sympy when it has already been imported: a sympy.Basic
    # instance cannot exist unless sympy is loaded, so checking sys.modules
    # first avoids making sympy a hard dependency of plain-bool negation
    # (the previous unconditional `import sympy` raised ImportError for
    # every caller when sympy was absent).
    if 'sympy' in sys.modules:
        import sympy
        if isinstance(a, sympy.Basic):
            return ~a  # type: ignore[operator]
    return not a
|
428 |
+
|
429 |
+
def sym_float(a):
    r""" SymInt-aware utility for float casting.

    Args:
        a (SymInt, SymFloat, or object): Object to cast
    """
    from .overrides import has_torch_function_unary, handle_torch_function

    if has_torch_function_unary(a):
        return handle_torch_function(sym_float, (a,), a)
    # Already symbolic float: nothing to do.
    if isinstance(a, SymFloat):
        return a
    # Objects (e.g. SymInt) that know how to cast themselves symbolically.
    if hasattr(a, '__sym_float__'):
        return a.__sym_float__()
    return py_float(a)  # type: ignore[operator]
|
444 |
+
|
445 |
+
|
446 |
+
def sym_int(a):
    r""" SymInt-aware utility for int casting.

    Args:
        a (SymInt, SymFloat, or object): Object to cast
    """
    from .overrides import has_torch_function_unary, handle_torch_function

    if has_torch_function_unary(a):
        return handle_torch_function(sym_int, (a,), a)
    if isinstance(a, SymInt):
        return a
    if isinstance(a, SymFloat):
        # Round toward zero, matching Python's int() truncation semantics.
        if a >= 0:
            return math.floor(a)  # type: ignore[arg-type]
        return math.ceil(a)  # type: ignore[arg-type]
    return py_int(a)  # type: ignore[operator]
|
461 |
+
|
462 |
+
def sym_max(a, b):
    """ SymInt-aware utility for max()."""
    from .overrides import has_torch_function, handle_torch_function

    if has_torch_function((a, b)):
        return handle_torch_function(sym_max, (a, b), a, b)
    if isinstance(a, (SymInt, SymFloat)):
        return a.__sym_max__(b)
    if isinstance(b, (SymInt, SymFloat)):
        # NB: If you actually care about preserving output type exactly
        # if you do something like max(0, 0.0), it is NOT sound to treat
        # min/max as commutative
        return b.__sym_max__(a)
    return builtins.max(a, b)  # type: ignore[operator]
|
476 |
+
|
477 |
+
def sym_min(a, b):
    """ SymInt-aware utility for min()."""
    # NOTE: docstring previously said "max()" — copy-paste error fixed.
    from .overrides import has_torch_function, handle_torch_function

    if has_torch_function((a, b)):
        return handle_torch_function(sym_min, (a, b), a, b)
    if isinstance(a, (SymInt, SymFloat)):
        return a.__sym_min__(b)
    elif isinstance(b, (SymInt, SymFloat)):
        return b.__sym_min__(a)
    return builtins.min(a, b)  # type: ignore[operator]
|
488 |
+
|
489 |
+
# Drop in replacement for math.sqrt
def sym_sqrt(a):
    """SymInt-aware square root; defers to ``__sym_sqrt__`` when available."""
    from .overrides import has_torch_function_unary, handle_torch_function

    if has_torch_function_unary(a):
        return handle_torch_function(sym_sqrt, (a,), a)
    if hasattr(a, "__sym_sqrt__"):
        return a.__sym_sqrt__()
    return math.sqrt(a)
|
498 |
+
|
499 |
+
def sym_ite(b, t, f):
    """SymBool-aware if-then-else: yields ``t`` when ``b`` is true, else ``f``."""
    from .overrides import has_torch_function, handle_torch_function

    if has_torch_function((b, t, f)):
        return handle_torch_function(sym_ite, (b, t, f), b, t, f)
    # Both branches must share a type so the result type is well defined.
    assert isinstance(b, (SymBool, builtins.bool)) and type(t) == type(f)
    if isinstance(b, SymBool):
        return b.__sym_ite__(t, f)
    return t if b else f
|
508 |
+
|
509 |
+
# Check to see if we can load C extensions, and if not provide some guidance
# on what the problem might be.
try:
    # _initExtension is chosen (arbitrarily) as a sentinel.
    from torch._C import _initExtension
except ImportError:
    import torch._C as _C_for_compiled_check

    # The __file__ check only works for Python 3.7 and above.
    # A namespace-package `torch/_C` directory (from an in-tree checkout) has
    # __file__ set to None, which is how we distinguish it from the real
    # compiled extension.
    if _C_for_compiled_check.__file__ is None:
        raise ImportError(textwrap.dedent('''
            Failed to load PyTorch C extensions:
                It appears that PyTorch has loaded the `torch/_C` folder
                of the PyTorch repository rather than the C extensions which
                are expected in the `torch._C` namespace. This can occur when
                using the `install` workflow. e.g.
                    $ python setup.py install && python -c "import torch"

                This error can generally be solved using the `develop` workflow
                    $ python setup.py develop && python -c "import torch"  # This should succeed
                or by running Python from a different directory.
            ''').strip()) from None
    raise  # If __file__ is not None the cause is unknown, so just re-raise.
|
532 |
+
|
533 |
+
# Re-export the public names from torch._C and fix up their __module__ so
# they present as `torch.<name>` rather than `torch._C.<name>`.
for name in dir(_C):
    if name[0] != '_' and not name.endswith('Base'):
        __all__.append(name)
        obj = getattr(_C, name)
        if (isinstance(obj, Callable) or inspect.isclass(obj)):  # type: ignore[arg-type]
            if (obj.__module__ != 'torch'):
                # TODO: fix their module from C++ side
                if name not in ['DisableTorchFunctionSubclass', 'DisableTorchFunction', 'Generator']:
                    obj.__module__ = 'torch'
    elif name == 'TensorBase':
        # issue 109438 / pr 109940. Prevent TensorBase from being copied into torch.
        delattr(sys.modules[__name__], name)
|
545 |
+
|
546 |
+
if not TYPE_CHECKING:
    # issue 38137 and python issue 43367. Submodules of a C extension are
    # non-standard, and attributes of those submodules cannot be pickled since
    # pickle expect to be able to import them as "from _C.sub import attr"
    # which fails with "_C is not a package
    # Registering each C submodule in sys.modules under its dotted name makes
    # those imports (and therefore pickling) work.
    for attr in dir(_C):
        candidate = getattr(_C, attr)
        if type(candidate) is type(_C):
            # submodule
            if f'torch._C.{attr}' not in sys.modules:
                sys.modules[f'torch._C.{attr}'] = candidate
|
557 |
+
|
558 |
+
|
559 |
+
################################################################################
|
560 |
+
# Define basic utilities
|
561 |
+
################################################################################
|
562 |
+
|
563 |
+
|
564 |
+
def typename(o):
    """Return a string naming the type of ``o``.

    Tensors report their full type string (e.g. ``torch.FloatTensor``);
    everything else is named ``<module>.<qualname>``, with the module
    prefix omitted for builtins.
    """
    if isinstance(o, torch.Tensor):
        return o.type()

    module = ''
    if getattr(o, '__module__', None) not in (None, 'builtins', '__builtin__'):
        module = o.__module__ + '.'

    class_name = getattr(o, '__qualname__', None)
    if class_name is None:
        class_name = getattr(o, '__name__', None)
    if class_name is None:
        class_name = o.__class__.__name__

    return module + class_name
|
582 |
+
|
583 |
+
|
584 |
+
def is_tensor(obj):
    r"""Returns True if `obj` is a PyTorch tensor.

    This is nothing more than ``isinstance(obj, Tensor)``; prefer the
    ``isinstance`` check directly — it is more explicit and plays better
    with mypy type narrowing.

    Args:
        obj (Object): Object to test
    Example::

        >>> x = torch.tensor([1, 2, 3])
        >>> torch.is_tensor(x)
        True

    """
    return isinstance(obj, torch.Tensor)
|
602 |
+
|
603 |
+
|
604 |
+
def is_storage(obj):
    r"""Returns True if `obj` is a PyTorch storage object.

    Args:
        obj (Object): Object to test
    """
    # Exact type membership, not isinstance: subclasses do not count.
    obj_type = type(obj)
    return obj_type in _storage_classes
|
611 |
+
|
612 |
+
|
613 |
+
# The currently-installed DeviceContext (a TorchFunctionMode), or None.
_GLOBAL_DEVICE_CONTEXT = None

def set_default_device(device):
    """Sets the default ``torch.Tensor`` to be allocated on ``device``. This
    does not affect factory function calls which are called with an explicit
    ``device`` argument. Factory calls will be performed as if they
    were passed ``device`` as an argument.

    To only temporarily change the default device instead of setting it
    globally, use ``with torch.device(device):`` instead.

    The default device is initially ``cpu``. If you set the default tensor
    device to another device (e.g., ``cuda``) without a device index, tensors
    will be allocated on whatever the current device for the device type,
    even after :func:`torch.cuda.set_device` is called.

    .. warning::

        This function imposes a slight performance cost on every Python
        call to the torch API (not just factory functions). If this
        is causing problems for you, please comment on
        https://github.com/pytorch/pytorch/issues/92701

    .. note::

        This doesn't affect functions that create tensors that share the same memory as the input, like:
        :func:`torch.from_numpy` and :func:`torch.frombuffer`

    Args:
        device (device or string): the device to set as default

    Example::

        >>> # xdoctest: +SKIP("requires cuda, changes global state")
        >>> torch.tensor([1.2, 3]).device
        device(type='cpu')
        >>> torch.set_default_device('cuda')  # current device is 0
        >>> torch.tensor([1.2, 3]).device
        device(type='cuda', index=0)
        >>> torch.set_default_device('cuda:1')
        >>> torch.tensor([1.2, 3]).device
        device(type='cuda', index=1)

    """
    global _GLOBAL_DEVICE_CONTEXT
    if _GLOBAL_DEVICE_CONTEXT is not None:
        # Unwind the previously-installed device mode before replacing it.
        _GLOBAL_DEVICE_CONTEXT.__exit__(None, None, None)
    if device is None:
        _GLOBAL_DEVICE_CONTEXT = None
        return
    from torch.utils._device import DeviceContext
    device_ctx = DeviceContext(device)
    device_ctx.__enter__()
    _GLOBAL_DEVICE_CONTEXT = device_ctx
|
666 |
+
|
667 |
+
|
668 |
+
def set_default_tensor_type(t):
    r"""
    .. warning::

        This function is deprecated as of PyTorch 2.1, please use :func:`torch.set_default_dtype()` and
        :func:`torch.set_default_device()` as alternatives.

    Sets the default ``torch.Tensor`` type to floating point tensor type
    ``t``. This type will also be used as default floating point type for
    type inference in :func:`torch.tensor`.

    The default floating point tensor type is initially ``torch.FloatTensor``.

    Args:
        t (type or string): the floating point tensor type or its name

    Example::

        >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
        >>> torch.tensor([1.2, 3]).dtype    # initial default for floating point is torch.float32
        torch.float32
        >>> torch.set_default_tensor_type(torch.DoubleTensor)
        >>> torch.tensor([1.2, 3]).dtype    # a new floating point tensor
        torch.float64

    """
    if isinstance(t, str):
        # Accept dotted names like "torch.DoubleTensor" and resolve them to
        # the actual tensor class before handing off to the C++ setter.
        t = _import_dotted_name(t)
    _C._set_default_tensor_type(t)
|
697 |
+
|
698 |
+
|
699 |
+
def set_default_dtype(d):
    r"""

    Sets the default floating point dtype to :attr:`d`. Supports torch.float32
    and torch.float64 as inputs. Other dtypes may be accepted without complaint
    but are not supported and are unlikely to work as expected.

    When PyTorch is initialized its default floating point dtype is torch.float32,
    and the intent of set_default_dtype(torch.float64) is to facilitate NumPy-like
    type inference. The default floating point dtype is used to:

    1. Implicitly determine the default complex dtype. When the default floating point
       type is float32 the default complex dtype is complex64, and when the default
       floating point type is float64 the default complex type is complex128.
    2. Infer the dtype for tensors constructed using Python floats or complex Python
       numbers. See examples below.
    3. Determine the result of type promotion between bool and integer tensors and
       Python floats and complex Python numbers.

    Args:
        d (:class:`torch.dtype`): the floating point dtype to make the default.
                                  Either torch.float32 or torch.float64.

    Example:
        >>> # xdoctest: +SKIP("Other tests may have changed the default type. Can we reset it?")
        >>> # initial default for floating point is torch.float32
        >>> # Python floats are interpreted as float32
        >>> torch.tensor([1.2, 3]).dtype
        torch.float32
        >>> # initial default for complex is torch.complex64
        >>> # Complex Python numbers are interpreted as complex64
        >>> torch.tensor([1.2, 3j]).dtype
        torch.complex64

        >>> torch.set_default_dtype(torch.float64)

        >>> # Python floats are now interpreted as float64
        >>> torch.tensor([1.2, 3]).dtype    # a new floating point tensor
        torch.float64
        >>> # Complex Python numbers are now interpreted as complex128
        >>> torch.tensor([1.2, 3j]).dtype   # a new complex tensor
        torch.complex128

    """
    # Delegate directly to the C++ global; the complex default follows
    # automatically (see point 1 above).
    _C._set_default_dtype(d)
|
744 |
+
|
745 |
+
def use_deterministic_algorithms(mode: builtins.bool, *, warn_only: builtins.bool = False) -> None:
    r""" Sets whether PyTorch operations must use "deterministic"
    algorithms. That is, algorithms which, given the same input, and when
    run on the same software and hardware, always produce the same output.
    When enabled, operations will use deterministic algorithms when available,
    and if only nondeterministic algorithms are available they will throw a
    :class:`RuntimeError` when called.

    .. note:: This setting alone is not always enough to make an application
        reproducible. Refer to :ref:`reproducibility` for more information.

    .. note:: :func:`torch.set_deterministic_debug_mode` offers an alternative
        interface for this feature.

    The following normally-nondeterministic operations will act
    deterministically when ``mode=True``:

        * :class:`torch.nn.Conv1d` when called on CUDA tensor
        * :class:`torch.nn.Conv2d` when called on CUDA tensor
        * :class:`torch.nn.Conv3d` when called on CUDA tensor
        * :class:`torch.nn.ConvTranspose1d` when called on CUDA tensor
        * :class:`torch.nn.ConvTranspose2d` when called on CUDA tensor
        * :class:`torch.nn.ConvTranspose3d` when called on CUDA tensor
        * :class:`torch.nn.ReplicationPad2d` when attempting to differentiate a CUDA tensor
        * :func:`torch.bmm` when called on sparse-dense CUDA tensors
        * :func:`torch.Tensor.__getitem__` when attempting to differentiate a CPU tensor
          and the index is a list of tensors
        * :func:`torch.Tensor.index_put` with ``accumulate=False``
        * :func:`torch.Tensor.index_put` with ``accumulate=True`` when called on a CPU
          tensor
        * :func:`torch.Tensor.put_` with ``accumulate=True`` when called on a CPU
          tensor
        * :func:`torch.Tensor.scatter_add_` when called on a CUDA tensor
        * :func:`torch.gather` when called on a CUDA tensor that requires grad
        * :func:`torch.index_add` when called on CUDA tensor
        * :func:`torch.index_select` when attempting to differentiate a CUDA tensor
        * :func:`torch.repeat_interleave` when attempting to differentiate a CUDA tensor
        * :func:`torch.Tensor.index_copy` when called on a CPU or CUDA tensor
        * :func:`torch.Tensor.scatter` when `src` type is Tensor and called on CUDA tensor
        * :func:`torch.Tensor.scatter_reduce` when ``reduce='sum'`` or ``reduce='mean'`` and called on CUDA tensor

    The following normally-nondeterministic operations will throw a
    :class:`RuntimeError` when ``mode=True``:

        * :class:`torch.nn.AvgPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.AdaptiveAvgPool2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.AdaptiveAvgPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.MaxPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.AdaptiveMaxPool2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.FractionalMaxPool2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.FractionalMaxPool3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.MaxUnpool1d`
        * :class:`torch.nn.MaxUnpool2d`
        * :class:`torch.nn.MaxUnpool3d`
        * :func:`torch.nn.functional.interpolate` when attempting to differentiate a CUDA tensor
          and one of the following modes is used:

          - ``linear``
          - ``bilinear``
          - ``bicubic``
          - ``trilinear``

        * :class:`torch.nn.ReflectionPad1d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReflectionPad2d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReflectionPad3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReplicationPad1d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.ReplicationPad3d` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.NLLLoss` when called on a CUDA tensor
        * :class:`torch.nn.CTCLoss` when attempting to differentiate a CUDA tensor
        * :class:`torch.nn.EmbeddingBag` when attempting to differentiate a CUDA tensor when
          ``mode='max'``
        * :func:`torch.Tensor.put_` when ``accumulate=False``
        * :func:`torch.Tensor.put_` when ``accumulate=True`` and called on a CUDA tensor
        * :func:`torch.histc` when called on a CUDA tensor
        * :func:`torch.bincount` when called on a CUDA tensor and ``weights``
          tensor is given
        * :func:`torch.kthvalue` when called on a CUDA tensor
        * :func:`torch.median` with indices output when called on a CUDA tensor
        * :func:`torch.nn.functional.grid_sample` when attempting to differentiate a CUDA tensor
        * :func:`torch.cumsum` when called on a CUDA tensor when dtype is floating point or complex
        * :func:`torch.Tensor.scatter_reduce` when ``reduce='prod'`` and called on CUDA tensor
        * :func:`torch.Tensor.resize_` when called with a quantized tensor

    In addition, several operations fill uninitialized memory when this setting
    is turned on and when
    :attr:`torch.utils.deterministic.fill_uninitialized_memory` is turned on.
    See the documentation for that attribute for more information.

    A handful of CUDA operations are nondeterministic if the CUDA version is
    10.2 or greater, unless the environment variable ``CUBLAS_WORKSPACE_CONFIG=:4096:8``
    or ``CUBLAS_WORKSPACE_CONFIG=:16:8`` is set. See the CUDA documentation for more
    details: `<https://docs.nvidia.com/cuda/cublas/index.html#cublasApi_reproducibility>`_
    If one of these environment variable configurations is not set, a :class:`RuntimeError`
    will be raised from these operations when called with CUDA tensors:

        * :func:`torch.mm`
        * :func:`torch.mv`
        * :func:`torch.bmm`

    Note that deterministic operations tend to have worse performance than
    nondeterministic operations.

    .. note::

        This flag does not detect or prevent nondeterministic behavior caused
        by calling an inplace operation on a tensor with an internal memory
        overlap or by giving such a tensor as the :attr:`out` argument for an
        operation. In these cases, multiple writes of different data may target
        a single memory location, and the order of writes is not guaranteed.

    Args:
        mode (:class:`bool`): If True, makes potentially nondeterministic
            operations switch to a deterministic algorithm or throw a runtime
            error. If False, allows nondeterministic operations.

    Keyword args:
        warn_only (:class:`bool`, optional): If True, operations that do not
            have a deterministic implementation will throw a warning instead of
            an error. Default: ``False``

    Example::

        >>> # xdoctest: +SKIP
        >>> torch.use_deterministic_algorithms(True)

        # Forward mode nondeterministic error
        >>> torch.randn(10, device='cuda').kthvalue(1)
        ...
        RuntimeError: kthvalue CUDA does not have a deterministic implementation...

        # Backward mode nondeterministic error
        >>> torch.nn.AvgPool3d(1)(torch.randn(3, 4, 5, 6, requires_grad=True).cuda()).sum().backward()
        ...
        RuntimeError: avg_pool3d_backward_cuda does not have a deterministic implementation...
    """
    # Thin wrapper: the actual global flag lives in the C++ extension.
    _C._set_deterministic_algorithms(mode, warn_only=warn_only)
|
881 |
+
|
882 |
+
def are_deterministic_algorithms_enabled() -> builtins.bool:
    r"""Returns True if the global deterministic flag is turned on. Refer to
    :func:`torch.use_deterministic_algorithms` documentation for more details.
    """
    # Reads the flag set by _C._set_deterministic_algorithms.
    return _C._get_deterministic_algorithms()
|
887 |
+
|
888 |
+
def is_deterministic_algorithms_warn_only_enabled() -> builtins.bool:
    r"""Returns True if the global deterministic flag is set to warn only.
    Refer to :func:`torch.use_deterministic_algorithms` documentation for more
    details.
    """
    # Reads the warn_only flag set by _C._set_deterministic_algorithms.
    return _C._get_deterministic_algorithms_warn_only()
|
894 |
+
|
895 |
+
def set_deterministic_debug_mode(debug_mode: Union[builtins.int, str]) -> None:
    r"""Sets the debug mode for deterministic operations.

    .. note:: This is an alternative interface for
        :func:`torch.use_deterministic_algorithms`. Refer to that function's
        documentation for details about affected operations.

    Args:
        debug_mode(str or int): If "default" or 0, don't error or warn on
            nondeterministic operations. If "warn" or 1, warn on
            nondeterministic operations. If "error" or 2, error on
            nondeterministic operations.
    """

    # NOTE: builtins.int is used here because int in this scope resolves
    # to torch.int
    if not isinstance(debug_mode, (builtins.int, str)):
        raise TypeError(f'debug_mode must be str or int, but got {type(debug_mode)}')

    if isinstance(debug_mode, str):
        # Translate the symbolic name into the numeric level used below.
        name_to_level = {'default': 0, 'warn': 1, 'error': 2}
        if debug_mode not in name_to_level:
            raise RuntimeError(
                'invalid value of debug_mode, expected one of `default`, '
                f'`warn`, `error`, but got {debug_mode}')
        debug_mode = name_to_level[debug_mode]

    if debug_mode == 0:
        _C._set_deterministic_algorithms(False)
    elif debug_mode == 1:
        _C._set_deterministic_algorithms(True, warn_only=True)
    elif debug_mode == 2:
        _C._set_deterministic_algorithms(True)
    else:
        raise RuntimeError(
            'invalid value of debug_mode, expected 0, 1, or 2, '
            f'but got {debug_mode}')
|
936 |
+
|
937 |
+
def get_deterministic_debug_mode() -> builtins.int:
    r"""Returns the current value of the debug mode for deterministic
    operations. Refer to :func:`torch.set_deterministic_debug_mode`
    documentation for more details.
    """

    # Fold the two C-level flags back into the 0/1/2 levels used by
    # set_deterministic_debug_mode: 0=off, 1=warn-only, 2=error.
    if not _C._get_deterministic_algorithms():
        return 0
    return 1 if _C._get_deterministic_algorithms_warn_only() else 2
|
950 |
+
|
951 |
+
def get_float32_matmul_precision() -> builtins.str:
    r"""Returns the current value of float32 matrix multiplication precision. Refer to
    :func:`torch.set_float32_matmul_precision` documentation for more details.
    """
    # Returns one of "highest", "high", or "medium" (see the setter).
    return _C._get_float32_matmul_precision()
|
956 |
+
|
957 |
+
def set_float32_matmul_precision(precision: str) -> None:
    r"""Sets the internal precision of float32 matrix multiplications.

    Running float32 matrix multiplications in lower precision may significantly increase
    performance, and in some programs the loss of precision has a negligible impact.

    Supports three settings:

        * "highest", float32 matrix multiplications use the float32 datatype (24 mantissa
          bits) for internal computations.
        * "high", float32 matrix multiplications either use the TensorFloat32 datatype (10
          mantissa bits) or treat each float32 number as the sum of two bfloat16 numbers
          (approximately 16 mantissa bits), if the appropriate fast matrix multiplication
          algorithms are available. Otherwise float32 matrix multiplications are computed
          as if the precision is "highest". See below for more information on the bfloat16
          approach.
        * "medium", float32 matrix multiplications use the bfloat16 datatype (8 mantissa
          bits) for internal computations, if a fast matrix multiplication algorithm
          using that datatype internally is available. Otherwise float32
          matrix multiplications are computed as if the precision is "high".

    When using "high" precision, float32 multiplications may use a bfloat16-based algorithm
    that is more complicated than simply truncating to some smaller number mantissa bits
    (e.g. 10 for TensorFloat32, 8 for bfloat16). Refer to [Henry2019]_ for a complete
    description of this algorithm. To briefly explain here, the first step is to realize
    that we can perfectly encode a single float32 number as the sum of three bfloat16
    numbers (because float32 has 24 mantissa bits while bfloat16 has 8, and both have the
    same number of exponent bits). This means that the product of two float32 numbers can
    be exactly given by the sum of nine products of bfloat16 numbers. We can then trade
    accuracy for speed by dropping some of these products. The "high" precision algorithm
    specifically keeps only the three most significant products, which conveniently excludes
    all of the products involving the last 8 mantissa bits of either input. This means that
    we can represent our inputs as the sum of two bfloat16 numbers rather than three.
    Because bfloat16 fused-multiply-add (FMA) instructions are typically >10x faster than
    float32 ones, it's faster to do three multiplications and 2 additions with bfloat16
    precision than it is to do a single multiplication with float32 precision.

    .. [Henry2019] http://arxiv.org/abs/1904.06376

    .. note::

        This does not change the output dtype of float32 matrix multiplications,
        it controls how the internal computation of the matrix multiplication is performed.

    .. note::

        This does not change the precision of convolution operations. Other flags,
        like `torch.backends.cudnn.allow_tf32`, may control the precision of convolution
        operations.

    .. note::

        This flag currently only affects one native device type: CUDA.
        If "high" or "medium" are set then the TensorFloat32 datatype will be used
        when computing float32 matrix multiplications, equivalent to setting
        `torch.backends.cuda.matmul.allow_tf32 = True`. When "highest" (the default)
        is set then the float32 datatype is used for internal computations, equivalent
        to setting `torch.backends.cuda.matmul.allow_tf32 = False`.

    Args:
        precision(str): can be set to "highest" (default), "high", or "medium" (see above).

    """
    # Validation of the precision string happens on the C++ side.
    _C._set_float32_matmul_precision(precision)
|
1021 |
+
|
1022 |
+
def set_warn_always(b: builtins.bool) -> None:
    r"""When this flag is False (default) then some PyTorch warnings may only
    appear once per process. This helps avoid excessive warning information.
    Setting it to True causes these warnings to always appear, which may be
    helpful when debugging.

    Args:
        b (:class:`bool`): If True, force warnings to always be emitted
            If False, set to the default behaviour
    """
    # The flag is stored process-wide in the C++ extension.
    _C._set_warnAlways(b)
|
1033 |
+
|
1034 |
+
def is_warn_always_enabled() -> builtins.bool:
    r"""Returns True if the global warn_always flag is turned on. Refer to
    :func:`torch.set_warn_always` documentation for more details.
    """
    # Reads the flag set by _C._set_warnAlways.
    return _C._get_warnAlways()
|
1039 |
+
|
1040 |
+
################################################################################
|
1041 |
+
# Define error checking functions
|
1042 |
+
################################################################################
|
1043 |
+
|
1044 |
+
# These error checking functions must be kept consistent with their C++
|
1045 |
+
# equivalents. Their C++ equivalents are mentioned where applicable.
|
1046 |
+
|
1047 |
+
def _check_with(error_type, cond: Union[builtins.bool, SymBool], message: Callable[[], str]): # noqa: F811
|
1048 |
+
if not isinstance(cond, (builtins.bool, torch.SymBool)):
|
1049 |
+
raise TypeError(f'cond must be a bool, but got {type(cond)}')
|
1050 |
+
|
1051 |
+
from torch.fx.experimental.symbolic_shapes import expect_true
|
1052 |
+
if expect_true(cond):
|
1053 |
+
return
|
1054 |
+
|
1055 |
+
# error_type must be a subclass of Exception and not subclass of Warning
|
1056 |
+
assert issubclass(error_type, Exception) and not issubclass(error_type, Warning)
|
1057 |
+
|
1058 |
+
if message is None:
|
1059 |
+
message_evaluated = (
|
1060 |
+
'Expected cond to be True, but got False. (Could this error '
|
1061 |
+
'message be improved? If so, please report an enhancement request '
|
1062 |
+
'to PyTorch.)')
|
1063 |
+
|
1064 |
+
else:
|
1065 |
+
if not callable(message):
|
1066 |
+
raise TypeError('message must be a callable')
|
1067 |
+
|
1068 |
+
message_evaluated = str(message())
|
1069 |
+
|
1070 |
+
raise error_type(message_evaluated)
|
1071 |
+
|
1072 |
+
def _check(cond, message=None):  # noqa: F811
    r"""Throws error containing an optional message if the specified condition
    is False.

    Error type: ``RuntimeError``

    C++ equivalent: ``TORCH_CHECK``

    Args:
        cond (:class:`bool`): If False, throw error

        message (Callable, optional): Callable that returns either a string or
            an object that has a ``__str__()`` method to be used as the error
            message. Default: ``None``
    """
    # Delegates to the shared helper with RuntimeError as the error type.
    _check_with(RuntimeError, cond, message)
|
1088 |
+
|
1089 |
+
def _check_is_size(i, message=None):
    """Checks that a given integer is a valid size (i.e., is non-negative).
    You should use this over _check(i >= 0) because we can use the semantic
    information (that i is a size) to make some further inferences in case
    i is an unbacked SymInt.

    NB: Do NOT use this in contexts where a -1 size would be valid (indicating
    to infer the size from context, or if you should wrap-around or truncate).
    Only use this if the only valid value is an honest to goodness size.
    """
    # This is responsible for the expect_true
    _check(i >= 0, message)
    # Record the size hint for the symbolic-shapes machinery (no-op for
    # plain ints).
    from torch.fx.experimental.symbolic_shapes import _advise_is_size
    _advise_is_size(i)
|
1103 |
+
|
1104 |
+
def _check_index(cond, message=None):  # noqa: F811
    r"""Throws error containing an optional message if the specified condition
    is False.

    Error type: ``IndexError``

    C++ equivalent: ``TORCH_CHECK_INDEX``

    Args:
        cond (:class:`bool`): If False, throw error

        message (Callable, optional): Callable that returns either a string or
            an object that has a ``__str__()`` method to be used as the error
            message. Default: ``None``
    """
    # Delegates to the shared helper with IndexError as the error type.
    _check_with(IndexError, cond, message)
|
1120 |
+
|
1121 |
+
def _check_value(cond, message=None):  # noqa: F811
    r"""Throws error containing an optional message if the specified condition
    is False.

    Error type: ``ValueError``

    C++ equivalent: ``TORCH_CHECK_VALUE``

    Args:
        cond (:class:`bool`): If False, throw error

        message (Callable, optional): Callable that returns either a string or
            an object that has a ``__str__()`` method to be used as the error
            message. Default: ``None``
    """
    # Delegates to the shared helper with ValueError as the error type.
    _check_with(ValueError, cond, message)
|
1137 |
+
|
1138 |
+
def _check_type(cond, message=None):  # noqa: F811
    r"""Throws error containing an optional message if the specified condition
    is False.

    Error type: ``TypeError``

    C++ equivalent: ``TORCH_CHECK_TYPE``

    Args:
        cond (:class:`bool`): If False, throw error

        message (Callable, optional): Callable that returns either a string or
            an object that has a ``__str__()`` method to be used as the error
            message. Default: ``None``
    """
    # Delegates to the shared helper with TypeError as the error type.
    _check_with(TypeError, cond, message)
|
1154 |
+
|
1155 |
+
def _check_not_implemented(cond, message=None):  # noqa: F811
    r"""Throws error containing an optional message if the specified condition
    is False.

    Error type: ``NotImplementedError``

    C++ equivalent: ``TORCH_CHECK_NOT_IMPLEMENTED``

    Args:
        cond (:class:`bool`): If False, throw error

        message (Callable, optional): Callable that returns either a string or
            an object that has a ``__str__()`` method to be used as the error
            message. Default: ``None``
    """
    # Delegates to the shared helper with NotImplementedError as the error type.
    _check_with(NotImplementedError, cond, message)
|
1171 |
+
|
1172 |
+
def _check_tensor_all_with(error_type, cond, message=None):  # noqa: F811
    """Tensor variant of :func:`_check_with`.

    Validates that ``cond`` is a bool tensor and raises ``error_type`` via
    :func:`_check_with` unless every element of ``cond`` is True.

    Args:
        error_type: Exception subclass to raise on failure.
        cond (:class:`torch.Tensor`): Tensor of dtype ``torch.bool``.
        message (Callable, optional): Lazily-evaluated error message
            callable, forwarded to :func:`_check_with`. Default: ``None``
    """
    if not torch.is_tensor(cond):
        raise TypeError(f'cond must be a tensor, but got {type(cond)}')

    # Idiomatic inequality test (was `not cond.dtype == torch.bool`).
    if cond.dtype != torch.bool:
        raise TypeError(
            f'cond tensor must have dtype torch.bool, but got {cond.dtype}')

    # Reduce to a single Python bool: True iff all elements are True.
    _check_with(error_type, cond._is_all_true().item(), message)
|
1181 |
+
|
1182 |
+
# C++ equivalent: `TORCH_CHECK_TENSOR_ALL`
|
1183 |
+
def _check_tensor_all(cond, message=None):  # noqa: F811
    r"""Throws error containing an optional message if the specified condition
    is False.

    Error type: ``RuntimeError``

    C++ equivalent: ``TORCH_CHECK_TENSOR_ALL``

    Args:
        cond (:class:`torch.Tensor`): Tensor of dtype ``torch.bool``. If any
            element is ``False``, throw error

        message (Callable, optional): Callable that returns either a string or
            an object that has a ``__str__()`` method to be used as the error
            message. Default: ``None``
    """
    # Delegates to the tensor-aware helper with RuntimeError as the error type.
    _check_tensor_all_with(RuntimeError, cond, message)
|
1200 |
+
|
1201 |
+
################################################################################
|
1202 |
+
# Define numeric constants
|
1203 |
+
################################################################################
|
1204 |
+
|
1205 |
+
# For Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html) and
# NumPy consistency (https://numpy.org/devdocs/reference/constants.html)
# Re-exported numeric constants; PEP 8 spacing (no spaces around commas' left side).
from math import e, inf, nan, pi
__all__.extend(['e', 'pi', 'nan', 'inf'])
|
1209 |
+
|
1210 |
+
################################################################################
|
1211 |
+
# Define Storage and Tensor classes
|
1212 |
+
################################################################################
|
1213 |
+
|
1214 |
+
from ._tensor import Tensor
|
1215 |
+
from .storage import _StorageBase, TypedStorage, _LegacyStorage, UntypedStorage, _warn_typed_storage_removal
|
1216 |
+
|
1217 |
+
# NOTE: New <type>Storage classes should never be added. When adding a new
|
1218 |
+
# dtype, use torch.storage.TypedStorage directly.
|
1219 |
+
|
1220 |
+
class ByteStorage(_LegacyStorage):
    # Legacy typed storage for torch.uint8; access warns via
    # _warn_typed_storage_removal (TypedStorage classes are being removed).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.uint8
|
1229 |
+
|
1230 |
+
class DoubleStorage(_LegacyStorage):
    # Legacy typed storage for torch.double; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.double
|
1239 |
+
|
1240 |
+
class FloatStorage(_LegacyStorage):
    # Legacy typed storage for torch.float; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.float
|
1249 |
+
|
1250 |
+
class HalfStorage(_LegacyStorage):
    # Legacy typed storage for torch.half; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.half
|
1259 |
+
|
1260 |
+
class LongStorage(_LegacyStorage):
    # Legacy typed storage for torch.long; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.long
|
1269 |
+
|
1270 |
+
class IntStorage(_LegacyStorage):
    # Legacy typed storage for torch.int; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.int
|
1279 |
+
|
1280 |
+
class ShortStorage(_LegacyStorage):
    # Legacy typed storage for torch.short; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.short
|
1289 |
+
|
1290 |
+
class CharStorage(_LegacyStorage):
    # Legacy typed storage for torch.int8; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.int8
|
1299 |
+
|
1300 |
+
class BoolStorage(_LegacyStorage):
    # Legacy typed storage for torch.bool; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.bool
|
1309 |
+
|
1310 |
+
class BFloat16Storage(_LegacyStorage):
    # Legacy typed storage for torch.bfloat16; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.bfloat16
|
1319 |
+
|
1320 |
+
class ComplexDoubleStorage(_LegacyStorage):
    # Legacy typed storage for torch.cdouble; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.cdouble
|
1329 |
+
|
1330 |
+
class ComplexFloatStorage(_LegacyStorage):
    # Legacy typed storage for torch.cfloat; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.cfloat
|
1339 |
+
|
1340 |
+
class QUInt8Storage(_LegacyStorage):
    # Legacy typed storage for torch.quint8; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.quint8
|
1349 |
+
|
1350 |
+
class QInt8Storage(_LegacyStorage):
    # Legacy typed storage for torch.qint8; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.qint8
|
1359 |
+
|
1360 |
+
class QInt32Storage(_LegacyStorage):
    # Legacy typed storage for torch.qint32; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.qint32
|
1369 |
+
|
1370 |
+
class QUInt4x2Storage(_LegacyStorage):
    # Legacy typed storage for torch.quint4x2; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.quint4x2
|
1379 |
+
|
1380 |
+
class QUInt2x4Storage(_LegacyStorage):
    # Legacy typed storage for torch.quint2x4; access warns (deprecation).
    @classproperty
    def dtype(self):
        # Public accessor: emits the deprecation warning.
        _warn_typed_storage_removal(stacklevel=3)
        return self._dtype

    @classproperty
    def _dtype(self):
        # Internal accessor: no warning.
        return torch.quint2x4
|
1389 |
+
|
1390 |
+
# Registry of all storage classes exposed by this module (legacy typed
# storages plus UntypedStorage/TypedStorage).
_storage_classes = {
    UntypedStorage, DoubleStorage, FloatStorage, LongStorage, IntStorage,
    ShortStorage, CharStorage, ByteStorage, HalfStorage, BoolStorage,
    QUInt8Storage, QInt8Storage, QInt32Storage, BFloat16Storage,
    ComplexFloatStorage, ComplexDoubleStorage, QUInt4x2Storage, QUInt2x4Storage,
    TypedStorage
}

# The _tensor_classes set is initialized by the call to _C._initialize_tensor_type_bindings()
_tensor_classes: Set[Type] = set()
|
1400 |
+
|
1401 |
+
# If you edit these imports, please update torch/__init__.py.in as well
|
1402 |
+
from .random import set_rng_state, get_rng_state, manual_seed, initial_seed, seed
|
1403 |
+
from .serialization import save, load
|
1404 |
+
from ._tensor_str import set_printoptions
|
1405 |
+
|
1406 |
+
################################################################################
|
1407 |
+
# Initialize extension
|
1408 |
+
################################################################################
|
1409 |
+
|
1410 |
+
def manager_path():
    """Return the filesystem path of the torch_shm_manager binary as bytes.

    Returns ``b""`` when the shared-memory manager is not used (under
    torch::deploy or on Windows); raises RuntimeError if the binary is
    missing from the installed package.
    """
    if _running_with_deploy() or platform.system() == 'Windows':
        return b""
    binary = get_file_path('torch', 'bin', 'torch_shm_manager')
    prepare_multiprocessing_environment(get_file_path('torch'))
    if os.path.exists(binary):
        return binary.encode('utf-8')
    raise RuntimeError("Unable to find torch_shm_manager at " + binary)
|
1418 |
+
|
1419 |
+
from torch.amp import autocast
|
1420 |
+
|
1421 |
+
# Initializing the extension shadows the built-in python float / int classes;
# store them for later use by SymInt / SymFloat.
py_float = float
py_int = int

# Shared memory manager needs to know the exact location of manager executable
_C._initExtension(manager_path())
# manager_path is only needed for the call above; drop it from the namespace.
del manager_path
|
1429 |
+
|
1430 |
+
# Appease the type checker: it can't deal with direct setting of globals().
# Note that we will see "too many" functions when reexporting this way; there
# is not a good way to fix this problem. Perhaps, try to redesign VariableFunctions
# so that this import is good enough
if TYPE_CHECKING:
    # Some type signatures pulled in from _VariableFunctions here clash with
    # signatures already imported. For now these clashes are ignored; see
    # PR #43339 for details.
    from torch._C._VariableFunctions import *  # type: ignore[assignment, misc] # noqa: F403
    # Fixup segment_reduce visibility
    _segment_reduce = segment_reduce
    del segment_reduce

# Ops not to be exposed in `torch` namespace,
# mostly helper ops.
PRIVATE_OPS = (
    'unique_dim',
)

# Re-export every native op from the C++ bindings into the `torch` namespace,
# skipping dunder names and the PRIVATE_OPS helpers.
for name in dir(_C._VariableFunctions):
    if name.startswith('__') or name in PRIVATE_OPS:
        continue
    obj = getattr(_C._VariableFunctions, name)
    obj.__module__ = 'torch'
    # Hide some APIs that should not be public
    if name == "segment_reduce":
        # TODO: Once the undocumented FC window is passed, remove the line below
        globals()[name] = obj
        # Re-export under the underscore-prefixed (private) name as well.
        name = "_" + name
    globals()[name] = obj
    if not name.startswith("_"):
        __all__.append(name)
|
1462 |
+
|
1463 |
+
|
1464 |
+
|
1465 |
+
################################################################################
|
1466 |
+
# Import TorchDynamo's lazy APIs to avoid circular dependencies
|
1467 |
+
################################################################################
|
1468 |
+
|
1469 |
+
# needs to be before from .functional import * to avoid circular dependencies
|
1470 |
+
from ._compile import _disable_dynamo
|
1471 |
+
|
1472 |
+
################################################################################
|
1473 |
+
# Import interface functions defined in Python
|
1474 |
+
################################################################################
|
1475 |
+
|
1476 |
+
# needs to be after the above ATen bindings so we can overwrite from Python side
|
1477 |
+
from .functional import * # noqa: F403
|
1478 |
+
|
1479 |
+
|
1480 |
+
################################################################################
|
1481 |
+
# Remove unnecessary members
|
1482 |
+
################################################################################
|
1483 |
+
|
1484 |
+
del _StorageBase
|
1485 |
+
del _LegacyStorage
|
1486 |
+
|
1487 |
+
################################################################################
|
1488 |
+
# Define _assert
|
1489 |
+
################################################################################
|
1490 |
+
|
1491 |
+
# needs to be before the submodule imports to avoid circular dependencies
|
1492 |
+
def _assert(condition, message):
    r"""A wrapper around Python's assert which is symbolically traceable."""
    from .overrides import has_torch_function, handle_torch_function

    # Dispatch through __torch_function__ for tensor-like `condition` objects;
    # plain Tensors fall through to the builtin assert below.
    is_plain_tensor = type(condition) is torch.Tensor
    if not is_plain_tensor and has_torch_function((condition,)):
        return handle_torch_function(_assert, (condition,), condition, message)
    assert condition, message
|
1500 |
+
|
1501 |
+
################################################################################
|
1502 |
+
# Import most common subpackages
|
1503 |
+
################################################################################
|
1504 |
+
|
1505 |
+
# Use the redundant form so that type checkers know that these are a part of
|
1506 |
+
# the public API. The "regular" import lines are there solely for the runtime
|
1507 |
+
# side effect of adding to the imported module's members for other users.
|
1508 |
+
from torch import cuda as cuda
|
1509 |
+
from torch import cpu as cpu
|
1510 |
+
from torch import mps as mps
|
1511 |
+
from torch import autograd as autograd
|
1512 |
+
from torch.autograd import (
|
1513 |
+
no_grad as no_grad,
|
1514 |
+
enable_grad as enable_grad,
|
1515 |
+
set_grad_enabled as set_grad_enabled,
|
1516 |
+
inference_mode as inference_mode,
|
1517 |
+
)
|
1518 |
+
from torch import fft as fft
|
1519 |
+
from torch import futures as futures
|
1520 |
+
from torch import _awaits as _awaits
|
1521 |
+
from torch import nested as nested
|
1522 |
+
from torch import nn as nn
|
1523 |
+
from torch.signal import windows as windows
|
1524 |
+
from torch import optim as optim
|
1525 |
+
import torch.optim._multi_tensor
|
1526 |
+
from torch import multiprocessing as multiprocessing
|
1527 |
+
from torch import sparse as sparse
|
1528 |
+
from torch import special as special
|
1529 |
+
import torch.utils.backcompat
|
1530 |
+
from torch import jit as jit
|
1531 |
+
from torch import linalg as linalg
|
1532 |
+
from torch import hub as hub
|
1533 |
+
from torch import random as random
|
1534 |
+
from torch import distributions as distributions
|
1535 |
+
from torch import testing as testing
|
1536 |
+
from torch import backends as backends
|
1537 |
+
import torch.utils.data
|
1538 |
+
from torch import __config__ as __config__
|
1539 |
+
from torch import __future__ as __future__
|
1540 |
+
from torch import profiler as profiler
|
1541 |
+
|
1542 |
+
# Quantized, sparse, AO, etc. should be last to get imported, as nothing
|
1543 |
+
# is expected to depend on them.
|
1544 |
+
from torch import ao as ao
|
1545 |
+
# nn.quant* depends on ao -- so should be after those.
|
1546 |
+
import torch.nn.quantizable
|
1547 |
+
import torch.nn.quantized
|
1548 |
+
import torch.nn.qat
|
1549 |
+
import torch.nn.intrinsic
|
1550 |
+
|
1551 |
+
_C._init_names(list(torch._storage_classes))
|
1552 |
+
|
1553 |
+
# attach docstrings to torch and tensor functions
|
1554 |
+
from . import _torch_docs, _tensor_docs, _storage_docs
|
1555 |
+
del _torch_docs, _tensor_docs, _storage_docs
|
1556 |
+
|
1557 |
+
|
1558 |
+
def compiled_with_cxx11_abi() -> builtins.bool:
    r"""Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1"""
    # The flag is baked into the extension at build time and exposed via _C.
    return _C._GLIBCXX_USE_CXX11_ABI
|
1561 |
+
|
1562 |
+
|
1563 |
+
# Import the ops "namespace"
|
1564 |
+
from torch._ops import ops
|
1565 |
+
from torch._classes import classes
|
1566 |
+
import torch._library
|
1567 |
+
|
1568 |
+
# quantization depends on torch.fx
|
1569 |
+
# Import quantization
|
1570 |
+
from torch import quantization as quantization
|
1571 |
+
|
1572 |
+
# Import the quasi random sampler
|
1573 |
+
from torch import quasirandom as quasirandom
|
1574 |
+
|
1575 |
+
# If you are seeing this, it means that this call site was not checked if
|
1576 |
+
# the memory format could be preserved, and it was switched to old default
|
1577 |
+
# behaviour of contiguous
|
1578 |
+
legacy_contiguous_format = contiguous_format
|
1579 |
+
|
1580 |
+
# Register fork handler to initialize OpenMP in child processes (see gh-28389)
|
1581 |
+
from torch.multiprocessing._atfork import register_after_fork
|
1582 |
+
register_after_fork(torch.get_num_threads)
|
1583 |
+
del register_after_fork
|
1584 |
+
|
1585 |
+
# Import tools that require fully imported torch (for applying
|
1586 |
+
# torch.jit.script as a decorator, for instance):
|
1587 |
+
from ._lobpcg import lobpcg as lobpcg
|
1588 |
+
|
1589 |
+
# These were previously defined in native_functions.yaml and appeared on the
|
1590 |
+
# `torch` namespace, but we moved them to c10 dispatch to facilitate custom
|
1591 |
+
# class usage. We add these lines here to preserve backward compatibility.
|
1592 |
+
quantized_lstm = torch.ops.aten.quantized_lstm
|
1593 |
+
quantized_gru = torch.ops.aten.quantized_gru
|
1594 |
+
|
1595 |
+
from torch.utils.dlpack import from_dlpack, to_dlpack
|
1596 |
+
|
1597 |
+
# Import experimental masked operations support. See
|
1598 |
+
# [RFC-0016](https://github.com/pytorch/rfcs/pull/27) for more
|
1599 |
+
# information.
|
1600 |
+
from . import masked
|
1601 |
+
|
1602 |
+
# Import removed ops with error message about removal
|
1603 |
+
from ._linalg_utils import ( # type: ignore[misc]
|
1604 |
+
matrix_rank,
|
1605 |
+
eig,
|
1606 |
+
solve,
|
1607 |
+
lstsq,
|
1608 |
+
)
|
1609 |
+
from ._linalg_utils import _symeig as symeig # type: ignore[misc]
|
1610 |
+
|
1611 |
+
class _TorchCompileInductorWrapper:
|
1612 |
+
compiler_name = "inductor"
|
1613 |
+
|
1614 |
+
def __init__(self, mode, options, dynamic):
|
1615 |
+
self.config: Dict[str, Any] = dict()
|
1616 |
+
self.dynamic = dynamic
|
1617 |
+
self.apply_mode(mode)
|
1618 |
+
self.apply_options(options)
|
1619 |
+
|
1620 |
+
if self.config.get("triton.cudagraphs", False):
|
1621 |
+
os.environ["DISABLE_CUPTI_LAZY_REINIT"] = "1"
|
1622 |
+
# FIXME: CUDA Graph does not work well with CUPTI teardown.
|
1623 |
+
# 1) crashes on 1st lazy CUPTI re-init after teardown (CUDA 11)
|
1624 |
+
# 2) crashes on 2nd non-lazy CUPTI re-init after teardown (CUDA 12)
|
1625 |
+
# Workaround: turn off CUPTI teardown when using CUDA Graphs.
|
1626 |
+
os.environ["TEARDOWN_CUPTI"] = "0"
|
1627 |
+
|
1628 |
+
def __eq__(self, other):
|
1629 |
+
return (isinstance(other, _TorchCompileInductorWrapper) and
|
1630 |
+
self.config == other.config and
|
1631 |
+
self.dynamic == other.dynamic)
|
1632 |
+
|
1633 |
+
def apply_mode(self, mode: Optional[str]):
|
1634 |
+
if mode is None or mode == "default":
|
1635 |
+
pass
|
1636 |
+
elif mode in ("reduce-overhead", "max-autotune", "max-autotune-no-cudagraphs"):
|
1637 |
+
from torch._inductor import list_mode_options
|
1638 |
+
self.apply_options(list_mode_options(mode, self.dynamic))
|
1639 |
+
else:
|
1640 |
+
raise RuntimeError(
|
1641 |
+
f"Unrecognized mode={mode}, should be one of: default, reduce-overhead, max-autotune, max-autotune-no-cudagraphs"
|
1642 |
+
)
|
1643 |
+
|
1644 |
+
def apply_options(self, options: Optional[Dict[str, Any]]):
|
1645 |
+
if not options:
|
1646 |
+
return
|
1647 |
+
|
1648 |
+
from torch._inductor import config
|
1649 |
+
current_config: Dict[str, Any] = config.shallow_copy_dict()
|
1650 |
+
|
1651 |
+
for key, val in options.items():
|
1652 |
+
attr_name = key.replace("-", "_")
|
1653 |
+
if attr_name not in current_config:
|
1654 |
+
raise RuntimeError(
|
1655 |
+
f"Unexpected optimization option {key}, known options are {list(current_config.keys())}"
|
1656 |
+
)
|
1657 |
+
if type(val) is not type(current_config[attr_name]):
|
1658 |
+
val_type_str = type(val).__name__
|
1659 |
+
expected_type_str = type(current_config[attr_name]).__name__
|
1660 |
+
raise RuntimeError(
|
1661 |
+
f"Unexpected type of attr {key}, got {val_type_str} should be {expected_type_str}"
|
1662 |
+
)
|
1663 |
+
self.config[attr_name] = val
|
1664 |
+
|
1665 |
+
def __call__(self, model_, inputs_):
|
1666 |
+
from torch._inductor.compile_fx import compile_fx
|
1667 |
+
|
1668 |
+
return compile_fx(model_, inputs_, config_patches=self.config)
|
1669 |
+
|
1670 |
+
def get_compiler_config(self):
|
1671 |
+
from torch._inductor.compile_fx import get_patched_config_dict
|
1672 |
+
return get_patched_config_dict(config_patches=self.config)
|
1673 |
+
|
1674 |
+
def reset(self):
|
1675 |
+
from torch._inductor import config
|
1676 |
+
if "triton.cudagraphs" in self.config or config.triton.cudagraphs:
|
1677 |
+
if self.config.get("triton.cudagraphs", True):
|
1678 |
+
from torch._inductor.cudagraph_trees import reset_cudagraph_trees
|
1679 |
+
reset_cudagraph_trees()
|
1680 |
+
|
1681 |
+
class _TorchCompileWrapper:
|
1682 |
+
def __init__(self, backend, mode, options, dynamic):
|
1683 |
+
from torch._dynamo.backends.registry import lookup_backend
|
1684 |
+
|
1685 |
+
if isinstance(backend, str):
|
1686 |
+
self.compiler_name = backend
|
1687 |
+
elif hasattr(backend, "__name__"):
|
1688 |
+
self.compiler_name = backend.__name__
|
1689 |
+
else:
|
1690 |
+
self.compiler_name = str(backend)
|
1691 |
+
self.dynamic = dynamic
|
1692 |
+
self.compiler_fn = lookup_backend(backend)
|
1693 |
+
self.kwargs = {}
|
1694 |
+
# only pass the args if they non-empty
|
1695 |
+
if mode and mode != "default":
|
1696 |
+
self.kwargs["mode"] = mode
|
1697 |
+
if options:
|
1698 |
+
self.kwargs["options"] = options
|
1699 |
+
|
1700 |
+
def __eq__(self, other):
|
1701 |
+
return (isinstance(other, _TorchCompileWrapper) and
|
1702 |
+
self.compiler_fn == other.compiler_fn and
|
1703 |
+
self.kwargs == other.kwargs and
|
1704 |
+
self.dynamic == other.dynamic)
|
1705 |
+
|
1706 |
+
def __call__(self, model_, inputs_):
|
1707 |
+
return self.compiler_fn(model_, inputs_, **self.kwargs)
|
1708 |
+
|
1709 |
+
|
1710 |
+
def compile(model: Optional[Callable] = None, *,
            fullgraph: builtins.bool = False,
            dynamic: Optional[builtins.bool] = None,
            backend: Union[str, Callable] = "inductor",
            mode: Union[str, None] = None,
            options: Optional[Dict[str, Union[str, builtins.int, builtins.bool]]] = None,
            disable: builtins.bool = False) -> Callable:
    """
    Optimizes given model/function using TorchDynamo and specified backend.

    Concretely, for every frame executed within the compiled region, we will attempt
    to compile it and cache the compiled result on the code object for future
    use.  A single frame may be compiled multiple times if previous compiled
    results are not applicable for subsequent calls (this is called a "guard
    failure), you can use TORCH_LOGS=guards to debug these situations.
    Multiple compiled results can be associated with a frame up to
    ``torch._dynamo.config.cache_size_limit``, which defaults to 64; at which
    point we will fall back to eager.  Note that compile caches are per
    *code object*, not frame; if you dynamically create multiple copies of a
    function, they will all share the same code cache.

    Args:
       model (Callable): Module/function to optimize
       fullgraph (bool): If False (default), torch.compile attempts to discover compileable regions
        in the function that it will optimize. If True, then we require that the entire function be
        capturable into a single graph. If this is not possible (that is, if there are graph breaks),
        then this will raise an error.
       dynamic (bool or None): Use dynamic shape tracing.  When this is True, we will up-front attempt
        to generate a kernel that is as dynamic as possible to avoid recompilations when
        sizes change.  This may not always work as some operations/optimizations will
        force specialization; use TORCH_LOGS=dynamic to debug overspecialization.
        When this is False, we will NEVER generate dynamic kernels, we will always specialize.
        By default (None), we automatically detect if dynamism has occurred and compile a more
        dynamic kernel upon recompile.
       backend (str or Callable): backend to be used

        - "inductor" is the default backend, which is a good balance between performance and overhead

        - Non experimental in-tree backends can be seen with `torch._dynamo.list_backends()`

        - Experimental or debug in-tree backends can be seen with `torch._dynamo.list_backends(None)`

        - To register an out-of-tree custom backend: https://pytorch.org/docs/main/compile/custom-backends.html
       mode (str): Can be either "default", "reduce-overhead", "max-autotune" or "max-autotune-no-cudagraphs"

        - "default" is the default mode, which is a good balance between performance and overhead

        - "reduce-overhead" is a mode that reduces the overhead of python with CUDA graphs,
          useful for small batches.  Reduction of overhead can come at the cost of more memory
          usage, as we will cache the workspace memory required for the invocation so that we
          do not have to reallocate it on subsequent runs.  Reduction of overhead is not guaranteed
          to work; today, we only reduce overhead for CUDA only graphs which do not mutate inputs.
          There are other circumstances where CUDA graphs are not applicable; use TORCH_LOG=perf_hints
          to debug.

        - "max-autotune" is a mode that leverages Triton based matrix multiplications and convolutions
          It enables CUDA graphs by default.

        - "max-autotune-no-cudagraphs" is a mode similar to "max-autotune" but without CUDA graphs

        - To see the exact configs that each mode sets you can call `torch._inductor.list_mode_options()`

       options (dict): A dictionary of options to pass to the backend. Some notable ones to try out are

        - `epilogue_fusion` which fuses pointwise ops into templates. Requires `max_autotune` to also be set

        - `max_autotune` which will profile to pick the best matmul configuration

        - `fallback_random` which is useful when debugging accuracy issues

        - `shape_padding` which pads matrix shapes to better align loads on GPUs especially for tensor cores

        - `triton.cudagraphs` which will reduce the overhead of python with CUDA graphs

        - `trace.enabled` which is the most useful debugging flag to turn on

        - `trace.graph_diagram` which will show you a picture of your graph after fusion

        - For inductor you can see the full list of configs that it supports by calling `torch._inductor.list_options()`
       disable (bool): Turn torch.compile() into a no-op for testing

    Example::

        @torch.compile(options={"triton.cudagraphs": True}, fullgraph=True)
        def foo(x):
            return torch.sin(x) + torch.cos(x)

    """
    _C._log_api_usage_once("torch.compile")
    # Temporary until we get proper support for python 3.12
    if sys.version_info >= (3, 12):
        raise RuntimeError("Dynamo is not supported on Python 3.12+")

    # Decorator mode: `@torch.compile(...)` with no model returns a decorator
    # that re-invokes `compile` with the captured keyword arguments.
    if model is None:
        def fn(model: Callable):
            if model is None:
                raise RuntimeError("Model can't be None")
            return compile(model,
                           fullgraph=fullgraph,
                           dynamic=dynamic,
                           backend=backend,
                           mode=mode,
                           options=options,
                           disable=disable)
        return fn

    # `mode` and `options` are mutually exclusive ways of configuring a backend.
    if mode is not None and options is not None:
        raise RuntimeError("Either mode or options can be specified, but both can't be specified at the same time.")
    if mode is None and options is None:
        mode = "default"
    # Wrap the backend so dynamo receives a single callable carrying the config.
    if backend == "inductor":
        backend = _TorchCompileInductorWrapper(mode, options, dynamic)
    else:
        backend = _TorchCompileWrapper(backend, mode, options, dynamic)

    return torch._dynamo.optimize(backend=backend, nopython=fullgraph, dynamic=dynamic, disable=disable)(model)
|
1827 |
+
|
1828 |
+
|
1829 |
+
from torch import export as export
|
1830 |
+
|
1831 |
+
from torch._higher_order_ops import cond
|
1832 |
+
|
1833 |
+
def _register_device_module(device_type, module):
|
1834 |
+
r"""Register an external runtime module of the specific :attr:`device_type`
|
1835 |
+
supported by torch.
|
1836 |
+
|
1837 |
+
After the :attr:`module` is registered correctly, the user can refer
|
1838 |
+
the external runtime module as part of torch with attribute torch.xxx.
|
1839 |
+
"""
|
1840 |
+
# Make sure the device_type represent a supported device type for torch.
|
1841 |
+
device_type = torch.device(device_type).type
|
1842 |
+
m = sys.modules[__name__]
|
1843 |
+
if hasattr(m, device_type):
|
1844 |
+
raise RuntimeError(f"The runtime module of '{device_type}' has already "
|
1845 |
+
f"been registered with '{getattr(m, device_type)}'")
|
1846 |
+
setattr(m, device_type, module)
|
1847 |
+
torch_module_name = '.'.join([__name__, device_type])
|
1848 |
+
sys.modules[torch_module_name] = module
|
1849 |
+
|
1850 |
+
# expose return_types
|
1851 |
+
from . import return_types
|
1852 |
+
from . import library
|
1853 |
+
if not TYPE_CHECKING:
|
1854 |
+
from . import _meta_registrations
|
1855 |
+
|
1856 |
+
# Enable CUDA Sanitizer
|
1857 |
+
if 'TORCH_CUDA_SANITIZER' in os.environ:
|
1858 |
+
import torch.cuda._sanitizer as csan
|
1859 |
+
|
1860 |
+
csan.enable_cuda_sanitizer()
|
1861 |
+
|
1862 |
+
# Populate magic methods on SymInt and SymFloat
|
1863 |
+
import torch.fx.experimental.sym_node
|
1864 |
+
|
1865 |
+
from torch import func as func
|
1866 |
+
from torch.func import vmap
|
1867 |
+
|
1868 |
+
|
1869 |
+
# The function _sparse_coo_tensor_unsafe is removed from PyTorch
|
1870 |
+
# Python API (v. 1.13), here we temporarily provide its replacement
|
1871 |
+
# with a deprecation warning.
|
1872 |
+
# TODO: remove the function for PyTorch v 1.15.
|
1873 |
+
def _sparse_coo_tensor_unsafe(*args, **kwargs):
|
1874 |
+
import warnings
|
1875 |
+
warnings.warn('torch._sparse_coo_tensor_unsafe is deprecated, '
|
1876 |
+
'use torch.sparse_coo_tensor(..., check_invariants=False) instead.')
|
1877 |
+
kwargs['check_invariants'] = False
|
1878 |
+
return torch.sparse_coo_tensor(*args, **kwargs)
|
1879 |
+
|
1880 |
+
# Register MPS specific decomps
|
1881 |
+
torch.backends.mps._init()
|
1882 |
+
|
1883 |
+
if not _running_with_deploy():
|
1884 |
+
from torch import compiler as compiler
|
1885 |
+
|
1886 |
+
class _TritonLibrary:
    # Process-wide registry for ops in the "triton" dispatcher namespace.
    # NOTE: creating the Library is a module-import side effect; the class
    # memoizes registrations so each (op, dispatch key) pair is defined once.
    lib = torch.library.Library("triton", "DEF")
    # Maps (op_key, dispatch_key) -> registered implementation.
    ops_table: Dict[Tuple[str, str], Callable] = {}

    @classmethod
    def registerOp(cls, op_key, full_schema, op_impl, dispatch_key):
        # Define the schema and register the impl only on first sight of this
        # (op, dispatch key) pair; later calls return the cached callable.
        if (op_key, dispatch_key) not in cls.ops_table:
            cls.lib.define(full_schema)
            cls.lib.impl("triton::" + op_key, op_impl, dispatch_key)
            cls.ops_table[(op_key, dispatch_key)] = op_impl

        return cls.ops_table[(op_key, dispatch_key)]
|
1898 |
+
|
1899 |
+
|
1900 |
+
# Deprecated attributes
|
1901 |
+
_deprecated_attrs = {
|
1902 |
+
"has_mps": torch.backends.mps.is_built,
|
1903 |
+
"has_cuda": torch.backends.cuda.is_built,
|
1904 |
+
"has_cudnn": torch.backends.cudnn.is_available,
|
1905 |
+
"has_mkldnn": torch.backends.mkldnn.is_available,
|
1906 |
+
}
|
1907 |
+
|
1908 |
+
if TYPE_CHECKING:
|
1909 |
+
# Import the following modules during type checking to enable code intelligence features,
|
1910 |
+
# such as auto-completion in tools like pylance, even when these modules are not explicitly
|
1911 |
+
# imported in user code.
|
1912 |
+
from torch import _dynamo as _dynamo
|
1913 |
+
from torch import _inductor as _inductor
|
1914 |
+
from torch import onnx as onnx
|
1915 |
+
|
1916 |
+
else:
|
1917 |
+
_lazy_modules = {
|
1918 |
+
"_dynamo",
|
1919 |
+
"_inductor",
|
1920 |
+
"_export",
|
1921 |
+
# ONNX must be imported after _dynamo, _ops, _subclasses, fx, func and jit
|
1922 |
+
"onnx",
|
1923 |
+
}
|
1924 |
+
|
1925 |
+
def __getattr__(name):
|
1926 |
+
# Deprecated attrs
|
1927 |
+
replacement = _deprecated_attrs.get(name)
|
1928 |
+
if replacement is not None:
|
1929 |
+
import warnings
|
1930 |
+
warnings.warn(f"'{name}' is deprecated, please use '{replacement.__module__}.{replacement.__name__}()'", stacklevel=2)
|
1931 |
+
return replacement()
|
1932 |
+
|
1933 |
+
# Lazy modules
|
1934 |
+
if name in _lazy_modules:
|
1935 |
+
import importlib
|
1936 |
+
return importlib.import_module(f".{name}", __name__)
|
1937 |
+
|
1938 |
+
raise AttributeError(f"module '{__name__}' has no attribute '{name}'")
|
1939 |
+
|
1940 |
+
|
1941 |
+
def _constrain_as_value(symbol, min: Optional[builtins.int] = None, max: Optional[builtins.int] = None):
|
1942 |
+
"""
|
1943 |
+
Add min/max constraint on the intermediate symbol at tracing time. If called in eager mode,
|
1944 |
+
it will still check if the input value is within the specified range.
|
1945 |
+
"""
|
1946 |
+
torch.sym_constrain_range(symbol, min=min, max=max)
|
1947 |
+
|
1948 |
+
|
1949 |
+
def _constrain_as_size(symbol, min: Optional[builtins.int] = None, max: Optional[builtins.int] = None):
|
1950 |
+
"""
|
1951 |
+
This indicates that a given int is size-like, and can be used in any context where a size is expected.
|
1952 |
+
You will typically use this when reading out integers from Tensors, e.g., max.item() or lengths.tolist()
|
1953 |
+
which then need to be used as tensor constructors. Providing these assertions to PyTorch can help resolve
|
1954 |
+
GuardOnDataDependentSymNode errors upon export, since we cannot guard on unbacked SymInts.
|
1955 |
+
|
1956 |
+
This function has unusual semantics which distinguish it from constrain_as_value.
|
1957 |
+
Specifically, at compile-time, we will unsoundly assume that the resulting int is always >= 2.
|
1958 |
+
As a result, max value you pass in should always be greater than 2.
|
1959 |
+
This makes it easier to use the unbacked int in size contexts, as we will often attempt to guard on a size being zero/one
|
1960 |
+
(e.g., when computing the contiguity of a tensor, or testing if broadcasting can occur),
|
1961 |
+
which will not work on unbacked SymInts. Assuming that the int is >= 2 allows us to
|
1962 |
+
report False to these tests. Although this is technically unsound,
|
1963 |
+
in practice we observe that if your program works for all sizes >= 2,
|
1964 |
+
it probably works for zero and one too. The reason specifically assume size is >= 2 is because
|
1965 |
+
lot of PyTorch code is specialized for 0 and 1 which could result in not general graphs.
|
1966 |
+
At runtime, we only assert that the user provided min/max values are respected.
|
1967 |
+
|
1968 |
+
To demonstrate in a scenario, suppose you do
|
1969 |
+
```
|
1970 |
+
# Case 1
|
1971 |
+
# This will assume symbol is between [2, inf) at compile time, but [0, inf) at runtime
|
1972 |
+
constrain_as_size(symbol, min=0)
|
1973 |
+
|
1974 |
+
# Case 2
|
1975 |
+
# This will assume symbol is between [2, N] at compile time, but [0, N] at runtime
|
1976 |
+
constrain_as_size(symbol, min=0, max=N)
|
1977 |
+
|
1978 |
+
# Case 3
|
1979 |
+
# This is not valid case as max is <= 2
|
1980 |
+
constrain_as_size(symbol, min=0, max=1)
|
1981 |
+
|
1982 |
+
# Case 4
|
1983 |
+
# This will assume symbol is between [2, inf) at compile time, AND [2, inf) at runtime
|
1984 |
+
constrain_as_size(symbol, min=2)
|
1985 |
+
|
1986 |
+
# Case 5
|
1987 |
+
# This will assume symbol is between [2, inf) at compile time, but [1, inf) at runtime
|
1988 |
+
constrain_as_size(symbol, min=1)
|
1989 |
+
```
|
1990 |
+
"""
|
1991 |
+
torch.sym_constrain_range_for_size(symbol, min=min, max=max)
|
1992 |
+
|
1993 |
+
|
1994 |
+
from . import _logging
|
1995 |
+
_logging._init_logs()
|
env-llmeval/lib/python3.10/site-packages/torch/_appdirs.py
ADDED
@@ -0,0 +1,666 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
# Copyright (c) 2005-2010 ActiveState Software Inc.
|
4 |
+
# Copyright (c) 2013 Eddy Petrișor
|
5 |
+
|
6 |
+
# flake8: noqa
|
7 |
+
|
8 |
+
"""
|
9 |
+
This file is directly from
|
10 |
+
https://github.com/ActiveState/appdirs/blob/3fe6a83776843a46f20c2e5587afcffe05e03b39/appdirs.py
|
11 |
+
|
12 |
+
The license of https://github.com/ActiveState/appdirs copied below:
|
13 |
+
|
14 |
+
|
15 |
+
# This is the MIT license
|
16 |
+
|
17 |
+
Copyright (c) 2010 ActiveState Software Inc.
|
18 |
+
|
19 |
+
Permission is hereby granted, free of charge, to any person obtaining a
|
20 |
+
copy of this software and associated documentation files (the
|
21 |
+
"Software"), to deal in the Software without restriction, including
|
22 |
+
without limitation the rights to use, copy, modify, merge, publish,
|
23 |
+
distribute, sublicense, and/or sell copies of the Software, and to
|
24 |
+
permit persons to whom the Software is furnished to do so, subject to
|
25 |
+
the following conditions:
|
26 |
+
|
27 |
+
The above copyright notice and this permission notice shall be included
|
28 |
+
in all copies or substantial portions of the Software.
|
29 |
+
|
30 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
31 |
+
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
32 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
33 |
+
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
34 |
+
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
35 |
+
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
36 |
+
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
37 |
+
"""
|
38 |
+
|
39 |
+
"""Utilities for determining application-specific dirs.
|
40 |
+
|
41 |
+
See <https://github.com/ActiveState/appdirs> for details and usage.
|
42 |
+
"""
|
43 |
+
# Dev Notes:
|
44 |
+
# - MSDN on where to store app data files:
|
45 |
+
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
|
46 |
+
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
|
47 |
+
# - XDG spec for Un*x: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
|
48 |
+
|
49 |
+
__version__ = "1.4.4"
|
50 |
+
__version_info__ = tuple(int(segment) for segment in __version__.split("."))
|
51 |
+
|
52 |
+
|
53 |
+
import os
|
54 |
+
import sys
|
55 |
+
|
56 |
+
unicode = str
|
57 |
+
|
58 |
+
# Jython reports "java..." in sys.platform; map the underlying OS name back
# to a *sys.platform*-style string, since the rest of the module only keys
# off "win32" and "darwin".
if sys.platform.startswith("java"):
    import platform

    os_name = platform.java_ver()[3][0]
    if os_name.startswith("Windows"): # "Windows XP", "Windows 7", etc.
        system = "win32"
    elif os_name.startswith("Mac"): # "Mac OS X", etc.
        system = "darwin"
    else: # "Linux", "SunOS", "FreeBSD", etc.
        # Setting this to "linux2" is not ideal, but only Windows or Mac
        # are actually checked for and the rest of the module expects
        # *sys.platform* style strings.
        system = "linux2"
else:
    system = sys.platform
|
73 |
+
|
74 |
+
|
75 |
+
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return the full path to the user-specific data dir for this application.

    "appname" is the name of the application; if None, just the system
        directory is returned.
    "appauthor" (Windows only) is the owning company/author; falls back to
        appname, or pass False to omit it from the path.
    "version" is an optional version path element, appended only when
        appname is present (typically "<major>.<minor>").
    "roaming" (Windows only) selects the roaming appdata directory, which
        is synced on login for roaming-profile users.

    Typical user data directories are:
        Mac OS X:  ~/Library/Application Support/<AppName>
        Unix:      ~/.local/share/<AppName>   # or $XDG_DATA_HOME if defined
        Win 7:     C:\Users\<username>\AppData\{Local,Roaming}\<AppAuthor>\<AppName>
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        csidl = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(_get_win_folder(csidl))
        if appname:
            # appauthor=False drops the author segment entirely.
            segments = [appname] if appauthor is False else [appauthor, appname]
            path = os.path.join(path, *segments)
    elif system == "darwin":
        path = os.path.expanduser("~/Library/Application Support/")
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: honor $XDG_DATA_HOME, defaulting to ~/.local/share.
        path = os.getenv("XDG_DATA_HOME", os.path.expanduser("~/.local/share"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
|
128 |
+
|
129 |
+
|
130 |
+
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return the full path to the user-shared data dir for this application.

    "appname" is the name of the application; if None, just the system
        directory is returned.
    "appauthor" (Windows only) is the owning company/author; falls back to
        appname, or pass False to omit it.
    "version" is an optional version path element, applied only when
        appname is present.
    "multipath" (\*nix only): return the entire os.pathsep-joined list of
        data dirs instead of just the first entry.

    Typical site data directories are:
        Mac OS X:  /Library/Application Support/<AppName>
        Unix:      /usr/local/share/<AppName> or /usr/share/<AppName>
        Win 7:     C:\ProgramData\<AppAuthor>\<AppName>   # hidden, but writeable

    WARNING: Do not use this on Windows — "C:\ProgramData" is a hidden
    *system* directory on Vista.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            segments = [appname] if appauthor is False else [appauthor, appname]
            path = os.path.join(path, *segments)
    elif system == "darwin":
        path = os.path.expanduser("/Library/Application Support")
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG default for $XDG_DATA_DIRS; only the first entry unless
        # multipath is requested.
        raw = os.getenv(
            "XDG_DATA_DIRS", os.pathsep.join(["/usr/local/share", "/usr/share"])
        )
        candidates = [
            os.path.expanduser(entry.rstrip(os.sep))
            for entry in raw.split(os.pathsep)
        ]
        if appname:
            if version:
                appname = os.path.join(appname, version)
            candidates = [os.sep.join([entry, appname]) for entry in candidates]
        # NOTE: the Unix branch returns here, before the version join below,
        # because version has already been folded into appname above.
        return os.pathsep.join(candidates) if multipath else candidates[0]

    if appname and version:
        path = os.path.join(path, version)
    return path
|
197 |
+
|
198 |
+
|
199 |
+
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return the full path to the user-specific config dir for this application.

    "appname" is the name of the application; if None, just the system
        directory is returned.
    "appauthor" (Windows only) is the owning company/author; falls back to
        appname, or pass False to omit it.
    "version" is an optional version path element, applied only when
        appname is present.
    "roaming" (Windows only) selects the roaming appdata directory.

    Typical user config directories are:
        Mac OS X:  ~/Library/Preferences/<AppName>
        Unix:      ~/.config/<AppName>   # or $XDG_CONFIG_HOME if defined
        Win \*:     same as user_data_dir
    """
    if system == "win32":
        # Windows keeps config alongside data; delegate (version applied below).
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        if system == "darwin":
            path = os.path.expanduser("~/Library/Preferences/")
        else:
            path = os.getenv("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
|
241 |
+
|
242 |
+
|
243 |
+
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return the full path to the shared config dir for this application.

    "appname" is the name of the application; if None, just the system
        directory is returned.
    "appauthor" (Windows only) is the owning company/author; falls back to
        appname, or pass False to omit it.
    "version" is an optional version path element, applied only when
        appname is present.
    "multipath" (\*nix only): return the entire os.pathsep-joined list of
        config dirs instead of just the first entry.

    Typical site config directories are:
        Mac OS X:  same as site_data_dir
        Unix:      /etc/xdg/<AppName> or each $XDG_CONFIG_DIRS entry
        Win \*:     same as site_data_dir

    WARNING: Do not use this on Windows (see the Vista note on site_data_dir).
    """
    if system == "win32":
        path = site_data_dir(appname, appauthor)
        if appname and version:
            path = os.path.join(path, version)
    elif system == "darwin":
        path = os.path.expanduser("/Library/Preferences")
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG default for $XDG_CONFIG_DIRS; only the first entry unless
        # multipath is requested.
        raw = os.getenv("XDG_CONFIG_DIRS", "/etc/xdg")
        candidates = [
            os.path.expanduser(entry.rstrip(os.sep))
            for entry in raw.split(os.pathsep)
        ]
        if appname:
            if version:
                appname = os.path.join(appname, version)
            candidates = [os.sep.join([entry, appname]) for entry in candidates]
        path = os.pathsep.join(candidates) if multipath else candidates[0]
    return path
|
298 |
+
|
299 |
+
|
300 |
+
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return the full path to the user-specific cache dir for this application.

    "appname" is the name of the application; if None, just the system
        directory is returned.
    "appauthor" (Windows only) is the owning company/author; falls back to
        appname, or pass False to omit it.
    "version" is an optional version path element, applied only when
        appname is present.
    "opinion" (boolean) can be False to skip appending "Cache" to the base
        app data dir on Windows.

    Typical user cache directories are:
        Mac OS X:  ~/Library/Caches/<AppName>
        Unix:      ~/.cache/<AppName>   # XDG default
        Vista:     C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache

    OPINION: On Windows the non-roaming app data dir is also the cache
    location suggested by MSDN, so this function appends "Cache" to keep
    cache files separate; disable with opinion=False.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            segments = [appname] if appauthor is False else [appauthor, appname]
            path = os.path.join(path, *segments)
            if opinion:
                path = os.path.join(path, "Cache")
    elif system == "darwin":
        path = os.path.expanduser("~/Library/Caches")
        if appname:
            path = os.path.join(path, appname)
    else:
        path = os.getenv("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
|
355 |
+
|
356 |
+
|
357 |
+
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return the full path to the user-specific state dir for this application.

    "appname" is the name of the application; if None, just the system
        directory is returned.
    "appauthor" (Windows only) is the owning company/author; falls back to
        appname, or pass False to omit it.
    "version" is an optional version path element, applied only when
        appname is present.
    "roaming" (Windows only) selects the roaming appdata directory.

    Typical user state directories are:
        Mac OS X:  same as user_data_dir
        Unix:      ~/.local/state/<AppName>   # or $XDG_STATE_HOME if defined
        Win \*:     same as user_data_dir

    On Unix this follows the Debian proposal
    <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
    extending the XDG spec with $XDG_STATE_HOME.
    """
    if system in ("win32", "darwin"):
        # Neither platform distinguishes state from data; delegate.
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        path = os.getenv("XDG_STATE_HOME", os.path.expanduser("~/.local/state"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
|
397 |
+
|
398 |
+
|
399 |
+
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific log dir for this application.

    "appname" is the name of the application; if None, just the system
        directory is returned.
    "appauthor" (Windows only) is the owning company/author; falls back to
        appname, or pass False to omit it.
    "version" is an optional version path element, applied only when
        appname is present.
    "opinion" (boolean) can be False to disable appending "Logs" to the
        base app data dir on Windows, or "log" to the base cache dir on Unix.

    Typical user log directories are:
        Mac OS X:  ~/Library/Logs/<AppName>
        Unix:      ~/.cache/<AppName>/log   # or under $XDG_CACHE_HOME
        Vista:     C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs

    OPINION: appends "Logs" (Windows) / "log" (Unix) to keep logs separate
    from other app data; disable with opinion=False.
    """
    if system == "darwin":
        # NOTE(review): unlike the other branches this join is unconditional,
        # so appname=None would raise TypeError here — confirm callers always
        # pass an appname on macOS.
        path = os.path.join(os.path.expanduser("~/Library/Logs"), appname)
    elif system == "win32":
        path = user_data_dir(appname, appauthor, version)
        # version has already been applied by user_data_dir; clear it so the
        # final join below does not append it a second time.
        version = False
        if opinion:
            path = os.path.join(path, "Logs")
    else:
        path = user_cache_dir(appname, appauthor, version)
        # version already applied by user_cache_dir (see above).
        version = False
        if opinion:
            path = os.path.join(path, "log")
    if appname and version:
        path = os.path.join(path, version)
    return path
|
446 |
+
|
447 |
+
|
448 |
+
class AppDirs(object):
    """Convenience wrapper for getting application dirs.

    Stores the app identity/options once and exposes each module-level
    ``*_dir`` function as a read-only property.
    """

    def __init__(
        self, appname=None, appauthor=None, version=None, roaming=False, multipath=False
    ):
        # Same meanings as the module-level functions' parameters.
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        self.roaming = roaming      # Windows-only: roaming vs. local appdata
        self.multipath = multipath  # *nix-only: return all site dirs

    @property
    def user_data_dir(self):
        return user_data_dir(
            self.appname, self.appauthor, version=self.version, roaming=self.roaming
        )

    @property
    def site_data_dir(self):
        return site_data_dir(
            self.appname, self.appauthor, version=self.version, multipath=self.multipath
        )

    @property
    def user_config_dir(self):
        return user_config_dir(
            self.appname, self.appauthor, version=self.version, roaming=self.roaming
        )

    @property
    def site_config_dir(self):
        return site_config_dir(
            self.appname, self.appauthor, version=self.version, multipath=self.multipath
        )

    @property
    def user_cache_dir(self):
        return user_cache_dir(self.appname, self.appauthor, version=self.version)

    @property
    def user_state_dir(self):
        return user_state_dir(self.appname, self.appauthor, version=self.version)

    @property
    def user_log_dir(self):
        return user_log_dir(self.appname, self.appauthor, version=self.version)
|
495 |
+
|
496 |
+
|
497 |
+
# ---- internal support stuff
|
498 |
+
|
499 |
+
|
500 |
+
def _get_win_folder_from_registry(csidl_name):
    """This is a fallback technique at best. I'm not sure if using the
    registry for this guarantees us the correct answer for all CSIDL_*
    names.
    """
    import winreg as _winreg

    # Map the CSIDL constant name to the value name under the "Shell
    # Folders" registry key; raises KeyError for unsupported constants.
    shell_folder_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]

    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders",
    )
    # QueryValueEx returns (value, registry_type); only the path is needed.
    dir, type = _winreg.QueryValueEx(key, shell_folder_name)
    return dir
|
519 |
+
|
520 |
+
|
521 |
+
def _get_win_folder_with_pywin32(csidl_name):
    """Resolve a CSIDL folder path via the pywin32 shell bindings."""
    from win32com.shell import shell, shellcon

    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        dir = unicode(dir)

        # Downgrade to short path name if have highbit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        has_high_char = False
        for c in dir:
            if ord(c) > 255:
                has_high_char = True
                break
        if has_high_char:
            try:
                import win32api

                dir = win32api.GetShortPathName(dir)
            except ImportError:
                # win32api unavailable: keep the long (possibly high-bit) path.
                pass
    except UnicodeError:
        # Coercion failed; fall back to whatever SHGetFolderPath returned.
        pass
    return dir
|
548 |
+
|
549 |
+
|
550 |
+
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL folder path via ctypes + SHGetFolderPathW."""
    import ctypes

    # Numeric CSIDL constants; raises KeyError for unsupported names.
    csidl_const = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]

    buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in buf:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf2 = ctypes.create_unicode_buffer(1024)
        # GetShortPathNameW returns 0 on failure, in which case the
        # original (long) path is kept.
        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
            buf = buf2

    return buf.value
|
575 |
+
|
576 |
+
|
577 |
+
def _get_win_folder_with_jna(csidl_name):
    """Resolve a CSIDL folder path on Jython via the JNA win32 bindings."""
    import array

    from com.sun import jna
    from com.sun.jna.platform import win32

    # MAX_PATH chars * 2 bytes; JNA fills a raw char buffer.
    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros("c", buf_size)
    shell = win32.Shell32.INSTANCE
    shell.SHGetFolderPath(
        None,
        getattr(win32.ShlObj, csidl_name),
        None,
        win32.ShlObj.SHGFP_TYPE_CURRENT,
        buf,
    )
    # Strip the trailing NUL padding left in the fixed-size buffer.
    dir = jna.Native.toString(buf.tostring()).rstrip("\0")

    # Downgrade to short path name if have highbit chars. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    has_high_char = False
    for c in dir:
        if ord(c) > 255:
            has_high_char = True
            break
    if has_high_char:
        buf = array.zeros("c", buf_size)
        kernel = win32.Kernel32.INSTANCE
        # GetShortPathName returns 0 on failure; keep the long path then.
        if kernel.GetShortPathName(dir, buf, buf_size):
            dir = jna.Native.toString(buf.tostring()).rstrip("\0")

    return dir
|
609 |
+
|
610 |
+
|
611 |
+
if system == "win32":
|
612 |
+
try:
|
613 |
+
import win32com.shell
|
614 |
+
|
615 |
+
_get_win_folder = _get_win_folder_with_pywin32
|
616 |
+
except ImportError:
|
617 |
+
try:
|
618 |
+
from ctypes import windll
|
619 |
+
|
620 |
+
_get_win_folder = _get_win_folder_with_ctypes
|
621 |
+
except ImportError:
|
622 |
+
try:
|
623 |
+
import com.sun.jna
|
624 |
+
|
625 |
+
_get_win_folder = _get_win_folder_with_jna
|
626 |
+
except ImportError:
|
627 |
+
_get_win_folder = _get_win_folder_from_registry
|
628 |
+
|
629 |
+
|
630 |
+
# ---- self test code
|
631 |
+
|
632 |
+
if __name__ == "__main__":
|
633 |
+
appname = "MyApp"
|
634 |
+
appauthor = "MyCompany"
|
635 |
+
|
636 |
+
props = (
|
637 |
+
"user_data_dir",
|
638 |
+
"user_config_dir",
|
639 |
+
"user_cache_dir",
|
640 |
+
"user_state_dir",
|
641 |
+
"user_log_dir",
|
642 |
+
"site_data_dir",
|
643 |
+
"site_config_dir",
|
644 |
+
)
|
645 |
+
|
646 |
+
print(f"-- app dirs {__version__} --")
|
647 |
+
|
648 |
+
print("-- app dirs (with optional 'version')")
|
649 |
+
dirs = AppDirs(appname, appauthor, version="1.0")
|
650 |
+
for prop in props:
|
651 |
+
print(f"{prop}: {getattr(dirs, prop)}")
|
652 |
+
|
653 |
+
print("\n-- app dirs (without optional 'version')")
|
654 |
+
dirs = AppDirs(appname, appauthor)
|
655 |
+
for prop in props:
|
656 |
+
print(f"{prop}: {getattr(dirs, prop)}")
|
657 |
+
|
658 |
+
print("\n-- app dirs (without optional 'appauthor')")
|
659 |
+
dirs = AppDirs(appname)
|
660 |
+
for prop in props:
|
661 |
+
print(f"{prop}: {getattr(dirs, prop)}")
|
662 |
+
|
663 |
+
print("\n-- app dirs (with disabled 'appauthor')")
|
664 |
+
dirs = AppDirs(appname, appauthor=False)
|
665 |
+
for prop in props:
|
666 |
+
print(f"{prop}: {getattr(dirs, prop)}")
|
env-llmeval/lib/python3.10/site-packages/torch/_awaits/__init__.py
ADDED
@@ -0,0 +1,54 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
from typing import cast, Callable, Generic, Type, TypeVar
|
4 |
+
|
5 |
+
import torch
|
6 |
+
|
7 |
+
__all__ = ['Await']
|
8 |
+
|
9 |
+
W = TypeVar("W")
|
10 |
+
|
11 |
+
class _PyAwaitMeta(type(torch._C._Await), type(Generic)):  # type: ignore[misc, no-redef]
    # Combined metaclass: torch._C._Await (a C-extension type) and
    # typing.Generic have different metaclasses, so a subclass of both
    # needs a metaclass deriving from each.
    pass

class _Await(torch._C._Await, Generic[W], metaclass=_PyAwaitMeta):
    r"""
    Wrapper around a ``torch._C.Await`` which encapsulates delayed execution
    of a callable. All manipulations happen with functions ``torch.jit._awaitable``,
    ``torch.jit._awaitable_wait``, ``torch.jit._awaitable_nowait``.

    Torch scriptable manipulations:
    ``torch.jit._awaitable(func, *args)``
    Creates ``Await[W]`` object, where W is return type of func.

    Returns:
    ``torch.jit._awaitable_wait(Await[W])``
    Returns the result of the function, specified at ``_awaitable``, with specified arguments.

    Returns:
    The result of type ``W`` of the function call. The result is owned by ``Await[W]``
    and returned on all following ``_awaitable_wait`` calls.


    ``torch.jit._awaitable_nowait(W)``
    Returns:
    Trivial ``Await[W]`` with specified result.


    Only in eager mode:
    ``fn() -> Callable[Tuple[Any], W]``
    Returns:
    Specified at ``_awaitable`` python function ``func``.

    ``args() -> Tuple[Any]``
    Returns:
    Specified at ``_awaitable`` python args.

    ``is_nowait() -> _bool``
    Returns:
    ``True`` if this object was created via ``_awaitable_nowait`` call (trivial `Await[W]`).

    In eager mode ``Await[W]`` can be used as ``W`` i.e. attributes of W can be called on ``Await[W]``,
    ``_awaitable_wait()`` call will be transparently added.
    """
    # All behavior lives in the C++ base class; this subclass only adds
    # the Generic[W] parameterization for static typing.
    pass
|
env-llmeval/lib/python3.10/site-packages/torch/_awaits/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (2.11 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/_classes.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import types
|
2 |
+
|
3 |
+
import torch._C
|
4 |
+
|
5 |
+
|
6 |
+
class _ClassNamespace(types.ModuleType):
|
7 |
+
def __init__(self, name):
|
8 |
+
super().__init__("torch.classes" + name)
|
9 |
+
self.name = name
|
10 |
+
|
11 |
+
def __getattr__(self, attr):
|
12 |
+
proxy = torch._C._get_custom_class_python_wrapper(self.name, attr)
|
13 |
+
if proxy is None:
|
14 |
+
raise RuntimeError(f"Class {self.name}.{attr} not registered!")
|
15 |
+
return proxy
|
16 |
+
|
17 |
+
|
18 |
+
class _Classes(types.ModuleType):
|
19 |
+
__file__ = "_classes.py"
|
20 |
+
|
21 |
+
def __init__(self):
|
22 |
+
super().__init__("torch.classes")
|
23 |
+
|
24 |
+
def __getattr__(self, name):
|
25 |
+
namespace = _ClassNamespace(name)
|
26 |
+
setattr(self, name, namespace)
|
27 |
+
return namespace
|
28 |
+
|
29 |
+
@property
|
30 |
+
def loaded_libraries(self):
|
31 |
+
return torch.ops.loaded_libraries
|
32 |
+
|
33 |
+
def load_library(self, path):
|
34 |
+
"""
|
35 |
+
Loads a shared library from the given path into the current process.
|
36 |
+
|
37 |
+
The library being loaded may run global initialization code to register
|
38 |
+
custom classes with the PyTorch JIT runtime. This allows dynamically
|
39 |
+
loading custom classes. For this, you should compile your class
|
40 |
+
and the static registration code into a shared library object, and then
|
41 |
+
call ``torch.classes.load_library('path/to/libcustom.so')`` to load the
|
42 |
+
shared object.
|
43 |
+
|
44 |
+
After the library is loaded, it is added to the
|
45 |
+
``torch.classes.loaded_libraries`` attribute, a set that may be inspected
|
46 |
+
for the paths of all libraries loaded using this function.
|
47 |
+
|
48 |
+
Args:
|
49 |
+
path (str): A path to a shared library to load.
|
50 |
+
"""
|
51 |
+
torch.ops.load_library(path)
|
52 |
+
|
53 |
+
|
54 |
+
# The classes "namespace"
|
55 |
+
classes = _Classes()
|
env-llmeval/lib/python3.10/site-packages/torch/_compile.py
ADDED
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
APIs related to torch.compile which lazily import torch._dynamo to avoid
|
3 |
+
circular dependencies.
|
4 |
+
"""
|
5 |
+
import functools
|
6 |
+
|
7 |
+
|
8 |
+
def _disable_dynamo(fn=None, recursive=True):
|
9 |
+
"""
|
10 |
+
This API should be only used inside torch, external users should still use
|
11 |
+
torch._dynamo.disable. The main goal of this API is to avoid circular
|
12 |
+
imports issues that is common while using _dynamo.disable inside torch
|
13 |
+
itself.
|
14 |
+
|
15 |
+
This API avoids it by lazily importing torch._dynamo from the import time to
|
16 |
+
the invocation of the decorated function.
|
17 |
+
"""
|
18 |
+
if fn is not None:
|
19 |
+
|
20 |
+
@functools.wraps(fn)
|
21 |
+
def inner(*args, **kwargs):
|
22 |
+
import torch._dynamo
|
23 |
+
|
24 |
+
return torch._dynamo.disable(fn, recursive)(*args, **kwargs)
|
25 |
+
|
26 |
+
return inner
|
27 |
+
else:
|
28 |
+
# decorator usage like @_disable_dynamo(recursive=False). The resulting
|
29 |
+
# object expects the original decorated function as the arg.
|
30 |
+
return functools.partial(_disable_dynamo, recursive=recursive)
|
env-llmeval/lib/python3.10/site-packages/torch/_custom_ops.py
ADDED
@@ -0,0 +1,322 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import inspect
|
2 |
+
|
3 |
+
from torch._custom_op.impl import (
|
4 |
+
_custom_op_with_schema,
|
5 |
+
_find_custom_op,
|
6 |
+
infer_schema,
|
7 |
+
parse_qualname,
|
8 |
+
validate_namespace,
|
9 |
+
)
|
10 |
+
from torch.library import get_ctx
|
11 |
+
|
12 |
+
__all__ = [
|
13 |
+
"custom_op",
|
14 |
+
"impl",
|
15 |
+
"impl_abstract",
|
16 |
+
"get_ctx",
|
17 |
+
"impl_save_for_backward",
|
18 |
+
"impl_backward",
|
19 |
+
]
|
20 |
+
|
21 |
+
|
22 |
+
def custom_op(qualname, func_or_schema=None):
    r"""Register a new custom operator.

    Defining a PyTorch operator is a two-step process: first declare it
    (name + schema), then register implementations for the various
    subsystems (CPU/CUDA, Autograd, ...) via the ``impl_*`` APIs.
    This entrypoint performs the first step.

    Arguments:
        qualname (str): "namespace::operator_name". Operators need a
            namespace to avoid name collisions; a given operator may only
            be created once. For a Python library, the recommended
            namespace is the name of your top-level module.
        func_or_schema (Union[Callable, str]): Either a prototype function
            whose type annotations we infer the schema from, or an explicit
            schema string.

    May be used as a decorator::

        >>> @torch._custom_ops.custom_op("mylibrary::numpy_sin")
        >>> def numpy_sin(x: Tensor) -> Tensor:
        >>>     raise NotImplementedError()

    For a detailed guide on custom ops, please see
    https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
    """
    ns, name = parse_qualname(qualname)
    validate_namespace(ns)

    def register_from_prototype(func):
        # The schema is inferred from annotations, so `func` must be a plain
        # Python function (not a builtin/partial/callable object).
        if not inspect.isfunction(func):
            raise ValueError(
                f"custom_op(...)(func): Expected `func` to be a Python "
                f"function, got: {type(func)}"
            )

        # Keep the Python-visible name and the operator name in sync.
        if func.__name__ != name:
            raise ValueError(
                f"custom_op(qualname='{qualname}', ...)(func): expected `func` "
                f"to have name '{name}' but got '{func.__name__}'. "
                f"Please either change the name of `func` or the qualname that "
                f"is passed to `custom_op`"
            )

        _custom_op_with_schema(qualname, infer_schema(func))
        return func

    if func_or_schema is None:
        # Decorator usage: @custom_op("ns::op")
        return register_from_prototype
    if isinstance(func_or_schema, str):
        # Explicit schema string; registers the op and returns None.
        _custom_op_with_schema(qualname, func_or_schema)
    else:
        return register_from_prototype(func_or_schema)
|
116 |
+
|
117 |
+
|
118 |
+
def impl(qualname, *, device_types=("cpu", "cuda"), func=None):
    r"""Register a device-type implementation for a custom operator.

    If the op receives Tensor inputs with mixed device types, dispatch goes
    to the implementation registered for the highest-priority device type
    present; supported types in priority order are {'cuda', 'cpu'}.

    Arguments:
        qualname (str): "namespace::operator_name" of a previously defined op.
        device_types (str or Iterable[str]): device type(s) to register for.
        func (Callable, optional): the implementation. If omitted, this
            returns a decorator, e.g.
            ``@torch._custom_ops.impl("mylibrary::numpy_sin", device_types="cpu")``.

    For a detailed guide on custom ops, please see
    https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
    """

    def register(f):
        # Resolve the op lazily so the definition and implementation may
        # live in different modules; also searches the torch.library registry.
        op = _find_custom_op(qualname, also_check_torch_library=True)
        # _stacklevel=3 attributes warnings/errors to our caller's frame.
        op.impl(device_types, _stacklevel=3)(f)
        return f

    return register if func is None else register(func)
|
179 |
+
|
180 |
+
|
181 |
+
def impl_abstract(qualname, *, func=None):
    r"""Register an abstract implementation for this operator.

    An abstract implementation specifies the operator's behavior on Tensors
    that carry no data: given inputs with certain metadata
    (sizes/strides/storage_offset/device), it returns outputs with the right
    metadata. It has the same signature as the operator and runs for both
    FakeTensors and meta tensors, so it must consist only of PyTorch
    operations and never access the storage/data of inputs or intermediates.

    For data-dependent output shapes, obtain the context via
    ``torch._custom_ops.get_ctx()`` and use ``ctx.create_unbacked_symint()``
    to build a symbolic size.

    May be used as a decorator (``func=None``).

    For a detailed guide on custom ops, please see
    https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
    """
    # Imported locally to avoid paying the torch.library import at module load.
    import torch.library

    # _stacklevel=2 attributes diagnostics to our caller rather than here.
    return torch.library.impl_abstract(qualname, func, _stacklevel=2)
|
254 |
+
|
255 |
+
|
256 |
+
def impl_save_for_backward(qualname, *, func=None):
    r"""Register a function that tells us what to save for backward.

    The registered function receives ``(inputs, output)`` and returns the
    quantities to stash for the backward pass. Please see
    :func:`impl_backward` for more details. May be used as a decorator.
    """

    def register(f):
        # Resolve lazily; also searches the torch.library registry.
        op = _find_custom_op(qualname, also_check_torch_library=True)
        op.impl_save_for_backward(_stacklevel=3)(f)
        return f

    return register if func is None else register(func)
|
270 |
+
|
271 |
+
|
272 |
+
def impl_backward(qualname, output_differentiability=None, *, func=None):
    r"""Registers a backward formula for an operator.

    Autograd support requires two registrations:

    1. A "save for backward" function (see :func:`impl_save_for_backward`)
       that accepts ``(inputs, output)`` and returns what to stash.
    2. This "backward" function, which must accept ``(ctx, saved, *grads)``:
       ``ctx`` is a context object, ``saved`` is exactly what the save
       function returned, and ``grads`` holds one gradient per operator
       output. It must return a dict mapping each Tensor input's name to its
       gradient (a Tensor or None); every declared Tensor input must appear.

    May be used as a decorator.

    For a detailed guide on custom ops, please see
    https://docs.google.com/document/d/1aGWtgxV3HppuxQAdddyPrs74_aEntpkYt9MalnCKnhk
    """

    def register(f):
        # Resolve lazily; also searches the torch.library registry.
        op = _find_custom_op(qualname, also_check_torch_library=True)
        op.impl_backward(output_differentiability, _stacklevel=3)(f)
        return f

    return register if func is None else register(func)
|
317 |
+
|
318 |
+
|
319 |
+
def _destroy(qualname):
    """De-registers a custom op. For testing purposes only"""
    op = _find_custom_op(qualname)
    op._destroy()
|
env-llmeval/lib/python3.10/site-packages/torch/_deploy.py
ADDED
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import io
|
2 |
+
|
3 |
+
import torch
|
4 |
+
from torch.package import Importer, OrderedImporter, PackageImporter, sys_importer
|
5 |
+
from torch.package._package_pickler import create_pickler
|
6 |
+
from torch.package._package_unpickler import PackageUnpickler
|
7 |
+
from torch.serialization import _maybe_decode_ascii
|
8 |
+
|
9 |
+
|
10 |
+
def _save_storages(importer, obj):
    # Pickle `obj` for torch::deploy, externalizing tensor storages and
    # __reduce_deploy__-capable objects instead of embedding them in the
    # pickle stream. Returns (pickle bytes, storages, dtypes, zip_reader or
    # None) so the counterpart _load_storages can reassemble the object.
    serialized_storages = []
    serialized_dtypes = []

    # Only a PackageImporter gets special handling; any other importer value
    # is discarded and we fall back to the plain system importer.
    importer = importer if isinstance(importer, torch.package.PackageImporter) else None
    importers: Importer
    if importer is not None:
        importers = OrderedImporter(importer, sys_importer)
    else:
        importers = sys_importer

    def persistent_id(obj):
        # Pickler hook: return a token for objects serialized out-of-band,
        # or None to let the pickler handle them normally.
        if torch.is_storage(obj) or isinstance(obj, torch.storage.TypedStorage):
            if isinstance(obj, torch.storage.TypedStorage):
                # TODO: Once we decide to break serialization FC, we can
                # remove this case
                storage = obj._untyped_storage
                dtype = obj.dtype
            else:
                storage = obj
                dtype = torch.uint8

            # NOTE(review): `storage` is computed above but never used — the
            # original object is appended, and _load_storages later calls
            # .untyped() on it. Presumably intentional for forward compat;
            # confirm before changing.
            serialized_storages.append(obj)
            serialized_dtypes.append(dtype)
            # Token: index into the side-band storage list.
            return ("storage", len(serialized_storages) - 1)

        if hasattr(obj, "__reduce_deploy__"):
            # Deduplicate by object identity so repeated references share one
            # reduce record.
            if _serialized_reduces.get(id(obj)) is None:
                _serialized_reduces[id(obj)] = (
                    "reduce_deploy",
                    id(obj),
                    *obj.__reduce_deploy__(importers),
                )
            return _serialized_reduces[id(obj)]

        return None

    # Write the pickle data for `obj`
    data_buf = io.BytesIO()
    pickler = create_pickler(data_buf, importers)
    pickler.persistent_id = persistent_id
    pickler.dump(obj)
    data_value = data_buf.getvalue()
    return (
        data_value,
        serialized_storages,
        serialized_dtypes,
        importer.zip_reader if importer else None,
    )
|
59 |
+
|
60 |
+
|
61 |
+
def _load_storages(id, zip_reader, obj_bytes, serialized_storages, serialized_dtypes):
    # Inverse of _save_storages: unpickle `obj_bytes`, resolving the
    # out-of-band tokens ("storage"/"reduce_deploy") produced by its
    # persistent_id hook. The result is cached in _deploy_objects under `id`.
    def persistent_load(saved_id):
        # Unpickler hook: given a token emitted by persistent_id, return the
        # reconstructed object.
        assert isinstance(saved_id, tuple)
        typename = _maybe_decode_ascii(saved_id[0])
        data = saved_id[1:]

        if typename == "storage":
            # TODO: Once we decide to break serialization FC, we can
            # stop wrapping with TypedStorage
            storage = serialized_storages[data[0]]
            dtype = serialized_dtypes[data[0]]
            return torch.storage.TypedStorage(
                wrap_storage=storage.untyped(), dtype=dtype
            )

        if typename == "reduce_deploy":
            # Deduplicate by the reduce id captured at save time; the reduce
            # function is applied against the package for this zip_reader.
            reduce_id, func, args = data
            if reduce_id not in _loaded_reduces:
                _loaded_reduces[reduce_id] = func(_raw_packages[zip_reader], *args)
            return _loaded_reduces[reduce_id]

        return None

    importer: Importer
    if zip_reader is not None:
        # Prefer names from the package, falling back to the system importer.
        importer = OrderedImporter(_get_package(zip_reader), sys_importer)
    else:
        importer = sys_importer

    unpickler = PackageUnpickler(importer, io.BytesIO(obj_bytes))
    unpickler.persistent_load = persistent_load  # type: ignore[assignment]
    result = _deploy_objects[id] = unpickler.load()
    return result
|
94 |
+
|
95 |
+
|
96 |
+
def _get_package(zip_reader):
    """Return the (cached) PackageImporter for this zip_reader.

    Each zip_reader gets exactly one PackageImporter, memoized in the
    module-level _raw_packages dict.
    """
    package = _raw_packages.get(zip_reader)
    if package is None:
        package = PackageImporter(zip_reader)
        _raw_packages[zip_reader] = package
    return package
|
100 |
+
|
101 |
+
|
102 |
+
_raw_packages: dict = {}
|
103 |
+
_deploy_objects: dict = {}
|
104 |
+
_serialized_reduces: dict = {}
|
105 |
+
_loaded_reduces: dict = {}
|
env-llmeval/lib/python3.10/site-packages/torch/_guards.py
ADDED
@@ -0,0 +1,833 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from __future__ import annotations
|
2 |
+
|
3 |
+
import contextlib
|
4 |
+
|
5 |
+
import dataclasses
|
6 |
+
import enum
|
7 |
+
import functools
|
8 |
+
import logging
|
9 |
+
import threading
|
10 |
+
import traceback
|
11 |
+
import unittest.mock
|
12 |
+
import weakref
|
13 |
+
from abc import ABC, abstractmethod
|
14 |
+
from contextlib import contextmanager
|
15 |
+
from typing import (
|
16 |
+
Any,
|
17 |
+
Callable,
|
18 |
+
Dict,
|
19 |
+
Generic,
|
20 |
+
List,
|
21 |
+
NamedTuple,
|
22 |
+
Optional,
|
23 |
+
Set,
|
24 |
+
Tuple,
|
25 |
+
TYPE_CHECKING,
|
26 |
+
TypeVar,
|
27 |
+
)
|
28 |
+
|
29 |
+
import torch
|
30 |
+
from torch.utils import _pytree as pytree
|
31 |
+
from torch.utils._traceback import CapturedTraceback
|
32 |
+
from torch.utils.weak import WeakTensorKeyDictionary
|
33 |
+
|
34 |
+
log = logging.getLogger(__name__)
|
35 |
+
|
36 |
+
|
37 |
+
if TYPE_CHECKING:
|
38 |
+
# Import the following modules during type checking to enable code intelligence features,
|
39 |
+
# such as auto-completion in tools like pylance, even when these modules are not explicitly
|
40 |
+
# imported in user code.
|
41 |
+
|
42 |
+
import sympy
|
43 |
+
|
44 |
+
|
45 |
+
"""
|
46 |
+
torch._guards is the definitional source of truth for general purpose guard structures.
|
47 |
+
|
48 |
+
An important thing to keep in mind here is the preservation of layering. There should be no dynamo notions,
|
49 |
+
and no guard installation notions here.
|
50 |
+
"""
|
51 |
+
|
52 |
+
|
53 |
+
class CompileId(NamedTuple):
    """Identifies a single compilation of a single frame."""

    frame_id: int
    # Counts how many times this particular frame has been compiled. Kept
    # per-frame (rather than global) so the number reads directly as "how
    # many recompiles so far" for that frame.
    frame_compile_id: int
    # TODO: consider also tracking the recompilation count

    def __str__(self):
        # Rendered as "frame_id/frame_compile_id", e.g. "0/1".
        return "{}/{}".format(self.frame_id, self.frame_compile_id)
|
64 |
+
|
65 |
+
|
66 |
+
class TraceId(NamedTuple):
    """A CompileId plus which analysis attempt this trace belongs to."""

    compile_id: CompileId
    # 0 for the first analysis; bumped by one each time analysis restarts.
    attempt: int

    def __str__(self):
        # First attempt renders as the bare compile id; restarts get an
        # "_<attempt>" suffix.
        base = str(self.compile_id)
        return base if self.attempt == 0 else f"{base}_{self.attempt}"
|
77 |
+
|
78 |
+
|
79 |
+
class GuardSource(enum.Enum):
    """Classifies where a guarded value comes from (locals, globals, nn
    modules, FSDP modules, constants, ...)."""

    LOCAL = 0
    GLOBAL = 1
    LOCAL_NN_MODULE = 2
    GLOBAL_NN_MODULE = 3
    CONSTANT = 4
    RANDOM_VALUE = 5
    SHAPE_ENV = 6
    LOCAL_FSDP_MODULE = 7
    GLOBAL_FSDP_MODULE = 8

    def is_fsdp_module(self) -> bool:
        # True for either flavor (local/global) of FSDP module source.
        return self in (GuardSource.LOCAL_FSDP_MODULE, GuardSource.GLOBAL_FSDP_MODULE)

    def is_nn_module(self) -> bool:
        # FSDP modules are nn modules too.
        return self.is_fsdp_module() or self in (
            GuardSource.GLOBAL_NN_MODULE,
            GuardSource.LOCAL_NN_MODULE,
        )

    def is_local(self):
        # Any of the LOCAL_* sources, regardless of module-ness.
        return self in (
            GuardSource.LOCAL,
            GuardSource.LOCAL_NN_MODULE,
            GuardSource.LOCAL_FSDP_MODULE,
        )
|
109 |
+
|
110 |
+
|
111 |
+
"""
|
112 |
+
Base class for a "GuardBuilder" role.
|
113 |
+
|
114 |
+
The GuardBuilderBase role is to represent a scope within which to build a guard. The name is a little
|
115 |
+
confusing, as its not a builder, but for the sake of avoiding a lot of renames and keeping the original reference
|
116 |
+
to torchdynamo's GuardBuilder.
|
117 |
+
|
118 |
+
Note: create_fn is invoked with a GuardBuilderBase and a Guard. A GuardBuilder is chosen based
|
119 |
+
on GuardSource's select function.
|
120 |
+
|
121 |
+
There is value in keeping this GuardBuilderBase empty to keep layering clean.
|
122 |
+
"""
|
123 |
+
|
124 |
+
|
125 |
+
class GuardBuilderBase:
    # Intentionally empty "role" base class: it represents a scope within
    # which a guard is built (see the module-level comment above this class).
    # Keeping it empty preserves the layering between torch._guards and the
    # dynamo GuardBuilder that subclasses/uses it.
    pass
|
127 |
+
|
128 |
+
|
129 |
+
class ShapeGuard(NamedTuple):
    # The symbolic (sympy) expression that must hold for this shape guard.
    expr: sympy.Expr
    # Traceback captured where the guard was created, kept for debugging /
    # error reporting.
    stack: CapturedTraceback
|
132 |
+
|
133 |
+
|
134 |
+
@dataclasses.dataclass
class Guard:
    # originating_source is the source that called the make_guard method to
    # construct this guard object. The property name specifies what exactly it
    # is the guard is guarding on. The meaning of the name is dependent on the
    # create_fn; you must look at the use-site inside create_fn to know what
    # name means.
    #
    # That being said, although you might think this is just a "name", name is
    # usually an arbitrary Python expression that will be evaluated with all
    # globals (and locals, if you create a LOCAL guard) to extract the Python
    # object that we want to perform guard tests on. This evaluation
    # typically happens in GuardBuilder.eval. In these cases, name is
    # typically produced by originating_source.name() (not to be confused with
    # GuardSource - the property source).
    #
    # Occasionally, name is not a valid Python expression; sometimes
    # it is meaningless. Example create_fns that are like this include
    # GRAD_MODE and SHAPE_ENV.
    originating_source: Source
    create_fn: Callable[[GuardBuilderBase, Guard], None]

    # Export only. These values are written to at time of guard check_fn creation.
    guard_types: Optional[List[str]] = None
    code_list: Optional[List[str]] = None
    obj_weakref: Optional[object] = None
    guarded_class_weakref: Optional[type] = None

    # Class-level (not dataclass fields): populated externally with creation
    # tracebacks and a memoized hash.
    stack = None
    user_stack = None
    _hash = None

    def __hash__(self):
        # Memoized; keyed on (name, source, identity of create_fn).
        if self._hash is None:
            self._hash = hash((self.name, self.source, id(self.create_fn)))
        return self._hash

    def sort_key(self):
        # Stable ordering key: by source kind, then by name (shorter first),
        # then by where the create_fn was defined.
        return (
            self.source.value if self.source else -1,
            len(self.name),
            self.name,
            self.inner_create_fn().__code__.co_firstlineno,
        )

    def __lt__(self, other):
        return self.sort_key() < other.sort_key()

    def inner_create_fn(self):
        # Unwrap functools.partial so callers can reach the underlying
        # function's attributes (e.g. __name__, __code__).
        if isinstance(self.create_fn, functools.partial):
            return self.create_fn.func
        else:
            return self.create_fn

    @property
    def name(self) -> str:
        # See the long comment above originating_source for what "name" means.
        return self.originating_source.name()

    @property
    def source(self) -> GuardSource:
        return self.originating_source.guard_source()

    @staticmethod
    def weakref_to_str(obj_weakref):
        """
        This is a workaround of a Python weakref bug.

        `obj_weakref` is instance returned by `weakref.ref`,
        `str(obj_weakref)` is buggy if the original obj overrides __getattr__, e.g:

            class MyConfig(dict):
                def __getattr__(self, x):
                    return self[x]

            obj = MyConfig(offset=5)
            obj_weakref = weakref.ref(obj)
            str(obj_weakref)  # raise error: KeyError: '__name__'
        """
        if isinstance(obj_weakref, weakref.ReferenceType):
            obj = obj_weakref()
            if obj is not None:
                return f"<weakref at {hex(id(obj_weakref))}; to '{obj.__class__.__name__}' at {hex(id(obj))}>"
            else:
                return f"<weakref at {hex(id(obj_weakref))}; dead>"
        else:
            return str(obj_weakref)

    def __repr__(self):
        # NOTE: the exact whitespace of this multi-line f-string is part of
        # the rendered output; do not reflow it.
        s = f"""
        {self.source.name.lower() if self.source else ""} {repr(self.name)} {self.inner_create_fn().__name__}
        {{
            'guard_types': {self.guard_types},
            'code': {self.code_list},
            'obj_weakref': {self.weakref_to_str(self.obj_weakref)}
            'guarded_class': {self.guarded_class_weakref}
        }}
        """
        return s

    def __str__(self):
        # Human-readable multi-line summary (one labeled field per line).
        output = f"Name: {repr(self.name)}\n"
        source = self.source.name.lower() if self.source else ""
        output += f"    Source: {source}\n"
        output += f"    Create Function: {self.inner_create_fn().__name__}\n"
        output += f"    Guard Types: {self.guard_types}\n"
        output += f"    Code List: {self.code_list}\n"
        output += f"    Object Weakref: {self.weakref_to_str(self.obj_weakref)}\n"
        output += f"    Guarded Class Weakref: {self.guarded_class_weakref}\n"
        return output

    def create(self, builder: GuardBuilderBase):
        # Run create_fn against the builder; on failure, log this guard and
        # (if available) where it was created before re-raising.
        try:
            return self.create_fn(builder, self)
        except Exception:
            log.error("Error while creating guard:\n%s", str(self).rstrip())
            if self.stack:
                log.error("Created at:\n%s", "".join(self.stack.format()[-4:]).rstrip())
            raise

    def is_nn_module(self):
        return self.source.is_nn_module()

    def is_fsdp_module(self):
        return self.source.is_fsdp_module()

    def is_local(self):
        return self.source.is_local()

    def set_export_info(self, guard_type, guarded_class, code_list, obj_weakref):
        # Export-time bookkeeping: accumulate guard types / code, and assert
        # the guarded class and object stay consistent across calls.
        if not self.guard_types:
            self.guard_types = list()

        self.guard_types.append(guard_type)

        assert self.guarded_class_weakref in (
            guarded_class,
            None,
        ), "Guarded class id must be identical, or None"
        self.guarded_class_weakref = guarded_class

        if not self.code_list:
            self.code_list = code_list
        else:
            self.code_list.extend(code_list)

        assert self.obj_weakref in (
            obj_weakref,
            None,
        ), "Guarded object must be identical, or None"
        self.obj_weakref = obj_weakref
|
284 |
+
|
285 |
+
|
286 |
+
T = TypeVar("T")
|
287 |
+
|
288 |
+
"""
|
289 |
+
Parent structure for guard env expressions.
|
290 |
+
A GuardEnvExpr can have any subtype.
|
291 |
+
Note: All subtypes must be handled exhaustively in
|
292 |
+
torch._dynamo.guards._parse_guard_env_guards to avoid a RuntimeError.
|
293 |
+
"""
|
294 |
+
|
295 |
+
|
296 |
+
@dataclasses.dataclass
class GuardEnvExpr:
    # Parent marker type for guard env expressions; all subtypes must be
    # handled exhaustively in torch._dynamo.guards._parse_guard_env_guards.
    pass
|
299 |
+
|
300 |
+
|
301 |
+
"""
|
302 |
+
A class representing a pair of duplicate inputs.
|
303 |
+
input_pos_a and input_pos_b are input positions we have deduped.
|
304 |
+
"""
|
305 |
+
|
306 |
+
|
307 |
+
@dataclasses.dataclass
class DuplicateInputs(GuardEnvExpr):
    # The two distinct input sources that were deduplicated against each other.
    input_source_a: Source
    input_source_b: Source

    def __post_init__(self):
        # A source can never be a duplicate of itself.
        assert self.input_source_a != self.input_source_b
|
314 |
+
|
315 |
+
|
316 |
+
"""
|
317 |
+
Checkpointable is an interface for driving state snapshotting, left purposely vague for now.
|
318 |
+
|
319 |
+
copy_graphstate() -> T, a somewhat legacy name, is expected to emit a snapshot of any type that
|
320 |
+
can also be taken in at restore_graphstate(T) calls.
|
321 |
+
|
322 |
+
When to snapshot, is, at the moment, an implementation detail of upstream callers. Checkpointable
|
323 |
+
does not provide any guarantees around consistency, idempotency, or safety of calling its APIs, yet.
|
324 |
+
|
325 |
+
In the future, it will have a closer coupling to a generic Checkpoint management system.
|
326 |
+
"""
|
327 |
+
|
328 |
+
|
329 |
+
class Checkpointable(ABC, Generic[T]):
    """Interface for driving state snapshotting of type T (see the comment
    above for caveats around consistency/idempotency guarantees)."""

    @abstractmethod
    def copy_graphstate(self) -> T:
        # Emit a snapshot that restore_graphstate can later consume.
        ...

    @abstractmethod
    def restore_graphstate(self, state: T):
        # Restore from a snapshot produced by copy_graphstate.
        ...
|
337 |
+
|
338 |
+
|
339 |
+
"""
|
340 |
+
The GuardCheckpointState - it is the T of Checkpointable[T] for GuardsContext
|
341 |
+
"""
|
342 |
+
|
343 |
+
|
344 |
+
class GuardsCheckpointState:
    """Snapshot of the dynamo guards set; the T of Checkpointable[T] for GuardsContext."""

    dynamo_guards: Set[Guard] = set()

    def __init__(self, dynamo_guards):
        self.dynamo_guards = dynamo_guards

    def diff(self, other):
        """
        Produces a delta against another GuardsCheckpointState.

        Returns None if no delta is found, otherwise, return a set() of mismatched
        Guard type objects.
        """
        delta = self.dynamo_guards.difference(other.dynamo_guards)
        return delta if delta else None

    def __eq__(self, other):
        # NOTE: one-directional — equal iff *self* holds nothing *other* lacks.
        return self.diff(other) is None
|
365 |
+
|
366 |
+
|
367 |
+
class ModuleContextCheckpointState:
    """Snapshot of the nn_modules mapping; the T of Checkpointable[T] for ModuleContext."""

    nn_modules: Dict[str, torch.nn.Module] = {}

    def __init__(self, nn_modules):
        self.nn_modules = nn_modules

    def diff(self, other):
        """
        Produces a delta against another ModuleContextCheckpointState.

        Returns None if no delta is found, otherwise, return a set() of mismatched
        module key names.
        """
        missing = set(self.nn_modules).difference(other.nn_modules)
        return missing if missing else None

    def __eq__(self, other):
        return self.diff(other) is None
|
388 |
+
|
389 |
+
|
390 |
+
class ModuleContext(Checkpointable[ModuleContextCheckpointState]):
    """Checkpointable mapping of module names to modules seen while tracing."""

    def __init__(self):
        self.nn_modules: Dict[str, Any] = {}

    def copy_graphstate(self):
        # Shallow-copy so later mutation of the live dict does not leak
        # into the snapshot.
        snapshot = dict(self.nn_modules)
        return ModuleContextCheckpointState(snapshot)

    def restore_graphstate(self, state):
        # Adopt the snapshot's mapping directly (no copy taken).
        assert isinstance(state, ModuleContextCheckpointState)
        self.nn_modules = state.nn_modules
|
400 |
+
|
401 |
+
|
402 |
+
class GlobalContextCheckpointState:
    """Snapshot of recorded global torch state; the T of Checkpointable[T] for GlobalContext."""

    global_state: Dict[str, Tuple[Callable, ...]] = {}

    def __init__(self, global_states):
        self.global_state = global_states

    def diff(self, other):
        """
        Produces a delta against another GlobalContextCheckpointState.

        Returns None if no delta is found, otherwise, return a set() of mismatched
        global key names.
        """
        missing = set(self.global_state).difference(other.global_state)
        return missing if missing else None

    def __eq__(self, other):
        return self.diff(other) is None
|
423 |
+
|
424 |
+
|
425 |
+
class GlobalContext(Checkpointable[GlobalContextCheckpointState]):
    """
    This keeps track of the global torch state during tracing of a function.
    For example, torch.is_grad_enabled.
    """

    # Every key that must be present in global_state at restore time.
    _supported_global_states = {
        "grad_enabled",
        "torch_function_enabled",
        "autocast_enabled",
        "autocast_cpu_enabled",
        "autocast_gpu_dtype",
        "autocast_cpu_dtype",
        "autocast_cache_enabled",
    }

    def __init__(self):
        self.global_state: Dict[str, Tuple[Callable, ...]] = {}

    def copy_graphstate(self):
        # Shallow-copy the recorded (setter, args) pairs.
        return GlobalContextCheckpointState(dict(self.global_state))

    def restore_graphstate(self, state):
        """Adopt *state* and re-apply every recorded (setter, args) pair."""
        assert isinstance(state, GlobalContextCheckpointState)
        self.global_state = state.global_state
        assert (
            len(self.global_state) == len(self._supported_global_states)
            and set(self.global_state.keys()) == self._supported_global_states
        ), "Global state mismatch"
        for setter, args in self.global_state.values():
            setter(args)
|
456 |
+
|
457 |
+
|
458 |
+
"""
|
459 |
+
A GuardsContext is a checkpointable representation of all the guards in the current tracing
|
460 |
+
context. Its lifecycle is bound 1:1 to the tracing context, and it should never be instantiated
|
461 |
+
directly outside of it. For passing around internal state representations of this object,
|
462 |
+
prefer to extract them with copy_graphstate to produce a GuardsCheckpointState.
|
463 |
+
"""
|
464 |
+
|
465 |
+
|
466 |
+
# Like a Set[Guard] but will record the user stack on all guards at the
|
467 |
+
# time they were installed at their destination
|
468 |
+
class GuardsSet:
    """Like a Set[Guard], but records stacks on guards as they are installed."""

    def __init__(self, inner=None):
        self.inner = set() if inner is None else inner

    def __iter__(self):
        return iter(self.inner)

    def __len__(self):
        return len(self.inner)

    # Subtraction along with bool is typically used to determine the delta of
    # added guards between checkpoints for higher order ops
    def __sub__(self, other):
        return GuardsSet(self.inner - other.inner)

    def __bool__(self):
        return bool(self.inner)

    def add(self, guard: Guard, *, skip=0):
        """Insert *guard*, stamping creation/user stacks on first insertion."""
        if guard in self.inner:
            return
        if guard.stack is None:
            guard.stack = CapturedTraceback.extract(skip=1 + skip)
        if guard.user_stack is None:
            guard.user_stack = TracingContext.extract_stack()
        self.inner.add(guard)

    def update(self, *others: Set[Guard]):
        # skip=1 so the recorded stack points at update's caller, not here.
        for group in others:
            for g in group:
                self.add(g, skip=1)
|
501 |
+
|
502 |
+
|
503 |
+
class GuardsContext(Checkpointable[GuardsCheckpointState]):
    """Checkpointable home of all guards accumulated for the current tracing context."""

    def __init__(self):
        self.dynamo_guards: GuardsSet = GuardsSet()
        self.aotautograd_guards: List[GuardEnvExpr] = []

    def copy_graphstate(self):
        # Only the dynamo guards participate in checkpointing.
        snapshot = set(self.dynamo_guards.inner)
        return GuardsCheckpointState(snapshot)

    def restore_graphstate(self, state):
        # NB: "steals" the passed in state
        assert isinstance(state, GuardsCheckpointState)
        self.dynamo_guards = GuardsSet(state.dynamo_guards)
|
515 |
+
|
516 |
+
|
517 |
+
_TLS = threading.local()
|
518 |
+
|
519 |
+
"""
|
520 |
+
TracingContext is the source of truth for all currently accumulated information
|
521 |
+
needed to trace. Its lifecycle is kept 1:1 when using TorchDynamo, but other systems
|
522 |
+
are open to managing their own TracingContext with that in mind.
|
523 |
+
|
524 |
+
The purpose of TracingContext is not to be a dumping ground, or god object, but rather to avoid
|
525 |
+
having to plumb complex subsystems across multiple verticals.
|
526 |
+
|
527 |
+
Ex: A common example is guard accumulation between dynamo, shape_env, aot_autograd, and inductor.
|
528 |
+
Accessing the current tracing context via
|
529 |
+
TracingContext.get() allows users to accumulate their own guards for processing, without needing to know how
|
530 |
+
to plumb objects back up to where frame interpretation happened.
|
531 |
+
|
532 |
+
Note that you can end up with multiple TracingContext for a single compilation
|
533 |
+
of a frame, as we reset the TracingContext whenever we restart analysis.
|
534 |
+
CompileContext is a more overarching context that encompasses multiple restarts.
|
535 |
+
"""
|
536 |
+
|
537 |
+
|
538 |
+
class CompileContext:
    """Thread-local per-compilation context: carries the compile id and attempt count."""

    @staticmethod
    def get() -> CompileContext:
        assert _TLS.compile_context is not None
        return _TLS.compile_context

    @staticmethod
    def try_get() -> Optional[CompileContext]:
        return getattr(_TLS, "compile_context", None)

    def __init__(self, compile_id):
        assert compile_id is None or isinstance(compile_id, CompileId)
        self.compile_id: Optional[CompileId] = compile_id
        self.attempt = 0

    @staticmethod
    def current_compile_id():
        """Compile id of the installed context, or None when none is installed."""
        ctx = CompileContext.try_get()
        if ctx is None:
            return None
        return ctx.compile_id

    @staticmethod
    def current_trace_id():
        """TraceId(compile_id, attempt) for the installed context, or None."""
        ctx = CompileContext.try_get()
        if ctx is None or ctx.compile_id is None:
            return None
        return TraceId(ctx.compile_id, ctx.attempt)
|
568 |
+
|
569 |
+
|
570 |
+
class TracingContext:
    """
    Provides the currently installed TracingContext, or None.

    Note that it is a staticmethod, and invocations outside of `with tracing()` (see below), are valid but
    will return None.
    """

    @staticmethod
    def try_get() -> Optional[TracingContext]:
        # Thread-local lookup; None when no `with tracing()` scope is active.
        return getattr(_TLS, "tracing_context", None)

    @staticmethod
    def get() -> TracingContext:
        # Strict variant of try_get: raises instead of returning None.
        if ctx := TracingContext.try_get():
            return ctx
        raise RuntimeError(
            "TracingContext.get() must be called within an ongoing trace."
        )

    def __init__(self, fake_mode):
        # Accumulators for guards / modules / global torch state seen while tracing.
        self.guards_context = GuardsContext()
        self.module_context = ModuleContext()
        self.global_context = GlobalContext()
        # Fake tensor mode for this trace (may be None).
        self.fake_mode = fake_mode
        # Stack of frame summaries for frames currently being traced.
        self.frame_summary_stack = []
        # This is morally part of frame_summary_stack, but it is kept separate
        # for clarity. As we process a frame, this variable gets updated
        # to keep track of what line we are in the function. We make a
        # function call, this gets cleared and the frame location is pushed
        # to frame_summary_stack (prepping this variable for the inner frame's
        # progress)
        self.loc_in_frame = None
        # this is only set after aot_autograd
        self.fw_metadata = None
        self.params_flat = None
        # this is for extended return calling convention from backend
        # compiler to aot_autograd
        # Per output, what the compiler specified stride of the output is,
        # or None if no stride is known. This is always the HINT, it
        # is never a SymInt (it would be better if it was a SymInt, but
        # I can't conveniently get this from Inductor atm. Also, be
        # careful not to accidentally induce guards on the SymInt if
        # you ever do change this in aot_autograd.py; you should check
        # on permutations preferentially.)
        self.output_strides: Optional[List[Optional[List[int]]]] = None
        # When this is True, whenever we encounter an int in Dynamo tracing,
        # we will (1) force unspec it and (2) force it as a size-like unbacked
        # integer. This is currently used when processing certain lists of
        # ints that are known to be size-like and may have 0/1 entries that we
        # must not specialize on.
        self.force_unspec_int_unbacked_size_like = False
        # See note [Tensor Fakification and Symbol Caching]
        self.tensor_to_context = WeakTensorKeyDictionary()

    @staticmethod
    @contextmanager
    def patch(**kwargs):
        # Temporarily set attributes on the current context, restoring on exit.
        prior = {}
        ctx = TracingContext.get()

        for key in kwargs.keys():
            # AttributeError on invalid entry
            prior[key] = getattr(ctx, key)
        for key, val in kwargs.items():
            setattr(ctx, key, val)
        try:
            yield
        finally:
            for key, val in prior.items():
                setattr(ctx, key, val)

    @staticmethod
    def extract_stack():
        # Snapshot of the user stack: the frame stack plus the current
        # in-frame location; empty summary when no context is installed.
        self = TracingContext.try_get()
        if self is None:
            return traceback.StackSummary()
        stack = list(self.frame_summary_stack)
        if self.loc_in_frame is not None:
            stack.append(self.loc_in_frame)
        return traceback.StackSummary.from_list(stack)

    # Call this when you want to call into some code that isn't necessarily
    # associated with the current frame state
    @staticmethod
    @contextlib.contextmanager
    def clear_frame():
        tc = TracingContext.get()
        with unittest.mock.patch.object(
            tc, "frame_summary_stack", []
        ), unittest.mock.patch.object(tc, "loc_in_frame", None):
            try:
                yield
            except Exception as e:
                # Prevent real_stack from getting attached
                #
                # The invariant is that if an Exception has real_stack, we've
                # appropriately attached a user stack and we no longer need to
                # attach anything. Because we cannot conveniently interpose
                # when an exception is thrown, we instead interpose everywhere
                # we set what the user stack is set (using the context
                # manager). However, our compiler stack does "tail calls"
                # (when it calls into user compiler), at which point the
                # parent exception frames would incorrectly attach an
                # incorrect frame.
                #
                # However, if, somehow, someone raised an exception with this
                # scope that had a stack (for example, because they are
                # restoring the user stack state appropriately as they process
                # node by node), we should respect it. Thus, we cannot
                # unconditionally set None.
                if not hasattr(e, "real_stack"):
                    e.real_stack = None  # type: ignore[attr-defined]
                raise

    @staticmethod
    @contextlib.contextmanager
    def current_frame(frame_summary):
        # Push *frame_summary* for the duration of the block; attach
        # real_stack to escaping exceptions exactly once.
        # frame_summary can be None to solely take advantage of real_stack
        # attachment to thrown exceptions
        tc = TracingContext.get()
        if frame_summary is not None:
            tc.frame_summary_stack.append(frame_summary)
        old = tc.loc_in_frame
        tc.loc_in_frame = None
        try:
            yield
        except Exception as e:
            if not hasattr(e, "real_stack"):
                e.real_stack = tc.extract_stack()  # type: ignore[attr-defined]
            raise
        finally:
            if frame_summary is not None:
                tc.frame_summary_stack.pop()
            tc.loc_in_frame = old

    @staticmethod
    @contextlib.contextmanager
    def report_output_strides():
        # Collect compiler-reported output strides into a fresh list for the
        # duration of the block; yields None when no context is installed.
        tc = TracingContext.try_get()
        if tc is None:
            yield None
            return
        old_output_strides = tc.output_strides
        tc.output_strides = []
        try:
            yield tc.output_strides
        finally:
            tc.output_strides = old_output_strides

    @staticmethod
    def set_current_loc(filename, lineno, frame_name):
        # Record the current source location within the frame being traced.
        TracingContext.get().loc_in_frame = traceback.FrameSummary(
            filename, lineno, frame_name
        )
|
725 |
+
|
726 |
+
|
727 |
+
@contextmanager
def compile_context(context: CompileContext):
    """Install *context* as the thread-local CompileContext for the block's duration."""
    prev = getattr(_TLS, "compile_context", None)
    _TLS.compile_context = context
    try:
        yield context
    finally:
        # Always restore whatever was installed before, even on error.
        _TLS.compile_context = prev
|
735 |
+
|
736 |
+
|
737 |
+
@contextmanager
def tracing(context: Optional[TracingContext]):
    """
    This function installs the passed in tracing context as a dynamic scoped
    global variable.

    Calls to TracingContext.try_get() while not under a `with tracing()`
    context will return None.
    """
    prev = getattr(_TLS, "tracing_context", None)
    _TLS.tracing_context = context
    try:
        yield context
    except Exception as e:
        # Attach the user stack to escaping exceptions exactly once.
        if not hasattr(e, "real_stack") and context is not None:
            e.real_stack = context.extract_stack()  # type: ignore[attr-defined]
        raise
    finally:
        # Give the shape env a chance to clean per-trace state before we
        # uninstall the context.
        if (
            context is not None
            and context.fake_mode is not None
            and context.fake_mode.shape_env is not None
        ):
            context.fake_mode.shape_env.cleanup()
        _TLS.tracing_context = prev
|
762 |
+
|
763 |
+
|
764 |
+
# Subclasses can be found in torch/_dynamo/source.py
|
765 |
+
# TODO(voz): Consider a toplevel torch/_source.py
|
766 |
+
@dataclasses.dataclass(frozen=True)
class Source:
    """Abstract description of where a traced value came from; guards are
    built against it. Subclasses can be found in torch/_dynamo/source.py."""

    def reconstruct(self, codegen):
        # Implemented by subclasses.
        raise NotImplementedError()

    def guard_source(self) -> GuardSource:
        # Implemented by subclasses.
        raise NotImplementedError()

    def name(self) -> str:
        # Implemented by subclasses.
        raise NotImplementedError()

    def make_guard(self, fn) -> Guard:
        # Constants cannot be guarded on; every other source wraps itself in
        # a Guard with the given create function.
        if self.guard_source() is GuardSource.CONSTANT:
            raise NotImplementedError()
        return Guard(self, fn)

    def is_nn_module(self) -> bool:
        return self.guard_source().is_nn_module()
|
784 |
+
|
785 |
+
|
786 |
+
# Subclasses can be found in torch/_dynamo/source.py
|
787 |
+
@dataclasses.dataclass(frozen=True)
class ChainedSource(Source):
    # A source derived from another source (e.g. an attribute/index of it).
    base: Source
|
790 |
+
|
791 |
+
|
792 |
+
def detect_fake_mode(inputs: Any = None):
    """
    Attempts to "detect" what the current fake mode is. If there is one ambiently
    available from TracingContext, we preferentially use that. Otherwise, we
    heuristically detect the fake mode via the following sources, in order of
    priority:

    - Currently active fake mode on stack
    - Fake mode associated with passed in tensors (inputs does not
      have to be flattened)
    """
    from torch._subclasses.fake_tensor import FakeTensor, FakeTensorMode

    candidates = []

    if context := TracingContext.try_get():
        fake_mode = context.fake_mode
        if fake_mode is not None:
            candidates.append((fake_mode, "tracing context", 0))

    from torch.utils._python_dispatch import _get_current_dispatch_mode_stack

    for i, m in enumerate(reversed(_get_current_dispatch_mode_stack())):
        if isinstance(m, FakeTensorMode):
            candidates.append((m, "active fake mode", i))

    flat_inputs = pytree.tree_leaves(inputs)
    for i, flat_input in enumerate(flat_inputs):
        if isinstance(flat_input, FakeTensor):
            candidates.append((flat_input.fake_mode, "fake tensor input", i))

    if not candidates:
        return None

    # Every detected candidate must be the very same mode object; a mismatch
    # indicates mixed fake tensor usage.
    fake_mode, desc1, i1 = candidates[0]
    for m, desc2, i2 in candidates[1:]:
        assert fake_mode is m, (
            f"fake mode ({fake_mode}) from {desc1} {i1} doesn't match mode ({m}) from {desc2} {i2}\n\n"
            f"fake mode from {desc1} {i1} allocated at:\n{fake_mode.stack}\n"
            f"fake mode from {desc2} {i2} allocated at:\n{m.stack}"
        )
    return fake_mode
|
env-llmeval/lib/python3.10/site-packages/torch/_jit_internal.py
ADDED
@@ -0,0 +1,1510 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""
|
2 |
+
The weak_script annotation needs to be here instead of inside torch/jit/ so it
|
3 |
+
can be used in other places in torch/ (namely torch.nn) without running into
|
4 |
+
circular dependency problems
|
5 |
+
"""
|
6 |
+
|
7 |
+
import ast
|
8 |
+
import builtins
|
9 |
+
import collections
|
10 |
+
import contextlib
|
11 |
+
import enum
|
12 |
+
import inspect
|
13 |
+
import io
|
14 |
+
import pickle
|
15 |
+
import sys
|
16 |
+
import threading
|
17 |
+
import types
|
18 |
+
import typing
|
19 |
+
import warnings
|
20 |
+
import weakref
|
21 |
+
from textwrap import dedent
|
22 |
+
from typing import ( # noqa: F401
|
23 |
+
Any,
|
24 |
+
Callable,
|
25 |
+
Dict,
|
26 |
+
Final,
|
27 |
+
ForwardRef,
|
28 |
+
Generic,
|
29 |
+
get_args, # new in 3.8
|
30 |
+
get_origin, # new in 3.8
|
31 |
+
List,
|
32 |
+
Optional,
|
33 |
+
Tuple,
|
34 |
+
Type,
|
35 |
+
TypeVar,
|
36 |
+
Union,
|
37 |
+
)
|
38 |
+
|
39 |
+
import torch
|
40 |
+
|
41 |
+
# This is needed. `torch._jit_internal` is imported before `torch.distributed.__init__`.
|
42 |
+
# Explicitly ask to import `torch.distributed.__init__` first.
|
43 |
+
# Otherwise, "AttributeError: module 'torch' has no attribute 'distributed'" is raised.
|
44 |
+
import torch.distributed.rpc
|
45 |
+
import torch.package._mangling as package_mangling
|
46 |
+
from torch._awaits import _Await
|
47 |
+
from torch._C import _Await as CAwait, Future as CFuture
|
48 |
+
from torch._sources import fake_range, get_source_lines_and_file, parse_def
|
49 |
+
from torch.futures import Future
|
50 |
+
|
51 |
+
IS_PY39_PLUS: Final[bool] = sys.version_info >= (3, 9)
|
52 |
+
IS_PY310_PLUS: Final[bool] = sys.version_info >= (3, 10)
|
53 |
+
|
54 |
+
BuiltinUnionType: Union[Type, Tuple[Type, ...]]
|
55 |
+
if sys.version_info >= (3, 10):
|
56 |
+
# NOTE: IS_PY310_PLUS doesn't work with mypy.
|
57 |
+
# cf. https://mypy.readthedocs.io/en/stable/common_issues.html#python-version-and-system-platform-checks
|
58 |
+
BuiltinUnionType = types.UnionType
|
59 |
+
else:
|
60 |
+
BuiltinUnionType = () # trick: this makes isinstance short circuit.
|
61 |
+
|
62 |
+
LockType: Type
|
63 |
+
try:
|
64 |
+
import _thread
|
65 |
+
|
66 |
+
LockType = _thread.LockType
|
67 |
+
except ImportError:
|
68 |
+
import _dummy_thread
|
69 |
+
|
70 |
+
LockType = _dummy_thread.LockType
|
71 |
+
|
72 |
+
# Wrapper functions that can call either of 2 functions depending on a boolean
|
73 |
+
# argument
|
74 |
+
boolean_dispatched: "weakref.WeakKeyDictionary[Callable, Dict[str, Callable]]" = (
|
75 |
+
weakref.WeakKeyDictionary()
|
76 |
+
) # noqa: T484
|
77 |
+
|
78 |
+
|
79 |
+
FAKE_FILENAME_PREFIX = "__torch_jit_dataclass"
|
80 |
+
|
81 |
+
|
82 |
+
class SourceLoader:
    """In-memory cache mapping functions to their source text."""

    def __init__(self):
        # fn -> source string
        self.content = {}

    def cache(self, fn, source):
        """Remember *source* as the source text for *fn*."""
        self.content[fn] = source

    def get_source(self, fn):
        """Return the cached source for *fn*, or None if never cached."""
        return self.content.get(fn)
|
91 |
+
|
92 |
+
|
93 |
+
loader = SourceLoader()
|
94 |
+
|
95 |
+
|
96 |
+
def createResolutionCallbackFromEnv(lookup_base):
    """
    Creates a resolution callback that will look up qualified names in an
    environment, starting with `lookup_base` for the base of any qualified
    names, then proceeding down the lookup chain with the resolved object.

    You should not use this directly, it should only be used from the other
    createResolutionCallbackFrom* functions.
    """

    def lookupInModule(qualified_name, module):
        # Resolve dotted names one attribute at a time, recursively.
        if "." in qualified_name:
            parts = qualified_name.split(".")
            base = parts[0]
            remaining_pieces = ".".join(parts[1:])
            module_value = getattr(module, base)
            return lookupInModule(remaining_pieces, module_value)
        else:
            return getattr(module, qualified_name)

    def parseNestedExpr(expr, module) -> Tuple[Any, int]:
        # Returns (resolved value, number of characters consumed from expr).
        i = 0
        while i < len(expr) and expr[i] not in (",", "[", "]"):
            i += 1

        # Special case logic for the empty Tuple as a subscript (used
        # in the type annotation `Tuple[()]`)
        if expr[:i] == "()":
            return (), i

        base = lookupInModule(expr[:i].strip(), module)
        assert base is not None, f"Unresolvable type {expr[:i]}"
        if i == len(expr) or expr[i] != "[":
            return base, i

        assert expr[i] == "["
        parts = []
        while expr[i] != "]":
            # Skip over the "[" or "," and parse the next subscript element.
            # (A dead `part_len = 0` initializer was removed here: the value
            # is always assigned by the recursive call below.)
            i += 1
            part, part_len = parseNestedExpr(expr[i:], module)
            parts.append(part)
            i += part_len
        if len(parts) > 1:
            return base[tuple(parts)], i + 1
        else:
            return base[parts[0]], i + 1

    def parseExpr(expr, module):
        try:
            value, len_parsed = parseNestedExpr(expr, module)
            assert len_parsed == len(
                expr
            ), "whole expression was not parsed, falling back to c++ parser"
            return value
        except Exception:
            """
            The python resolver fails in several cases in known unit tests, and is intended
            to fall back gracefully to the c++ resolver in general. For example, python 2 style
            annotations which are frequent in our unit tests often fail with types e.g. int not
            resolvable from the calling frame.
            """
            return None

    return lambda expr: parseExpr(expr, lookup_base)
|
161 |
+
|
162 |
+
|
163 |
+
def createResolutionCallbackFromFrame(frames_up: int = 0):
    """
    Create a function that maps a string variable name to its value in the
    scope of the caller of the function which invoked
    createResolutionCallbackFromFrame (by default).

    This enables TorchScript fragments to access in-scope Python variables.

    frames_up is the number of additional frames to walk up the stack:
    0 (the default) resolves names in the frame of the caller of
    createResolutionCallbackFromFrame, 1 in that caller's caller, and so on.

    For example, the following program prints 2::

        def bar():
            cb = createResolutionCallbackFromFrame(1)
            print(cb("foo"))

        def baz():
            foo = 2
            bar()

        baz()
    """
    # Walk up the stack: one hop to reach our caller, plus frames_up extra.
    frame = inspect.currentframe()
    for _ in range(frames_up + 1):
        assert frame is not None
        frame = frame.f_back
    assert frame is not None

    captured_locals = frame.f_locals
    captured_globals = frame.f_globals

    class env:
        # Resolve names the way Python would in the captured frame:
        # locals first, then globals, then builtins (None otherwise).
        def __getattr__(self, key):
            if key in captured_locals:
                return captured_locals[key]
            if key in captured_globals:
                return captured_globals[key]
            if key in dir(builtins):
                return getattr(builtins, key)

    return createResolutionCallbackFromEnv(env())
|
211 |
+
|
212 |
+
|
213 |
+
def get_closure(fn):
    """
    Build a dict of the names visible to `fn`: its module globals plus any
    closed-over (free) variables, the latter taking precedence on collision.
    """
    captured = dict(fn.__globals__)

    closure_cells = fn.__closure__
    for idx, free_name in enumerate(fn.__code__.co_freevars):
        captured[free_name] = closure_cells[idx].cell_contents

    return captured
|
224 |
+
|
225 |
+
|
226 |
+
# [local resolution in python]
|
227 |
+
# Depending on where a variable is defined, and where it is used, we may
|
228 |
+
# or may not be able to recover its value when recursively compiling a
|
229 |
+
# script function. Remember in the general case, a module or function is
|
230 |
+
# first defined and then later scripted. This means we do not have a
|
231 |
+
# chance to capture the active frames when the function is defined. Hence any
|
232 |
+
# name resolution has to happen later on the created closure. The way
|
233 |
+
# python captures type annotations restricts what we can recover. The
|
234 |
+
# follow example illustrates the different cases:
|
235 |
+
#
|
236 |
+
# class MyGlobalClass:
|
237 |
+
# ...
|
238 |
+
# def my_local_scope():
|
239 |
+
# @torch.jit.script
|
240 |
+
# class MyClass:
|
241 |
+
# ...
|
242 |
+
# @torch.jit.script
|
243 |
+
# class MyClassUsedAsVar:
|
244 |
+
# ...
|
245 |
+
# def eg(x: MyClass, y: MyGlobalClass):
|
246 |
+
# a_local_capture : Foo
|
247 |
+
# return MyClassUsedAsVar(x)
|
248 |
+
#
|
249 |
+
# MyGlobalClass is defined in the __globals__ dictionary of function
|
250 |
+
# 'eg', so it is always recoverable. my_local_scope introduces a new local
|
251 |
+
# variable scope in the function. Classes defined here are only visible as
|
252 |
+
# local variables. For the case of MyClassUsedAsVar, it is captured
|
253 |
+
# because it is used as a variable inside the body of the function, and we
|
254 |
+
# can resolve it using the captures returned from `get_closure`. However,
|
255 |
+
# the type annotations are not captured by the closure. In Python
|
256 |
+
# 3.0--3.9, the _value_ of MyClass and MyGlobalClass will be available as
|
257 |
+
# annotations on `eg`, but starting in Python 4.0, they will be represented as
|
258 |
+
# strings and no longer present. Furthermore, since the body of `eg` does
|
259 |
+
# not reference those names, they do not appear in the list of closed over
|
260 |
+
# variables. In Python 2.x, type annotations are in comments, leading to a
|
261 |
+
# similar situation where their definitions are not available. We anticipate
|
262 |
+
# that most users will not run into this issue because their modules and
|
263 |
+
# functions will be defined at a global scope like MyGlobalClass. In cases
|
264 |
+
# where they are not, it is possible to work around issues by declaring the
|
265 |
+
# values global in the function.
|
266 |
+
# In Python 3.9 declaring class as global will make it invisible to
|
267 |
+
# `inspect.getsource`, see https://bugs.python.org/issue42666 .
|
268 |
+
# This could be worked around by manually adding it to the `globals()` dictionary.
|
269 |
+
|
270 |
+
|
271 |
+
def createResolutionCallbackFromClosure(fn):
    """
    Create a resolutionCallback by introspecting `fn` itself (its globals and
    closed-over variables) instead of walking the enclosing stack frames.
    """
    captured = get_closure(fn)

    class closure_lookup:
        # A class rather than a plain dict, so the env helper used by
        # `createResolutionCallbackFromEnv` can resolve everything via getattr.
        def __getattr__(self, key):
            if key in captured:
                return captured[key]
            if hasattr(typing, key):
                return getattr(typing, key)
            if hasattr(builtins, key):
                return getattr(builtins, key)
            return None

    return createResolutionCallbackFromEnv(closure_lookup())
|
291 |
+
|
292 |
+
|
293 |
+
def can_compile_class(cls) -> bool:
    """
    Return True if every routine defined on `cls` has Python bytecode behind
    it; a type whose methods lack a `__code__` object (builtin / C-bound
    callables) can't be compiled by TorchScript.
    """
    if is_ignored_fn(cls):
        return False

    # Ignore the following list of built-in classes.
    if issubclass(cls, (torch.nn.Module, tuple, list, Exception)):
        return False

    routines = (
        getattr(cls, attr)
        for attr in cls.__dict__
        if inspect.isroutine(getattr(cls, attr, None))
    )
    return all(hasattr(routine, "__code__") for routine in routines)
|
312 |
+
|
313 |
+
|
314 |
+
def get_callable_argument_names(fn) -> List[str]:
    """
    Get the names of all POSITIONAL_OR_KEYWORD arguments of callable `fn`,
    in declaration order; parameters of any other kind are skipped. Returns
    an empty list when the signature of `fn` cannot be inspected.

    This is used by `torch.jit.trace` to assign meaningful argument names to
    traced functions and modules.

    Args:
        fn: A callable.
    Returns:
        Argument names: List[str]
    """
    # inspect.signature may fail on some callables; give up in that case.
    try:
        sig = inspect.signature(fn)
    except Exception:
        return []

    # Only POSITIONAL_OR_KEYWORD parameters map to individual named values;
    # the other four parameter kinds are skipped.
    return [
        name
        for name, param in sig.parameters.items()
        if param.kind == param.POSITIONAL_OR_KEYWORD
    ]
|
343 |
+
|
344 |
+
|
345 |
+
def get_annotation_str(annotation):
    """
    Reconstruct the source-string form of a type-annotation AST node.

    Returns None for node types not handled here; those are expected to be
    dealt with by ScriptTypeParser instead.
    """
    if isinstance(annotation, ast.Name):
        return annotation.id
    if isinstance(annotation, ast.Attribute):
        return f"{get_annotation_str(annotation.value)}.{annotation.attr}"
    if isinstance(annotation, ast.Subscript):
        # In Python 3.9+ subscript indices are no longer wrapped in ast.Index.
        inner = annotation.slice if IS_PY39_PLUS else annotation.slice.value  # type: ignore[attr-defined]
        return get_annotation_str(annotation.value) + "[" + get_annotation_str(inner) + "]"
    if isinstance(annotation, ast.Tuple):
        return ",".join(map(get_annotation_str, annotation.elts))
    if isinstance(annotation, (ast.Constant, ast.NameConstant)):
        return str(annotation.value)

    # Unhandled node kinds fall through to ScriptTypeParser.
    return None
|
365 |
+
|
366 |
+
|
367 |
+
def get_type_hint_captures(fn):
    """
    Get a dictionary containing type resolution mappings necessary to resolve types
    for the literal annotations on 'fn'. These are not considered to be closed-over by fn
    and must be obtained separately (e.g. using this function).

    Args:
        fn: A callable.
    Returns:
        A Dict[str, Any] containing a mapping from the literal annotations used on
        fn to the Python objects they refer to.
    """
    # First, try to get the source of the function. We'll need to parse it to find the actual string names
    # that were used to annotate the types, since inspect.signature() will only return the class object that
    # the annotation refers to, not the string name. If the loader has no source, fall back to
    # inspect.getsource (NOTE(review): getsource raises OSError when source is unavailable — e.g. for
    # functions synthesized dynamically at runtime — rather than returning an empty dict; confirm intended).
    src = loader.get_source(fn)
    if src is None:
        src = inspect.getsource(fn)

    # Gather a dictionary of parameter name -> type, skipping any parameters whose annotated
    # types are strings. These are only understood by TorchScript in the context of a type annotation
    # that refers to a class in its own definition, but trying to include a mapping for this in the result
    # function would cause infinite recursion because the class is currently being compiled.
    # In addition, there is logic in ScriptTypeParser to handle this.
    signature = inspect.signature(fn)
    name_to_type = {
        name: parameter.annotation
        for name, parameter in signature.parameters.items()
        if parameter.annotation is not inspect.Parameter.empty
        and not isinstance(parameter.annotation, str)
    }

    # Then, get the literal type annotations from the function declaration
    # by source inspection. This accounts for the case in which aliases are used
    # to annotate the arguments (e.g device_t = torch.device, and then d: device_t).
    # frontend.py cannot be used here because it includes _jit_internal, so use ast instead.
    a = ast.parse(dedent(src))
    if len(a.body) != 1 or not isinstance(a.body[0], ast.FunctionDef):
        raise RuntimeError(f"Expected {fn} to be a function")
    f = a.body[0]

    # Prepare a dictionary of source annotation -> type, which will be the final result of this function,
    # by using the parsed AST (f) to reconstruct source annotations as strings for each parameter and mapping
    # them to the type object corresponding to the annotation via name_to_type using the parameter name.
    annotation_to_type = {}

    for arg in f.args.args:
        # Get the source type annotation string for this argument if possible.
        arg_annotation_str = (
            get_annotation_str(arg.annotation) if arg.annotation else None
        )

        # If the argument has no annotation or get_annotation_str cannot convert it to a string,
        # arg_annotation_str will be None. Skip this arg; ScriptTypeParser will probably handle
        # this in the latter case.
        if arg_annotation_str is None:
            continue

        # Insert {arg_annotation_str: type} into annotation_to_type if possible. One reason arg_name may not
        # be present in name_to_type is that the annotation itself is a string and not a type object
        # (common for self-referential annotations in classes). Once again, let ScriptTypeParser handle this.
        arg_name = arg.arg
        if arg_name in name_to_type:
            annotation_to_type[arg_annotation_str] = name_to_type[arg_name]

    # If there is a valid return annotation, include it in annotation_to_type. As with argument annotations,
    # the literal annotation has to be convertible to a string by get_annotation_str, and the actual type
    # of the annotation cannot be a string.
    literal_return_annotation = get_annotation_str(f.returns)
    valid_literal_annotation = literal_return_annotation is not None
    return_annotation = signature.return_annotation
    valid_return_annotation_type = (
        return_annotation is not inspect.Parameter.empty
        and not isinstance(return_annotation, str)
    )
    if valid_literal_annotation and valid_return_annotation_type:
        annotation_to_type[literal_return_annotation] = return_annotation

    return annotation_to_type
|
447 |
+
|
448 |
+
|
449 |
+
def createResolutionCallbackForClassMethods(cls):
    """
    Collect the closed-over variables and type-hint captures of every method
    defined on `cls` into a single dictionary and return a lookup function
    that resolves names against it, falling back to builtins.
    """
    # cls is a type here, so its entries are plain functions rather than
    # bound methods (`ismethod` would be False for them).
    methods = [
        getattr(cls, attr)
        for attr in cls.__dict__
        if inspect.isroutine(getattr(cls, attr))
    ]
    # Skip built-ins, as they do not have global scope nor type hints.
    # Needed to support `enum.Enum` derived classes in Python-3.11,
    # which adds a `_new_member_` property aliasing `__new__`.
    methods = [
        m for m in methods if not inspect.isbuiltin(m) and hasattr(m, "__globals__")
    ]

    captures = {}
    for m in methods:
        captures.update(get_closure(m))
        captures.update(get_type_hint_captures(m))

    def lookup_in_class(key):
        return captures[key] if key in captures else getattr(builtins, key, None)

    return lookup_in_class
|
478 |
+
|
479 |
+
|
480 |
+
def boolean_dispatch(
    arg_name, arg_index, default, if_true, if_false, module_name, func_name
):
    """
    Dispatches to either of 2 script functions based on a boolean argument.
    In TorchScript, the boolean argument must be constant so that the correct
    function to use can be determined at compile time.
    """

    def fn(*args, **kwargs):
        # Resolve the flag: keyword wins, then positional, then the default.
        if arg_name in kwargs:
            flag = kwargs[arg_name]
        elif arg_index < len(args):
            flag = args[arg_index]
        else:
            flag = default
        chosen = if_true if flag else if_false
        return chosen(*args, **kwargs)

    # At most one of the two implementations may carry a docstring; propagate
    # it to the dispatcher and to the undocumented twin.
    if if_true.__doc__ is None and if_false.__doc__ is not None:
        doc = if_false.__doc__
        if_true.__doc__ = doc
    elif if_false.__doc__ is None and if_true.__doc__ is not None:
        doc = if_true.__doc__
        if_false.__doc__ = doc
    elif if_false.__doc__ is None and if_true.__doc__ is None:
        # neither function has a docstring
        doc = None
    else:
        raise RuntimeError("only one function can have a docstring")
    fn.__doc__ = doc

    if module_name is not None:
        fn.__module__ = module_name
    if func_name is not None:
        fn.__name__ = func_name

    # Record dispatch metadata for the compiler.
    boolean_dispatched[fn] = {
        "if_true": if_true,
        "if_false": if_false,
        "index": arg_index,
        "default": default,
        "arg_name": arg_name,
    }
    return fn
|
527 |
+
|
528 |
+
|
529 |
+
class FunctionModifiers:
    """
    Used to denote the behavior of a function in TorchScript. See export() and
    ignore() for details.
    """

    # Each value doubles as a human-readable description that can appear in
    # diagnostics; compare with `is` rather than string equality.
    UNUSED = "unused (ignored and replaced with raising of an exception)"
    IGNORE = "ignore (leave as a call to Python, cannot be torch.jit.save'd)"
    EXPORT = "export (compile this function even if nothing calls it)"
    DEFAULT = "default (compile if called from a exported function / forward)"
    COPY_TO_SCRIPT_WRAPPER = (
        "if this method is not scripted, copy the python method onto the scripted model"
    )
    _DROP = "_drop (function is fully ignored, declaration can be unscriptable)"
|
543 |
+
|
544 |
+
|
545 |
+
def export(fn):
    """
    This decorator indicates that a method on an ``nn.Module`` is used as an entry point into a
    :class:`ScriptModule` and should be compiled.

    ``forward`` implicitly is assumed to be an entry point, so it does not need this decorator.
    Functions and methods called from ``forward`` are compiled as they are seen
    by the compiler, so they do not need this decorator either.

    Example (using ``@torch.jit.export`` on a method):

    .. testcode::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            def implicitly_compiled_method(self, x):
                return x + 99

            # `forward` is implicitly decorated with `@torch.jit.export`,
            # so adding it here would have no effect
            def forward(self, x):
                return x + 10

            @torch.jit.export
            def another_forward(self, x):
                # When the compiler sees this call, it will compile
                # `implicitly_compiled_method`
                return self.implicitly_compiled_method(x)

            def unused_method(self, x):
                return x - 20

        # `m` will contain compiled methods:
        #     `forward`
        #     `another_forward`
        #     `implicitly_compiled_method`
        # `unused_method` will not be compiled since it was not called from
        # any compiled methods and wasn't decorated with `@torch.jit.export`
        m = torch.jit.script(MyModule())
    """
    # Tag fn so the TorchScript compiler compiles it even if nothing calls it.
    fn._torchscript_modifier = FunctionModifiers.EXPORT
    return fn
|
589 |
+
|
590 |
+
|
591 |
+
def unused(fn):
    """
    This decorator indicates to the compiler that a function or method should
    be ignored and replaced with the raising of an exception. This allows you
    to leave code in your model that is not yet TorchScript compatible and still
    export your model.

    Example (using ``@torch.jit.unused`` on a method)::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            def __init__(self, use_memory_efficient):
                super().__init__()
                self.use_memory_efficient = use_memory_efficient

            @torch.jit.unused
            def memory_efficient(self, x):
                import pdb
                pdb.set_trace()
                return x + 10

            def forward(self, x):
                # Use not-yet-scriptable memory efficient mode
                if self.use_memory_efficient:
                    return self.memory_efficient(x)
                else:
                    return x + 10

        m = torch.jit.script(MyModule(use_memory_efficient=False))
        m.save("m.pt")

        m = torch.jit.script(MyModule(use_memory_efficient=True))
        # exception raised
        m(torch.rand(100))
    """
    # A property wraps getter/setter functions; tag each underlying function
    # rather than the property object itself.
    if isinstance(fn, property):
        prop = fn
        setattr(  # noqa: B010
            prop.fget, "_torchscript_modifier", FunctionModifiers.UNUSED
        )

        if prop.fset:
            setattr(  # noqa: B010
                prop.fset, "_torchscript_modifier", FunctionModifiers.UNUSED
            )

        return prop

    # Plain function/method: tag it directly.
    fn._torchscript_modifier = FunctionModifiers.UNUSED
    return fn
|
643 |
+
|
644 |
+
|
645 |
+
# No op context manager from python side
|
646 |
+
class _IgnoreContextManager(contextlib.AbstractContextManager):
    # A do-nothing context manager: __enter__ (returning self) is inherited
    # from contextlib.AbstractContextManager; construction args are ignored.
    def __init__(self, **kwargs):
        pass

    def __exit__(self, exc_type: Any, exc_value: Any, traceback: Any) -> None:
        pass
|
652 |
+
|
653 |
+
|
654 |
+
def ignore(drop=False, **kwargs):
    """
    This decorator indicates to the compiler that a function or method should
    be ignored and left as a Python function. This allows you to leave code in
    your model that is not yet TorchScript compatible. If called from TorchScript,
    ignored functions will dispatch the call to the Python interpreter. Models with ignored
    functions cannot be exported; use :func:`@torch.jit.unused <torch.jit.unused>` instead.

    Example (using ``@torch.jit.ignore`` on a method)::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            @torch.jit.ignore
            def debugger(self, x):
                import pdb
                pdb.set_trace()

            def forward(self, x):
                x += 10
                # The compiler would normally try to compile `debugger`,
                # but since it is `@ignore`d, it will be left as a call
                # to Python
                self.debugger(x)
                return x

        m = torch.jit.script(MyModule())

        # Error! The call `debugger` cannot be saved since it calls into Python
        m.save("m.pt")

    Example (using ``@torch.jit.ignore(drop=True)`` on a method):

    .. testcode::

        import torch
        import torch.nn as nn

        class MyModule(nn.Module):
            @torch.jit.ignore(drop=True)
            def training_method(self, x):
                import pdb
                pdb.set_trace()

            def forward(self, x):
                if self.training:
                    self.training_method(x)
                return x

        m = torch.jit.script(MyModule())

        # This is OK since `training_method` is not saved, the call is replaced
        # with a `raise`.
        m.save("m.pt")

    .. testcleanup::

        import os
        os.remove('m.pt')
    """

    if callable(drop):
        # used without any args, so drop is actually a function
        #   @torch.jit.ignore
        #   def fn(...):
        fn = drop
        fn._torchscript_modifier = FunctionModifiers.IGNORE
        return fn

    if not isinstance(drop, bool):
        raise RuntimeError(
            "Argument to @torch.jit.ignore must be a bool or "
            f"a function but got {drop}"
        )

    # for backwards compat
    drop_on_export = kwargs.pop("drop_on_export", None)
    if drop_on_export:
        # BUGFIX: these deprecation messages previously ended with a literal,
        # never-filled "{}" placeholder that was shown verbatim to users.
        warnings.warn(
            "ignore(drop_on_export=True) has been deprecated. TorchScript will now drop the function "
            "call on compilation. Use torch.jit.unused now.",
            category=FutureWarning,
        )

        drop = drop_on_export
    elif drop:
        warnings.warn(
            "ignore(True) has been deprecated. TorchScript will now drop the function "
            "call on compilation. Use torch.jit.unused now.",
            category=FutureWarning,
        )

    def decorator(fn):
        # Dropped functions raise when called from TorchScript; ignored
        # functions fall back to the Python interpreter.
        if drop:
            fn._torchscript_modifier = FunctionModifiers.UNUSED
        else:
            fn._torchscript_modifier = FunctionModifiers.IGNORE
        return fn

    return decorator
|
755 |
+
|
756 |
+
|
757 |
+
def _drop(fn):
    # Mark fn as fully dropped: it is ignored entirely and its declaration
    # may be unscriptable (see FunctionModifiers._DROP).
    fn._torchscript_modifier = FunctionModifiers._DROP
    return fn
|
760 |
+
|
761 |
+
|
762 |
+
def _copy_to_script_wrapper(fn):
    # Mark fn so that, when its method is not scripted, the plain Python
    # method is copied onto the scripted model instead.
    fn._torchscript_modifier = FunctionModifiers.COPY_TO_SCRIPT_WRAPPER
    return fn
|
765 |
+
|
766 |
+
|
767 |
+
def module_has_exports(mod):
    """Return True if any callable attribute of `mod` carries the EXPORT modifier."""
    for attr_name in dir(mod):
        candidate = getattr(mod, attr_name, None)
        if not callable(candidate):
            continue
        if get_torchscript_modifier(candidate) is FunctionModifiers.EXPORT:
            return True
    return False
|
775 |
+
|
776 |
+
|
777 |
+
# WARNING: should_drop is currently being used by our JIT code coverage plug-in to mark JIT'd code as covered. If you
|
778 |
+
# rename this function, please update references in tools/coverage_plugins_package/src/coverage_plugins/jit_plugin.py to
|
779 |
+
# allow JIT'd code to still be covered.
|
780 |
+
def should_drop(fn) -> bool:
    """Return True if calls to `fn` should be dropped (UNUSED or _DROP modifier)."""
    modifier = get_torchscript_modifier(fn)
    return modifier is not None and (
        modifier is FunctionModifiers.UNUSED or modifier is FunctionModifiers._DROP
    )
|
785 |
+
|
786 |
+
|
787 |
+
def is_ignored_fn(fn) -> bool:
    """Return True if `fn` is marked UNUSED, IGNORE, or _DROP."""
    modifier = get_torchscript_modifier(fn)
    return any(
        modifier is flag
        for flag in (
            FunctionModifiers.UNUSED,
            FunctionModifiers.IGNORE,
            FunctionModifiers._DROP,
        )
    )
|
794 |
+
|
795 |
+
|
796 |
+
def _is_drop_fn(fn) -> bool:
    """Return True if `fn` carries the _DROP modifier."""
    return get_torchscript_modifier(fn) is FunctionModifiers._DROP
|
799 |
+
|
800 |
+
|
801 |
+
def is_static_fn(cls, fn) -> bool:
    """Return True if attribute `fn` of `cls` is declared as a staticmethod."""
    static_attr = inspect.getattr_static(cls, fn, default=None)
    return isinstance(static_attr, staticmethod)
|
803 |
+
|
804 |
+
|
805 |
+
def get_static_fn(cls, fn):
    """Return the plain function wrapped by the staticmethod `fn` on `cls`."""
    static_attr = inspect.getattr_static(cls, fn)
    return static_attr.__func__
|
807 |
+
|
808 |
+
|
809 |
+
def get_torchscript_modifier(fn):
    """
    Return the TorchScript modifier attached to `fn` (DEFAULT when none is
    set), or None when `fn` is not callable.
    """
    if not callable(fn):
        return None
    # Unwrap bound methods to reach the underlying function object.
    target = getattr(fn, "__func__", fn)
    return getattr(target, "_torchscript_modifier", FunctionModifiers.DEFAULT)
|
815 |
+
|
816 |
+
|
817 |
+
def copy_torchscript_modifier(orig, new) -> None:
    """Copy orig's TorchScript modifier (if it has one) onto new."""
    modifier = get_torchscript_modifier(orig)
    if modifier is not None:
        new._torchscript_modifier = modifier
|
822 |
+
|
823 |
+
|
824 |
+
# overloading registration
# overloads get registered in this file, and compiled in torch/jit/__init__.py
# so that they can be imported in nn/functional.py without an import cycle

# qualified_name => list[overload_functions]
# Populated by _overload() below; entries are removed via _clear_fn_overloads().
_overloaded_fns: Dict[str, List[Callable]] = {}  # noqa: T484
|
830 |
+
|
831 |
+
|
832 |
+
_OVERLOAD_EXAMPLE = """
|
833 |
+
Example usage of overload function:
|
834 |
+
@torch.jit._overload
|
835 |
+
def my_function(x: type0) -> type0: # decl 1
|
836 |
+
pass
|
837 |
+
|
838 |
+
@torch.jit._overload
|
839 |
+
def my_function(x: type1) -> type1: # decl 2
|
840 |
+
pass
|
841 |
+
|
842 |
+
def my_function(x): # implementation
|
843 |
+
if isinstance(x, type0):
|
844 |
+
return x
|
845 |
+
elif isinstance(x, type1):
|
846 |
+
return x
|
847 |
+
"""
|
848 |
+
|
849 |
+
|
850 |
+
def get_overload_no_implementation_error_message(kind, obj):
    """
    Build the error message reported when overload declarations for `obj`
    (a "function" or "method", per `kind`) have no implementation definition.
    """
    sourcelines, file_lineno, filename = get_source_lines_and_file(obj)
    return (
        f'Implementation for the {kind} "{_qualified_name(obj)}" is missing. Please make '
        f"sure a definition is provided and defined after all overload declarations.\n"
        # BUGFIX: report the actual file name for obj; `filename` was unpacked
        # but a placeholder string was hardcoded into the message.
        f'File "{filename}", line {file_lineno}:\n'
        + "".join(sourcelines)
        + "\n"
        + _OVERLOAD_EXAMPLE
    )
|
860 |
+
|
861 |
+
|
862 |
+
def _check_overload_body(func):
    """Validate that an @_overload declaration's body is only `pass` or `...`."""
    try:
        parsed_def = parse_def(func)
    except OSError as e:
        # Parsing the function definition can raise an OSError if source is unavailable.
        # Since this is just an initial check, just raise a warning if this is the case.
        warnings.warn(
            f"Unable to retrieve source for @torch.jit._overload function: {func}."
        )
        return

    body = parsed_def.ast.body[0].body

    def is_pass(x):
        return isinstance(x, ast.Pass)

    def is_ellipsis(x):
        return isinstance(x, ast.Expr) and isinstance(x.value, ast.Ellipsis)

    # Exactly one statement, and it must be `pass` or `...`.
    if len(body) != 1 or not (is_pass(body[0]) or is_ellipsis(body[0])):
        msg = (
            "Only `pass` statement or `...` can be the body of overload declaration:\n"
        )
        msg += "\n".join(parsed_def.source.split("\n")[:3])
        msg += " <- Expecting `pass` or `...` here!\n" + _OVERLOAD_EXAMPLE
        raise RuntimeError(msg)
|
888 |
+
|
889 |
+
|
890 |
+
def _overload(func):
    """Register `func` as an overload declaration under its qualified name."""
    _check_overload_body(func)
    qual_name = _qualified_name(func)
    global _overloaded_fns
    _overloaded_fns.setdefault(qual_name, []).append(func)
    return func
|
900 |
+
|
901 |
+
|
902 |
+
def _get_fn_overloads(qual_name):
    # Return the list of registered overloads for qual_name, or None.
    return _overloaded_fns.get(qual_name)
|
904 |
+
|
905 |
+
|
906 |
+
def _clear_fn_overloads(qual_name) -> None:
    # Drop all registered overloads for qual_name (KeyError if none exist).
    del _overloaded_fns[qual_name]
|
908 |
+
|
909 |
+
|
910 |
+
def get_class_name_lineno(method) -> Tuple[str, int]:
    """
    Walk two frames up the stack — past this call and the _overload_method
    call — and return that frame's code name and first line number.
    (`method` itself is not inspected.)
    """
    current_frame = inspect.currentframe()

    # one hop for the get_class_name call, one for the _overload_method call
    for _ in range(2):
        assert current_frame is not None  # narrow Optional[FrameType]
        current_frame = current_frame.f_back

    assert current_frame is not None  # same here
    code = current_frame.f_code
    return code.co_name, code.co_firstlineno
|
924 |
+
|
925 |
+
|
926 |
+
# At the point the decorator is applied to class methods the method
# has no reference to its owning class. _qualified_name would not include
# the class it is defined in, so any methods with the same name in the same file
# would have the same _qualified_name, even if they were defined in different
# classes. This problem only exists in python 2.
# We get around this problem by looking at the stack frame and identifying
# the class name, and throwing an error whenever overloads are used
# when modules of the same name are in the same file

# qualified_name => class name => list[overload_functions]
_overloaded_methods: Dict[str, Dict[str, List[Callable]]] = {}  # noqa: T484


# (qualified_name, class name) => class_fileno
# Records the first line number of the class body that registered each
# overload; used to detect same-named classes within one module.
_overloaded_method_class_fileno: Dict[Tuple[str, str], int] = {}
|
941 |
+
|
942 |
+
|
943 |
+
def _overload_method(func):
    """Register `func` as a method overload, keyed by its qualified name and
    the name of the enclosing class (recovered from the call stack)."""
    _check_overload_body(func)
    qual_name = _qualified_name(func)
    global _overloaded_methods
    class_name_map = _overloaded_methods.get(qual_name, None)
    if class_name_map is None:
        class_name_map = {}
        _overloaded_methods[qual_name] = class_name_map

    class_name, line_no = get_class_name_lineno(func)
    method_overloads = class_name_map.get(class_name, None)
    if method_overloads is None:
        # First overload for this (qual_name, class_name): remember the class
        # body's line number so a redefinition elsewhere can be detected.
        method_overloads = []
        class_name_map[class_name] = method_overloads
        _overloaded_method_class_fileno[(qual_name, class_name)] = line_no
    else:
        # A different line number means a same-named class in the same module.
        existing_lineno = _overloaded_method_class_fileno[(qual_name, class_name)]
        if existing_lineno != line_no:
            raise RuntimeError(
                "Cannot currently overload the same method name in two different"
                " classes with the same name in the same module"
            )

    method_overloads.append(func)
    return func
|
968 |
+
|
969 |
+
|
970 |
+
def _get_overloaded_methods(method, mod_class):
    """Return the list of registered overloads for ``method`` on ``mod_class``.

    Returns None when the method has no ``__name__`` or no overloads were
    registered. Raises if the method's source line falls outside the class
    body, which indicates the module class was redeclared in the same file.
    """
    # TODO: __name__ not set for submodules in recursive script
    if not hasattr(method, "__name__"):
        return None
    qual_name = _qualified_name(method)
    class_name_map = _overloaded_methods.get(qual_name, None)
    if class_name_map is None:
        return None
    overloads = class_name_map.get(mod_class.__name__, None)
    if overloads is None:
        return None

    # Sanity check: the method must be defined inside the source span of
    # mod_class, otherwise the registry entry belongs to a different class.
    method_line_no = get_source_lines_and_file(method)[1]
    mod_class_fileno = get_source_lines_and_file(mod_class)[1]
    mod_end_fileno = mod_class_fileno + len(get_source_lines_and_file(mod_class)[0])
    if not (method_line_no >= mod_class_fileno and method_line_no <= mod_end_fileno):
        raise Exception(
            "Overloads are not useable when a module is redeclared within the same file: "
            + str(method)
        )
    return overloads
|
991 |
+
|
992 |
+
|
993 |
+
def is_tuple(ann) -> bool:
    """Return True when ``ann`` is a parameterized tuple annotation.

    Accepts ``typing.Tuple[...]`` and, on Python 3.9+, builtin ``tuple[...]``.
    A bare ``Tuple`` is rejected with a helpful error.
    """
    if ann is Tuple:
        raise_error_container_parameter_missing("Tuple")

    # For some reason Python 3.7 violates the Type[A, B].__origin__ == Type
    # rule, so tolerate annotation objects lacking __module__ entirely.
    mod = getattr(ann, "__module__", None)
    if mod is None:
        return False

    origin = get_origin(ann)
    if IS_PY39_PLUS and mod == "builtins" and origin is tuple:
        return True
    return mod == "typing" and (origin is Tuple or origin is tuple)
|
1005 |
+
|
1006 |
+
|
1007 |
+
def is_list(ann) -> bool:
    """Return True when ``ann`` is a parameterized list annotation.

    Accepts ``typing.List[...]`` and, on Python 3.9+, builtin ``list[...]``.
    A bare ``List`` is rejected with a helpful error.
    """
    if ann is List:
        raise_error_container_parameter_missing("List")

    # Annotation objects without __module__ cannot be typing aliases.
    mod = getattr(ann, "__module__", None)
    if mod is None:
        return False

    origin = get_origin(ann)
    if IS_PY39_PLUS and mod == "builtins" and origin is list:
        return True
    return mod == "typing" and (origin is List or origin is list)
|
1018 |
+
|
1019 |
+
|
1020 |
+
def is_dict(ann) -> bool:
    """Return True when ``ann`` is a parameterized dict annotation.

    Accepts ``typing.Dict[...]`` and, on Python 3.9+, builtin ``dict[...]``.
    A bare ``Dict`` is rejected with a helpful error.
    """
    if ann is Dict:
        raise_error_container_parameter_missing("Dict")

    # Annotation objects without __module__ cannot be typing aliases.
    mod = getattr(ann, "__module__", None)
    if mod is None:
        return False

    origin = get_origin(ann)
    if IS_PY39_PLUS and mod == "builtins" and origin is dict:
        return True
    return mod == "typing" and (origin is Dict or origin is dict)
|
1031 |
+
|
1032 |
+
|
1033 |
+
def is_union(ann):
    """Return True for union annotations: PEP 604 ``X | Y`` or ``typing.Union[...]``.

    A bare ``Union`` is rejected with a helpful error.
    """
    if ann is Union:
        raise_error_container_parameter_missing("Union")

    # PEP 604 unions (types.UnionType) are recognized directly.
    if isinstance(ann, BuiltinUnionType):
        return True
    return (
        hasattr(ann, "__module__")
        and ann.__module__ == "typing"
        and get_origin(ann) is Union
    )
|
1042 |
+
|
1043 |
+
|
1044 |
+
def is_optional(ann):
    """Return True when ``ann`` is an Optional-like annotation.

    Matches either a typing annotation whose origin is ``Optional`` or a
    two-member union that includes ``None``/``NoneType`` (the form
    ``Optional[X]`` normalizes to). A bare ``Optional`` raises.
    """
    if ann is Optional:
        raise_error_container_parameter_missing("Optional")

    def is_optional_as_optional(ann):
        # NOTE(review): typing normalizes Optional[X] to Union[X, None], so
        # get_origin(ann) is Optional looks unreachable in practice — the
        # union branch below is what actually matches; confirm before relying.
        return (
            hasattr(ann, "__module__")
            and ann.__module__ == "typing"
            and (get_origin(ann) is Optional)
        )

    def is_union_as_optional(ann):
        # A union is "optional" when it has exactly two args and one is None.
        ann_args = get_args(ann)
        return len(ann_args) == 2 and (None in ann_args or type(None) in ann_args)

    return is_optional_as_optional(ann) or (is_union(ann) and is_union_as_optional(ann))
|
1060 |
+
|
1061 |
+
|
1062 |
+
def is_future(ann) -> bool:
    """Return True for parameterized ``Future[...]`` annotations.

    A bare ``Future`` (no contained type) raises RuntimeError.
    """
    if ann is Future:
        raise RuntimeError(
            "Attempted to use Future without a contained type. "
            "Please add a contained type, e.g. Future[int]"
        )
    return get_origin(ann) is Future
|
1070 |
+
|
1071 |
+
|
1072 |
+
def is_await(ann) -> bool:
    """Return True for the ``_Await`` sentinel itself or ``_Await[...]`` annotations."""
    return ann is _Await or get_origin(ann) is _Await
|
1076 |
+
|
1077 |
+
|
1078 |
+
if torch.distributed.rpc.is_available():
    from torch._C._distributed_rpc import PyRRef
    from torch.distributed.rpc import RRef

    def is_rref(ann) -> bool:
        """Return True for parameterized ``RRef[...]`` annotations; bare ``RRef`` raises."""
        if ann is RRef:
            raise RuntimeError(
                "Attempted to use RRef without a "
                "contained type. Please add a contained type, e.g. "
                "RRef[int]"
            )
        return get_origin(ann) is RRef

    def is_rref_instance(obj) -> bool:
        # PyRRef is the C++ RRef binding; RRef objects are instances of it.
        return isinstance(obj, PyRRef)

else:
    # RPC is not built in: only define the instance check, always False.
    # NOTE: is_rref is intentionally undefined in this configuration.

    def is_rref_instance(obj) -> bool:
        # If the RPC module doesn't exist then RRefs don't exist either.
        return False
|
1099 |
+
|
1100 |
+
|
1101 |
+
def is_final(ann) -> bool:
    """Return True for ``Final`` annotations (bare or parameterized) from typing/typing_extensions."""
    if ann.__module__ not in {"typing", "typing_extensions"}:
        return False
    # Parameterized Final[X] has origin Final; bare Final is a _SpecialForm.
    return get_origin(ann) is Final or isinstance(ann, type(Final))
|
1105 |
+
|
1106 |
+
|
1107 |
+
# allows BroadcastingList instance to be subscriptable
class BroadcastingListCls:
    # Subscription is accepted purely so annotations like
    # BroadcastingList2[int] parse; the subscript result is discarded (None).
    def __getitem__(self, types):
        return


# mypy doesn't support parameters on types, so we have to explicitly type each
# list size. BroadcastingList1..BroadcastingList6 all alias one instance.
BroadcastingList1 = BroadcastingListCls()
for i in range(2, 7):
    globals()[f"BroadcastingList{i}"] = BroadcastingList1
|
1118 |
+
|
1119 |
+
|
1120 |
+
def is_scripting() -> bool:
    r"""
    Function that returns True when in compilation and False otherwise. This
    is useful especially with the @unused decorator to leave code in your
    model that is not yet TorchScript compatible.
    .. testcode::

        import torch

        @torch.jit.unused
        def unsupported_linear_op(x):
            return x

        def linear(x):
            if torch.jit.is_scripting():
                return torch.linear(x)
            else:
                return unsupported_linear_op(x)
    """
    # In eager mode this is always False; presumably the TorchScript compiler
    # substitutes a True-returning builtin during compilation — confirm in
    # the compiler sources before relying on that detail.
    return False
|
1140 |
+
|
1141 |
+
|
1142 |
+
# Retrieves a fully-qualified name (module hierarchy + classname) for a given obj.
|
1143 |
+
# Retrieves a fully-qualified name (module hierarchy + classname) for a given obj.
def _qualified_name(obj, mangle_name=True) -> str:
    """Return the TorchScript qualified name for ``obj``.

    The result is ``<module>.<name>`` where the module part is normalized
    (package-mangling stripped, ``__torch__`` prefix added unless
    ``mangle_name`` is False). Raises RuntimeError when a usable name or
    module cannot be determined, or when the name is not an identifier.
    """
    # This special case allows us to override the qualified name on a type.
    # It's currently used in conjunction with tracing, where we create a
    # fake module to filter only supported attributes. However, since this
    # new type is defined as a local class, we need a mechanism to override
    # its qualname so it appears correctly in the TorchScript system. This,
    # we set '_jit_override_qualname' with the original traced module's
    # qualified name, which is picked up here
    if hasattr(obj, "_jit_override_qualname"):
        return obj._jit_override_qualname
    # short-circuit in cases where the object already has a known qualified name
    if isinstance(obj, torch._C.ScriptFunction):
        return obj.qualified_name

    if getattr(obj, "__name__", None):
        name = obj.__name__
    # Enum classes do not have `__name__` attr, instead they have `name`.
    elif isinstance(obj, enum.Enum):
        name = obj.name
    else:
        raise RuntimeError("Could not get name of python class object")

    if name == "<lambda>":
        name = "_lambda"  # make name a valid identifier

    module_name = obj.__module__

    # If the module is actually a torchbind module, then we should short circuit
    if module_name == "torch._classes":
        return obj.qualified_name

    # The Python docs are very clear that `__module__` can be None, but I can't
    # figure out when it actually would be.
    if module_name is None:
        raise RuntimeError(
            f"Could not get qualified name for class '{name}': "
            "__module__ can't be None."
        )

    # if getattr(sys.modules[module_name], name) is not obj:
    #     raise RuntimeError(f"Could not get qualified name for class '{name}': "
    #         f"the attr {name} on module {module_name} is not the class")

    # torch.package and TorchScript have separate mangling schemes to avoid
    # name collisions from multiple packages. To avoid them interfering with
    # each other, normalize the package manging here.
    if package_mangling.is_mangled(module_name):
        module_name = module_name.replace("<", "_")
        module_name = module_name.replace(">", "_")

    # The PythonExceptionValue C++ class in torch/csrc/jit/python/python_sugared_value.h
    # does not need mangle the python class name.
    if mangle_name:
        # __main__ is a builtin module, so rewrite it to "__torch__".
        if module_name == "__main__":
            module_name = "__torch__"
        else:
            # Everything else gets a "__torch__" prefix to avoid name collisions
            # with the names of user values.
            module_name = "__torch__." + module_name

    if "." in name:
        raise RuntimeError(
            f"Could not get qualified name for class '{name}': "
            f"'{name}' is not a valid identifier"
        )

    return module_name + "." + name
|
1211 |
+
|
1212 |
+
|
1213 |
+
def _try_get_dispatched_fn(fn):
|
1214 |
+
if not callable(fn):
|
1215 |
+
return None
|
1216 |
+
return boolean_dispatched.get(fn)
|
1217 |
+
|
1218 |
+
|
1219 |
+
def _get_named_tuple_properties(
    obj, loc: Optional[torch._C._jit_tree_views.SourceRange] = None, rcb=None
):
    """Extract (name, fields, field types, defaults) from a NamedTuple class.

    ``obj`` must be a NamedTuple subclass (a tuple subclass with ``_fields``).
    Field annotations are converted to TorchScript types via ann_to_type;
    string/ForwardRef annotations are resolved through ``rcb`` when given.
    Fields without annotations fall back to an inferred TensorType.
    """
    if loc is None:
        loc = fake_range()

    assert issubclass(obj, tuple) and hasattr(obj, "_fields")
    if hasattr(obj, "_field_defaults"):
        # Collect defaults in declared field order.
        defaults = [
            obj._field_defaults[field]
            for field in obj._fields
            if field in obj._field_defaults
        ]
    else:
        defaults = []
    # In 3.10 recommended way to get annotations is to call `inspect.get_annotations` function
    # Also, annotations from base class are not inherited so they need to be queried explicitly
    if sys.version_info[:2] < (3, 10):
        obj_annotations = getattr(obj, "__annotations__", {})
    else:
        obj_annotations = inspect.get_annotations(obj)
        if len(obj_annotations) == 0 and hasattr(obj, "__base__"):
            obj_annotations = inspect.get_annotations(obj.__base__)

    annotations = []
    for field in obj._fields:
        if field in obj_annotations:
            field_type = obj_annotations[field]
            # [Note: ForwardRef annotations in NamedTuple attributes]
            # NamedTuple types are slightly different from normal types.
            #
            # Normally, annotations are evaluted like this (during jit.script):
            # 1. Load strings of python code into c++ and parse.
            # 2. Get annotations as strings
            # 3. Use the PythonResolver's resolution callback (rcb) to convert
            #    the string into a python object
            # 4. We call into annotations.py:ann_to_type to convert python obj
            #    from step 3 into a type that torchscript understands.
            #
            # NamedTuples are more complicated, because it has sub-types.
            # Normally, once we have the NamedTuple type object from #3,
            # we can just look at the annotation literal values and use
            # ann_to_type directly on them.
            #
            # But sometimes, users will annotate with string literals, e.g.
            #    x: 'int'
            # This also happens with PEP563 (from __forward__ import annotations)
            #
            # These annotations appear in the annotation dict as ForwardRef('int').
            #
            # Then, we need to convert the string into a python object. This
            # requires having local context for custom objects or imported types.
            # rcb() is what gives us this. So, we plumb rcb through the stack so
            # it can be used in this context for the if block below.
            #
            # FAQ:
            # - Why do we need this special handling for NamedTuple but string
            #   annotations work fine for normal types? Normally, we parse the
            #   string directly and then call rcb() directly from C++.
            # - Why not use ForwardRef._evaluate? For that, we need globals()
            #   and locals() for the local context where the NamedTuple was defined.
            #   rcb is what lets us look up into these. So, basically rcb does the
            #   hard work for us.
            if isinstance(field_type, ForwardRef) and rcb is not None:
                rcb_type = rcb(field_type.__forward_arg__)
                # rcb returns None if it can't find anything.
                if rcb_type is None:
                    raise ValueError(
                        f"Unknown type annotation: '{field_type}' in NamedTuple {obj.__name__}."
                        f" Likely due to partial support for ForwardRef parameters in NamedTuples, see #95858."
                        f" Issue occurred at {loc.highlight()}"
                    )
                field_type = rcb_type
            the_type = torch.jit.annotations.ann_to_type(field_type, loc, rcb)
            annotations.append(the_type)
        else:
            # Unannotated fields are treated as inferred Tensors.
            annotations.append(torch._C.TensorType.getInferred())
    return type(obj).__name__, obj._fields, annotations, defaults
|
1297 |
+
|
1298 |
+
|
1299 |
+
def _create_named_tuple(
|
1300 |
+
t, unqual_name: str, field_names: List[str], defaults: Tuple[Any, ...]
|
1301 |
+
):
|
1302 |
+
TupleType = collections.namedtuple(unqual_name, field_names, defaults=defaults) # type: ignore[call-arg, no-redef, misc]
|
1303 |
+
return TupleType(*t)
|
1304 |
+
|
1305 |
+
|
1306 |
+
@contextlib.contextmanager
def _disable_emit_hooks():
    """Context manager that clears the JIT emit hooks and restores them on exit."""
    hooks = torch._C._jit_get_emit_hooks()
    torch._C._jit_set_emit_hooks(None, None)
    try:
        yield
    finally:
        # Restore the (module hook, function hook) pair even on exceptions.
        torch._C._jit_set_emit_hooks(hooks[0], hooks[1])
|
1314 |
+
|
1315 |
+
|
1316 |
+
def _disable_emit_hooks_decorator(_DecoratorContextManager) -> None:  # noqa: F811
    # NOTE(review): these nested defs are never attached to
    # _DecoratorContextManager and the function returns None — this looks
    # vestigial. Preserved as-is; confirm before removing.
    def __enter__(self) -> None:
        self.hooks = torch._C._jit_get_emit_hooks()
        torch._C._jit_set_emit_hooks(None, None)

    def __exit__(self, *args) -> None:
        torch._C._jit_set_emit_hooks(self.hooks[0], self.hooks[1])
|
1323 |
+
|
1324 |
+
|
1325 |
+
def _is_exception(obj) -> bool:
|
1326 |
+
if not inspect.isclass(obj):
|
1327 |
+
return False
|
1328 |
+
return issubclass(obj, Exception)
|
1329 |
+
|
1330 |
+
|
1331 |
+
def raise_error_container_parameter_missing(target_type) -> None:
    """Always raise RuntimeError telling the user to parameterize ``target_type``.

    ``"Dict"`` gets a two-parameter example; every other container a
    single-parameter one.
    """
    if target_type == "Dict":
        msg = (
            "Attempted to use Dict without "
            "contained types. Please add contained type, e.g. "
            "Dict[int, int]"
        )
    else:
        msg = (
            f"Attempted to use {target_type} without a "
            "contained type. Please add a contained type, e.g. "
            f"{target_type}[int]"
        )
    raise RuntimeError(msg)
|
1343 |
+
|
1344 |
+
|
1345 |
+
def check_args_exist(target_type) -> None:
    """Raise a helpful error when a bare (unparameterized) container type is used."""
    for aliases, label in (
        ((List, list), "List"),
        ((Tuple, tuple), "Tuple"),
        ((Dict, dict), "Dict"),
        ((None, Optional), "Optional"),
    ):
        if any(target_type is alias for alias in aliases):
            raise_error_container_parameter_missing(label)
|
1354 |
+
|
1355 |
+
|
1356 |
+
def check_empty_containers(obj) -> None:
    """Warn when an empty container is checked: its element type cannot be verified."""
    is_empty = obj == [] or obj == {} or obj == ()
    if is_empty:
        warnings.warn(
            "The inner type of a container is lost when calling "
            "torch.jit.isinstance in eager mode. For example, "
            "List[int] would become list and therefore falsely "
            "return True for List[float] or List[str]."
        )
|
1365 |
+
|
1366 |
+
|
1367 |
+
# supports List/Dict/Tuple and Optional types
# TODO support future
def container_checker(obj, target_type) -> bool:
    """Recursively check ``obj`` against a parameterized container annotation.

    Returns False when the annotation has no origin or when any element,
    key, or value fails its (possibly nested) check.
    """
    origin_type = get_origin(target_type)
    check_args_exist(target_type)
    if origin_type is None:
        return False
    elif origin_type is list or origin_type is List:
        check_empty_containers(obj)
        if not isinstance(obj, list):
            return False
        arg_type = get_args(target_type)[0]
        arg_origin = get_origin(arg_type)
        for el in obj:
            # check if nested container, ex: List[List[str]]
            if arg_origin:  # processes nested container, ex: List[List[str]]
                if not container_checker(el, arg_type):
                    return False
            elif not isinstance(el, arg_type):
                return False
        return True
    elif origin_type is Dict or origin_type is dict:
        check_empty_containers(obj)
        if not isinstance(obj, dict):
            return False
        key_type = get_args(target_type)[0]
        val_type = get_args(target_type)[1]
        for key, val in obj.items():
            # check if keys are of right type
            if not isinstance(key, key_type):
                return False
            val_origin = get_origin(val_type)
            if val_origin:
                if not container_checker(val, val_type):
                    return False
            elif not isinstance(val, val_type):
                return False
        return True
    elif origin_type is Tuple or origin_type is tuple:
        check_empty_containers(obj)
        if not isinstance(obj, tuple):
            return False
        arg_types = get_args(target_type)
        if len(obj) != len(arg_types):
            return False
        for el, el_type in zip(obj, arg_types):
            el_origin = get_origin(el_type)
            if el_origin:
                if not container_checker(el, el_type):
                    return False
            elif not isinstance(el, el_type):
                return False
        return True
    elif origin_type is Union or issubclass(
        origin_type, BuiltinUnionType
    ):  # also handles Optional
        if obj is None:  # check before recursion because None is always fine
            return True
        inner_types = get_args(target_type)
        for t in inner_types:
            t_origin = get_origin(t)
            if t_origin:
                # NOTE(review): this returns on the FIRST parameterized member
                # of the union rather than continuing to later members —
                # confirm this short-circuit is intentional before changing.
                return container_checker(obj, t)
            elif isinstance(obj, t):
                return True
        return False
|
1433 |
+
|
1434 |
+
|
1435 |
+
def _isinstance(obj, target_type) -> bool:
    """Eager-mode backend of ``torch.jit.isinstance``.

    Supports tuples of types (any-match), parameterized container
    annotations (delegated to container_checker), and plain types.
    """
    if isinstance(target_type, collections.abc.Container):
        if not isinstance(target_type, tuple):
            raise RuntimeError(
                "The second argument to `torch.jit.isinstance` "
                "must be a type or a tuple of types"
            )
        return any(_isinstance(obj, candidate) for candidate in target_type)

    if get_origin(target_type):
        return container_checker(obj, target_type)

    # Check to handle non-typed optional origin returns as none instead
    # of as optional in 3.7-3.8
    check_args_exist(target_type)

    # handle non-containers
    return isinstance(obj, target_type)
|
1458 |
+
|
1459 |
+
|
1460 |
+
class _TensorExtractor(pickle.Pickler):
    """Pickler subclass that records every torch.Tensor it encounters.

    The pickled byte stream is discarded; pickling is only used as a generic
    object-graph traversal that appends tensors into ``self.tensors``.
    """

    def __init__(self, *args, tensors: List[torch.Tensor], **kwargs):
        super().__init__(*args, **kwargs)
        # Caller-owned accumulator; appended to during dump().
        self.tensors = tensors

    def persistent_id(self, obj):
        # Returning a non-None id short-circuits normal pickling for obj.
        if isinstance(obj, torch.Tensor):
            self.tensors.append(obj)
            return ""
        # Since we just want to extract tensors, we don't mind if an object is
        # unpicklable if it doesn't contain tensors, as we can just ignore/skip
        # it. To play it safe, we only do so for common objects that we're sure
        # don't contain tensors. Feel free to add new types here. Note also that
        # even if a type isn't listed here this won't block users, since thet
        # can just add a __getstate__ or __reduce__ method to their class.
        if isinstance(obj, LockType):
            return ""
        # Futures and RRefs don't technically contain a value, they just offer
        # the means to access a value.
        if isinstance(obj, CFuture) or is_rref_instance(obj):
            return ""
        if isinstance(obj, CAwait):
            return ""
        if isinstance(obj, torch.cuda.Event):
            return ""
        if isinstance(obj, threading.Thread):
            return ""
        return None
|
1488 |
+
|
1489 |
+
|
1490 |
+
def _extract_tensors(obj):
    r"""
    This function is exclusively called from C++.
    See ``torch/csrc/jit/python/python_ivalue.h``.

    It extracts the tensors contained in the given object, through pickling.
    """
    tensors: List[torch.Tensor] = []
    # protocol=-1 selects the highest pickle protocol; the BytesIO output is
    # discarded — only the tensors side-channel matters.
    extractor = _TensorExtractor(io.BytesIO(), protocol=-1, tensors=tensors)
    extractor.dump(obj)
    return tensors
|
1501 |
+
|
1502 |
+
|
1503 |
+
# In Python-3.11+ typed enums (i.e. IntEnum for example) retain number of base class methods in subclass
# that were previously dropped. To preserve the behavior, explicitly drop them there

if sys.version_info > (3, 10):
    # Mark these Enum dunders as dropped so TorchScript ignores them.
    _drop(enum.Enum.__new__)
    _drop(enum.Enum.__format__)
    _drop(enum.Enum.__repr__)
    _drop(enum.Enum.__str__)
|
env-llmeval/lib/python3.10/site-packages/torch/_linalg_utils.py
ADDED
@@ -0,0 +1,164 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Various linear algebra utility methods for internal use.
|
2 |
+
|
3 |
+
"""
|
4 |
+
|
5 |
+
from typing import Optional, Tuple
|
6 |
+
|
7 |
+
import torch
|
8 |
+
from torch import Tensor
|
9 |
+
|
10 |
+
|
11 |
+
def is_sparse(A):
    """Return True when ``A`` is a sparse COO tensor; raise TypeError for non-tensors."""
    if not isinstance(A, torch.Tensor):
        msg = "expected Tensor"
        # The type() detail is only available outside TorchScript compilation.
        if not torch.jit.is_scripting():
            msg += f" but got {type(A)}"
        raise TypeError(msg)
    return A.layout == torch.sparse_coo
|
20 |
+
|
21 |
+
|
22 |
+
def get_floating_dtype(A):
    """Return the floating point dtype of tensor A.

    Integer types map to float32.
    """
    # Only the three standard float widths pass through unchanged; anything
    # else (including integers) falls back to float32.
    passthrough = (torch.float16, torch.float32, torch.float64)
    return A.dtype if A.dtype in passthrough else torch.float32
|
31 |
+
|
32 |
+
|
33 |
+
def matmul(A: Optional[Tensor], B: Tensor) -> Tensor:
    """Multiply two matrices.

    If A is None, return B. A can be sparse or dense. B is always
    dense.
    """
    if A is None:
        return B
    # Sparse COO tensors need the dedicated sparse matmul kernel.
    mm = torch.sparse.mm if is_sparse(A) else torch.matmul
    return mm(A, B)
|
44 |
+
|
45 |
+
|
46 |
+
def conjugate(A):
    """Return the complex conjugate of ``A``.

    .. note:: If A's dtype is not complex, A is returned unchanged.
    """
    return A.conj() if A.is_complex() else A
|
54 |
+
|
55 |
+
|
56 |
+
def transpose(A):
    """Return transpose of a matrix or batches of matrices (swap last two dims)."""
    rank = len(A.shape)
    return A.transpose(rank - 1, rank - 2)
|
60 |
+
|
61 |
+
|
62 |
+
def transjugate(A):
    """Return transpose conjugate of a matrix or batches of matrices."""
    flipped = transpose(A)
    return conjugate(flipped)
|
65 |
+
|
66 |
+
|
67 |
+
def bform(X: Tensor, A: Optional[Tensor], Y: Tensor) -> Tensor:
    """Return bilinear form of matrices: :math:`X^T A Y`."""
    right = matmul(A, Y)
    return matmul(transpose(X), right)
|
70 |
+
|
71 |
+
|
72 |
+
def qform(A: Optional[Tensor], S: Tensor):
    """Return quadratic form :math:`S^T A S`."""
    # Quadratic form is the bilinear form with X == Y == S.
    return bform(S, A, S)
|
75 |
+
|
76 |
+
|
77 |
+
def basis(A):
    """Return orthogonal basis of A columns (the Q factor of a QR decomposition)."""
    Q, _ = torch.linalg.qr(A)
    return Q
|
80 |
+
|
81 |
+
|
82 |
+
def symeig(A: Tensor, largest: Optional[bool] = False) -> Tuple[Tensor, Tensor]:
    """Return eigenpairs of A, ascending by default, descending when ``largest``."""
    E, Z = torch.linalg.eigh(A, UPLO="U")
    # eigh yields eigenvalues in ascending order; flip both factors for
    # descending. A `largest` of None is treated the same as False.
    if largest:
        E = torch.flip(E, dims=(-1,))
        Z = torch.flip(Z, dims=(-1,))
    return E, Z
|
92 |
+
|
93 |
+
|
94 |
+
# These functions were deprecated and removed
# This nice error message can be removed in version 1.13+
def matrix_rank(input, tol=None, symmetric=False, *, out=None) -> Tensor:
    """Removed API stub: always raises with migration guidance to torch.linalg.matrix_rank."""
    message = (
        "This function was deprecated since version 1.9 and is now removed.\n"
        "Please use the `torch.linalg.matrix_rank` function instead. "
        "The parameter 'symmetric' was renamed in `torch.linalg.matrix_rank()` to 'hermitian'."
    )
    raise RuntimeError(message)
|
102 |
+
|
103 |
+
|
104 |
+
def solve(input: Tensor, A: Tensor, *, out=None) -> Tuple[Tensor, Tensor]:
    """Removed API stub: always raises with migration guidance to torch.linalg.solve."""
    raise RuntimeError(
        "This function was deprecated since version 1.9 and is now removed. "
        "`torch.solve` is deprecated in favor of `torch.linalg.solve`. "
        "`torch.linalg.solve` has its arguments reversed and does not return the LU factorization.\n\n"
        "To get the LU factorization see `torch.lu`, which can be used with `torch.lu_solve` or `torch.lu_unpack`.\n"
        "X = torch.solve(B, A).solution "
        "should be replaced with:\n"
        "X = torch.linalg.solve(A, B)"
    )
|
114 |
+
|
115 |
+
|
116 |
+
def lstsq(input: Tensor, A: Tensor, *, out=None) -> Tuple[Tensor, Tensor]:
    """Removed API stub: always raises with migration guidance to torch.linalg.lstsq."""
    raise RuntimeError(
        "This function was deprecated since version 1.9 and is now removed. "
        "`torch.lstsq` is deprecated in favor of `torch.linalg.lstsq`.\n"
        "`torch.linalg.lstsq` has reversed arguments and does not return the QR decomposition in "
        "the returned tuple (although it returns other information about the problem).\n\n"
        "To get the QR decomposition consider using `torch.linalg.qr`.\n\n"
        "The returned solution in `torch.lstsq` stored the residuals of the solution in the "
        "last m - n columns of the returned value whenever m > n. In torch.linalg.lstsq, "
        "the residuals are in the field 'residuals' of the returned named tuple.\n\n"
        "The unpacking of the solution, as in\n"
        "X, _ = torch.lstsq(B, A).solution[:A.size(1)]\n"
        "should be replaced with:\n"
        "X = torch.linalg.lstsq(A, B).solution"
    )
|
131 |
+
|
132 |
+
|
133 |
+
def _symeig(
    input, eigenvectors=False, upper=True, *, out=None
) -> Tuple[Tensor, Tensor]:
    """Removed API stub: always raises with migration guidance to torch.linalg.eigh/eigvalsh."""
    raise RuntimeError(
        "This function was deprecated since version 1.9 and is now removed. "
        "The default behavior has changed from using the upper triangular portion of the matrix by default "
        "to using the lower triangular portion.\n\n"
        "L, _ = torch.symeig(A, upper=upper) "
        "should be replaced with:\n"
        "L = torch.linalg.eigvalsh(A, UPLO='U' if upper else 'L')\n\n"
        "and\n\n"
        "L, V = torch.symeig(A, eigenvectors=True) "
        "should be replaced with:\n"
        "L, V = torch.linalg.eigh(A, UPLO='U' if upper else 'L')"
    )
|
148 |
+
|
149 |
+
|
150 |
+
def eig(
    self: Tensor, eigenvectors: bool = False, *, e=None, v=None
) -> Tuple[Tensor, Tensor]:
    """Removed API stub: always raises with migration guidance to torch.linalg.eig/eigvals."""
    raise RuntimeError(
        "This function was deprecated since version 1.9 and is now removed. "
        "`torch.linalg.eig` returns complex tensors of dtype `cfloat` or `cdouble` rather than real tensors "
        "mimicking complex tensors.\n\n"
        "L, _ = torch.eig(A) "
        "should be replaced with:\n"
        "L_complex = torch.linalg.eigvals(A)\n\n"
        "and\n\n"
        "L, V = torch.eig(A, eigenvectors=True) "
        "should be replaced with:\n"
        "L_complex, V_complex = torch.linalg.eig(A)"
    )
|
env-llmeval/lib/python3.10/site-packages/torch/_lobpcg.py
ADDED
@@ -0,0 +1,1167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Locally Optimal Block Preconditioned Conjugate Gradient methods.
|
2 |
+
"""
|
3 |
+
# Author: Pearu Peterson
|
4 |
+
# Created: February 2020
|
5 |
+
|
6 |
+
from typing import Dict, Optional, Tuple
|
7 |
+
|
8 |
+
import torch
|
9 |
+
from torch import Tensor
|
10 |
+
from . import _linalg_utils as _utils
|
11 |
+
from .overrides import handle_torch_function, has_torch_function
|
12 |
+
|
13 |
+
|
14 |
+
__all__ = ["lobpcg"]
|
15 |
+
|
16 |
+
|
17 |
+
def _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U):
|
18 |
+
# compute F, such that F_ij = (d_j - d_i)^{-1} for i != j, F_ii = 0
|
19 |
+
F = D.unsqueeze(-2) - D.unsqueeze(-1)
|
20 |
+
F.diagonal(dim1=-2, dim2=-1).fill_(float("inf"))
|
21 |
+
F.pow_(-1)
|
22 |
+
|
23 |
+
# A.grad = U (D.grad + (U^T U.grad * F)) U^T
|
24 |
+
Ut = U.mT.contiguous()
|
25 |
+
res = torch.matmul(
|
26 |
+
U, torch.matmul(torch.diag_embed(D_grad) + torch.matmul(Ut, U_grad) * F, Ut)
|
27 |
+
)
|
28 |
+
|
29 |
+
return res
|
30 |
+
|
31 |
+
|
def _polynomial_coefficients_given_roots(roots):
    """
    Given the `roots` of a polynomial, find the polynomial's coefficients.

    If roots = (r_1, ..., r_n), then the method returns
    coefficients (a_0, a_1, ..., a_n (== 1)) so that
    p(x) = (x - r_1) * ... * (x - r_n)
         = x^n + a_{n-1} * x^{n-1} + ... a_1 * x_1 + a_0

    Args:
        roots (Tensor): (possibly batched) polynomial roots stored along
            the last dimension, of size n.

    Returns:
        Tensor: coefficients (a_0, ..., a_{n-1}, 1) with the same batch
        shape as `roots` and last dimension of size n + 1.

    Note: for better performance requires writing a low-level kernel
    """
    poly_order = roots.shape[-1]
    poly_coeffs_shape = list(roots.shape)
    # we assume p(x) = x^n + a_{n-1} * x^{n-1} + ... + a_1 * x + a_0,
    # so poly_coeffs = {a_0, ..., a_n, a_{n+1}(== 1)},
    # but we insert one extra coefficient to enable better vectorization below
    poly_coeffs_shape[-1] += 2
    poly_coeffs = roots.new_zeros(poly_coeffs_shape)
    poly_coeffs[..., 0] = 1
    poly_coeffs[..., -1] = 1

    # perform the Horner's rule
    for i in range(1, poly_order + 1):
        # note that it is computationally hard to compute backward for this method,
        # because then given the coefficients it would require finding the roots and/or
        # calculating the sensitivity based on the Vieta's theorem.
        # So the code below tries to circumvent the explicit root finding by series
        # of operations on memory copies imitating the Horner's method.
        # The memory copies are required to construct nodes in the computational graph
        # by exploting the explicit (not in-place, separate node for each step)
        # recursion of the Horner's method.
        # Needs more memory, O(... * k^2), but with only O(... * k^2) complexity.
        # (the clone is only needed when autograd must record each step separately)
        poly_coeffs_new = poly_coeffs.clone() if roots.requires_grad else poly_coeffs
        out = poly_coeffs_new.narrow(-1, poly_order - i, i + 1)
        out -= roots.narrow(-1, i - 1, 1) * poly_coeffs.narrow(
            -1, poly_order - i + 1, i + 1
        )
        poly_coeffs = poly_coeffs_new

    # drop the auxiliary leading slot; keep (a_0, ..., a_{n-1}, 1)
    return poly_coeffs.narrow(-1, 1, poly_order + 1)
72 |
+
|
73 |
+
|
74 |
+
def _polynomial_value(poly, x, zero_power, transition):
|
75 |
+
"""
|
76 |
+
A generic method for computing poly(x) using the Horner's rule.
|
77 |
+
|
78 |
+
Args:
|
79 |
+
poly (Tensor): the (possibly batched) 1D Tensor representing
|
80 |
+
polynomial coefficients such that
|
81 |
+
poly[..., i] = (a_{i_0}, ..., a{i_n} (==1)), and
|
82 |
+
poly(x) = poly[..., 0] * zero_power + ... + poly[..., n] * x^n
|
83 |
+
|
84 |
+
x (Tensor): the value (possible batched) to evalate the polynomial `poly` at.
|
85 |
+
|
86 |
+
zero_power (Tensor): the representation of `x^0`. It is application-specific.
|
87 |
+
|
88 |
+
transition (Callable): the function that accepts some intermediate result `int_val`,
|
89 |
+
the `x` and a specific polynomial coefficient
|
90 |
+
`poly[..., k]` for some iteration `k`.
|
91 |
+
It basically performs one iteration of the Horner's rule
|
92 |
+
defined as `x * int_val + poly[..., k] * zero_power`.
|
93 |
+
Note that `zero_power` is not a parameter,
|
94 |
+
because the step `+ poly[..., k] * zero_power` depends on `x`,
|
95 |
+
whether it is a vector, a matrix, or something else, so this
|
96 |
+
functionality is delegated to the user.
|
97 |
+
"""
|
98 |
+
|
99 |
+
res = zero_power.clone()
|
100 |
+
for k in range(poly.size(-1) - 2, -1, -1):
|
101 |
+
res = transition(res, x, poly[..., k])
|
102 |
+
return res
|
103 |
+
|
104 |
+
|
def _matrix_polynomial_value(poly, x, zero_power=None):
    """
    Evaluates `poly(x)` for the (batched) matrix input `x`.
    Check out `_polynomial_value` function for more details.
    """

    def transition(curr_poly_val, x, poly_coeff):
        # One matrix Horner step: multiply by x, then add coeff * I by
        # bumping the diagonal in place.
        stepped = x.matmul(curr_poly_val)
        stepped.diagonal(dim1=-2, dim2=-1).add_(poly_coeff.unsqueeze(-1))
        return stepped

    if zero_power is None:
        # x^0 for matrices is the identity, broadcast over batch dims.
        m = x.size(-1)
        batch_ones = [1] * (x.dim() - 2)
        zero_power = torch.eye(m, m, dtype=x.dtype, device=x.device).view(
            *batch_ones, m, m
        )

    return _polynomial_value(poly, x, zero_power, transition)
123 |
+
|
124 |
+
|
def _vector_polynomial_value(poly, x, zero_power=None):
    """
    Evaluates `poly(x)` for the (batched) vector input `x`.
    Check out `_polynomial_value` function for more details.
    """

    def transition(curr_poly_val, x, poly_coeff):
        # One elementwise Horner step: coeff + x * current value.
        return torch.addcmul(poly_coeff.unsqueeze(-1), x, curr_poly_val)

    if zero_power is None:
        # x^0 for vectors is a tensor of ones of the same shape.
        zero_power = x.new_ones(1).expand(x.shape)

    return _polynomial_value(poly, x, zero_power, transition)
140 |
+
|
141 |
+
|
def _symeig_backward_partial_eigenspace(D_grad, U_grad, A, D, U, largest):
    """Backward of a partial symmetric eigendecomposition A = U diag(D) U^T.

    Used when `U` holds only `k < m` eigenvectors of `A`.  The gradient
    component living in span(U) follows the complete-eigenspace formula;
    the component in the orthogonal complement is recovered by solving a
    Sylvester equation through the characteristic polynomial of `D`, after
    Hu, Qingxi, and Daizhan Cheng,
    "The polynomial solution to the Sylvester matrix equation",
    Applied Mathematics Letters 19.9 (2006): 859-864.

    Args:
        D_grad (Tensor): gradient w.r.t. the eigenvalues, size (*, k).
        U_grad (Tensor): gradient w.r.t. the eigenvectors, size (*, m, k).
        A (Tensor): the input matrix, size (*, m, m).
        D (Tensor): the computed eigenvalues, size (*, k).
        U (Tensor): the computed eigenvectors (columns), size (*, m, k).
        largest (bool): whether the largest (True) or smallest (False)
            eigenpairs were computed; decides the sign with which the
            restricted characteristic polynomial is positive-definite.

    Returns:
        Tensor: gradient w.r.t. `A`, size (*, m, m).
    """
    # Number of computed eigenpairs == degree of the characteristic
    # polynomial of D used below.
    #
    # NOTE: the original code read the series-loop variable `k` *after* the
    # loop (Python loop-variable leaking) to decide definiteness; that value
    # always equals chr_poly_D.size(-1) - 1 == D.size(-1), which we now
    # compute explicitly — this also avoids a NameError if the loop body
    # is ever refactored or skipped.
    num_eigenpairs = D.size(-1)

    # compute a projection operator onto an orthogonal subspace spanned by the
    # columns of U defined as (I - UU^T)
    Ut = U.mT.contiguous()
    proj_U_ortho = -U.matmul(Ut)
    proj_U_ortho.diagonal(dim1=-2, dim2=-1).add_(1)

    # compute U_ortho, a basis for the orthogonal complement to the span(U),
    # by projecting a random [..., m, m - k] matrix onto the subspace spanned
    # by the columns of U.
    #
    # fix generator for determinism (a freshly constructed Generator starts
    # from a fixed default state — TODO confirm this holds on all devices)
    gen = torch.Generator(A.device)

    # orthogonal complement to the span(U)
    U_ortho = proj_U_ortho.matmul(
        torch.randn(
            (*A.shape[:-1], A.size(-1) - D.size(-1)),
            dtype=A.dtype,
            device=A.device,
            generator=gen,
        )
    )
    U_ortho_t = U_ortho.mT.contiguous()

    # compute the coefficients of the characteristic polynomial of the tensor D.
    # Note that D is diagonal, so the diagonal elements are exactly the roots
    # of the characteristic polynomial.
    chr_poly_D = _polynomial_coefficients_given_roots(D)

    # The code below finds the explicit solution to the Sylvester equation
    #   U_ortho^T A U_ortho dX - dX D = -U_ortho^T A U
    # and incorporates it into the whole gradient stored in `res`.
    #
    # Naively (per the Hu & Cheng paper) one would accumulate, per power k,
    #   p_res = sum_i (A^{k-1-i} @ U_grad) * D^i
    #   res  -= chr_poly_D[k] * (U_ortho @ poly_D_at_A^{-1} @ U_ortho^T @ p_res @ U^T)
    # The loop below reorganizes this sum so that each power of A is applied
    # exactly once (no redundant matrix_power calls): at step j the running
    # product A^{j-1} @ U_grad is weighted by the tail polynomial
    # chr_poly_D[j:] evaluated at D.
    U_grad_projected = U_grad
    series_acc = U_grad_projected.new_zeros(U_grad_projected.shape)
    for j in range(1, chr_poly_D.size(-1)):
        poly_D = _vector_polynomial_value(chr_poly_D[..., j:], D)
        series_acc += U_grad_projected * poly_D.unsqueeze(-2)
        U_grad_projected = A.matmul(U_grad_projected)

    # compute chr_poly_D(A) via the Horner's rule (instead of summing
    # chr_poly_D[k] * A.matrix_power(k))
    chr_poly_D_at_A = _matrix_polynomial_value(chr_poly_D, A)

    # compute the action of `chr_poly_D_at_A` restricted to U_ortho_t
    chr_poly_D_at_A_to_U_ortho = torch.matmul(
        U_ortho_t, torch.matmul(chr_poly_D_at_A, U_ortho)
    )
    # we need to invert `chr_poly_D_at_A_to_U_ortho`, for that we compute its
    # Cholesky decomposition and then use `torch.cholesky_solve` for better stability.
    # Cholesky decomposition requires the input to be positive-definite.
    # Note that `chr_poly_D_at_A_to_U_ortho` is positive-definite if
    # 1. `largest` == False, or
    # 2. `largest` == True and `num_eigenpairs` is even
    # under the assumption that `A` has distinct eigenvalues.
    #
    # check if `chr_poly_D_at_A_to_U_ortho` is positive-definite or negative-definite
    chr_poly_D_at_A_to_U_ortho_sign = (
        -1 if (largest and (num_eigenpairs % 2 == 1)) else +1
    )
    chr_poly_D_at_A_to_U_ortho_L = torch.linalg.cholesky(
        chr_poly_D_at_A_to_U_ortho_sign * chr_poly_D_at_A_to_U_ortho
    )

    # compute the gradient part in span(U)
    res = _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U)

    # incorporate the Sylvester equation solution into the full gradient;
    # it resides in span(U_ortho)
    res -= U_ortho.matmul(
        chr_poly_D_at_A_to_U_ortho_sign
        * torch.cholesky_solve(
            U_ortho_t.matmul(series_acc), chr_poly_D_at_A_to_U_ortho_L
        )
    ).matmul(Ut)

    return res
247 |
+
|
248 |
+
|
def _symeig_backward(D_grad, U_grad, A, D, U, largest):
    """Dispatch the symeig backward to the complete- or partial-eigenspace
    formula depending on whether `U` is square."""
    # A square `U` means its columns span the complete eigenspace of `A`,
    # which admits the cheaper closed-form gradient.
    has_complete_eigenspace = U.size(-1) == U.size(-2)
    if has_complete_eigenspace:
        return _symeig_backward_complete_eigenspace(D_grad, U_grad, A, D, U)
    return _symeig_backward_partial_eigenspace(D_grad, U_grad, A, D, U, largest)
255 |
+
|
256 |
+
|
class LOBPCGAutogradFunction(torch.autograd.Function):
    """Autograd wrapper around :func:`_lobpcg`.

    The forward pass runs the LOBPCG solver; the backward pass computes the
    gradient of `A` via the symeig backward helpers above.  Only real,
    dense input of the standard eigenproblem (`B is None`) is
    differentiable — see the explicit checks in :meth:`backward`.
    """

    @staticmethod
    def forward(  # type: ignore[override]
        ctx,
        A: Tensor,
        k: Optional[int] = None,
        B: Optional[Tensor] = None,
        X: Optional[Tensor] = None,
        n: Optional[int] = None,
        iK: Optional[Tensor] = None,
        niter: Optional[int] = None,
        tol: Optional[float] = None,
        largest: Optional[bool] = None,
        method: Optional[str] = None,
        tracker: None = None,
        ortho_iparams: Optional[Dict[str, int]] = None,
        ortho_fparams: Optional[Dict[str, float]] = None,
        ortho_bparams: Optional[Dict[str, bool]] = None,
    ) -> Tuple[Tensor, Tensor]:
        # makes sure that input is contiguous for efficiency.
        # Note: autograd does not support dense gradients for sparse input yet.
        A = A.contiguous() if (not A.is_sparse) else A
        if B is not None:
            B = B.contiguous() if (not B.is_sparse) else B

        D, U = _lobpcg(
            A,
            k,
            B,
            X,
            n,
            iK,
            niter,
            tol,
            largest,
            method,
            tracker,
            ortho_iparams,
            ortho_fparams,
            ortho_bparams,
        )

        # stash what backward needs: the inputs and the computed eigenpairs;
        # `largest` affects the definiteness handling in the partial backward
        ctx.save_for_backward(A, B, D, U)
        ctx.largest = largest

        return D, U

    @staticmethod
    def backward(ctx, D_grad, U_grad):
        A_grad = B_grad = None
        # one gradient slot per forward input (excluding ctx): 14 parameters
        grads = [None] * 14

        A, B, D, U = ctx.saved_tensors
        largest = ctx.largest

        # lobpcg.backward has some limitations. Checks for unsupported input
        if A.is_sparse or (B is not None and B.is_sparse and ctx.needs_input_grad[2]):
            raise ValueError(
                "lobpcg.backward does not support sparse input yet."
                "Note that lobpcg.forward does though."
            )
        if (
            A.dtype in (torch.complex64, torch.complex128)
            or B is not None
            and B.dtype in (torch.complex64, torch.complex128)
        ):
            raise ValueError(
                "lobpcg.backward does not support complex input yet."
                "Note that lobpcg.forward does though."
            )
        if B is not None:
            raise ValueError(
                "lobpcg.backward does not support backward with B != I yet."
            )

        # forward defaults `largest` to True when unspecified
        if largest is None:
            largest = True

        # symeig backward
        if B is None:
            A_grad = _symeig_backward(D_grad, U_grad, A, D, U, largest)

        # A has index 0
        grads[0] = A_grad
        # B has index 2
        grads[2] = B_grad
        return tuple(grads)
344 |
+
|
345 |
+
|
def lobpcg(
    A: Tensor,
    k: Optional[int] = None,
    B: Optional[Tensor] = None,
    X: Optional[Tensor] = None,
    n: Optional[int] = None,
    iK: Optional[Tensor] = None,
    niter: Optional[int] = None,
    tol: Optional[float] = None,
    largest: Optional[bool] = None,
    method: Optional[str] = None,
    tracker: None = None,
    ortho_iparams: Optional[Dict[str, int]] = None,
    ortho_fparams: Optional[Dict[str, float]] = None,
    ortho_bparams: Optional[Dict[str, bool]] = None,
) -> Tuple[Tensor, Tensor]:
    """Find the k largest (or smallest) eigenvalues and the corresponding
    eigenvectors of a symmetric positive definite generalized
    eigenvalue problem using matrix-free LOBPCG methods.

    This function is a front-end to the following LOBPCG algorithms
    selectable via `method` argument:

      `method="basic"` - the LOBPCG method introduced by Andrew
      Knyazev, see [Knyazev2001]. A less robust method, may fail when
      Cholesky is applied to singular input.

      `method="ortho"` - the LOBPCG method with orthogonal basis
      selection [StathopoulosEtal2002]. A robust method.

    Supported inputs are dense, sparse, and batches of dense matrices.

    .. note:: In general, the basic method spends least time per
      iteration. However, the robust methods converge much faster and
      are more stable. So, the usage of the basic method is generally
      not recommended but there exist cases where the usage of the
      basic method may be preferred.

    .. warning:: The backward method does not support sparse and complex inputs.
      It works only when `B` is not provided (i.e. `B == None`).
      We are actively working on extensions, and the details of
      the algorithms are going to be published promptly.

    .. warning:: While it is assumed that `A` is symmetric, `A.grad` is not.
      To make sure that `A.grad` is symmetric, so that `A - t * A.grad` is symmetric
      in first-order optimization routines, prior to running `lobpcg`
      we do the following symmetrization map: `A -> (A + A.t()) / 2`.
      The map is performed only when the `A` requires gradients.

    Args:

      A (Tensor): the input tensor of size :math:`(*, m, m)`

      B (Tensor, optional): the input tensor of size :math:`(*, m,
                  m)`. When not specified, `B` is interpreted as
                  identity matrix.

      X (tensor, optional): the input tensor of size :math:`(*, m, n)`
                  where `k <= n <= m`. When specified, it is used as
                  initial approximation of eigenvectors. X must be a
                  dense tensor.

      iK (tensor, optional): the input tensor of size :math:`(*, m,
                  m)`. When specified, it will be used as preconditioner.

      k (integer, optional): the number of requested
                  eigenpairs. Default is the number of :math:`X`
                  columns (when specified) or `1`.

      n (integer, optional): if :math:`X` is not specified then `n`
                  specifies the size of the generated random
                  approximation of eigenvectors. Default value for `n`
                  is `k`. If :math:`X` is specified, the value of `n`
                  (when specified) must be the number of :math:`X`
                  columns.

      tol (float, optional): residual tolerance for stopping
                 criterion. Default is `feps ** 0.5` where `feps` is
                 smallest non-zero floating-point number of the given
                 input tensor `A` data type.

      largest (bool, optional): when True, solve the eigenproblem for
                 the largest eigenvalues. Otherwise, solve the
                 eigenproblem for smallest eigenvalues. Default is
                 `True`.

      method (str, optional): select LOBPCG method. See the
                 description of the function above. Default is
                 "ortho".

      niter (int, optional): maximum number of iterations. When
                 reached, the iteration process is hard-stopped and
                 the current approximation of eigenpairs is returned.
                 For infinite iteration but until convergence criteria
                 is met, use `-1`.

      tracker (callable, optional) : a function for tracing the
                 iteration process. When specified, it is called at
                 each iteration step with LOBPCG instance as an
                 argument. The LOBPCG instance holds the full state of
                 the iteration process in the following attributes:

                   `iparams`, `fparams`, `bparams` - dictionaries of
                   integer, float, and boolean valued input
                   parameters, respectively

                   `ivars`, `fvars`, `bvars`, `tvars` - dictionaries
                   of integer, float, boolean, and Tensor valued
                   iteration variables, respectively.

                   `A`, `B`, `iK` - input Tensor arguments.

                   `E`, `X`, `S`, `R` - iteration Tensor variables.

                 For instance:

                   `ivars["istep"]` - the current iteration step
                   `X` - the current approximation of eigenvectors
                   `E` - the current approximation of eigenvalues
                   `R` - the current residual
                   `ivars["converged_count"]` - the current number of converged eigenpairs
                   `tvars["rerr"]` - the current state of convergence criteria

                 Note that when `tracker` stores Tensor objects from
                 the LOBPCG instance, it must make copies of these.

                 If `tracker` sets `bvars["force_stop"] = True`, the
                 iteration process will be hard-stopped.

      ortho_iparams, ortho_fparams, ortho_bparams (dict, optional):
                 various parameters to LOBPCG algorithm when using
                 `method="ortho"`.

    Returns:

      E (Tensor): tensor of eigenvalues of size :math:`(*, k)`

      X (Tensor): tensor of eigenvectors of size :math:`(*, m, k)`

    References:

      [Knyazev2001] Andrew V. Knyazev. (2001) Toward the Optimal
      Preconditioned Eigensolver: Locally Optimal Block Preconditioned
      Conjugate Gradient Method. SIAM J. Sci. Comput., 23(2),
      517-541. (25 pages)
      https://epubs.siam.org/doi/abs/10.1137/S1064827500366124

      [StathopoulosEtal2002] Andreas Stathopoulos and Kesheng
      Wu. (2002) A Block Orthogonalization Procedure with Constant
      Synchronization Requirements. SIAM J. Sci. Comput., 23(6),
      2165-2182. (18 pages)
      https://epubs.siam.org/doi/10.1137/S1064827500370883

      [DuerschEtal2018] Jed A. Duersch, Meiyue Shao, Chao Yang, Ming
      Gu. (2018) A Robust and Efficient Implementation of LOBPCG.
      SIAM J. Sci. Comput., 40(5), C655-C676. (22 pages)
      https://epubs.siam.org/doi/abs/10.1137/17M1129830

    """

    # __torch_function__ dispatch for Tensor-like subclasses
    if not torch.jit.is_scripting():
        tensor_ops = (A, B, X, iK)
        if not set(map(type, tensor_ops)).issubset(
            (torch.Tensor, type(None))
        ) and has_torch_function(tensor_ops):
            return handle_torch_function(
                lobpcg,
                tensor_ops,
                A,
                k=k,
                B=B,
                X=X,
                n=n,
                iK=iK,
                niter=niter,
                tol=tol,
                largest=largest,
                method=method,
                tracker=tracker,
                ortho_iparams=ortho_iparams,
                ortho_fparams=ortho_fparams,
                ortho_bparams=ortho_bparams,
            )

    # differentiable path: route through the autograd Function (eager only)
    if not torch._jit_internal.is_scripting():
        if A.requires_grad or (B is not None and B.requires_grad):
            # While it is expected that `A` is symmetric,
            # the `A_grad` might be not. Therefore we perform the trick below,
            # so that `A_grad` becomes symmetric.
            # The symmetrization is important for first-order optimization methods,
            # so that (A - alpha * A_grad) is still a symmetric matrix.
            # Same holds for `B`.
            A_sym = (A + A.mT) / 2
            B_sym = (B + B.mT) / 2 if (B is not None) else None

            return LOBPCGAutogradFunction.apply(
                A_sym,
                k,
                B_sym,
                X,
                n,
                iK,
                niter,
                tol,
                largest,
                method,
                tracker,
                ortho_iparams,
                ortho_fparams,
                ortho_bparams,
            )
    else:
        if A.requires_grad or (B is not None and B.requires_grad):
            raise RuntimeError(
                "Script and require grads is not supported atm."
                "If you just want to do the forward, use .detach()"
                "on A and B before calling into lobpcg"
            )

    # non-differentiable path: call the worker dispatcher directly
    return _lobpcg(
        A,
        k,
        B,
        X,
        n,
        iK,
        niter,
        tol,
        largest,
        method,
        tracker,
        ortho_iparams,
        ortho_fparams,
        ortho_bparams,
    )
581 |
+
|
582 |
+
|
def _lobpcg(
    A: Tensor,
    k: Optional[int] = None,
    B: Optional[Tensor] = None,
    X: Optional[Tensor] = None,
    n: Optional[int] = None,
    iK: Optional[Tensor] = None,
    niter: Optional[int] = None,
    tol: Optional[float] = None,
    largest: Optional[bool] = None,
    method: Optional[str] = None,
    tracker: None = None,
    ortho_iparams: Optional[Dict[str, int]] = None,
    ortho_fparams: Optional[Dict[str, float]] = None,
    ortho_bparams: Optional[Dict[str, bool]] = None,
) -> Tuple[Tensor, Tensor]:
    """Validate/normalize `lobpcg` arguments and dispatch the work to one
    `LOBPCG` worker per matrix in the (possibly batched) input.

    See `lobpcg` for the meaning of the arguments.
    """
    # A must be square:
    assert A.shape[-2] == A.shape[-1], A.shape
    if B is not None:
        # A and B must have the same shapes:
        assert A.shape == B.shape, (A.shape, B.shape)

    dtype = _utils.get_floating_dtype(A)
    device = A.device
    if tol is None:
        # default tolerance: sqrt of the machine epsilon of the working dtype
        feps = {torch.float32: 1.2e-07, torch.float64: 2.23e-16}[dtype]
        tol = feps**0.5

    m = A.shape[-1]
    # k defaults to the number of X columns (when given) or 1;
    # n follows X's column count when X is given, else defaults to k
    k = (1 if X is None else X.shape[-1]) if k is None else k
    n = (k if n is None else n) if X is None else X.shape[-1]

    # the method needs room for a 3n-column search subspace S
    if m < 3 * n:
        raise ValueError(
            f"LPBPCG algorithm is not applicable when the number of A rows (={m})"
            f" is smaller than 3 x the number of requested eigenpairs (={n})"
        )

    method = "ortho" if method is None else method

    # integer-, float-, and bool-valued solver parameters
    iparams = {
        "m": m,
        "n": n,
        "k": k,
        "niter": 1000 if niter is None else niter,
    }

    fparams = {
        "tol": tol,
    }

    bparams = {"largest": True if largest is None else largest}

    if method == "ortho":
        # apply user overrides first, then fill in the remaining defaults
        if ortho_iparams is not None:
            iparams.update(ortho_iparams)
        if ortho_fparams is not None:
            fparams.update(ortho_fparams)
        if ortho_bparams is not None:
            bparams.update(ortho_bparams)
        iparams["ortho_i_max"] = iparams.get("ortho_i_max", 3)
        iparams["ortho_j_max"] = iparams.get("ortho_j_max", 3)
        fparams["ortho_tol"] = fparams.get("ortho_tol", tol)
        fparams["ortho_tol_drop"] = fparams.get("ortho_tol_drop", tol)
        fparams["ortho_tol_replace"] = fparams.get("ortho_tol_replace", tol)
        bparams["ortho_use_drop"] = bparams.get("ortho_use_drop", False)

    # install the tracker-aware hook (restored before returning below)
    if not torch.jit.is_scripting():
        LOBPCG.call_tracker = LOBPCG_call_tracker  # type: ignore[assignment]

    if len(A.shape) > 2:
        # batched input: flatten the batch dimensions and run one worker
        # per matrix, collecting results into bE/bXret
        N = int(torch.prod(torch.tensor(A.shape[:-2])))
        bA = A.reshape((N,) + A.shape[-2:])
        bB = B.reshape((N,) + A.shape[-2:]) if B is not None else None
        bX = X.reshape((N,) + X.shape[-2:]) if X is not None else None
        bE = torch.empty((N, k), dtype=dtype, device=device)
        bXret = torch.empty((N, m, k), dtype=dtype, device=device)

        for i in range(N):
            A_ = bA[i]
            B_ = bB[i] if bB is not None else None
            # random initial eigenvector approximation when X is absent
            X_ = (
                torch.randn((m, n), dtype=dtype, device=device) if bX is None else bX[i]
            )
            assert len(X_.shape) == 2 and X_.shape == (m, n), (X_.shape, (m, n))
            iparams["batch_index"] = i
            worker = LOBPCG(A_, B_, X_, iK, iparams, fparams, bparams, method, tracker)
            worker.run()
            bE[i] = worker.E[:k]
            bXret[i] = worker.X[:, :k]

        if not torch.jit.is_scripting():
            LOBPCG.call_tracker = LOBPCG_call_tracker_orig  # type: ignore[assignment]

        # restore the original batch shape on the outputs
        return bE.reshape(A.shape[:-2] + (k,)), bXret.reshape(A.shape[:-2] + (m, k))

    # single-matrix input
    X = torch.randn((m, n), dtype=dtype, device=device) if X is None else X
    assert len(X.shape) == 2 and X.shape == (m, n), (X.shape, (m, n))

    worker = LOBPCG(A, B, X, iK, iparams, fparams, bparams, method, tracker)

    worker.run()

    if not torch.jit.is_scripting():
        LOBPCG.call_tracker = LOBPCG_call_tracker_orig  # type: ignore[assignment]

    return worker.E[:k], worker.X[:, :k]
690 |
+
|
691 |
+
|
692 |
+
class LOBPCG:
    """Worker class of LOBPCG methods.

    An instance bundles the constant problem data (``A``, ``B``, ``iK``
    and the parameter dictionaries) with the mutable per-iteration state
    (``X``, ``E``, ``R``, ``S`` and the ``tvars``/``ivars``/``fvars``/
    ``bvars`` dictionaries) of a single LOBPCG solve.
    """

    def __init__(
        self,
        A: Optional[Tensor],
        B: Optional[Tensor],
        X: Tensor,
        iK: Optional[Tensor],
        iparams: Dict[str, int],
        fparams: Dict[str, float],
        bparams: Dict[str, bool],
        method: str,
        tracker: None,
    ) -> None:
        # constant parameters; `None` for A/B/iK is handled by
        # `_utils.matmul`, which skips the product for a None operand
        self.A = A
        self.B = B
        self.iK = iK
        self.iparams = iparams
        self.fparams = fparams
        self.bparams = bparams
        self.method = method
        self.tracker = tracker
        m = iparams["m"]
        n = iparams["n"]

        # variable parameters
        self.X = X
        self.E = torch.zeros((n,), dtype=X.dtype, device=X.device)
        self.R = torch.zeros((m, n), dtype=X.dtype, device=X.device)
        # search-subspace basis; holds up to [X | P | W], hence 3 * n columns
        self.S = torch.zeros((m, 3 * n), dtype=X.dtype, device=X.device)
        self.tvars: Dict[str, Tensor] = {}
        self.ivars: Dict[str, int] = {"istep": 0}
        self.fvars: Dict[str, float] = {"_": 0.0}
        self.bvars: Dict[str, bool] = {"_": False}

    def __str__(self):
        # NOTE: fixed typo in the header line ("LOPBCG" -> "LOBPCG")
        lines = ["LOBPCG:"]
        lines += [f"  iparams={self.iparams}"]
        lines += [f"  fparams={self.fparams}"]
        lines += [f"  bparams={self.bparams}"]
        lines += [f"  ivars={self.ivars}"]
        lines += [f"  fvars={self.fvars}"]
        lines += [f"  bvars={self.bvars}"]
        lines += [f"  tvars={self.tvars}"]
        lines += [f"  A={self.A}"]
        lines += [f"  B={self.B}"]
        lines += [f"  iK={self.iK}"]
        lines += [f"  X={self.X}"]
        lines += [f"  E={self.E}"]
        r = ""
        for line in lines:
            r += line + "\n"
        return r

    def update(self):
        """Set and update iteration variables."""
        if self.ivars["istep"] == 0:
            # estimate operator norms once, on the first step only
            X_norm = float(torch.norm(self.X))
            iX_norm = X_norm**-1
            A_norm = float(torch.norm(_utils.matmul(self.A, self.X))) * iX_norm
            B_norm = float(torch.norm(_utils.matmul(self.B, self.X))) * iX_norm
            self.fvars["X_norm"] = X_norm
            self.fvars["A_norm"] = A_norm
            self.fvars["B_norm"] = B_norm
            self.ivars["iterations_left"] = self.iparams["niter"]
            self.ivars["converged_count"] = 0
            self.ivars["converged_end"] = 0

        if self.method == "ortho":
            self._update_ortho()
        else:
            self._update_basic()

        self.ivars["iterations_left"] = self.ivars["iterations_left"] - 1
        self.ivars["istep"] = self.ivars["istep"] + 1

    def update_residual(self):
        """Update residual R from A, B, X, E."""
        mm = _utils.matmul
        self.R = mm(self.A, self.X) - mm(self.B, self.X) * self.E

    def update_converged_count(self):
        """Determine the number of converged eigenpairs using backward stable
        convergence criterion, see discussion in Sec 4.3 of [DuerschEtal2018].

        Users may redefine this method for custom convergence criteria.
        """
        # (...) -> int
        prev_count = self.ivars["converged_count"]
        tol = self.fparams["tol"]
        A_norm = self.fvars["A_norm"]
        B_norm = self.fvars["B_norm"]
        E, X, R = self.E, self.X, self.R
        # relative residual error per eigenpair (column-wise)
        rerr = (
            torch.norm(R, 2, (0,))
            * (torch.norm(X, 2, (0,)) * (A_norm + E[: X.shape[-1]] * B_norm)) ** -1
        )
        converged = rerr < tol
        count = 0
        for b in converged:
            if not b:
                # ignore convergence of following pairs to ensure
                # strict ordering of eigenpairs
                break
            count += 1
        assert (
            count >= prev_count
        ), f"the number of converged eigenpairs (was {prev_count}, got {count}) cannot decrease"
        self.ivars["converged_count"] = count
        self.tvars["rerr"] = rerr
        return count

    def stop_iteration(self):
        """Return True to stop iterations.

        Note that tracker (if defined) can force-stop iterations by
        setting ``worker.bvars['force_stop'] = True``.
        """
        return (
            self.bvars.get("force_stop", False)
            or self.ivars["iterations_left"] == 0
            or self.ivars["converged_count"] >= self.iparams["k"]
        )

    def run(self):
        """Run LOBPCG iterations.

        Use this method as a template for implementing LOBPCG
        iteration scheme with custom tracker that is compatible with
        TorchScript.
        """
        self.update()

        if not torch.jit.is_scripting() and self.tracker is not None:
            self.call_tracker()

        while not self.stop_iteration():
            self.update()

            if not torch.jit.is_scripting() and self.tracker is not None:
                self.call_tracker()

    @torch.jit.unused
    def call_tracker(self):
        """Interface for tracking iteration process in Python mode.

        Tracking the iteration process is disabled in TorchScript
        mode. In fact, one should specify tracker=None when JIT
        compiling functions using lobpcg.
        """
        # do nothing when in TorchScript mode
        pass

    # Internal methods

    def _update_basic(self):
        """
        Update or initialize iteration variables when `method == "basic"`.
        """
        mm = torch.matmul
        ns = self.ivars["converged_end"]
        nc = self.ivars["converged_count"]
        n = self.iparams["n"]
        largest = self.bparams["largest"]

        if self.ivars["istep"] == 0:
            # first step: Rayleigh-Ritz on the initial X only
            Ri = self._get_rayleigh_ritz_transform(self.X)
            M = _utils.qform(_utils.qform(self.A, self.X), Ri)
            E, Z = _utils.symeig(M, largest)
            self.X[:] = mm(self.X, mm(Ri, Z))
            self.E[:] = E
            np = 0
            self.update_residual()
            nc = self.update_converged_count()
            self.S[..., :n] = self.X

            # preconditioned residual extends the search subspace
            W = _utils.matmul(self.iK, self.R)
            self.ivars["converged_end"] = ns = n + np + W.shape[-1]
            self.S[:, n + np : ns] = W
        else:
            # Rayleigh-Ritz on the active part of the subspace S
            S_ = self.S[:, nc:ns]
            Ri = self._get_rayleigh_ritz_transform(S_)
            M = _utils.qform(_utils.qform(self.A, S_), Ri)
            E_, Z = _utils.symeig(M, largest)
            self.X[:, nc:] = mm(S_, mm(Ri, Z[:, : n - nc]))
            self.E[nc:] = E_[: n - nc]
            P = mm(S_, mm(Ri, Z[:, n : 2 * n - nc]))
            np = P.shape[-1]

            self.update_residual()
            nc = self.update_converged_count()
            self.S[..., :n] = self.X
            self.S[:, n : n + np] = P
            W = _utils.matmul(self.iK, self.R[:, nc:])

            self.ivars["converged_end"] = ns = n + np + W.shape[-1]
            self.S[:, n + np : ns] = W

    def _update_ortho(self):
        """
        Update or initialize iteration variables when `method == "ortho"`.
        """
        mm = torch.matmul
        ns = self.ivars["converged_end"]
        nc = self.ivars["converged_count"]
        n = self.iparams["n"]
        largest = self.bparams["largest"]

        if self.ivars["istep"] == 0:
            Ri = self._get_rayleigh_ritz_transform(self.X)
            M = _utils.qform(_utils.qform(self.A, self.X), Ri)
            E, Z = _utils.symeig(M, largest)
            self.X = mm(self.X, mm(Ri, Z))
            self.update_residual()
            np = 0
            nc = self.update_converged_count()
            self.S[:, :n] = self.X
            W = self._get_ortho(self.R, self.X)
            ns = self.ivars["converged_end"] = n + np + W.shape[-1]
            self.S[:, n + np : ns] = W

        else:
            S_ = self.S[:, nc:ns]
            # Rayleigh-Ritz procedure
            E_, Z = _utils.symeig(_utils.qform(self.A, S_), largest)

            # Update E, X, P
            self.X[:, nc:] = mm(S_, Z[:, : n - nc])
            self.E[nc:] = E_[: n - nc]
            P = mm(
                S_,
                mm(
                    Z[:, n - nc :],
                    _utils.basis(_utils.transpose(Z[: n - nc, n - nc :])),
                ),
            )
            np = P.shape[-1]

            # check convergence
            self.update_residual()
            nc = self.update_converged_count()

            # update S
            self.S[:, :n] = self.X
            self.S[:, n : n + np] = P
            W = self._get_ortho(self.R[:, nc:], self.S[:, : n + np])
            ns = self.ivars["converged_end"] = n + np + W.shape[-1]
            self.S[:, n + np : ns] = W

    def _get_rayleigh_ritz_transform(self, S):
        """Return a transformation matrix that is used in Rayleigh-Ritz
        procedure for reducing a general eigenvalue problem :math:`(S^TAS)
        C = (S^TBS) C E` to a standard eigenvalue problem :math: `(Ri^T
        S^TAS Ri) Z = Z E` where `C = Ri Z`.

        .. note:: In the original Rayleight-Ritz procedure in
          [DuerschEtal2018], the problem is formulated as follows::

            SAS = S^T A S
            SBS = S^T B S
            D = (<diagonal matrix of SBS>) ** -1/2
            R^T R = Cholesky(D SBS D)
            Ri = D R^-1
            solve symeig problem Ri^T SAS Ri Z = Theta Z
            C = Ri Z

          To reduce the number of matrix products (denoted by empty
          space between matrices), here we introduce element-wise
          products (denoted by symbol `*`) so that the Rayleight-Ritz
          procedure becomes::

            SAS = S^T A S
            SBS = S^T B S
            d = (<diagonal of SBS>) ** -1/2    # this is 1-d column vector
            dd = d d^T                         # this is 2-d matrix
            R^T R = Cholesky(dd * SBS)
            Ri = R^-1 * d                      # broadcasting
            solve symeig problem Ri^T SAS Ri Z = Theta Z
            C = Ri Z

          where `dd` is 2-d matrix that replaces matrix products `D M
          D` with one element-wise product `M * dd`; and `d` replaces
          matrix product `D M` with element-wise product `M *
          d`. Also, creating the diagonal matrix `D` is avoided.

        Args:
        S (Tensor): the matrix basis for the search subspace, size is
                    :math:`(m, n)`.

        Returns:
        Ri (tensor): upper-triangular transformation matrix of size
                     :math:`(n, n)`.

        """
        # NOTE: removed an unused local (`mm = torch.matmul`) from the
        # original implementation; the method only uses _utils helpers
        # and torch.linalg routines.
        B = self.B
        SBS = _utils.qform(B, S)
        d_row = SBS.diagonal(0, -2, -1) ** -0.5
        d_col = d_row.reshape(d_row.shape[0], 1)
        # TODO use torch.linalg.cholesky_solve once it is implemented
        R = torch.linalg.cholesky((SBS * d_row) * d_col, upper=True)
        return torch.linalg.solve_triangular(
            R, d_row.diag_embed(), upper=True, left=False
        )

    def _get_svqb(
        self, U: Tensor, drop: bool, tau: float
    ) -> Tensor:
        """Return B-orthonormal U.

        .. note:: When `drop` is `False` then `svqb` is based on the
                  Algorithm 4 from [DuerschPhD2015] that is a slight
                  modification of the corresponding algorithm
                  introduced in [StathopolousWu2002].

        Args:

          U (Tensor) : initial approximation, size is (m, n)
          drop (bool) : when True, drop columns that
                     contribution to the `span([U])` is small.
          tau (float) : positive tolerance

        Returns:

          U (Tensor) : B-orthonormal columns (:math:`U^T B U = I`), size
                       is (m, n1), where `n1 = n` if `drop` is `False,
                       otherwise `n1 <= n`.

        """
        if torch.numel(U) == 0:
            return U
        UBU = _utils.qform(self.B, U)
        d = UBU.diagonal(0, -2, -1)

        # Detect and drop exact zero columns from U. While the test
        # `abs(d) == 0` is unlikely to be True for random data, it is
        # possible to construct input data to lobpcg where it will be
        # True leading to a failure (notice the `d ** -0.5` operation
        # in the original algorithm). To prevent the failure, we drop
        # the exact zero columns here and then continue with the
        # original algorithm below.
        nz = torch.where(abs(d) != 0.0)
        assert len(nz) == 1, nz
        if len(nz[0]) < len(d):
            U = U[:, nz[0]]
            if torch.numel(U) == 0:
                return U
            UBU = _utils.qform(self.B, U)
            d = UBU.diagonal(0, -2, -1)
            nz = torch.where(abs(d) != 0.0)
            assert len(nz[0]) == len(d)

        # The original algorithm 4 from [DuerschPhD2015].
        d_col = (d**-0.5).reshape(d.shape[0], 1)
        DUBUD = (UBU * d_col) * _utils.transpose(d_col)
        E, Z = _utils.symeig(DUBUD)
        t = tau * abs(E).max()
        if drop:
            keep = torch.where(E > t)
            assert len(keep) == 1, keep
            E = E[keep[0]]
            Z = Z[:, keep[0]]
            d_col = d_col[keep[0]]
        else:
            # clamp small eigenvalues up to the tolerance instead of
            # dropping the corresponding columns
            E[(torch.where(E < t))[0]] = t

        return torch.matmul(U * _utils.transpose(d_col), Z * E**-0.5)

    def _get_ortho(self, U, V):
        """Return B-orthonormal U with columns are B-orthogonal to V.

        .. note:: When `bparams["ortho_use_drop"] == False` then
                  `_get_ortho` is based on the Algorithm 3 from
                  [DuerschPhD2015] that is a slight modification of
                  the corresponding algorithm introduced in
                  [StathopolousWu2002]. Otherwise, the method
                  implements Algorithm 6 from [DuerschPhD2015]

        .. note:: If all U columns are B-collinear to V then the
                  returned tensor U will be empty.

        Args:

          U (Tensor) : initial approximation, size is (m, n)
          V (Tensor) : B-orthogonal external basis, size is (m, k)

        Returns:

          U (Tensor) : B-orthonormal columns (:math:`U^T B U = I`)
                       such that :math:`V^T B U=0`, size is (m, n1),
                       where `n1 = n` if `drop` is `False, otherwise
                       `n1 <= n`.
        """
        # NOTE: removed an unused local (`stats = ""`) from the
        # original implementation.
        mm = torch.matmul
        mm_B = _utils.matmul
        m = self.iparams["m"]
        tau_ortho = self.fparams["ortho_tol"]
        tau_drop = self.fparams["ortho_tol_drop"]
        tau_replace = self.fparams["ortho_tol_replace"]
        i_max = self.iparams["ortho_i_max"]
        j_max = self.iparams["ortho_j_max"]
        # when use_drop==True, enable dropping U columns that have
        # small contribution to the `span([U, V])`.
        use_drop = self.bparams["ortho_use_drop"]

        # clean up variables from the previous call
        for vkey in list(self.fvars.keys()):
            if vkey.startswith("ortho_") and vkey.endswith("_rerr"):
                self.fvars.pop(vkey)
        self.ivars.pop("ortho_i", 0)
        self.ivars.pop("ortho_j", 0)

        BV_norm = torch.norm(mm_B(self.B, V))
        BU = mm_B(self.B, U)
        VBU = mm(_utils.transpose(V), BU)
        i = j = 0
        for i in range(i_max):
            # project out the V component, then re-orthonormalize
            U = U - mm(V, VBU)
            drop = False
            tau_svqb = tau_drop
            for j in range(j_max):
                if use_drop:
                    U = self._get_svqb(U, drop, tau_svqb)
                    drop = True
                    tau_svqb = tau_replace
                else:
                    U = self._get_svqb(U, False, tau_replace)
                if torch.numel(U) == 0:
                    # all initial U columns are B-collinear to V
                    self.ivars["ortho_i"] = i
                    self.ivars["ortho_j"] = j
                    return U
                BU = mm_B(self.B, U)
                UBU = mm(_utils.transpose(U), BU)
                U_norm = torch.norm(U)
                BU_norm = torch.norm(BU)
                R = UBU - torch.eye(UBU.shape[-1], device=UBU.device, dtype=UBU.dtype)
                R_norm = torch.norm(R)
                # https://github.com/pytorch/pytorch/issues/33810 workaround:
                rerr = float(R_norm) * float(BU_norm * U_norm) ** -1
                vkey = f"ortho_UBUmI_rerr[{i}, {j}]"
                self.fvars[vkey] = rerr
                if rerr < tau_ortho:
                    break
            VBU = mm(_utils.transpose(V), BU)
            VBU_norm = torch.norm(VBU)
            U_norm = torch.norm(U)
            rerr = float(VBU_norm) * float(BV_norm * U_norm) ** -1
            vkey = f"ortho_VBU_rerr[{i}]"
            self.fvars[vkey] = rerr
            if rerr < tau_ortho:
                break
            if m < U.shape[-1] + V.shape[-1]:
                # TorchScript needs the class var to be assigned to a local to
                # do optional type refinement
                B = self.B
                assert B is not None
                raise ValueError(
                    "Overdetermined shape of U:"
                    f" #B-cols(={B.shape[-1]}) >= #U-cols(={U.shape[-1]}) + #V-cols(={V.shape[-1]}) must hold"
                )
        self.ivars["ortho_i"] = i
        self.ivars["ortho_j"] = j
        return U
|
1159 |
+
|
1160 |
+
|
1161 |
+
# Calling tracker is separated from LOBPCG definitions because
# TorchScript does not support user-defined callback arguments:
# the original (no-op) method is saved here so that it can be restored
# on the class after an eager-mode run that temporarily patched it.
LOBPCG_call_tracker_orig = LOBPCG.call_tracker


def LOBPCG_call_tracker(self):
    # Eager-mode replacement for LOBPCG.call_tracker: forwards the
    # worker instance to the user-supplied tracker callback.
    self.tracker(self)
|
env-llmeval/lib/python3.10/site-packages/torch/_lowrank.py
ADDED
@@ -0,0 +1,298 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Implement various linear algebra algorithms for low rank matrices.
|
2 |
+
"""
|
3 |
+
|
4 |
+
__all__ = ["svd_lowrank", "pca_lowrank"]
|
5 |
+
|
6 |
+
from typing import Optional, Tuple
|
7 |
+
|
8 |
+
import torch
|
9 |
+
from torch import Tensor
|
10 |
+
from . import _linalg_utils as _utils
|
11 |
+
from .overrides import handle_torch_function, has_torch_function
|
12 |
+
|
13 |
+
|
14 |
+
def get_approximate_basis(
    A: Tensor, q: int, niter: Optional[int] = 2, M: Optional[Tensor] = None
) -> Tensor:
    """Return a tensor ``Q`` with ``q`` orthonormal columns such that
    :math:`Q Q^H A` approximates :math:`A` (or, when ``M`` is given,
    such that :math:`Q Q^H (A - M)` approximates :math:`A - M`).

    .. note:: Implements Algorithm 4.4 (randomized subspace iteration)
              from Halko et al, 2009.

    .. note:: For an adequate approximation of a k-rank matrix, choose
              ``q`` with :math:`k <= q <= min(2*k, m, n)`; for large
              low-rank matrices ``q = k + 5..10`` is typical.

    .. note:: To obtain repeatable results, reset the seed for the
              pseudorandom number generator.

    Args:
        A (Tensor): the input tensor of size :math:`(*, m, n)`.
        q (int): the dimension of the subspace spanned by the columns
            of :math:`Q`.
        niter (int, optional): the number of subspace iterations to
            conduct; must be a nonnegative integer (default 2).
        M (Tensor, optional): the input tensor's mean of size
            :math:`(*, 1, n)`.

    References:
        Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, "Finding
        structure with randomness", arXiv:0909.4061, 2009.
    """
    subspace_iters = 2 if niter is None else niter
    n = A.shape[-1]
    dtype = _utils.get_floating_dtype(A)
    mm = _utils.matmul

    # A single random test matrix, exactly as in Algorithm 4.4.
    omega = torch.randn(n, q, dtype=dtype, device=A.device)

    # The following could be made faster using torch.geqrf + torch.ormqr,
    # but geqrf is not differentiable.
    A_H = _utils.transjugate(A)
    if M is None:

        def apply(X, forward):
            # multiply by A (forward) or its conjugate transpose (backward)
            return mm(A, X) if forward else mm(A_H, X)

    else:
        M_H = _utils.transjugate(M)

        def apply(X, forward):
            # same as above but on the implicitly centered matrix A - M
            if forward:
                return mm(A, X) - mm(M, X)
            return mm(A_H, X) - mm(M_H, X)

    Q = torch.linalg.qr(apply(omega, True)).Q
    for _ in range(subspace_iters):
        Q = torch.linalg.qr(apply(Q, False)).Q
        Q = torch.linalg.qr(apply(Q, True)).Q

    return Q
|
82 |
+
|
83 |
+
|
84 |
+
def svd_lowrank(
    A: Tensor,
    q: Optional[int] = 6,
    niter: Optional[int] = 2,
    M: Optional[Tensor] = None,
) -> Tuple[Tensor, Tensor, Tensor]:
    r"""Return the singular value decomposition ``(U, S, V)`` of a
    matrix, batches of matrices, or a sparse matrix :math:`A` such that
    :math:`A \approx U diag(S) V^T`.  When :math:`M` is given, the SVD
    of :math:`A - M` is computed instead.

    .. note:: Based on Algorithm 5.1 from Halko et al, 2009.  The input
              is assumed to be a low-rank matrix; reset the seed of the
              pseudorandom number generator for repeatable results.

    .. note:: For dense matrices prefer the full-rank
              :func:`torch.linalg.svd`, which is considerably faster;
              this routine is mainly useful for huge sparse matrices
              that :func:`torch.linalg.svd` cannot handle.

    Args:
        A (Tensor): the input tensor of size :math:`(*, m, n)`.
        q (int, optional): a slightly overestimated rank of A.
        niter (int, optional): the number of subspace iterations to
            conduct; must be a nonnegative integer (default 2).
        M (Tensor, optional): the input tensor's mean of size
            :math:`(*, 1, n)`.

    References:
        Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, "Finding
        structure with randomness", arXiv:0909.4061, 2009.
    """
    if not torch.jit.is_scripting():
        operands = (A, M)
        all_plain = set(map(type, operands)).issubset((torch.Tensor, type(None)))
        if not all_plain and has_torch_function(operands):
            # Defer to __torch_function__ overrides (tensor subclasses).
            return handle_torch_function(
                svd_lowrank, operands, A, q=q, niter=niter, M=M
            )
    return _svd_lowrank(A, q=q, niter=niter, M=M)
|
138 |
+
|
139 |
+
|
140 |
+
def _svd_lowrank(
|
141 |
+
A: Tensor,
|
142 |
+
q: Optional[int] = 6,
|
143 |
+
niter: Optional[int] = 2,
|
144 |
+
M: Optional[Tensor] = None,
|
145 |
+
) -> Tuple[Tensor, Tensor, Tensor]:
|
146 |
+
q = 6 if q is None else q
|
147 |
+
m, n = A.shape[-2:]
|
148 |
+
matmul = _utils.matmul
|
149 |
+
if M is None:
|
150 |
+
M_t = None
|
151 |
+
else:
|
152 |
+
M_t = _utils.transpose(M)
|
153 |
+
A_t = _utils.transpose(A)
|
154 |
+
|
155 |
+
# Algorithm 5.1 in Halko et al 2009, slightly modified to reduce
|
156 |
+
# the number conjugate and transpose operations
|
157 |
+
if m < n or n > q:
|
158 |
+
# computing the SVD approximation of a transpose in
|
159 |
+
# order to keep B shape minimal (the m < n case) or the V
|
160 |
+
# shape small (the n > q case)
|
161 |
+
Q = get_approximate_basis(A_t, q, niter=niter, M=M_t)
|
162 |
+
Q_c = _utils.conjugate(Q)
|
163 |
+
if M is None:
|
164 |
+
B_t = matmul(A, Q_c)
|
165 |
+
else:
|
166 |
+
B_t = matmul(A, Q_c) - matmul(M, Q_c)
|
167 |
+
assert B_t.shape[-2] == m, (B_t.shape, m)
|
168 |
+
assert B_t.shape[-1] == q, (B_t.shape, q)
|
169 |
+
assert B_t.shape[-1] <= B_t.shape[-2], B_t.shape
|
170 |
+
U, S, Vh = torch.linalg.svd(B_t, full_matrices=False)
|
171 |
+
V = Vh.mH
|
172 |
+
V = Q.matmul(V)
|
173 |
+
else:
|
174 |
+
Q = get_approximate_basis(A, q, niter=niter, M=M)
|
175 |
+
Q_c = _utils.conjugate(Q)
|
176 |
+
if M is None:
|
177 |
+
B = matmul(A_t, Q_c)
|
178 |
+
else:
|
179 |
+
B = matmul(A_t, Q_c) - matmul(M_t, Q_c)
|
180 |
+
B_t = _utils.transpose(B)
|
181 |
+
assert B_t.shape[-2] == q, (B_t.shape, q)
|
182 |
+
assert B_t.shape[-1] == n, (B_t.shape, n)
|
183 |
+
assert B_t.shape[-1] <= B_t.shape[-2], B_t.shape
|
184 |
+
U, S, Vh = torch.linalg.svd(B_t, full_matrices=False)
|
185 |
+
V = Vh.mH
|
186 |
+
U = Q.matmul(U)
|
187 |
+
|
188 |
+
return U, S, V
|
189 |
+
|
190 |
+
|
191 |
+
def pca_lowrank(
    A: Tensor, q: Optional[int] = None, center: bool = True, niter: int = 2
) -> Tuple[Tensor, Tensor, Tensor]:
    r"""Performs linear Principal Component Analysis (PCA) on a low-rank
    matrix, batches of such matrices, or sparse matrix.

    This function returns a namedtuple ``(U, S, V)`` which is the
    nearly optimal approximation of a singular value decomposition of
    a centered matrix :math:`A` such that :math:`A = U diag(S) V^T`.

    .. note:: The relation of ``(U, S, V)`` to PCA is as follows:

                - :math:`A` is a data matrix with ``m`` samples and
                  ``n`` features

                - the :math:`V` columns represent the principal directions

                - :math:`S ** 2 / (m - 1)` contains the eigenvalues of
                  :math:`A^T A / (m - 1)` which is the covariance of
                  ``A`` when ``center=True`` is provided.

                - ``matmul(A, V[:, :k])`` projects data to the first k
                  principal components

    .. note:: Different from the standard SVD, the size of returned
              matrices depend on the specified rank and q
              values as follows:

                - :math:`U` is m x q matrix

                - :math:`S` is q-vector

                - :math:`V` is n x q matrix

    .. note:: To obtain repeatable results, reset the seed for the
              pseudorandom number generator

    Args:

        A (Tensor): the input tensor of size :math:`(*, m, n)`

        q (int, optional): a slightly overestimated rank of
                           :math:`A`. By default, ``q = min(6, m,
                           n)``.

        center (bool, optional): if True, center the input tensor,
                                 otherwise, assume that the input is
                                 centered.

        niter (int, optional): the number of subspace iterations to
                               conduct; niter must be a nonnegative
                               integer, and defaults to 2.

    References::

        - Nathan Halko, Per-Gunnar Martinsson, and Joel Tropp, Finding
          structure with randomness: probabilistic algorithms for
          constructing approximate matrix decompositions,
          arXiv:0909.4061 [math.NA; math.PR], 2009 (available at
          `arXiv <http://arxiv.org/abs/0909.4061>`_).

    """

    if not torch.jit.is_scripting():
        # Defer to __torch_function__ overrides (tensor subclasses).
        if type(A) is not torch.Tensor and has_torch_function((A,)):
            return handle_torch_function(
                pca_lowrank, (A,), A, q=q, center=center, niter=niter
            )

    (m, n) = A.shape[-2:]

    if q is None:
        q = min(6, m, n)
    elif not (q >= 0 and q <= min(m, n)):
        raise ValueError(
            f"q(={q}) must be non-negative integer and not greater than min(m, n)={min(m, n)}"
        )
    if not (niter >= 0):
        raise ValueError(f"niter(={niter}) must be non-negative integer")

    dtype = _utils.get_floating_dtype(A)

    if not center:
        # Input is assumed to be pre-centered: plain low-rank SVD.
        return _svd_lowrank(A, q, niter=niter, M=None)

    if _utils.is_sparse(A):
        if len(A.shape) != 2:
            raise ValueError("pca_lowrank input is expected to be 2-dimensional tensor")
        # Column means of the sparse matrix (sum over the row dimension).
        c = torch.sparse.sum(A, dim=(-2,)) / m
        # reshape c: rebuild the 1-d sparse mean vector as an (n, 1)
        # sparse column so it can be multiplied by a row of ones below.
        column_indices = c.indices()[0]
        indices = torch.zeros(
            2,
            len(column_indices),
            dtype=column_indices.dtype,
            device=column_indices.device,
        )
        indices[0] = column_indices
        C_t = torch.sparse_coo_tensor(
            indices, c.values(), (n, 1), dtype=dtype, device=A.device
        )

        # M = transpose(c @ ones(1, m)) replicates the column means over
        # all rows; centering is then done implicitly inside
        # _svd_lowrank via the M argument, so the sparse A is never
        # densified by an explicit A - M.
        ones_m1_t = torch.ones(A.shape[:-2] + (1, m), dtype=dtype, device=A.device)
        M = _utils.transpose(torch.sparse.mm(C_t, ones_m1_t))
        return _svd_lowrank(A, q, niter=niter, M=M)
    else:
        # Dense path: subtract the column means directly.
        C = A.mean(dim=(-2,), keepdim=True)
        return _svd_lowrank(A - C, q, niter=niter, M=None)
|
env-llmeval/lib/python3.10/site-packages/torch/_meta_registrations.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/torch/_ops.py
ADDED
@@ -0,0 +1,938 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import contextlib
|
2 |
+
import ctypes
|
3 |
+
import importlib
|
4 |
+
import inspect
|
5 |
+
import sys
|
6 |
+
import types
|
7 |
+
from typing import Any, Callable, Dict, List, Type, Union
|
8 |
+
|
9 |
+
import torch._C
|
10 |
+
import torch.utils._pytree as pytree
|
11 |
+
from torch import _utils_internal
|
12 |
+
from torch._functorch.pyfunctorch import dispatch_functorch
|
13 |
+
|
14 |
+
# Probe once at import time whether this platform lets us read/write the
# dlopen flags (CPython only exposes these on some POSIX systems).
_SET_GLOBAL_FLAGS = hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags")


@contextlib.contextmanager
def dl_open_guard():
    """
    Context manager that turns on the RTLD_GLOBAL dynamic-linker flag while a
    shared library with custom operators is being opened, and restores the
    previous flags on exit (even if the body raises).
    """
    if not _SET_GLOBAL_FLAGS:
        # Platform does not expose dlopen flag manipulation; nothing to do.
        yield
        return
    saved_flags = sys.getdlopenflags()
    try:
        sys.setdlopenflags(saved_flags | ctypes.RTLD_GLOBAL)
        yield
    finally:
        sys.setdlopenflags(saved_flags)
|
34 |
+
|
35 |
+
|
36 |
+
class OperatorBase:
    """
    Base class for OpOverload (which represents C++ ATen operators) and HigherOrderOperator
    (which represents Python-only operators that are unrepresentable in TorchScript).

    Holds the three Python-side registration tables (dispatch-key kernels,
    TorchDispatchMode handlers, and functorch transform rules) plus the
    per-operator dispatch cache consulted from C++.
    """

    def __init__(self):
        # The dispatch cache precomputes a mapping of dispatch key that the
        # dispatcher wants to dispatch to, to an actual implementation of the
        # dispatch key. Confusingly, the actual implementation could *also* be a
        # dispatch key, but in this case, this refers to the C++ kernel that
        # was registered to some dispatch key. Aliases are permitted in the
        # latter but not the former; for example, you might lookup the
        # entry for AutogradCPU, and this maps you to the Autograd key for
        # the generic autograd kernel that works for all devices. Since this
        # is the Python dispatcher, you can also put an arbitrary Python
        # callable to call instead. This handler gets precisely the
        # args/kwargs that the operator was __call__'ed with.
        # NB: This name is hard-coded in torch/csrc/autograd/python_variable.cpp
        # for use with OpOverload; cache lookup is done entirely from C++
        # for speed.
        # TODO: The cache is NOT currently used by HigherOrderOperator, but it should!
        self._dispatch_cache: Dict[
            torch._C.DispatchKey, Union[torch._C.DispatchKey, Callable[..., Any]]
        ] = {}

        # This table allows you to override the behavior of a particular
        # dispatch key to call a custom Python function, rather than the
        # ordinary C++ configured behavior. This is the raison d'etre of
        # Python dispatcher: to let you program the dispatcher from Python
        # in case you need something unusual, and don't want to clobber
        # the existing registrations using the Python operator registration
        # API.
        self.py_kernels: Dict[torch._C.DispatchKey, Callable[..., Any]] = {}

        # Imported lazily here (not at module top) to avoid an import cycle
        # between torch._ops and torch.utils._python_dispatch.
        from torch.utils._python_dispatch import TorchDispatchMode

        # This table allows you to override the behavior of a particular
        # operator for a particular TorchDispatchMode. In practice,
        # we are using this mostly for ProxyTensorMode. Modes can be
        # thought of as an open world extension of dispatch keys, so it
        # makes sense that you should be able to register them, the same
        # way you can register dispatch keys.
        self.python_key_mode_table: Dict[
            Type[TorchDispatchMode], Callable[..., Any]
        ] = {}

        # This table allows you to override the behavior of functorch
        # transformations. NB: this currently only does something for
        # HigherOrderOperator
        self.functorch_table = {}

    def __call__(self, *args, **kwargs):
        """Invoke the operator. Must be implemented by subclasses."""
        raise NotImplementedError()

    def has_kernel_for_dispatch_key(self, k):
        """Return True iff a Python kernel is registered for exactly key `k`."""
        return k in self.py_kernels

    def has_kernel_for_any_dispatch_key(self, ks):
        """Return True iff any non-alias key in keyset `ks` has a Python kernel."""
        for k in self.py_kernels:
            if not torch._C._dispatch_is_alias_key(k) and ks.has(k):
                return True
        return False

    def py_impl(self, k):
        """
        Decorator factory: register `fn` as the Python implementation for `k`.

        `k` may be a TorchDispatchMode subclass, a functorch TransformType, or
        a torch._C.DispatchKey (anything except DispatchKey.Python, which must
        be registered via a mode instead). Registering twice for the same key
        is an error; registration invalidates the dispatch cache.
        """

        def inner(fn):
            if inspect.isclass(k) and issubclass(
                k, torch.utils._python_dispatch.TorchDispatchMode
            ):
                assert k not in self.python_key_mode_table
                # TODO(voz): Should we replace setting torch._C.DispatchKey.Python entirely with setting mode keys?
                self.python_key_mode_table[k] = fn
                self._dispatch_cache.clear()
                return fn

            if isinstance(k, torch._C._functorch.TransformType):
                assert k not in self.functorch_table
                self.functorch_table[k] = fn
                return fn

            assert isinstance(k, torch._C.DispatchKey)
            assert (
                k != torch._C.DispatchKey.Python
            ), "Please register a mode for the torch._C.DispatchKey.Python key instead."

            if k in self.py_kernels:
                raise RuntimeError(
                    f"Trying to override a python impl for {k} on operator {self.name()}"
                )
            self.py_kernels[k] = fn
            self._dispatch_cache.clear()
            return fn

        return inner

    # Registers an implementation to all **3** variants of functionalization that we have:
    # - DispatchKey.Functionalize
    # - functorch.TransformType.Functionalize
    # - FunctionalTensorMode
    # Example:
    #   @py_functionalize_impl
    #   def functionalize_rule(ctx, inner_f, *args):
    #       args_unwrapped = ctx.unwrap_tensors(args)
    #       with ctx.redispatch_to_next():
    #           out = ctx.functionalize(inner_f)(*args_unwrapped)
    #           return ctx.wrap_tensors(out)
    def py_functionalize_impl(self, fn):
        """Register `fn` under all three functionalization entry points; returns `fn`."""
        from torch._subclasses.functional_tensor import (
            CppFunctionalizeAPI as _CppFunctionalizeAPI,
            FunctorchFunctionalizeAPI as _FunctorchFunctionalizeAPI,
            PythonFunctionalizeAPI as _PythonFunctionalizeAPI,
        )

        # Construct our three flavors of functionalization,
        # each of which have slightly different wrap/unwrap/redispatch policies
        def functionalize_dk_fn(*args, **kwargs):
            return fn(_CppFunctionalizeAPI(), *args, **kwargs)

        def functionalize_dispatch_mode_fn(mode, *args, **kwargs):
            # Mode is unused (there's a global FunctionalTensorMode that we can access)
            return fn(_PythonFunctionalizeAPI(), *args, **kwargs)

        def functionalize_functorch_fn(interpreter, *args, **kwargs):
            return fn(_FunctorchFunctionalizeAPI(interpreter), *args, **kwargs)

        self.py_impl(torch._C.DispatchKey.Functionalize)(functionalize_dk_fn)
        self.py_impl(torch._subclasses.functional_tensor.FunctionalTensorMode)(
            functionalize_dispatch_mode_fn
        )
        self.py_impl(torch._C._functorch.TransformType.Functionalize)(
            functionalize_functorch_fn
        )

        return fn

    def name(self):
        """Return the operator's qualified name. Must be implemented by subclasses."""
        raise NotImplementedError()
|
173 |
+
|
174 |
+
|
175 |
+
is_included_in_alias = torch._C._dispatch_is_included_in_alias

DispatchKey = torch._C.DispatchKey


# Equivalent to computeDispatchTableEntryWithDebug
def resolve_key(op: OperatorBase, k: DispatchKey):  # type: ignore[valid-type]
    """
    Map the queried dispatch key `k` to the key whose kernel should actually
    run for `op`, mirroring the C++ computeDispatchTableEntryWithDebug logic:
    direct registration first, then the composite alias keys, then Autograd,
    then FuncTorchBatchedDecomposition, and finally any backend fallback.

    Raises NotImplementedError when no kernel can be found.
    """
    # 1. A kernel registered directly for this exact key always wins.
    if op.has_kernel_for_dispatch_key(k):
        return k

    def _alias_applies(alias_key):
        # An alias kernel is usable when the query key is Undefined or falls
        # under the alias, and the op actually registered that alias kernel.
        return (
            k == DispatchKey.Undefined or is_included_in_alias(k, alias_key)
        ) and op.has_kernel_for_dispatch_key(alias_key)

    # 2.1 Use CompositeExplicitAutogradNonFunctional kernel if available
    if _alias_applies(DispatchKey.CompositeExplicitAutogradNonFunctional):
        return DispatchKey.CompositeExplicitAutogradNonFunctional

    # 2.2 Use CompositeExplicitAutograd kernel if available
    if _alias_applies(DispatchKey.CompositeExplicitAutograd):
        return DispatchKey.CompositeExplicitAutograd

    has_backend_kernel = op.has_kernel_for_any_dispatch_key(
        torch._C._dispatch_get_backend_keyset_from_autograd(k)
    ) or op.has_kernel_for_dispatch_key(DispatchKey.CompositeExplicitAutograd)

    # 2.3. Use CompositeImplicitAutograd kernel if available; the
    # NestedTensor flavor is consulted first and never matches Undefined.
    nested_alias = DispatchKey.CompositeImplicitAutogradNestedTensor
    if (
        k != DispatchKey.Undefined
        and is_included_in_alias(k, nested_alias)
        and op.has_kernel_for_dispatch_key(nested_alias)
        and not has_backend_kernel
    ):
        return nested_alias

    if _alias_applies(DispatchKey.CompositeImplicitAutograd):
        if k == DispatchKey.AutogradOther and op.has_kernel_for_any_dispatch_key(
            torch._C._dispatch_autogradother_backends
        ):
            raise RuntimeError("ambiguous autogradother kernel")
        if not has_backend_kernel:
            return DispatchKey.CompositeImplicitAutograd

    # 2.4. For autograd backend keys, use kernel from DispatchKey::Autograd if available
    if is_included_in_alias(k, DispatchKey.Autograd) and op.has_kernel_for_dispatch_key(
        DispatchKey.Autograd
    ):
        return DispatchKey.Autograd

    # 2.5 Use kernel from DispatchKey::FuncTorchBatchedDecomposition if available
    if is_included_in_alias(
        k, DispatchKey.FuncTorchBatchedDecomposition
    ) and op.has_kernel_for_dispatch_key(DispatchKey.FuncTorchBatchedDecomposition):
        return DispatchKey.FuncTorchBatchedDecomposition

    # Backend fallback
    if torch._C._dispatch_has_backend_fallback(k):
        # The dispatch key itself will implicitly route to backend fallback.
        # This is probably not great for the pure Python implementation.
        return k

    raise NotImplementedError(f"could not find kernel for {op} at dispatch key {k}")
|
232 |
+
|
233 |
+
|
234 |
+
# Registry of name -> HigherOrderOperator instance; populated by
# HigherOrderOperator.__init__ when each op is constructed.
_higher_order_ops = {}

# Dispatch keys that every HigherOrderOperator removes from its
# non-fallthrough keyset by default (see HigherOrderOperator.fallthrough),
# so dispatch skips past them unless an impl is explicitly registered.
_HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS = [
    DispatchKey.PythonDispatcher,  # type: ignore[attr-defined]
    DispatchKey.PythonTLSSnapshot,  # type: ignore[attr-defined]
    DispatchKey.ADInplaceOrView,
    DispatchKey.BackendSelect,
    DispatchKey.AutocastCPU,  # type: ignore[attr-defined]
    DispatchKey.AutocastCUDA,  # type: ignore[attr-defined]
]
|
244 |
+
|
245 |
+
|
246 |
+
class HigherOrderOperator(OperatorBase):
    # The HigherOrderOperator will appear as torch.ops.higher_order.{name}
    #
    # If you're creating a new HigherOrderOperator, please do not change the
    # default. Adding operators to the global torch.ops namespace is a bad
    # practice due to name collisions.
    def __init__(self, name):
        """Register a Python-only operator under torch.ops.higher_order.{name}."""
        super().__init__()
        self._name = name

        # Make _OPNamespace not scream, this whole name based association needs a good hard look
        self.__name__ = name
        _higher_order_ops[name] = self
        self._ns = "higher_order"

        # For a normal HigherOrderOperator instance, we will change its __module__ from torch._ops to
        # torch._ops.higher_order.
        # For an instance of subclass of HigherOrderOperator (e.g. customized higher order op),
        # the __module__ attribute will be kept unchanged.
        if self.__class__ is HigherOrderOperator:
            self_name_space = "." + self.namespace if self.namespace else ""
            self.__module__ = self.__module__ + self_name_space
        # Start from the full keyset; fallthrough() removes keys from it.
        self.non_fallthrough_keys = torch._C._dispatch_keyset_full()

        for dispatch_key in _HIGHER_ORDER_OP_DEFAULT_FALLTHROUGH_DISPATCH_KEYS:
            self.fallthrough(dispatch_key)

    def py_impl(self, k):
        """Like OperatorBase.py_impl, but also re-adds `k` to the
        non-fallthrough keyset in case it was previously marked fallthrough."""
        if isinstance(k, torch._C.DispatchKey) and not self.non_fallthrough_keys.has(k):
            self.non_fallthrough_keys = self.non_fallthrough_keys.add(k)
        return super().py_impl(k)

    @property
    def namespace(self):
        # Always "higher_order" (set in __init__).
        return self._ns

    def fallthrough(self, dispatch_key):
        """Mark `dispatch_key` as fallthrough: it is excluded from key extraction."""
        self.non_fallthrough_keys = self.non_fallthrough_keys.remove(dispatch_key)

    def dispatch(self, dispatch_key, *args, **kwargs):
        """
        Dispatch a call to the registered kernel for `dispatch_key`,
        consulting (in order) the dispatch cache, functorch, the Python-mode
        table, per-dispatch-key mode stacks, and finally resolve_key().
        """
        from torch.utils._python_dispatch import _get_current_dispatch_mode

        if dispatch_key in self._dispatch_cache:
            kernel = self._dispatch_cache[dispatch_key]
            assert not isinstance(kernel, torch._C.DispatchKey)
            return kernel(*args, **kwargs)

        if dispatch_key == torch._C.DispatchKey.FuncTorchDynamicLayerFrontMode:
            return dispatch_functorch(self, args, kwargs)

        if dispatch_key == torch._C.DispatchKey.Python:
            # The place to handle ProxyTorchDispatchMode, FakeTensorMode, etc
            from torch.utils._python_dispatch import _pop_mode_temporarily

            curr_mode = _get_current_dispatch_mode()
            assert (
                curr_mode is not None
            ), "Illegal invocation of dispatch on torch._C.DispatchKey.Python without a mode."
            assert (
                type(curr_mode) in self.python_key_mode_table
            ), f"Current active mode {curr_mode} not registered"
            handler = self.python_key_mode_table[type(curr_mode)]
            with _pop_mode_temporarily() as mode:
                return handler(mode, *args, **kwargs)

        functionality_key = torch._C._to_functionality_key(dispatch_key)  # type: ignore[attr-defined]
        if functionality_key in mode_stack_per_key():
            # The place to handle DispatchKey.PreDispatch
            curr_stack = mode_stack_per_key()[functionality_key]
            # The check for Python in the exclude set is so we properly respect `with no_dispatch()`
            # calls inside of a mode.
            if len(
                curr_stack
            ) > 0 and not torch._C._dispatch_tls_is_dispatch_key_excluded(
                DispatchKey.Python
            ):
                curr_mode = curr_stack[-1]
                pre_dispatch_modes = mode_stack_per_key().get(
                    DispatchKey.PreDispatch, []  # type: ignore[attr-defined]
                )
                handler = self.python_key_mode_table[type(curr_mode)]
                # NOTE(review): if pre_dispatch_modes is empty we fall through
                # to resolve_key below without invoking `handler` — confirm
                # that this is the intended behavior rather than a missed case.
                if len(pre_dispatch_modes) > 0:
                    with temporarily_pop_mode(pre_dispatch_modes) as mode:
                        return handler(mode, *args, **kwargs)

        final_key = resolve_key(self, dispatch_key)

        # This can current fail due to backend fallbacks.  You just have to
        # register them by hand for HigherOrderOperator.
        if final_key not in self.py_kernels:
            raise NotImplementedError(
                f"could not find kernel for HigherOrderOperator {self._name} "
                f"at dispatch key {final_key} (resolved from {dispatch_key})"
            )
        self._dispatch_cache[dispatch_key] = self.py_kernels[final_key]
        kernel = self.py_kernels[final_key]
        # It's illegal to register DispatchKey to py_kernels, since there's no
        # C++ kernel to call into
        assert not isinstance(kernel, torch._C.DispatchKey)
        return kernel(*args, **kwargs)

    def __call__(self, *args, **kwargs):
        # Dynamo already traces the body of HigherOrderOp beforehand, so there
        # is no need to trace into it here.
        import torch._dynamo
        from torch._dynamo import disable

        @disable
        def wrapper():
            flat_args = _to_flat_tuple(args, kwargs)
            if torch.overrides.has_torch_function(flat_args):
                return torch.overrides.handle_torch_function(
                    self, flat_args, *args, **kwargs
                )

            dispatch_key_set = _compute_keyset(args, kwargs, self.non_fallthrough_keys)
            return self.dispatch(
                dispatch_key_set.highestPriorityTypeId(), *args, **kwargs
            )

        return wrapper()

    def __str__(self):
        return f"{self.name()}"

    def name(self):
        """Return the name this op was registered under."""
        return self._name
|
373 |
+
|
374 |
+
|
375 |
+
def _to_flat_tuple(args, kwargs):
|
376 |
+
return pytree.arg_tree_leaves(*args, **kwargs)
|
377 |
+
|
378 |
+
|
379 |
+
def _compute_keyset(args, kwargs, non_fallthrough_keys):
    """Compute the dispatch keyset for a call from its tensor arguments,
    masked down to the keys in `non_fallthrough_keys`."""
    tensor_inputs = _get_tensors(args, kwargs)
    return key_extractor(tensor_inputs, non_fallthrough_keys)
|
382 |
+
|
383 |
+
|
384 |
+
def _get_tensors(args, kwargs):
|
385 |
+
flat_all = _to_flat_tuple(args, kwargs)
|
386 |
+
tensor_args = [t for t in flat_all if isinstance(t, torch.Tensor)]
|
387 |
+
return tuple(tensor_args)
|
388 |
+
|
389 |
+
|
390 |
+
# Note - this should maintain identical impl to the C++ dispatcher key extraction logic
|
391 |
+
# at ATen/core/dispatch/DispatchKeyExtractor.h
|
392 |
+
def key_extractor(tensors, key_mask):
|
393 |
+
key_set = torch._C._dispatch_tls_local_include_set()
|
394 |
+
for tensor in tensors:
|
395 |
+
key_set = key_set | torch._C._dispatch_keys(tensor)
|
396 |
+
key_set = key_set - torch._C._dispatch_tls_local_exclude_set()
|
397 |
+
key_set = key_set & key_mask
|
398 |
+
return key_set
|
399 |
+
|
400 |
+
|
401 |
+
# Note [Per Dispatch Key Modes]
# In ordinary eager mode, we have a Python dispatch key that we attach
# a mode stack to.
# However - when the PyDispatcher is enabled, we extend this functionality
# such that every (functionality) dispatch key is allowed to have
# its own mode stack.
# This is controlled by passing a `torch._C.DispatchKey` into
# the mode constructor.
# Maps a functionality dispatch key to its stack of active modes
# (most recently pushed mode last); accessed through mode_stack_per_key().
_mode_stack_per_key: Dict[torch._C.DispatchKey, List] = {}
|
410 |
+
|
411 |
+
|
412 |
+
# Per-dispatch-key mode variant.
# Temporarily pops the top of a given mode stack.
@contextlib.contextmanager
def temporarily_pop_mode(mode_stack):
    """Pop the top mode off `mode_stack` for the duration of the with-block,
    yielding it, and push it back on exit even if the body raises."""
    assert len(mode_stack) > 0
    popped = mode_stack.pop()
    try:
        yield popped
    finally:
        mode_stack.append(popped)
|
422 |
+
|
423 |
+
|
424 |
+
def mode_stack_per_key():
    """Return the global mapping from dispatch key to its per-key mode stack."""
    return _mode_stack_per_key
|
427 |
+
|
428 |
+
|
429 |
+
# Per-dispatch-key mode variant of push_mode().
def push_mode_for_key(key, mode):
    """Append `mode` onto the mode stack for dispatch key `key`, creating the
    stack if this is the first mode pushed for that key."""
    assert isinstance(key, torch._C.DispatchKey)
    assert isinstance(mode, torch.utils._python_dispatch.TorchDispatchMode)
    mode_stack_per_key().setdefault(key, []).append(mode)
|
436 |
+
|
437 |
+
|
438 |
+
# Per-dispatch-key mode variant of pop_mode().
def pop_mode_for_key(key):
    """Pop and return the most recently pushed mode for dispatch key `key`.
    The key must already have a non-empty mode stack."""
    assert isinstance(key, torch._C.DispatchKey)
    assert key in mode_stack_per_key()
    stack = mode_stack_per_key()[key]
    assert len(stack) > 0
    return stack.pop()
|
445 |
+
|
446 |
+
|
447 |
+
# Global registry of OpOverloads whose _dispatch_cache currently holds
# entries, so all caches can be located and invalidated in bulk.
cached_ops = set()


def add_cached_op(op_overload):
    """Record that `op_overload` has populated its dispatch cache."""
    cached_ops.add(op_overload)


def reset_cached_ops():
    """Forget every recorded cached op."""
    cached_ops.clear()


def get_cached_ops():
    """Return the (mutable, global) set of ops with populated dispatch caches."""
    return cached_ops
|
463 |
+
|
464 |
+
|
465 |
+
# Each OpOverload object contains pointer to a a specific operator overload, a pointer to the parent `OpOverloadPacket` object.
# You can obtain an OpOverload object through attribute query on OpOverloadPacket.
class OpOverload(OperatorBase):
    def __init__(self, overloadpacket, op, op_dk, schema, tags):
        """
        Wrap a single resolved C++ operator overload.

        overloadpacket: parent OpOverloadPacket
        op: the bound callable for this overload
        op_dk: callable that dispatches to a specific dispatch key
        schema: the overload's FunctionSchema
        tags: operator tags from the native_functions registration
        """
        super().__init__()
        self._op = op
        self._op_dk = op_dk
        self._schema = schema
        self._overloadpacket = overloadpacket
        self._tags = tags
        # The empty overload name is surfaced as "default" in Python.
        self._overloadname = (
            "default" if schema.overload_name == "" else schema.overload_name
        )
        self._name = self._schema.name
        if schema.overload_name:
            self._name += "." + schema.overload_name
        self.__name__ = f"{self._schema.name.split('::')[1]}.{self._overloadname}"
        self.__module__ = overloadpacket.__module__
        op.__module__ = overloadpacket.__module__
        self.__qualname__ = self._name
        self.__annotations__ = {}

        # If the OpOverload was constructed from a Library.def in Python.
        self._defined_in_python = self.__qualname__ in torch.library._defs

        # Logic replicated from aten/src/ATen/native/MathBitsFallback.h
        is_write = None
        for a in self._schema.arguments:
            if a.alias_info is None:
                continue
            if is_write is None:
                is_write = a.alias_info.is_write
            else:
                # We will conservatively call mixed mutable/non-mutable
                # aliased inputs as NOT a view
                is_write = a.alias_info.is_write or is_write
        # A view op has at least one aliased argument and none of them mutable.
        self.is_view = is_write is not None and not is_write

    # it's a no-op since OpOverload object is immutable and must be unique for a given op overload.
    def __deepcopy__(self, memo=None):
        return self

    def __repr__(self):
        return "<OpOverload(op='{}.{}', overload='{}')>".format(
            *self._schema.name.split("::"), self._overloadname
        )

    def __call__(self, *args, **kwargs):
        """Invoke the underlying C++ overload directly."""
        return self._op(*args, **(kwargs or {}))

    def __hash__(self):
        return hash(self._op)

    # `my_namespace.my_op_name.overload_name`
    def __str__(self):
        return "{}.{}.{}".format(*self._schema.name.split("::"), self._overloadname)

    def has_kernel_for_dispatch_key(self, k):
        # Python-registered kernels OR kernels registered on the C++ side.
        return super().has_kernel_for_dispatch_key(
            k
        ) or torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), k)

    def has_kernel_for_any_dispatch_key(self, ks):
        # C++-registered kernels OR Python-registered kernels for any key in `ks`.
        return torch._C._dispatch_has_kernel_for_any_dispatch_key(
            self.name(), ks
        ) or super().has_kernel_for_any_dispatch_key(ks)

    @property
    def namespace(self):
        # Schema names look like "namespace::op_name".
        return self._schema.name.split("::")[0]

    def decompose(self, *args, **kwargs):
        """Run the CompositeImplicitAutograd decomposition for this op, or
        return NotImplemented when no such kernel exists."""
        dk = torch._C.DispatchKey.CompositeImplicitAutograd
        if dk in self.py_kernels:
            # NB: This branch is not too necessary anymore, because we can
            # apply Python CompositeImplicitAutograd *before* tracing
            # using Python dispatcher (also taking advantage of the autograd
            # formula).  But it's included for completeness
            return self.py_kernels[dk](*args, **kwargs)
        elif torch._C._dispatch_has_kernel_for_dispatch_key(self.name(), dk):
            return self._op_dk(dk, *args, **kwargs)
        else:
            return NotImplemented

    # Remove a dispatch key from the dispatch cache.  This will force it to get
    # recomputed the next time.  Does nothing if the key was never cached.
    # WARNING: if you register a dispatch key to py_kernels of an OpOverload,
    # calling _del_dispatch on that key is NOT sufficient to apply your change,
    # because a single registration may affect MULTIPLE dispatch keys (e.g.,
    # registering Autograd affects AutogradCPU).  del_dispatch is to be used
    # only if you are specifically modifying how get_dispatch handles a
    # particular input 'key'.
    def _uncache_dispatch(self, key):
        self._dispatch_cache.pop(key, None)

    # This implements the pre-computation logic for the Python dispatcher.
    def _get_dispatch(self, key):
        """Compute (and usually cache) the handler for dispatch key `key`."""
        # This is only called upon a cache miss
        assert key not in self._dispatch_cache, f"{self} {key}"

        if key == torch._C.DispatchKey.Python:
            if not self.python_key_mode_table:
                # No Python-mode handlers: cache the key itself, which routes
                # back to the C++ kernel registered for DispatchKey.Python.
                self._dispatch_cache[key] = key
                add_cached_op(self)
                return key

            def handler(*args, **kwargs):
                from torch.utils._python_dispatch import _get_current_dispatch_mode

                # TODO: We also need to handle tensor subclasses here
                # TODO(voz): We should walk all the nodes here / turn it into a list, topmode is ok for now.
                curr_mode = type(_get_current_dispatch_mode())
                # NOTE(review): `curr_mode` is type(...), which is NoneType
                # (never None) when no mode is active — this assert can never
                # fire; it likely meant to check the mode instance itself.
                assert (
                    curr_mode is not None
                ), "Illegal invocation of dispatch on torch._C.DispatchKey.Python without a mode."
                if curr_mode not in self.python_key_mode_table:
                    # TODO: This path is slow, should generally encourage this
                    # case to not happen
                    return self._op_dk(key, *args, **kwargs)
                # TODO(voz): The idea behind this is that we do not yet support dispatch by key + mode, only key.
                return self.python_key_mode_table[curr_mode](*args, **kwargs)

            self._dispatch_cache[key] = handler
            add_cached_op(self)
            return handler

        cache_result = True
        functionality_key = torch._C._to_functionality_key(key)  # type: ignore[attr-defined]
        if functionality_key in mode_stack_per_key():
            curr_stack = mode_stack_per_key()[functionality_key]
            # The check for Python in the exclude set is so we properly respect `with no_dispatch()`
            # calls inside of a mode.
            if len(
                curr_stack
            ) > 0 and not torch._C._dispatch_tls_is_dispatch_key_excluded(
                DispatchKey.Python
            ):

                def handler(*args, **kwargs):
                    # This logic is meant to be a python parallel of handle_torch_function_no_python_arg_parser.
                    with temporarily_pop_mode(curr_stack) as curr_mode:
                        assert hasattr(curr_mode, "__torch_dispatch__")
                        overload_types = []
                        args_flattened = pytree.arg_tree_leaves(*args, **kwargs)
                        for a in args_flattened:
                            # TODO: need to double check the semantics of the "types" argument to torch_dispatch.
                            # It's generated in PyInterpreter.cpp, but seems to be generated in two places,
                            # where in one case we only include tensors with the python key, and in another
                            # we include **all** tensors.
                            if isinstance(a, torch.Tensor) and torch._C._dispatch_keys(
                                a
                            ).has(torch._C.DispatchKey.Python):
                                overload_types.append(type(a))
                        # TODO: check that I got these args correct (in C++, we pass in "0000"??)
                        return curr_mode.__torch_dispatch__(
                            self, overload_types, args, kwargs
                        )

                # Note [Not Caching Per-Dispatch-Key Mode Handlers]
                # Note that we're not caching this handler.  There isn't really a point, since the slow bit
                # is the handler itself (in python).
                # Also, not caching means that we don't have to reset the cache when any existing
                # modes go out of scope (which in of itself takes time to loop through all operators).
                return handler
            else:
                # See Note [Not Caching Per-Dispatch-Key Mode Handlers]
                cache_result = False

        final_key = resolve_key(self, key)

        # TODO: We could potentially have lots of debugging wrappers against
        # dispatch keys; design some general registration mechanism instead of
        # having if statement for each of them
        if key == torch._C.DispatchKey.Functionalize:
            import torch._dispatch.python as pydispatch

            if pydispatch.CROSSREF_FUNCTIONALIZE:
                handler = pydispatch.make_crossref_functionalize(self, final_key)
                if cache_result:
                    self._dispatch_cache[key] = handler
                    add_cached_op(self)
                return handler

        # print(self, key, final_key)
        r = self.py_kernels.get(final_key, final_key)
        if cache_result:
            self._dispatch_cache[key] = r
            add_cached_op(self)
        return r

    def name(self):
        """Return the fully-qualified name, e.g. "aten::add.Tensor"."""
        return self._name

    @property
    def overloadpacket(self):
        return self._overloadpacket

    @property
    def op(self):
        return self._op

    @property
    def tags(self):
        return self._tags

    # TODO: add more methods to expose information about input and output arguments
|
671 |
+
|
672 |
+
|
673 |
+
# OpOverloadPacket class contains pointer to a base unresolved operator that doesn't correspond to a specific operator
|
674 |
+
# You can obtain an OpOverload object through attribute query.
|
675 |
+
class OpOverloadPacket:
|
676 |
+
def __init__(self, qualified_op_name, op_name, op, overload_names):
|
677 |
+
# These attributes are accessible on the object through the properties
|
678 |
+
# defined below but are immutable
|
679 |
+
self._qualified_op_name = qualified_op_name
|
680 |
+
self.__name__ = op_name
|
681 |
+
self._op = op
|
682 |
+
self._overload_names = overload_names
|
683 |
+
self._dir = []
|
684 |
+
|
685 |
+
# it's a no-op since OpOverloadPacket object is immutable and must be unique for a given op.
|
686 |
+
def __deepcopy__(self, memo=None):
|
687 |
+
return self
|
688 |
+
|
689 |
+
def __repr__(self):
|
690 |
+
return "<OpOverloadPacket(op='{}.{}')>".format(
|
691 |
+
*self._qualified_op_name.split("::")
|
692 |
+
)
|
693 |
+
|
694 |
+
def __hash__(self):
|
695 |
+
return hash(self._op)
|
696 |
+
|
697 |
+
def __str__(self):
|
698 |
+
return "{}.{}".format(*self._qualified_op_name.split("::"))
|
699 |
+
|
700 |
+
@property
|
701 |
+
def op(self):
|
702 |
+
return self._op
|
703 |
+
|
704 |
+
def __getattr__(self, key):
|
705 |
+
# It is not a valid op_name when __file__ is passed in
|
706 |
+
if key == "__file__":
|
707 |
+
return "torch.ops"
|
708 |
+
|
709 |
+
# ensure that query for dunder attributes that does not exist on
|
710 |
+
# opoverloadpacket but instead exists on the self._op object does not unnecessarily call
|
711 |
+
# `_get_operation_overload` (which is an expensive operation).
|
712 |
+
# This is done to prevent any potential slowdown. This list can be extended
|
713 |
+
# if there exists other attributes like `__name__` that only exist on self._op and not on the
|
714 |
+
# opoverloadpacket.
|
715 |
+
# This is ok since we are guaranteed that an overload name for an aten op can't start with '__'
|
716 |
+
try:
|
717 |
+
if key.startswith("__"):
|
718 |
+
return getattr(self._op, key)
|
719 |
+
except AttributeError:
|
720 |
+
# for consistency because it seems weird to
|
721 |
+
# throw an attribute error with a message containing
|
722 |
+
# an object name different from the one the attribute
|
723 |
+
# query was performed on.
|
724 |
+
raise AttributeError(
|
725 |
+
f"'{str(self)}' can't have an overload name beginning with '__' and the "
|
726 |
+
f"underlying op {str(self._op)} has no attribute {key} either."
|
727 |
+
) from None
|
728 |
+
|
729 |
+
try:
|
730 |
+
# This is ok since we are guaranteed that an overload name for an aten op can't be 'default'
|
731 |
+
use_key = "" if key == "default" else key
|
732 |
+
# TODO: disallow access to overloads registered by JIT
|
733 |
+
op_, op_dk_, tags = torch._C._get_operation_overload(
|
734 |
+
self._qualified_op_name, use_key
|
735 |
+
)
|
736 |
+
schema = torch._C._get_schema(self._qualified_op_name, use_key)
|
737 |
+
overload = OpOverload(self, op_, op_dk_, schema, tags)
|
738 |
+
# cache the overload object
|
739 |
+
setattr(self, key, overload)
|
740 |
+
self._dir.append(key)
|
741 |
+
return overload
|
742 |
+
except RuntimeError:
|
743 |
+
raise AttributeError(
|
744 |
+
f"The underlying op of '{str(self)}' has no overload name '{key}'"
|
745 |
+
) from None
|
746 |
+
|
747 |
+
def __iter__(self):
    # Iterating a packet walks the overload names that have been resolved
    # (and cached in self._dir) so far.
    return self._dir.__iter__()
|
749 |
+
|
750 |
+
def __call__(self, *args, **kwargs):
    """Call the op's default overload resolution.

    ``__call__`` is overloaded so that ``torch.ops.foo.bar()`` remains
    callable from the JIT; the underlying function pointer was stashed on
    the packet as the ``_op`` attribute for exactly this purpose.
    """
    # Normalize an empty kwargs mapping to a fresh {} before splatting.
    call_kwargs = kwargs or {}
    return self._op(*args, **call_kwargs)
|
756 |
+
|
757 |
+
# TODO: use this to make a __dir__
def overloads(self):
    """Return all overload names, with the anonymous one spelled "default"."""
    names = []
    for overload_name in self._overload_names:
        # The empty string is the JIT's spelling of the default overload.
        names.append(overload_name if overload_name else "default")
    return names
|
760 |
+
|
761 |
+
|
762 |
+
# Resolution of torch.fn is different from torch.ops.aten.fn
|
763 |
+
# torch.fn uses the Python argparser, matches with the
|
764 |
+
# appropriate schema, and calls into the unboxed version of the method
|
765 |
+
# torch.ops.aten.fn resolution is done via the mechanism defined in JIT.
|
766 |
+
# JIT creates a stack of all the overloads and then tries to match the
|
767 |
+
# correct one at runtime and always calls into the boxed version of the method
|
768 |
+
# Autograd codegen creates VariableType, TracerType,
|
769 |
+
# inplace or view type and python bindings.
|
770 |
+
# Aten codegen generates tensor methods for the tensor class.
|
771 |
+
|
772 |
+
# _OpNamespace is a subclass of ModuleType because the torch script
|
773 |
+
# allows attribute lookups on modules only. Since we want torch.ops.foo.bar()
|
774 |
+
# to work from script, we need to ensure ops and foo are modules
|
775 |
+
|
776 |
+
|
777 |
+
class _OpNamespace(types.ModuleType):
    """
    An op namespace to dynamically bind Operators into Python.

    Say a user has created a custom Operator called "my_namespace::my_op". To
    call this op, the user will write torch.ops.my_namespace.my_op(...).
    At startup, this operation will not yet be bound into Python. Instead, the
    following sequence of magic tricks will occur:
    1. `torch.ops.my_namespace` will invoke the `__getattr__` magic method
       on the `torch.ops` object, which will create a new `_OpNamespace`
       object called `my_namespace` and set it as an attribute on the `ops`
       object.
    2. `torch.ops.my_namespace.my_op` will then invoke `__getattr__` on
       the `my_namespace` object, which will retrieve the operation via
       `torch.get_operation`, a function bound from C++, and then in a similar
       fashion bind this new object onto the `my_namespace` object.
    3. `torch.ops.my_namespace.my_op(...)` then calls this new operation
       and subsequent accesses will incur no further lookup (the namespace and
       operation will already exist).
    """

    def __init__(self, name):
        # Subclassing ModuleType keeps TorchScript happy: the script frontend
        # only allows attribute lookups on modules.
        super().__init__("torch.ops." + name)
        self.name = name
        # Names resolved through __getattr__ so far; backs __iter__.
        self._dir = []

    def __iter__(self):
        return iter(self._dir)

    def __getattr__(self, op_name):
        # It is not a valid op_name when __file__ is passed in
        if op_name == "__file__":
            return "torch.ops"
        elif op_name in ["__origin__", "__self__"]:
            raise AttributeError(
                f"Invalid attribute '{op_name}' for '_OpNamespace' '{self.name}'"
            )

        # Get the op `my_namespace::my_op` if available. This will also check
        # for overloads and raise an exception if there are more than one.
        namespace_name = self.name
        qualified_op_name = f"{namespace_name}::{op_name}"
        try:
            op, overload_names = torch._C._jit_get_operation(qualified_op_name)
            if op is None:
                raise AttributeError(
                    f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
                )
        except RuntimeError as e:
            # Turn this into AttributeError so getattr(obj, key, default)
            # works (this is called by TorchScript with __origin__)
            raise AttributeError(
                f"'_OpNamespace' '{self.name}' object has no attribute '{op_name}'"
            ) from e

        # let the script frontend know that op is identical to the builtin op
        # with qualified_op_name
        torch.jit._builtins._register_builtin(op, qualified_op_name)
        op.__module__ = self.__module__ + "." + namespace_name
        opoverloadpacket = OpOverloadPacket(
            qualified_op_name, op_name, op, overload_names
        )
        opoverloadpacket.__module__ = self.__module__ + "." + namespace_name
        # cache the opoverloadpacket to ensure that each op corresponds to
        # a unique OpOverloadPacket object
        setattr(self, op_name, opoverloadpacket)
        self._dir.append(op_name)
        return opoverloadpacket
|
845 |
+
|
846 |
+
|
847 |
+
class _PyOpNamespace(_OpNamespace):
    """Namespace whose ops come from a pre-built Python-side dict rather than
    the JIT operator registry (used for higher-order ops)."""

    def __init__(self, name, ops):
        super().__init__(name)
        # Mapping of op name -> Python op object, supplied by the caller.
        self._ops = ops

    def __getattr__(self, name):
        # Following _OpNamespace.__getattr__, we cache the op on the
        # _PyOpNamespace object so later lookups bypass __getattr__ entirely.
        found = self._ops.get(name)
        if found is None:
            raise AttributeError(
                f"'_PyOpNamespace' '{self.name}' object has no attribute '{name}'"
            )
        setattr(self, name, found)
        return found
|
861 |
+
|
862 |
+
|
863 |
+
class _Ops(types.ModuleType):
    # Root of the `torch.ops` namespace tree; per-namespace _OpNamespace
    # objects are created lazily on first attribute access.
    __file__ = "_ops.py"

    def __init__(self):
        super().__init__("torch.ops")
        # Paths of shared libraries loaded via load_library().
        self.loaded_libraries = set()
        # Python-side namespace for higher-order ops; served specially by
        # __getattr__ below instead of going through the JIT registry.
        self._higher_order_op_namespace = _PyOpNamespace(
            "torch.ops.higher_order", _higher_order_ops
        )
        # Namespace names created so far; backs __iter__.
        self._dir = []

    def __getattr__(self, name):
        # Check if the name is a HigherOrderOperator
        if name == "higher_order":
            return self._higher_order_op_namespace

        # Here we are creating `torch.ops.my_namespace`
        # setattr caches the namespace so __getattr__ only fires once per name.
        namespace = _OpNamespace(name)
        setattr(self, name, namespace)
        self._dir.append(name)
        return namespace

    def __iter__(self):
        return iter(self._dir)

    def import_module(self, module):
        """
        Imports a Python module that has torch.library registrations.

        Generally, to extend PyTorch with custom operators, a user will
        create a Python module whose import triggers registration of
        the custom operators via a torch.ops.load_library call or a call
        to one or more torch.library.* APIs.

        It is unexpected for Python modules to have side effects, so some
        linters and formatters will complain. Use this API to import Python
        modules that contain these torch.library side effects.

        Args:
            module (str): The name of the Python module to import

        """
        importlib.import_module(module)

    def load_library(self, path):
        """
        Loads a shared library from the given path into the current process.

        The library being loaded may run global initialization code to register
        custom operators with the PyTorch JIT runtime. This allows dynamically
        loading custom operators. For this, you should compile your operator
        and the static registration code into a shared library object, and then
        call ``torch.ops.load_library('path/to/libcustom.so')`` to load the
        shared object.

        After the library is loaded, it is added to the
        ``torch.ops.loaded_libraries`` attribute, a set that may be inspected
        for the paths of all libraries loaded using this function.

        Args:
            path (str): A path to a shared library to load.
        """
        if torch._running_with_deploy():
            # torch::deploy builds link operators statically; nothing to load.
            return

        path = _utils_internal.resolve_library_path(path)
        with dl_open_guard():
            # Import the shared library into the process, thus running its
            # static (global) initialization code in order to register custom
            # operators with the JIT.
            ctypes.CDLL(path)
        self.loaded_libraries.add(path)
|
935 |
+
|
936 |
+
|
937 |
+
# The ops "namespace"
# Module-level singleton exposed to users as `torch.ops`; attribute access on
# it lazily materializes per-namespace _OpNamespace objects.
ops = _Ops()
|
env-llmeval/lib/python3.10/site-packages/torch/_sources.py
ADDED
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import ast
|
2 |
+
import functools
|
3 |
+
import inspect
|
4 |
+
from textwrap import dedent
|
5 |
+
from typing import Any, List, NamedTuple, Optional, Tuple
|
6 |
+
|
7 |
+
from torch._C import ErrorReport
|
8 |
+
from torch._C._jit_tree_views import SourceRangeFactory
|
9 |
+
|
10 |
+
|
11 |
+
def get_source_lines_and_file(
    obj: Any,
    error_msg: Optional[str] = None,
) -> Tuple[List[str], int, Optional[str]]:
    """
    Wrapper around inspect.getsourcelines and inspect.getsourcefile.

    Args:
        obj: any object inspect can fetch source for (function, class, ...).
        error_msg: optional extra text appended to the failure message.

    Returns: (sourcelines, file_lino, filename)
    """
    src_file: Optional[str] = None  # stays None if getsourcefile throws
    try:
        src_file = inspect.getsourcefile(obj)
        lines, starting_lineno = inspect.getsourcelines(obj)
    except OSError as os_err:
        details = (
            f"Can't get source for {obj}. TorchScript requires source access in "
            "order to carry out compilation, make sure original .py files are "
            "available."
        )
        if error_msg:
            details = details + "\n" + error_msg
        raise OSError(details) from os_err

    return lines, starting_lineno, src_file
|
35 |
+
|
36 |
+
|
37 |
+
def normalize_source_lines(sourcelines: List[str]) -> List[str]:
    """
    Re-indent a function's source so every line sits at or beyond the
    indentation of its ``def`` line.

    This permits comments and continued string literals that are less
    indented than the rest of the function body.

    Args:
        sourcelines: function source code, separated into lines by
            the '\n' character
    Returns:
        A list of source lines that have been correctly aligned
    """

    def _strip_leading(text: str, prefix: str) -> str:
        # Drop `prefix` from the front of `text` only when it is really there.
        if text.startswith(prefix):
            return text[len(prefix):]
        return text

    # Locate the line holding the function definition (`def`).
    def_idx = None
    for lineno, line in enumerate(sourcelines):
        if line.lstrip().startswith("def"):
            def_idx = lineno
            break

    # Lambdas have no "def" anywhere in their source, so nothing to align.
    # Currently trying to JIT compile a lambda throws later, in `parse_def()`,
    # but this case might be handled in the future.
    if def_idx is None:
        return sourcelines

    # Leading whitespace of the definition line is the target indentation.
    definition = sourcelines[def_idx]
    indent = definition.split("def")[0]

    # Prepend that indentation to every line before and after the `def`.
    before = [
        indent + _strip_leading(line, indent) for line in sourcelines[:def_idx]
    ]
    after = [
        indent + _strip_leading(line, indent) for line in sourcelines[def_idx + 1 :]
    ]

    # Stitch the pieces back together in the original order.
    before.append(definition)
    return before + after
|
82 |
+
|
83 |
+
|
84 |
+
# Thin wrapper around SourceRangeFactory to store extra metadata
# about the function-to-be-compiled.
class SourceContext(SourceRangeFactory):
    def __init__(
        self,
        source,
        filename,
        file_lineno,
        leading_whitespace_len,
        uses_true_division=True,
        funcname=None,
    ):
        # The base factory handles mapping offsets back to source ranges;
        # the extra attributes below carry metadata the frontend consults.
        super().__init__(source, filename, file_lineno, leading_whitespace_len)
        # Whether `/` means true division (Python 3 semantics); defaults True.
        self.uses_true_division = uses_true_division
        # Re-stored here even though the base class receives it, so callers
        # can read it directly off the context.
        self.filename = filename
        # Name of the function being compiled, or None when unknown.
        self.funcname = funcname
|
100 |
+
|
101 |
+
|
102 |
+
@functools.lru_cache(maxsize=None)
def make_source_context(*args):
    # Memoized SourceContext factory: identical argument tuples share a single
    # context object, avoiding re-parsing the same source repeatedly.
    return SourceContext(*args)
|
105 |
+
|
106 |
+
|
107 |
+
def fake_range():
    # Placeholder source range (offsets 0..1 in an empty, file-less context)
    # for IR nodes that have no real source location.
    return SourceContext("", None, 0, 0).make_raw_range(0, 1)
|
109 |
+
|
110 |
+
|
111 |
+
class ParsedDef(NamedTuple):
    # Result of parse_def(): the parsed AST plus everything needed to map it
    # back to its original source location.
    ast: ast.Module  # module whose body is the single parsed function def
    ctx: SourceContext  # source-range factory for diagnostics
    source: str  # normalized source text that was parsed
    filename: Optional[str]  # None when the source file could not be found
    file_lineno: int  # line number of the function within its file
|
117 |
+
|
118 |
+
|
119 |
+
def parse_def(fn):
    """Parse *fn*'s source into an AST plus source-location metadata.

    Fetches and normalizes the function's source, parses it, and verifies it
    contains exactly one top-level function definition.

    Args:
        fn: the Python function to parse.

    Returns:
        A ParsedDef carrying the AST, a (cached) SourceContext, the normalized
        source text, the filename (may be None), and the starting line number.

    Raises:
        OSError: if the source cannot be retrieved.
        RuntimeError: if the source does not hold a single top-level function.
    """
    sourcelines, file_lineno, filename = get_source_lines_and_file(
        fn, ErrorReport.call_stack()
    )
    sourcelines = normalize_source_lines(sourcelines)
    source = "".join(sourcelines)
    dedent_src = dedent(source)
    py_ast = ast.parse(dedent_src)
    if len(py_ast.body) != 1 or not isinstance(py_ast.body[0], ast.FunctionDef):
        # BUG FIX: the message previously interpolated the literal "(unknown)"
        # instead of the actual filename available right here.
        raise RuntimeError(
            f"Expected a single top-level function: {filename}:{file_lineno}"
        )
    # Indentation removed by dedent(), measured on the first line; needed so
    # source ranges can be mapped back to the original columns.
    leading_whitespace_len = len(source.split("\n", 1)[0]) - len(
        dedent_src.split("\n", 1)[0]
    )
    ctx = make_source_context(
        source, filename, file_lineno, leading_whitespace_len, True, fn.__name__
    )
    return ParsedDef(py_ast, ctx, source, filename, file_lineno)
|
env-llmeval/lib/python3.10/site-packages/torch/_storage_docs.py
ADDED
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
"""Adds docstrings to Storage functions"""
|
2 |
+
|
3 |
+
import torch._C
|
4 |
+
from torch._C import _add_docstr as add_docstr
|
5 |
+
|
6 |
+
|
7 |
+
# Names of torch._C storage classes that add_docstr_all() targets.
storage_classes = [
    "StorageBase",
]
|
10 |
+
|
11 |
+
|
12 |
+
def add_docstr_all(method, docstr):
    """Attach *docstr* to *method* on every class named in ``storage_classes``."""
    for class_name in storage_classes:
        storage_cls = getattr(torch._C, class_name)
        try:
            add_docstr(getattr(storage_cls, method), docstr)
        except AttributeError:
            # Not every storage class exposes every method; skip the ones
            # that don't have this attribute.
            pass
|
19 |
+
|
20 |
+
|
21 |
+
# Attach the documentation for Storage.from_file to all storage classes.
add_docstr_all(
    "from_file",
    """
from_file(filename, shared=False, size=0) -> Storage

Creates a CPU storage backed by a memory-mapped file.

If ``shared`` is ``True``, then memory is shared between all processes.
All changes are written to the file. If ``shared`` is ``False``, then the changes on
the storage do not affect the file.

``size`` is the number of elements in the storage. If ``shared`` is ``False``,
then the file must contain at least ``size * sizeof(Type)`` bytes
(``Type`` is the type of storage, in the case of an ``UnTypedStorage`` the file must contain at
least ``size`` bytes). If ``shared`` is ``True`` the file will be created if needed.

Args:
    filename (str): file name to map
    shared (bool): whether to share memory (whether ``MAP_SHARED`` or ``MAP_PRIVATE`` is passed to the
                   underlying `mmap(2) call <https://man7.org/linux/man-pages/man2/mmap.2.html>`_)
    size (int): number of elements in the storage
""",
)
|
env-llmeval/lib/python3.10/site-packages/torch/_streambase.py
ADDED
@@ -0,0 +1,45 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from abc import ABC, abstractmethod
|
2 |
+
|
3 |
+
|
4 |
+
class _StreamBase(ABC):
    r"""Base stream class abstraction for multi-backend Streams to inherit from."""

    @abstractmethod
    def wait_event(self, event):
        # Make all future work on this stream wait for `event`.
        raise NotImplementedError()

    @abstractmethod
    def wait_stream(self, stream):
        # Make all future work on this stream wait for `stream`'s queued work.
        raise NotImplementedError()

    @abstractmethod
    def record_event(self, event=None):
        # Record an event on this stream; implementations may create one when
        # `event` is None.
        raise NotImplementedError()

    @abstractmethod
    def query(self):
        # Return whether all submitted work has completed.
        raise NotImplementedError()

    @abstractmethod
    def synchronize(self):
        # Block the caller until all submitted work has completed.
        raise NotImplementedError()

    @abstractmethod
    def __eq__(self, stream):
        # Stream identity comparison; subclasses define what "same stream" means.
        raise NotImplementedError()
|
30 |
+
|
31 |
+
|
32 |
+
class _EventBase(ABC):
    r"""Base Event class abstraction for multi-backend Events to inherit from."""

    @abstractmethod
    def wait(self, stream=None):
        # Make the given stream (or a default one) wait for this event.
        raise NotImplementedError()

    @abstractmethod
    def query(self):
        # Return whether the work captured by this event has completed.
        raise NotImplementedError()

    @abstractmethod
    def synchronize(self):
        # Block the caller until the work captured by this event completes.
        raise NotImplementedError()
|
env-llmeval/lib/python3.10/site-packages/torch/_tensor.py
ADDED
@@ -0,0 +1,1518 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import copyreg
|
2 |
+
import enum
|
3 |
+
import functools
|
4 |
+
import warnings
|
5 |
+
from collections import OrderedDict
|
6 |
+
from copy import deepcopy
|
7 |
+
from numbers import Number
|
8 |
+
from typing import Any, Dict, Optional, Tuple, Union
|
9 |
+
|
10 |
+
import torch
|
11 |
+
import torch._C as _C
|
12 |
+
import torch.utils.hooks as hooks
|
13 |
+
from torch._namedtensor_internals import (
|
14 |
+
check_serializing_named_tensor,
|
15 |
+
is_ellipsis,
|
16 |
+
resolve_ellipsis,
|
17 |
+
single_ellipsis_index,
|
18 |
+
unzip_namedshape,
|
19 |
+
update_names,
|
20 |
+
)
|
21 |
+
from torch.overrides import (
|
22 |
+
get_default_nowrap_functions,
|
23 |
+
handle_torch_function,
|
24 |
+
has_torch_function,
|
25 |
+
has_torch_function_unary,
|
26 |
+
has_torch_function_variadic,
|
27 |
+
)
|
28 |
+
from torch.utils.dlpack import DLDeviceType
|
29 |
+
|
30 |
+
|
31 |
+
def _handle_torch_function_and_wrap_type_error_to_not_implemented(f):
    """Decorator for binary-op style methods.

    Routes calls through ``__torch_function__`` overrides when any argument
    has one, and converts a ``TypeError`` from the wrapped function into
    ``NotImplemented`` so Python's reflected-operator machinery can run.
    """

    @functools.wraps(f, assigned=functools.WRAPPER_ASSIGNMENTS)
    def wrapped(*args, **kwargs):
        try:
            # See https://github.com/pytorch/pytorch/issues/75462
            if has_torch_function(args):
                return handle_torch_function(wrapped, args, *args, **kwargs)
            return f(*args, **kwargs)
        except TypeError:
            return NotImplemented

    return wrapped
|
45 |
+
|
46 |
+
|
47 |
+
# Should not be used, this is kept only for BC of loading old serialized Tensor subclasses
def _rebuild_from_type(func, type, args, dict):
    """Legacy unpickling helper: rebuild a tensor, re-apply its subclass,
    and restore its __dict__ (no-op subclass step for plain Tensor)."""
    if type is Tensor:
        return func(*args)

    rebuilt = func(*args).as_subclass(type)
    rebuilt.__dict__ = dict
    return rebuilt
|
55 |
+
|
56 |
+
|
57 |
+
def _rebuild_from_type_v2(func, new_type, args, state):
    # Unpickling helper: rebuild the base tensor via func(*args), re-apply the
    # subclass type, then restore Python-level state.
    ret = func(*args)
    if type(ret) is not new_type:
        ret = ret.as_subclass(new_type)
    # Tensor does define __setstate__ even though it doesn't define
    # __getstate__. So only use __setstate__ if it is NOT the one defined
    # on Tensor
    if (
        getattr(ret.__class__, "__setstate__", Tensor.__setstate__)
        is not Tensor.__setstate__
    ):
        # Subclass provided its own __setstate__; honor it.
        ret.__setstate__(state)
    else:
        # Generic restoration of __dict__/slots handled by torch._utils.
        ret = torch._utils._set_obj_state(ret, state)
    return ret
|
72 |
+
|
73 |
+
|
74 |
+
# NB: If you subclass Tensor, and want to share the subclassed class
|
75 |
+
# across processes, you must also update torch/multiprocessing/reductions.py
|
76 |
+
# to define a ForkingPickler serialization mode for the class.
|
77 |
+
#
|
78 |
+
# NB: If you add a new method to Tensor, you must update
|
79 |
+
# torch/_C/__init__.pyi.in to add a type annotation for your method;
|
80 |
+
# otherwise, it will not show up in autocomplete.
|
81 |
+
class Tensor(torch._C.TensorBase):
|
82 |
+
def __deepcopy__(self, memo):
    # Deep-copy protocol: copy storage, quantizer state and autograd metadata,
    # honoring `memo` so aliasing within the copied object graph is preserved.
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.__deepcopy__, (self,), self, memo)
    if not self.is_leaf:
        raise RuntimeError(
            "Only Tensors created explicitly by the user "
            "(graph leaves) support the deepcopy protocol at the moment. "
            "If you were attempting to deepcopy a module, this may be because "
            "of a torch.nn.utils.weight_norm usage, "
            "see https://github.com/pytorch/pytorch/pull/103001"
        )
    if id(self) in memo:
        # Already copied during this deepcopy pass; reuse the copy.
        return memo[id(self)]
    with torch.no_grad():
        # TODO: skipping storage copy is wrong for meta, as meta
        # does accurate alias tracking; however, the code below
        # doesn't work because of
        # https://github.com/pytorch/pytorch/issues/47442
        # Update the test in test_serialization if you remove 'meta' from here
        if (
            self.is_sparse
            or self.device.type
            in ["lazy", "xla", "mtia", "mps", "ort", "meta", "ipu"]
            or (
                not torch._C._has_storage(self)
                and self.device.type == torch._C._get_privateuse1_backend_name()
            )
            or (type(self) is not Tensor and self.data_ptr() == 0)
        ):
            # Devices/subclasses without a directly copyable storage: fall
            # back to clone() and require it to preserve the subclass type.
            new_tensor = self.clone()
            if type(new_tensor) is not type(self):
                raise RuntimeError(
                    "The default implementation of __deepcopy__() for wrapper subclasses "
                    "only works for subclass types that implement clone() and for which "
                    "cloning returns another instance of the same subclass. You should either "
                    "properly implement clone() for your subclass or override __deepcopy__() "
                    "if it is intended behavior for clone() to return an instance of a "
                    "different type."
                )
        else:
            # Deep-copy the storage itself (memoized) and rebuild on top.
            new_storage = self._typed_storage()._deepcopy(memo)
            if self.is_quantized:
                # quantizer_params can be different type based on torch attribute
                quantizer_params: Union[
                    Tuple[torch.qscheme, float, int],
                    Tuple[torch.qscheme, Tensor, Tensor, int],
                ]
                if self.qscheme() == torch.per_tensor_affine:
                    quantizer_params = (
                        self.qscheme(),
                        self.q_scale(),
                        self.q_zero_point(),
                    )
                elif self.qscheme() in (
                    torch.per_channel_affine,
                    torch.per_channel_affine_float_qparams,
                ):
                    quantizer_params = (
                        self.qscheme(),
                        self.q_per_channel_scales(),
                        self.q_per_channel_zero_points(),
                        self.q_per_channel_axis(),
                    )
                else:
                    raise RuntimeError(
                        f"Unsupported qscheme {self.qscheme()} in deepcopy"
                    )
                # TODO: Once we decide to break serialization FC, no longer
                # need to wrap with TypedStorage
                new_tensor = torch._utils._rebuild_qtensor(
                    torch.storage.TypedStorage(
                        wrap_storage=new_storage._untyped_storage,
                        dtype=self.dtype,
                        _internal=True,
                    ),
                    self.storage_offset(),
                    self.size(),
                    self.stride(),
                    quantizer_params,
                    self.requires_grad,
                    self._backward_hooks,
                )
                if type(new_tensor) is not type(self):
                    raise RuntimeError(
                        "The default implementation of __deepcopy__() for quantized tensors "
                        "expects the tensor returned by torch._utils._rebuild_qtensor() to "
                        "match the type of the instance being copied. If you encounter this, "
                        "please open an issue on PyTorch's GitHub."
                    )
            else:
                new_tensor = self.new_empty([])
                if type(new_tensor) is not type(self):
                    raise RuntimeError(
                        "The default implementation of __deepcopy__() for non-wrapper subclasses "
                        "only works for subclass types that implement new_empty() and for which "
                        "that function returns another instance of the same subclass. You should "
                        "either properly implement new_empty() for your subclass or override "
                        "__deepcopy__() if it is intended behavior for new_empty() to return "
                        "an instance of a different type."
                    )
                # Point the fresh tensor at the copied storage with the same
                # geometry, then re-apply lazy conjugation/negation bits.
                new_tensor.set_(
                    new_storage, self.storage_offset(), self.size(), self.stride()
                )
                if self.is_conj():
                    new_tensor = new_tensor.conj_physical()
                if self.is_neg():
                    new_tensor = new_tensor.neg()
            # Restore autograd metadata on the copy.
            if self.requires_grad:
                new_tensor.requires_grad_()
            if self.grad is not None:
                new_tensor.grad = self.grad.__deepcopy__(memo)

    if type(self) is not Tensor:
        if type(new_tensor) is not type(self):
            raise RuntimeError(
                "Type of deepcopy result does not match the type of the source tensor. "
                "If you encounter this, please open an issue on PyTorch's GitHub."
            )

        # Plain Tensors don't have slots
        slots_to_save = copyreg._slotnames(self.__class__)  # type: ignore[attr-defined]
        for slot in slots_to_save:
            if hasattr(self, slot):
                setattr(new_tensor, slot, deepcopy(getattr(self, slot), memo))

    # Deep-copy the instance __dict__ (subclass attributes) as well.
    new_tensor.__dict__ = deepcopy(self.__dict__, memo)

    memo[id(self)] = new_tensor
    return new_tensor
|
211 |
+
|
212 |
+
def __reduce_ex__(self, proto):
    # Pickle entry point. The order of checks below matters: the fast path
    # must run before the __torch_function__ check so plain tensors skip the
    # override machinery entirely.
    state = torch._utils._get_obj_state(self)
    if type(self) is Tensor and not state:
        # Fast path for regular tensor without Python state.
        return self._reduce_ex_internal(proto)
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.__reduce_ex__, (self,), self, proto)
    func, args = self._reduce_ex_internal(proto)
    # Wrap the rebuild callable so unpickling restores both the subclass type
    # and the Python attribute/slot state captured above.
    return (_rebuild_from_type_v2, (func, type(self), args, state))
|
221 |
+
|
222 |
+
def storage(self):
    r"""
    storage() -> torch.TypedStorage

    Returns the underlying :class:`TypedStorage`.

    .. warning::

        :class:`TypedStorage` is deprecated. It will be removed in the future, and
        :class:`UntypedStorage` will be the only storage class. To access the
        :class:`UntypedStorage` directly, use :attr:`Tensor.untyped_storage()`.
    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.storage, (self,), self)

    # Warn on every public access; internal callers use _typed_storage()
    # directly to avoid triggering this deprecation warning.
    torch.storage._warn_typed_storage_removal(stacklevel=2)
    return self._typed_storage()
|
239 |
+
|
240 |
+
# For internal use only, to avoid raising deprecation warning
|
241 |
+
def _typed_storage(self):
|
242 |
+
untyped_storage = self.untyped_storage()
|
243 |
+
return torch.TypedStorage(
|
244 |
+
wrap_storage=untyped_storage, dtype=self.dtype, _internal=True
|
245 |
+
)
|
246 |
+
|
247 |
+
def _reduce_ex_internal(self, proto):
    """Build the (rebuild_fn, args) pair used to pickle this tensor.

    Chooses a serialization strategy based on device / layout / dtype:
    numpy round-trip for storage-less backends, metadata-only for meta
    tensors, and dedicated rebuild helpers for quantized, sparse, nested,
    and wrapper-subclass tensors. The final branch handles ordinary
    strided tensors.
    """
    check_serializing_named_tensor(self)
    # See Note [Don't serialize hooks]
    torch.utils.hooks.warn_if_has_hooks(self)
    # Always empty; kept in the args tuple only for serialization
    # forward-compatibility with older rebuild signatures.
    backward_hooks: Dict[Any, Any] = OrderedDict()
    # Note: Numpy array is chosen to be the rebuild component for XLA, MTIA, ORT Tensors.
    # We considered a few options:
    # 1. CPU tensor can't be used here.
    #    Otherwise in torch.load CPU storage is reconstructed with randomly
    #    initialized data, moved onto backend device, and then storage is updated
    #    to the serialized content. This works perfectly for CPU/CUDA but not these backends;
    #    their tensors are disconnected with storage so they don't get the update.
    # 2. Python list is not a good fit due to performance reason.
    #    `tolist()` converts every single element in the tensor into python objects
    #    and serialize them one by one.
    if self.device.type in ["xla", "mtia", "ort"] or (
        not torch._C._has_storage(self)
        and self.device.type == torch._C._get_privateuse1_backend_name()
    ):
        # Convert BFloat16 tensors to Float32 before conversion to numpy, as numpy doesn't
        # support BFloat16. The rebuild tensor from numpy takes in the original self.dtype,
        # this would reconstruct the BFloat16 tensor from numpy.
        numpy_tensor = (
            self.cpu().numpy()
            if self.dtype != torch.bfloat16
            else self.cpu().to(torch.float32).numpy()
        )
        return (
            torch._utils._rebuild_device_tensor_from_numpy,
            (numpy_tensor, self.dtype, str(self.device), self.requires_grad),
        )
    if self.device.type == "meta":
        # NB: This implementation BREAKS storage sharing. Current
        # hypothesis is that no one cares for meta tensors.
        arg_meta = (
            self.dtype,
            tuple(self.size()),
            self.stride(),
            self.requires_grad,
        )
        return (torch._utils._rebuild_meta_tensor_no_storage, arg_meta)
    if self.is_quantized:
        # quantizer_params can be different type based on torch attribute
        quantizer_params: Union[
            Tuple[torch.qscheme, float, int], Tuple[Any, Tensor, Tensor, int]
        ]
        if self.qscheme() == torch.per_tensor_affine:
            quantizer_params = (
                torch.per_tensor_affine,
                self.q_scale(),
                self.q_zero_point(),
            )
        elif self.qscheme() in (
            torch.per_channel_affine,
            torch.per_channel_affine_float_qparams,
        ):
            # convert scales and zero points to tuple to avoid recursive calls
            # when/if we get multi-axis quantized tensors in the future, the shape
            # is recoverable from the main tensor shape
            quantizer_params = (
                torch.per_channel_affine,
                self.q_per_channel_scales(),
                self.q_per_channel_zero_points(),
                self.q_per_channel_axis(),
            )
        else:
            raise RuntimeError(
                f"Serialization is not supported for tensors of type {self.qscheme()}"
            )
        # TODO: Once we decide to break serialization FC, no longer
        # need to wrap with TypedStorage
        args_qtensor = (
            torch.storage.TypedStorage(
                wrap_storage=self._typed_storage()._untyped_storage,
                dtype=self.dtype,
                _internal=True,
            ),
            self.storage_offset(),
            tuple(self.size()),
            self.stride(),
            quantizer_params,
            self.requires_grad,
            backward_hooks,
        )
        return (torch._utils._rebuild_qtensor, args_qtensor)
    elif self.is_sparse:
        if self.layout == torch.sparse_coo:
            args_sparse = (
                self.layout,
                (self._indices(), self._values(), self.size(), self.is_coalesced()),
            )
        else:
            raise NotImplementedError(
                f"sparse tensor __reduce_ex__ for layout `{self.layout}`"
            )
        return (torch._utils._rebuild_sparse_tensor, args_sparse)
    elif self.layout in {
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    }:
        # Compressed sparse formats pair one compressed index tensor with one
        # plain index tensor; which is which depends on row- vs column-major.
        if self.layout in {torch.sparse_csr, torch.sparse_bsr}:
            compressed_indices, plain_indices = (
                self.crow_indices(),
                self.col_indices(),
            )
        else:
            compressed_indices, plain_indices = (
                self.ccol_indices(),
                self.row_indices(),
            )
        args_sparse_compressed = (
            self.layout,
            (
                compressed_indices,
                plain_indices,
                self.values(),
                self.size(),
            ),
        )
        return (torch._utils._rebuild_sparse_tensor, args_sparse_compressed)
    elif self.is_nested:
        args_nested = (
            # NB: values() currently returns the storage as a buffer in an unsafe way.
            # Ideally, we'd use a private API for this instead. TODO: Switch to this if
            # we ever get around to adding it.
            self.values(),
            self._nested_tensor_size(),
            self._nested_tensor_strides(),
            self._nested_tensor_storage_offsets(),
        )
        return (torch._utils._rebuild_nested_tensor, args_nested)
    elif (
        self.data_ptr() == 0
        and type(self) is not torch.Tensor
        and type(self).__torch_dispatch__ is not torch.Tensor.__torch_dispatch__
    ):
        # Wrapper subclass (no real storage, custom __torch_dispatch__):
        # serialize only the metadata needed to reconstruct the shell.
        arg_wrapper_subclass = (
            type(self),
            self.dtype,
            tuple(self.size()),
            self.stride(),
            self.storage_offset(),
            self.layout,
            self.device,
            self.requires_grad,
        )
        return (torch._utils._rebuild_wrapper_subclass, arg_wrapper_subclass)
    else:
        # dtypes that have no TypedStorage representation are serialized via
        # the v3 rebuild path with a raw UntypedStorage.
        v3_dtypes = [
            torch.float8_e5m2,
            torch.float8_e4m3fn,
            torch.bits8,
            torch.bits16,
            torch.bits1x8,
            torch.bits2x4,
            torch.bits4x2,
        ]
        if self.dtype in v3_dtypes:
            rebuild_func = torch._utils._rebuild_tensor_v3
            storage = self.untyped_storage()
        else:
            # TODO: Once we decide to break serialization FC, no longer
            # need to wrap with TypedStorage
            rebuild_func = torch._utils._rebuild_tensor_v2  # type: ignore[assignment]
            storage = torch.storage.TypedStorage(
                wrap_storage=self._typed_storage()._untyped_storage,
                dtype=self.dtype,
                _internal=True,
            )  # type: ignore[assignment]
        args = (
            storage,
            self.storage_offset(),
            tuple(self.size()),
            self.stride(),
            self.requires_grad,
            backward_hooks,
        )  # previously was self._backward_hooks

        # The v3 path needs the dtype explicitly since UntypedStorage
        # carries none.
        if isinstance(storage, torch.storage.UntypedStorage):
            args = args + (self.dtype,)  # type: ignore[assignment]

        metadata = torch._utils.get_tensor_metadata(self)
        if metadata:
            args = args + (metadata,)  # type: ignore[assignment]

        return (rebuild_func, args)
|
435 |
+
|
436 |
+
def __setstate__(self, state):
    """Restore tensor state from a legacy pickle ``state`` tuple.

    Accepts either the 4-tuple legacy Tensor format or the 5-tuple legacy
    Variable format; modern pickles go through _rebuild_tensor_v2 instead.
    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.__setstate__, (self,), self, state)
    # Warning: this method is NOT called when you torch.load() a tensor;
    # that is managed by _rebuild_tensor_v2
    if not self.is_leaf:
        raise RuntimeError("__setstate__ can be only called on leaf Tensors")
    if len(state) == 4:
        # legacy serialization of Tensor
        self.set_(*state)
        return
    elif len(state) == 5:
        # legacy serialization of Variable
        self.data = state[0]
        # Reorder the remaining entries into (requires_grad, _, hooks).
        state = (state[3], state[4], state[2])
    # The setting of _backward_hooks is expected to be a no-op.
    # See Note [Don't serialize hooks]
    self.requires_grad, _, self._backward_hooks = state
|
454 |
+
|
455 |
+
def __repr__(self, *, tensor_contents=None):
    # `tensor_contents`, when given, replaces the rendered data payload in
    # the printed string (keyword-only; used by printing utilities).
    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor.__repr__, (self,), self, tensor_contents=tensor_contents
        )
    # All strings are unicode in Python 3.
    return torch._tensor_str._str(self, tensor_contents=tensor_contents)
|
462 |
+
|
463 |
+
def backward(
    self, gradient=None, retain_graph=None, create_graph=False, inputs=None
):
    r"""Computes the gradient of current tensor wrt graph leaves.

    The graph is differentiated using the chain rule. If the tensor is
    non-scalar (i.e. its data has more than one element) and requires
    gradient, the function additionally requires specifying ``gradient``.
    It should be a tensor of matching type and location, that contains
    the gradient of the differentiated function w.r.t. ``self``.

    This function accumulates gradients in the leaves - you might need to zero
    ``.grad`` attributes or set them to ``None`` before calling it.
    See :ref:`Default gradient layouts<default-grad-layouts>`
    for details on the memory layout of accumulated gradients.

    .. note::

        If you run any forward ops, create ``gradient``, and/or call ``backward``
        in a user-specified CUDA stream context, see
        :ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.

    .. note::

        When ``inputs`` are provided and a given input is not a leaf,
        the current implementation will call its grad_fn (though it is not strictly needed to get this gradients).
        It is an implementation detail on which the user should not rely.
        See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.

    Args:
        gradient (Tensor or None): Gradient w.r.t. the
            tensor. If it is a tensor, it will be automatically converted
            to a Tensor that does not require grad unless ``create_graph`` is True.
            None values can be specified for scalar Tensors or ones that
            don't require grad. If a None value would be acceptable then
            this argument is optional.
        retain_graph (bool, optional): If ``False``, the graph used to compute
            the grads will be freed. Note that in nearly all cases setting
            this option to True is not needed and often can be worked around
            in a much more efficient way. Defaults to the value of
            ``create_graph``.
        create_graph (bool, optional): If ``True``, graph of the derivative will
            be constructed, allowing to compute higher order derivative
            products. Defaults to ``False``.
        inputs (sequence of Tensor): Inputs w.r.t. which the gradient will be
            accumulated into ``.grad``. All other Tensors will be ignored. If not
            provided, the gradient is accumulated into all the leaf Tensors that were
            used to compute the :attr:`tensors`.
    """
    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor.backward,
            (self,),
            self,
            gradient=gradient,
            retain_graph=retain_graph,
            create_graph=create_graph,
            inputs=inputs,
        )
    # Delegate to the autograd engine; gradients accumulate into the .grad
    # of the graph leaves (or of `inputs` when given).
    torch.autograd.backward(
        self, gradient, retain_graph, create_graph, inputs=inputs
    )
|
525 |
+
|
526 |
+
def register_hook(self, hook):
    r"""Registers a backward hook.

    The hook will be called every time a gradient with respect to the
    Tensor is computed. The hook should have the following signature::

        hook(grad) -> Tensor or None


    The hook should not modify its argument, but it can optionally return
    a new gradient which will be used in place of :attr:`grad`.

    This function returns a handle with a method ``handle.remove()``
    that removes the hook from the module.

    .. note::
        See :ref:`backward-hooks-execution` for more information on how when this hook
        is executed, and how its execution is ordered relative to other hooks.

    Example::

        >>> v = torch.tensor([0., 0., 0.], requires_grad=True)
        >>> h = v.register_hook(lambda grad: grad * 2)  # double the gradient
        >>> v.backward(torch.tensor([1., 2., 3.]))
        >>> v.grad

         2
         4
         6
        [torch.FloatTensor of size (3,)]

        >>> h.remove()  # removes the hook
    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.register_hook, (self,), self, hook)
    if not self.requires_grad:
        raise RuntimeError(
            "cannot register a hook on a tensor that doesn't require gradient"
        )
    if self._backward_hooks is None:
        # First hook on this tensor: create the dict and, for non-leaf
        # tensors, make the grad_fn consult it during backward.
        self._backward_hooks = OrderedDict()
        if self.grad_fn is not None:
            self.grad_fn._register_hook_dict(self)
    handle = hooks.RemovableHandle(self._backward_hooks)
    self._backward_hooks[handle.id] = hook
    return handle
|
572 |
+
|
573 |
+
def register_post_accumulate_grad_hook(self, hook):
    r"""Registers a backward hook that runs after grad accumulation.

    The hook will be called after all gradients for a tensor have been accumulated,
    meaning that the .grad field has been updated on that tensor. The post
    accumulate grad hook is ONLY applicable for leaf tensors (tensors without a
    .grad_fn field). Registering this hook on a non-leaf tensor will error!

    The hook should have the following signature::

        hook(param: Tensor) -> None

    Note that, unlike other autograd hooks, this hook operates on the tensor
    that requires grad and not the grad itself. The hook can in-place modify
    and access its Tensor argument, including its .grad field.

    This function returns a handle with a method ``handle.remove()``
    that removes the hook from the module.

    .. note::
        See :ref:`backward-hooks-execution` for more information on how when this hook
        is executed, and how its execution is ordered relative to other hooks. Since
        this hook runs during the backward pass, it will run in no_grad mode (unless
        create_graph is True). You can use torch.enable_grad() to re-enable autograd
        within the hook if you need it.

    Example::

        >>> v = torch.tensor([0., 0., 0.], requires_grad=True)
        >>> lr = 0.01
        >>> # simulate a simple SGD update
        >>> h = v.register_post_accumulate_grad_hook(lambda p: p.add_(p.grad, alpha=-lr))
        >>> v.backward(torch.tensor([1., 2., 3.]))
        >>> v
        tensor([-0.0100, -0.0200, -0.0300], requires_grad=True)

        >>> h.remove()  # removes the hook
    """
    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor.register_post_accumulate_grad_hook, (self,), self, hook
        )
    if not self.requires_grad:
        raise RuntimeError(
            "cannot register a hook on a tensor that doesn't require gradient"
        )
    if self.grad_fn is not None:
        raise RuntimeError(
            "post accumulate grad hooks cannot be registered on non-leaf tensors"
        )
    if self._post_accumulate_grad_hooks is None:
        # Lazily created on first registration.
        self._post_accumulate_grad_hooks: Dict[Any, Any] = OrderedDict()
    handle = hooks.RemovableHandle(self._post_accumulate_grad_hooks)
    self._post_accumulate_grad_hooks[handle.id] = hook
    return handle
|
628 |
+
|
629 |
+
def reinforce(self, reward):
    """Removed API stub: always raises with migration instructions.

    Kept only so callers of the pre-0.4 ``reinforce()`` API get a helpful
    error pointing at :mod:`torch.distributions`.

    Raises:
        RuntimeError: always.
    """

    # Fix: the parameter was previously named `str`, shadowing the builtin.
    def trim(text):
        # Strip the indentation the triple-quoted message carries.
        return "\n".join([line.strip() for line in text.split("\n")])

    raise RuntimeError(
        trim(
            r"""reinforce() was removed.
            Use torch.distributions instead.
            See https://pytorch.org/docs/master/distributions.html

            Instead of:

            probs = policy_network(state)
            action = probs.multinomial()
            next_state, reward = env.step(action)
            action.reinforce(reward)
            action.backward()

            Use:

            probs = policy_network(state)
            # NOTE: categorical is equivalent to what used to be called multinomial
            m = torch.distributions.Categorical(probs)
            action = m.sample()
            next_state, reward = env.step(action)
            loss = -m.log_prob(action) * reward
            loss.backward()
        """
        )
    )
|
659 |
+
|
660 |
+
# detach / detach_ are implemented in C++ on TensorBase; here we only attach
# their user-facing documentation via _C._add_docstr. The doc text below is a
# runtime argument, not a Python docstring.
detach = _C._add_docstr(
    _C.TensorBase.detach,
    r"""
Returns a new Tensor, detached from the current graph.

The result will never require gradient.

This method also affects forward mode AD gradients and the result will never
have forward mode AD gradients.

.. note::

  Returned Tensor shares the same storage with the original one.
  In-place modifications on either of them will be seen, and may trigger
  errors in correctness checks.
  IMPORTANT NOTE: Previously, in-place size / stride / storage changes
  (such as `resize_` / `resize_as_` / `set_` / `transpose_`) to the returned tensor
  also update the original tensor. Now, these in-place changes will not update the
  original tensor anymore, and will instead trigger an error.
  For sparse tensors:
  In-place indices / values changes (such as `zero_` / `copy_` / `add_`) to the
  returned tensor will not update the original tensor anymore, and will instead
  trigger an error.
""",
)

detach_ = _C._add_docstr(
    _C.TensorBase.detach_,
    r"""
Detaches the Tensor from the graph that created it, making it a leaf.
Views cannot be detached in-place.

This method also affects forward mode AD gradients and the result will never
have forward mode AD gradients.
""",
)
|
696 |
+
|
697 |
+
def is_shared(self):
    r"""Return whether this tensor's storage lives in shared memory.

    CUDA tensors always report ``True``.
    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.is_shared, (self,), self)
    storage = self._typed_storage()
    return storage._is_shared()
|
705 |
+
|
706 |
+
def share_memory_(self):
    r"""Move the underlying storage into shared memory, in place.

    No-op when the storage is already shared, and for CUDA tensors.
    Tensors in shared memory cannot be resized.
    Returns ``self`` for chaining.

    See :meth:`torch.UntypedStorage.share_memory_` for more details.
    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.share_memory_, (self,), self)
    storage = self._typed_storage()
    storage._share_memory_()
    return self
|
718 |
+
|
719 |
+
def __reversed__(self):
|
720 |
+
r"""Reverses the tensor along dimension 0."""
|
721 |
+
if has_torch_function_unary(self):
|
722 |
+
return handle_torch_function(Tensor.__reversed__, (self,), self)
|
723 |
+
if self.dim() == 0:
|
724 |
+
return self
|
725 |
+
else:
|
726 |
+
return self.flip(0)
|
727 |
+
|
728 |
+
def norm(
    self,
    p: Optional[Union[float, str]] = "fro",
    dim=None,
    keepdim=False,
    dtype=None,
):
    r"""See :func:`torch.norm`"""
    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor.norm, (self,), self, p=p, dim=dim, keepdim=keepdim, dtype=dtype
        )
    # p/dim/keepdim are forwarded positionally; torch.norm picks the
    # overload from their runtime types.
    return torch.norm(self, p, dim, keepdim, dtype=dtype)
|
741 |
+
|
742 |
+
def solve(self, other):
    """Deprecated forwarding shim; the implementation lives in ``._linalg_utils``."""
    from ._linalg_utils import solve as _solve

    return _solve(self, other)
|
746 |
+
|
747 |
+
def lstsq(self, other):
    """Deprecated forwarding shim; the implementation lives in ``._linalg_utils``."""
    from ._linalg_utils import lstsq as _lstsq

    return _lstsq(self, other)
|
751 |
+
|
752 |
+
def eig(self, eigenvectors=False):
    """Deprecated forwarding shim; the implementation lives in ``._linalg_utils``."""
    from ._linalg_utils import eig as _eig

    return _eig(self, eigenvectors=eigenvectors)
|
756 |
+
|
757 |
+
def symeig(self, eigenvectors=False):
    """Deprecated forwarding shim; delegates to ``._linalg_utils._symeig``."""
    from ._linalg_utils import _symeig

    return _symeig(self, eigenvectors=eigenvectors)
|
761 |
+
|
762 |
+
def lu(self, pivot=True, get_infos=False):
    r"""See :func:`torch.lu`"""
    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor.lu, (self,), self, pivot=pivot, get_infos=get_infos
        )

    # When the caller wants the info tensors, skip the internal error check
    # and hand them back instead; otherwise check errors and drop infos.
    LU, pivots, infos = torch._lu_with_info(
        self, pivot=pivot, check_errors=(not get_infos)
    )
    return (LU, pivots, infos) if get_infos else (LU, pivots)
|
777 |
+
|
778 |
+
def stft(
    self,
    n_fft: int,
    hop_length: Optional[int] = None,
    win_length: Optional[int] = None,
    window: "Optional[Tensor]" = None,
    center: bool = True,
    pad_mode: str = "reflect",
    normalized: bool = False,
    onesided: Optional[bool] = None,
    return_complex: Optional[bool] = None,
):
    r"""See :func:`torch.stft`

    .. warning::
      This function changed signature at version 0.4.1. Calling with
      the previous signature may cause error or return incorrect result.
    """
    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor.stft,
            (self,),
            self,
            n_fft,
            hop_length=hop_length,
            win_length=win_length,
            window=window,
            center=center,
            pad_mode=pad_mode,
            normalized=normalized,
            onesided=onesided,
            return_complex=return_complex,
        )
    # All optionals except return_complex are forwarded positionally; their
    # order must match torch.stft's signature exactly.
    return torch.stft(
        self,
        n_fft,
        hop_length,
        win_length,
        window,
        center,
        pad_mode,
        normalized,
        onesided,
        return_complex=return_complex,
    )
|
823 |
+
|
824 |
+
def istft(
    self,
    n_fft: int,
    hop_length: Optional[int] = None,
    win_length: Optional[int] = None,
    window: "Optional[Tensor]" = None,
    center: bool = True,
    normalized: bool = False,
    onesided: Optional[bool] = None,
    length: Optional[int] = None,
    return_complex: bool = False,
):
    r"""See :func:`torch.istft`"""
    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor.istft,
            (self,),
            self,
            n_fft,
            hop_length=hop_length,
            win_length=win_length,
            window=window,
            center=center,
            normalized=normalized,
            onesided=onesided,
            length=length,
            return_complex=return_complex,
        )
    # All optionals except return_complex are forwarded positionally; their
    # order must match torch.istft's signature exactly.
    return torch.istft(
        self,
        n_fft,
        hop_length,
        win_length,
        window,
        center,
        normalized,
        onesided,
        length,
        return_complex=return_complex,
    )
|
864 |
+
|
865 |
+
def resize(self, *sizes):
    # Deprecated out-of-place resize; kept only for backward compatibility.
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.resize, (self,), self, *sizes)
    warnings.warn("non-inplace resize is deprecated")
    from torch.autograd._functions import Resize

    return Resize.apply(self, sizes)
|
872 |
+
|
873 |
+
def resize_as(self, tensor):
    # Deprecated out-of-place resize-to-match; kept only for backward
    # compatibility.
    if has_torch_function_variadic(self, tensor):
        return handle_torch_function(Tensor.resize_as, (self, tensor), self, tensor)
    warnings.warn("non-inplace resize_as is deprecated")
    from torch.autograd._functions import Resize

    return Resize.apply(self, tensor.size())
|
880 |
+
|
881 |
+
def split(self, split_size, dim=0):
    r"""See :func:`torch.split`"""
    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor.split, (self,), self, split_size, dim=dim
        )
    if isinstance(split_size, Tensor):
        try:
            # A scalar tensor is accepted as a single split size.
            split_size = int(split_size)
        except ValueError:
            # NOTE(review): non-scalar tensors appear intended to fall
            # through to split_with_sizes below — confirm int() raises
            # ValueError (not another exception type) for that case.
            pass

    if isinstance(split_size, (int, torch.SymInt)):
        return torch._VF.split(self, split_size, dim)  # type: ignore[attr-defined]
    else:
        return torch._VF.split_with_sizes(self, split_size, dim)
|
897 |
+
|
898 |
+
def unique(self, sorted=True, return_inverse=False, return_counts=False, dim=None):
    r"""Returns the unique elements of the input tensor.

    See :func:`torch.unique`
    """
    # Collect the options once so both the override path and the plain path
    # forward exactly the same keyword arguments.
    options = dict(
        sorted=sorted,
        return_inverse=return_inverse,
        return_counts=return_counts,
        dim=dim,
    )
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.unique, (self,), self, **options)
    return torch.unique(self, **options)
|
920 |
+
|
921 |
+
def unique_consecutive(self, return_inverse=False, return_counts=False, dim=None):
    r"""Eliminates all but the first element from every consecutive group of equivalent elements.

    See :func:`torch.unique_consecutive`
    """
    # Gather the options once and forward identically on both paths.
    options = dict(
        return_inverse=return_inverse,
        return_counts=return_counts,
        dim=dim,
    )
    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor.unique_consecutive, (self,), self, **options
        )
    return torch.unique_consecutive(self, **options)
|
938 |
+
|
939 |
+
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rsub__(self, other):
    # Reflected subtraction: evaluates `other - self` when `other.__sub__`
    # returned NotImplemented.
    return _C._VariableFunctions.rsub(self, other)
|
942 |
+
|
943 |
+
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rdiv__(self, other):
    # Reflected division: other / self, computed as (1 / self) * other.
    return self.reciprocal() * other
|
946 |
+
|
947 |
+
# True division reuses the reflected-division implementation (__rdiv__);
# in-place division delegates to the C implementation.
__rtruediv__ = __rdiv__
__itruediv__ = _C.TensorBase.__idiv__

# Wrap the C pow implementations so type errors surface as NotImplemented,
# letting Python fall back to the other operand's reflected method.
__pow__ = _handle_torch_function_and_wrap_type_error_to_not_implemented(
    _C.TensorBase.pow
)
__ipow__ = _handle_torch_function_and_wrap_type_error_to_not_implemented(
    _C.TensorBase.pow_
)
|
956 |
+
|
957 |
+
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rmod__(self, other):
    # Reflected modulo: other % self.
    return torch.remainder(other, self)
|
960 |
+
|
961 |
+
def __format__(self, format_spec):
|
962 |
+
if has_torch_function_unary(self):
|
963 |
+
return handle_torch_function(Tensor.__format__, (self,), self, format_spec)
|
964 |
+
if self.dim() == 0 and not self.is_meta and type(self) is Tensor:
|
965 |
+
return self.item().__format__(format_spec)
|
966 |
+
return object.__format__(self, format_spec)
|
967 |
+
|
968 |
+
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rpow__(self, other):
    # Reflected power: other ** self.
    return torch.pow(other, self)
|
971 |
+
|
972 |
+
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __floordiv__(self, other):
    # Floor division: self // other.
    return torch.floor_divide(self, other)
|
975 |
+
|
976 |
+
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rfloordiv__(self, other):
    # Reflected floor division: other // self.
    return torch.floor_divide(other, self)
|
979 |
+
|
980 |
+
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rlshift__(self, other):
    # Reflected left shift: other << self.
    return torch.bitwise_left_shift(other, self)
|
983 |
+
|
984 |
+
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rrshift__(self, other):
    # Reflected right shift: other >> self.
    return torch.bitwise_right_shift(other, self)
|
987 |
+
|
988 |
+
@_handle_torch_function_and_wrap_type_error_to_not_implemented
def __rmatmul__(self, other):
    # Reflected matrix multiplication: other @ self.
    return torch.matmul(other, self)
|
991 |
+
|
992 |
+
# Unary operators delegate directly to the C implementations.
__pos__ = _C.TensorBase.positive
__neg__ = _C.TensorBase.neg
__abs__ = _C.TensorBase.abs
|
995 |
+
|
996 |
+
def __len__(self):
|
997 |
+
if has_torch_function_unary(self):
|
998 |
+
return handle_torch_function(Tensor.__len__, (self,), self)
|
999 |
+
if self.dim() == 0:
|
1000 |
+
raise TypeError("len() of a 0-d tensor")
|
1001 |
+
if torch._C._get_tracing_state():
|
1002 |
+
warnings.warn(
|
1003 |
+
"Using len to get tensor shape might cause the trace to be incorrect. "
|
1004 |
+
"Recommended usage would be tensor.shape[0]. "
|
1005 |
+
"Passing a tensor of different shape might lead to errors or silently give "
|
1006 |
+
"incorrect results.",
|
1007 |
+
category=torch.jit.TracerWarning,
|
1008 |
+
stacklevel=2,
|
1009 |
+
)
|
1010 |
+
return self.shape[0]
|
1011 |
+
|
1012 |
+
def __iter__(self):
    """Iterate over slices of this tensor along dimension 0."""
    # NB: we use 'imap' and not 'map' here, so that in Python 2 we get a
    # generator and don't eagerly perform all the indexes. This could
    # save us work, and also helps keep trace ordering deterministic
    # (e.g., if you zip(*hiddens), the eager map will force all the
    # indexes of hiddens[0] before hiddens[1], while the generator
    # map will interleave them.)
    # NB: We have intentionally skipped __torch_function__ dispatch here.
    # See gh-54457
    if self.dim() == 0:
        raise TypeError("iteration over a 0-d tensor")
    if torch._C._get_tracing_state():
        # As with __len__, the number of iterations is fixed at trace time.
        warnings.warn(
            "Iterating over a tensor might cause the trace to be incorrect. "
            "Passing a tensor of different shape won't change the number of "
            "iterations executed (and might lead to errors or silently give "
            "incorrect results).",
            category=torch.jit.TracerWarning,
            stacklevel=2,
        )
    return iter(self.unbind(0))
|
1033 |
+
|
1034 |
+
def __hash__(self):
    """Hash by object identity (tensors are mutable, so value hashing is unsafe)."""
    # Do NOT handle __torch_function__ here as user's default
    # implementation that handle most functions will most likely do it wrong.
    # It can be easily overridden by defining this method on the user
    # subclass if needed.
    return id(self)
|
1040 |
+
|
1041 |
+
def __dir__(self):
    """Return attribute names for this tensor, filtered by device/layout support."""
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.__dir__, (self,), self)
    tensor_methods = dir(self.__class__)
    tensor_methods.remove("volatile")  # deprecated
    attrs = list(self.__dict__.keys())
    keys = tensor_methods + attrs

    # property only available dense, cuda tensors
    if (not self.is_cuda) or self.is_sparse:
        keys.remove("__cuda_array_interface__")

    return sorted(keys)
|
1054 |
+
|
1055 |
+
# Numpy array interface, to support `numpy.asarray(tensor) -> ndarray`
# NumPy consults this when mixing operands: higher priority means Tensor's
# reflected ops win over ndarray's.
__array_priority__ = 1000  # prefer Tensor ops over numpy ones
|
1057 |
+
|
1058 |
+
def __array__(self, dtype=None):
    """NumPy protocol hook: convert this tensor to an ndarray.

    Args:
        dtype: optional NumPy dtype to cast the result to; ``copy=False``
            means the cast is a no-op (shares memory) when already that dtype.
    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.__array__, (self,), self, dtype=dtype)
    if dtype is None:
        return self.numpy()
    else:
        return self.numpy().astype(dtype, copy=False)
|
1065 |
+
|
1066 |
+
# Wrap Numpy array again in a suitable tensor when done, to support e.g.
# `numpy.sin(tensor) -> tensor` or `numpy.greater(tensor, 0) -> ByteTensor`
def __array_wrap__(self, array):
    """NumPy protocol hook: wrap a ufunc's ndarray result back into a tensor."""
    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor.__array_wrap__, (self,), self, array=array
        )
    if array.dtype == bool:
        # Workaround, torch has no built-in bool tensor
        array = array.astype("uint8")
    return torch.from_numpy(array)
|
1077 |
+
|
1078 |
+
def __contains__(self, element):
    r"""Check if `element` is present in tensor

    Args:
        element (Tensor or scalar): element to be checked
            for presence in current tensor
    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.__contains__, (self,), self, element)
    if isinstance(
        element, (torch.Tensor, Number, torch.SymInt, torch.SymFloat, torch.SymBool)
    ):
        # type hint doesn't understand the __contains__ result array
        return (element == self).any().item()  # type: ignore[union-attr]

    raise RuntimeError(
        f"Tensor.__contains__ only supports Tensor or scalar, but you passed in a {type(element)}."
    )
|
1096 |
+
|
1097 |
+
@property
def __cuda_array_interface__(self):
    """Array view description for cuda tensors.

    See:
    https://numba.pydata.org/numba-doc/latest/cuda/cuda_array_interface.html
    """
    if has_torch_function_unary(self):
        # TODO mypy doesn't support @property, see: https://github.com/python/mypy/issues/6185
        return handle_torch_function(Tensor.__cuda_array_interface__.__get__, (self,), self)  # type: ignore[attr-defined]

    # raise AttributeError for unsupported tensors, so that
    # hasattr(cpu_tensor, "__cuda_array_interface__") is False.
    if not self.is_cuda:
        raise AttributeError(
            "Can't get __cuda_array_interface__ on non-CUDA tensor type: %s "
            "If CUDA data is required use tensor.cuda() to copy tensor to device memory."
            % self.type()
        )

    if self.is_sparse:
        raise AttributeError(
            "Can't get __cuda_array_interface__ on sparse type: %s "
            "Use Tensor.to_dense() to convert to a dense tensor first."
            % self.type()
        )

    # RuntimeError, matching tensor.__array__() behavior.
    if self.requires_grad:
        raise RuntimeError(
            "Can't get __cuda_array_interface__ on Variable that requires grad. "
            "If gradients aren't required, use var.detach() to get Variable that doesn't require grad."
        )

    # CUDA devices are little-endian and tensors are stored in native byte
    # order. 1-byte entries are endian-agnostic.
    # A KeyError here means the dtype has no interface typestr mapping.
    typestr = {
        torch.complex64: "<c8",
        torch.complex128: "<c16",
        torch.float16: "<f2",
        torch.float32: "<f4",
        torch.float64: "<f8",
        torch.uint8: "|u1",
        torch.int8: "|i1",
        torch.int16: "<i2",
        torch.int32: "<i4",
        torch.int64: "<i8",
    }[self.dtype]

    itemsize = self.element_size()

    shape = tuple(self.shape)
    if self.is_contiguous():
        # __cuda_array_interface__ v2 requires the strides to be omitted
        # (either not set or set to None) for C-contiguous arrays.
        strides = None
    else:
        # Interface strides are in bytes; Tensor.stride() is in elements.
        strides = tuple(s * itemsize for s in self.stride())
    data_ptr = self.data_ptr() if self.numel() > 0 else 0
    data = (data_ptr, False)  # read-only is false

    return dict(typestr=typestr, shape=shape, strides=strides, data=data, version=2)
|
1159 |
+
|
1160 |
+
def storage_type(self):
    r"""storage_type() -> type

    Returns the type of the underlying storage.

    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.storage_type, (self,), self)

    # Typed storages are deprecated; warn before returning the legacy class.
    torch.storage._warn_typed_storage_removal()

    return self._typed_storage()._get_legacy_storage_class()
|
1172 |
+
|
1173 |
+
def refine_names(self, *names):
    r"""Refines the dimension names of :attr:`self` according to :attr:`names`.

    Refining is a special case of renaming that "lifts" unnamed dimensions.
    A ``None`` dim can be refined to have any name; a named dim can only be
    refined to have the same name.

    Because named tensors can coexist with unnamed tensors, refining names
    gives a nice way to write named-tensor-aware code that works with both
    named and unnamed tensors.

    :attr:`names` may contain up to one Ellipsis (``...``).
    The Ellipsis is expanded greedily; it is expanded in-place to fill
    :attr:`names` to the same length as ``self.dim()`` using names from the
    corresponding indices of ``self.names``.

    Python 2 does not support Ellipsis but one may use a string literal
    instead (``'...'``).

    Args:
        names (iterable of str): The desired names of the output tensor. May
            contain up to one Ellipsis.

    Examples::

        >>> imgs = torch.randn(32, 3, 128, 128)
        >>> named_imgs = imgs.refine_names('N', 'C', 'H', 'W')
        >>> named_imgs.names
        ('N', 'C', 'H', 'W')

        >>> tensor = torch.randn(2, 3, 5, 7, 11)
        >>> tensor = tensor.refine_names('A', ..., 'B', 'C')
        >>> tensor.names
        ('A', None, None, 'B', 'C')

    .. warning::
        The named tensor API is experimental and subject to change.

    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.refine_names, (self,), self, *names)
    # Expand a single `...` against the tensor's current names before
    # handing off to the C++ implementation.
    names = resolve_ellipsis(names, self.names, "refine_names")
    return super().refine_names(names)
|
1216 |
+
|
1217 |
+
def align_to(self, *names):
    r"""Permutes the dimensions of the :attr:`self` tensor to match the order
    specified in :attr:`names`, adding size-one dims for any new names.

    All of the dims of :attr:`self` must be named in order to use this method.
    The resulting tensor is a view on the original tensor.

    All dimension names of :attr:`self` must be present in :attr:`names`.
    :attr:`names` may contain additional names that are not in ``self.names``;
    the output tensor has a size-one dimension for each of those new names.

    :attr:`names` may contain up to one Ellipsis (``...``).
    The Ellipsis is expanded to be equal to all dimension names of :attr:`self`
    that are not mentioned in :attr:`names`, in the order that they appear
    in :attr:`self`.

    Python 2 does not support Ellipsis but one may use a string literal
    instead (``'...'``).

    Args:
        names (iterable of str): The desired dimension ordering of the
            output tensor. May contain up to one Ellipsis that is expanded
            to all unmentioned dim names of :attr:`self`.

    Examples::

        >>> tensor = torch.randn(2, 2, 2, 2, 2, 2)
        >>> named_tensor = tensor.refine_names('A', 'B', 'C', 'D', 'E', 'F')

        # Move the F and E dims to the front while keeping the rest in order
        >>> named_tensor.align_to('F', 'E', ...)

    .. warning::
        The named tensor API is experimental and subject to change.

    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.align_to, (self,), self, *names)
    ellipsis_idx = single_ellipsis_index(names, "align_to")
    if ellipsis_idx is None:
        # No `...` present: names fully specify the output order.
        return super().align_to(names)
    # With `...`, pass the explicit names plus where the ellipsis sat so the
    # C++ side can splice in the unmentioned dims at that position.
    return super().align_to(
        [name for name in names if not is_ellipsis(name)], ellipsis_idx
    )
|
1261 |
+
|
1262 |
+
def unflatten(self, dim, sizes):
    r"""
    unflatten(dim, sizes) -> Tensor

    See :func:`torch.unflatten`.

    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.unflatten, (self,), self, dim, sizes)

    if not sizes:
        raise RuntimeError("unflatten: sizes must be non-empty")

    names = None
    # Named-tensor form: `sizes` is an OrderedDict or a sequence of
    # (name, size) pairs; split it into parallel names/sizes sequences.
    if isinstance(sizes, OrderedDict) or (
        isinstance(sizes, (tuple, list)) and isinstance(sizes[0], (tuple, list))
    ):
        names, sizes = unzip_namedshape(sizes)
        return super().unflatten(dim, sizes, names)
    else:
        return super().unflatten(dim, sizes)
|
1283 |
+
|
1284 |
+
def rename_(self, *names, **rename_map):
    """In-place version of :meth:`~Tensor.rename`."""

    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor.rename_, (self,), self, *names, **rename_map
        )

    # Note [rename_ / rename API]
    # The Python API for these is different from the C++ API. In Python:
    # 1) tensor.rename(*names) takes a vararglist of names
    # 2) tensor.rename(**rename_map) takes a map of names to rename.
    # C++ is static, making it difficult to implement similar behavior.
    return update_names(self, names, rename_map, inplace=True)
|
1298 |
+
|
1299 |
+
def rename(self, *names, **rename_map):
    """Renames dimension names of :attr:`self`.

    There are two main usages:

    ``self.rename(**rename_map)`` returns a view on tensor that has dims
    renamed as specified in the mapping :attr:`rename_map`.

    ``self.rename(*names)`` returns a view on tensor, renaming all
    dimensions positionally using :attr:`names`.
    Use ``self.rename(None)`` to drop names on a tensor.

    One cannot specify both positional args :attr:`names` and keyword args
    :attr:`rename_map`.

    Examples::

        >>> imgs = torch.rand(2, 3, 5, 7, names=('N', 'C', 'H', 'W'))
        >>> renamed_imgs = imgs.rename(N='batch', C='channels')
        >>> renamed_imgs.names
        ('batch', 'channels', 'H', 'W')

        >>> renamed_imgs = imgs.rename(None)
        >>> renamed_imgs.names
        (None, None, None, None)

        >>> renamed_imgs = imgs.rename('batch', 'channel', 'height', 'width')
        >>> renamed_imgs.names
        ('batch', 'channel', 'height', 'width')

    .. warning::
        The named tensor API is experimental and subject to change.

    """
    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor.rename, (self,), self, *names, **rename_map
        )

    # See Note [rename_ / rename API]
    return update_names(self, names, rename_map, inplace=False)
|
1340 |
+
|
1341 |
+
def to_sparse_coo(self):
    """Convert a tensor to :ref:`coordinate format <sparse-coo-docs>`.

    Examples::

        >>> dense = torch.randn(5, 5)
        >>> sparse = dense.to_sparse_coo()
        >>> sparse._nnz()
        25

    """
    # COO is the default sparse layout, so this is just an alias.
    return self.to_sparse()
|
1353 |
+
|
1354 |
+
def dim_order(self):
    """

    dim_order() -> tuple

    Returns a tuple of int describing the dim order or physical layout of :attr:`self`.

    Args:
        None

    Dim order represents how dimensions are laid out in memory,
    starting from the outermost to the innermost dimension.

    Example::
        >>> torch.empty((2, 3, 5, 7)).dim_order()
        (0, 1, 2, 3)
        >>> torch.empty((2, 3, 5, 7), memory_format=torch.channels_last).dim_order()
        (0, 2, 3, 1)

    .. warning::
        The dim_order tensor API is experimental and subject to change.

    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.dim_order, (self,), self)

    # Local import to avoid a module-level import cycle with _prims_common.
    import torch._prims_common as utils

    return tuple(utils.compute_elementwise_output_logical_to_physical_perm(self))
|
1383 |
+
|
1384 |
+
def _update_names(self, names, inplace):
    """Internal dispatcher shared by rename/rename_ once args are normalized."""
    if has_torch_function_unary(self):
        return handle_torch_function(
            Tensor._update_names, (self,), self, names, inplace
        )

    # See Note [rename_ / rename API]
    if inplace:
        return super().rename_(names)
    else:
        return super().rename(names)
|
1395 |
+
|
1396 |
+
@classmethod
def __torch_function__(cls, func, types, args=(), kwargs=None):
    """
    This __torch_function__ implementation wraps subclasses such that
    methods called on subclasses return a subclass instance instead of
    a ``torch.Tensor`` instance.

    One corollary to this is that you need coverage for torch.Tensor
    methods if implementing __torch_function__ for subclasses.

    We recommend always calling ``super().__torch_function__`` as the base
    case when doing the above.

    While not mandatory, we recommend making `__torch_function__` a classmethod.
    """
    if kwargs is None:
        kwargs = {}

    # If any operand's type is not a superclass relationship with `cls`,
    # defer to the other type's handler per the torch_function protocol.
    if not all(issubclass(cls, t) for t in types):
        return NotImplemented

    with _C.DisableTorchFunctionSubclass():
        # Run the real op without re-entering this handler.
        ret = func(*args, **kwargs)
        if func in get_default_nowrap_functions():
            # Some functions (e.g. accessors) must not be rewrapped.
            return ret
        else:
            # Recursively cast Tensor results back to the subclass.
            return _convert(ret, cls)
|
1423 |
+
|
1424 |
+
# Plain Tensor opts out of __torch_dispatch__; subclasses override as needed.
__torch_dispatch__ = _C._disabled_torch_dispatch_impl
|
1425 |
+
|
1426 |
+
def __dlpack__(self, stream=None):
    """
    Creates a DLpack `capsule https://data-apis.org/array-api/latest/design_topics/data_interchange.html#data-interchange`_
    of the current tensor to be exported to other libraries.

    This function will be called from the `from_dlpack` method
    of the library that will consume the capsule. `from_dlpack` passes the current
    stream to this method as part of the specification.

    Args:
        stream (integer or None): An optional Python integer representing a
        pointer to a CUDA stream. The current stream is synchronized with
        this stream before the capsule is created, and since the capsule
        shares its storage with the tensor this make it safe to access from
        both streams.  If None or -1 is passed then no synchronization is performed.
        If 1 (on CUDA) or 0 (on ROCM) then the default stream is used for
        synchronization.
    """
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.__dlpack__, (self,), self, stream)

    # DLPack capsules can't capture all of PyTorch's semantics,
    # so we prohibit exporting tensors that would lose their properties like
    # requires_grad and having the conjugate bit set.
    if self.requires_grad:
        raise RuntimeError(
            "Can't export tensors that require gradient, use tensor.detach()"
        )
    if self.is_conj():
        raise RuntimeError("Can't export tensors with the conjugate bit set")
    if self.layout != torch.strided:
        raise RuntimeError(
            "Can't export tensors with layout other than torch.strided"
        )

    if stream is not None and type(stream) is not int:
        # Stream pointers in CUDA/ROCm are uniquely numbered and can
        # be retrieved from their integer value.
        raise TypeError("stream must be ``int`` or ``none``")
    elif stream is not None and stream != -1:
        if self.device.type == "cuda":
            # NB: This logic handles the special case values for default
            # streams and must be kept in sync with from_dlpack in
            # torch/utils/dlpack.py
            if stream == 1 and torch.version.hip is None:
                stream = torch.cuda.default_stream()
            elif stream == 0 and torch.version.hip is not None:
                stream = torch.cuda.default_stream()
            else:
                stream = torch.cuda.ExternalStream(stream)
            # Only synchronize on different streams
            sync_stream = torch.cuda.current_stream()
            if stream != sync_stream:
                # Make the consumer stream wait until all work queued on the
                # producer (current) stream so far has finished.
                event = torch.cuda.Event()
                event.record(sync_stream)
                stream.wait_event(event)
    return torch.to_dlpack(self)
|
1483 |
+
|
1484 |
+
def __dlpack_device__(self) -> Tuple[enum.IntEnum, int]:
    """Return the DLPack (device_type, device_index) pair for this tensor."""
    if has_torch_function_unary(self):
        return handle_torch_function(Tensor.__dlpack_device__, (self,), self)
    device = self.device
    # DLPack requires an index; CPU/default devices report index 0.
    idx = device.index if device.index is not None else 0
    torch_device_type = device.type
    # NB: ROCm builds still report device.type == "cuda", so check hip first.
    if torch_device_type == "cuda" and torch.version.hip is not None:
        device_type = DLDeviceType.kDLROCM
    elif torch_device_type == "cpu" and self.is_pinned():
        device_type = DLDeviceType.kDLCPUPinned
    elif torch_device_type == "cuda":
        device_type = DLDeviceType.kDLGPU
    elif torch_device_type == "cpu":
        device_type = DLDeviceType.kDLCPU
    elif self.device.type == "xpu":
        device_type = DLDeviceType.kDLOneAPI
    else:
        raise ValueError(f"Unknown device type {torch_device_type} for Dlpack")
    return (device_type, idx)
|
1503 |
+
|
1504 |
+
# Present the class as `torch.Tensor` (not its defining submodule) in
# repr(), pickling, and documentation.
__module__ = "torch"
|
1505 |
+
|
1506 |
+
|
1507 |
+
def _convert(ret, cls):
|
1508 |
+
if cls is Tensor:
|
1509 |
+
return ret
|
1510 |
+
|
1511 |
+
if isinstance(ret, Tensor) and not isinstance(ret, cls):
|
1512 |
+
ret = ret.as_subclass(cls)
|
1513 |
+
|
1514 |
+
if isinstance(ret, (tuple, list)):
|
1515 |
+
# Also handles things like namedtuples
|
1516 |
+
ret = type(ret)(_convert(r, cls) for r in ret)
|
1517 |
+
|
1518 |
+
return ret
|
env-llmeval/lib/python3.10/site-packages/torch/_tensor_docs.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/torch/_tensor_str.py
ADDED
@@ -0,0 +1,677 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import contextlib
|
2 |
+
import dataclasses
|
3 |
+
import math
|
4 |
+
import textwrap
|
5 |
+
from typing import Any, Dict, Optional
|
6 |
+
|
7 |
+
import torch
|
8 |
+
from torch import inf
|
9 |
+
|
10 |
+
|
11 |
+
@dataclasses.dataclass
class __PrinterOptions:
    """Mutable bag of global tensor-printing settings (see set_printoptions)."""

    # Digits shown after the decimal point for floats.
    precision: int = 4
    # Element count above which output is summarized with "...".
    threshold: float = 1000
    # Items shown at each end of a summarized dimension.
    edgeitems: int = 3
    # Target character width for inserting line breaks.
    linewidth: int = 80
    # Tri-state: True/False force sci notation on/off; None lets the
    # formatter decide automatically.
    sci_mode: Optional[bool] = None
|
18 |
+
|
19 |
+
|
20 |
+
# Module-wide singleton holding the active print settings.
PRINT_OPTS = __PrinterOptions()
|
21 |
+
|
22 |
+
|
23 |
+
# We could use **kwargs, but this will give better docs
|
24 |
+
def set_printoptions(
    precision=None,
    threshold=None,
    edgeitems=None,
    linewidth=None,
    profile=None,
    sci_mode=None,
):
    r"""Set options for printing. Items shamelessly taken from NumPy

    Args:
        precision: Number of digits of precision for floating point output
            (default = 4).
        threshold: Total number of array elements which trigger summarization
            rather than full `repr` (default = 1000).
        edgeitems: Number of array items in summary at beginning and end of
            each dimension (default = 3).
        linewidth: The number of characters per line for the purpose of
            inserting line breaks (default = 80). Thresholded matrices will
            ignore this parameter.
        profile: Sane defaults for pretty printing. Can override with any of
            the above options. (any one of `default`, `short`, `full`)
        sci_mode: Enable (True) or disable (False) scientific notation. If
            None (default) is specified, the value is defined by
            `torch._tensor_str._Formatter`. This value is automatically chosen
            by the framework.

    Example::

        >>> # Limit the precision of elements
        >>> torch.set_printoptions(precision=2)
        >>> torch.tensor([1.12345])
        tensor([1.12])
        >>> # Limit the number of elements shown
        >>> torch.set_printoptions(threshold=5)
        >>> torch.arange(10)
        tensor([0, 1, 2, ..., 7, 8, 9])
        >>> # Restore defaults
        >>> torch.set_printoptions(profile='default')
        >>> torch.tensor([1.12345])
        tensor([1.1235])
        >>> torch.arange(10)
        tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Preset bundles keyed by profile name, applied first so the explicit
    # keyword arguments below can selectively override them. An unrecognized
    # profile string is silently ignored, matching historical behavior.
    profiles = {
        "default": (4, 1000, 3, 80),
        "short": (2, 1000, 2, 80),
        "full": (4, inf, 3, 80),
    }
    preset = profiles.get(profile) if profile is not None else None
    if preset is not None:
        (
            PRINT_OPTS.precision,
            PRINT_OPTS.threshold,
            PRINT_OPTS.edgeitems,
            PRINT_OPTS.linewidth,
        ) = preset

    # Explicit per-option overrides take precedence over any profile.
    for attr, value in (
        ("precision", precision),
        ("threshold", threshold),
        ("edgeitems", edgeitems),
        ("linewidth", linewidth),
    ):
        if value is not None:
            setattr(PRINT_OPTS, attr, value)
    # sci_mode is always assigned: None is itself a meaningful value
    # (meaning "let the formatter decide").
    PRINT_OPTS.sci_mode = sci_mode
|
95 |
+
|
96 |
+
|
97 |
+
def get_printoptions() -> Dict[str, Any]:
    r"""Gets the current options for printing, as a dictionary that
    can be passed as ``**kwargs`` to set_printoptions().
    """
    # asdict() returns a fresh dict, so callers can't mutate PRINT_OPTS
    # through the result.
    return dataclasses.asdict(PRINT_OPTS)
|
102 |
+
|
103 |
+
|
104 |
+
@contextlib.contextmanager
def printoptions(**kwargs):
    r"""Context manager that temporarily changes the print options. Accepted
    arguments are same as :func:`set_printoptions`."""
    # Snapshot the current settings, apply the requested ones, and restore
    # the snapshot on exit even if the body raises.
    saved_options = get_printoptions()
    set_printoptions(**kwargs)
    try:
        yield
    finally:
        set_printoptions(**saved_options)
|
114 |
+
|
115 |
+
|
116 |
+
def tensor_totype(t):
    """Cast ``t`` to the widest float dtype usable on its device.

    MPS tensors go to float32 (the backend lacks float64 support per this
    code path); all other tensors are promoted to float64 so the summary
    statistics computed on them don't lose precision.
    """
    target_dtype = torch.float if t.is_mps else torch.double
    return t.to(dtype=target_dtype)
|
119 |
+
|
120 |
+
|
121 |
+
class _Formatter:
    """Computes and applies a fixed-width textual format for one tensor.

    On construction it scans the tensor's values to decide three things:
    whether values print as integers (``int_mode``), whether scientific
    notation is needed (``sci_mode``), and the column width (``max_width``)
    that makes all elements line up.
    """

    def __init__(self, tensor):
        self.floating_dtype = tensor.dtype.is_floating_point
        self.int_mode = True
        self.sci_mode = False
        self.max_width = 1

        with torch.no_grad():
            tensor_view = tensor.reshape(-1)

        if not self.floating_dtype:
            # Integer/bool/etc.: width is just the longest str() of any value.
            for value in tensor_view:
                value_str = f"{value}"
                self.max_width = max(self.max_width, len(value_str))

        else:
            # Only finite, nonzero values drive the mode/width decisions;
            # inf/nan/0 are formatted fine under any mode.
            nonzero_finite_vals = torch.masked_select(
                tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0)
            )

            if nonzero_finite_vals.numel() == 0:
                # no valid number, do nothing
                return

            # Convert to double for easy calculation. HalfTensor overflows with 1e8, and there's no div() on CPU.
            nonzero_finite_abs = tensor_totype(nonzero_finite_vals.abs())
            nonzero_finite_min = tensor_totype(nonzero_finite_abs.min())
            nonzero_finite_max = tensor_totype(nonzero_finite_abs.max())

            # A single non-integral value switches off int_mode.
            for value in nonzero_finite_vals:
                if value != torch.ceil(value):
                    self.int_mode = False
                    break

            if self.int_mode:
                # in int_mode for floats, all numbers are integers, and we append a decimal to nonfinites
                # to indicate that the tensor is of floating type. add 1 to the len to account for this.
                if (
                    nonzero_finite_max / nonzero_finite_min > 1000.0
                    or nonzero_finite_max > 1.0e8
                ):
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = f"{value:.0f}"
                        self.max_width = max(self.max_width, len(value_str) + 1)
            else:
                # Check if scientific representation should be used.
                if (
                    nonzero_finite_max / nonzero_finite_min > 1000.0
                    or nonzero_finite_max > 1.0e8
                    or nonzero_finite_min < 1.0e-4
                ):
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))

        # A user-set sci_mode overrides whatever was inferred above.
        if PRINT_OPTS.sci_mode is not None:
            self.sci_mode = PRINT_OPTS.sci_mode

    def width(self):
        # Column width every formatted element is padded to.
        return self.max_width

    def format(self, value):
        """Format one scalar, right-aligned to ``max_width`` characters."""
        if self.floating_dtype:
            if self.sci_mode:
                ret = f"{{:{self.max_width}.{PRINT_OPTS.precision}e}}".format(value)
            elif self.int_mode:
                ret = f"{value:.0f}"
                # Trailing "." marks integral floats; inf/nan don't get one.
                if not (math.isinf(value) or math.isnan(value)):
                    ret += "."
            else:
                ret = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
        else:
            ret = f"{value}"
        return (self.max_width - len(ret)) * " " + ret
|
205 |
+
|
206 |
+
|
207 |
+
def _scalar_str(self, formatter1, formatter2=None):
|
208 |
+
if formatter2 is not None:
|
209 |
+
real_str = _scalar_str(self.real, formatter1)
|
210 |
+
imag_str = (_scalar_str(self.imag, formatter2) + "j").lstrip()
|
211 |
+
# handles negative numbers, +0.0, -0.0
|
212 |
+
if imag_str[0] == "+" or imag_str[0] == "-":
|
213 |
+
return real_str + imag_str
|
214 |
+
else:
|
215 |
+
return real_str + "+" + imag_str
|
216 |
+
else:
|
217 |
+
return formatter1.format(self.item())
|
218 |
+
|
219 |
+
|
220 |
+
def _vector_str(self, indent, summarize, formatter1, formatter2=None):
    """Render a 1-D tensor as "[a, b, ...]", wrapping lines to
    PRINT_OPTS.linewidth.

    ``formatter2``, when given, formats the imaginary parts of a complex
    tensor (``formatter1`` then formats the real parts).
    """
    # length includes spaces and comma between elements
    element_length = formatter1.width() + 2
    if formatter2 is not None:
        # width for imag_formatter + an extra j for complex
        element_length += formatter2.width() + 1

    elements_per_line = max(
        1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length)))
    )

    def _val_formatter(val, formatter1=formatter1, formatter2=formatter2):
        # Join real and imaginary parts into "a+bj" / "a-bj" for complex
        # values; plain values go straight through formatter1.
        if formatter2 is not None:
            real_str = formatter1.format(val.real)
            imag_str = (formatter2.format(val.imag) + "j").lstrip()
            # handles negative numbers, +0.0, -0.0
            if imag_str[0] == "+" or imag_str[0] == "-":
                return real_str + imag_str
            else:
                return real_str + "+" + imag_str
        else:
            return formatter1.format(val)

    if summarize and not PRINT_OPTS.edgeitems:
        # edgeitems == 0: elide everything.
        data = ["..."]
    elif summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        # Show only the first/last edgeitems elements with " ..." between.
        data = (
            [_val_formatter(val) for val in self[: PRINT_OPTS.edgeitems].tolist()]
            + [" ..."]
            + [_val_formatter(val) for val in self[-PRINT_OPTS.edgeitems :].tolist()]
        )
    else:
        data = [_val_formatter(val) for val in self.tolist()]

    # Chunk the formatted elements into rows of elements_per_line each.
    data_lines = [
        data[i : i + elements_per_line] for i in range(0, len(data), elements_per_line)
    ]
    lines = [", ".join(line) for line in data_lines]
    return "[" + ("," + "\n" + " " * (indent + 1)).join(lines) + "]"
|
260 |
+
|
261 |
+
|
262 |
+
# formatter2 is only used for printing complex tensors.
# For complex tensors, formatter1 and formatter2 are the formatters for tensor.real
# and tensor.imag respectively
def _tensor_str_with_formatter(self, indent, summarize, formatter1, formatter2=None):
    """Recursively render ``self`` (any rank) using pre-built formatters."""
    dim = self.dim()

    if dim == 0:
        return _scalar_str(self, formatter1, formatter2)

    if dim == 1:
        return _vector_str(self, indent, summarize, formatter1, formatter2)

    # Rank >= 2: render each leading-dimension slice, eliding the middle
    # ones with "..." when summarizing.
    if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        slices = (
            [
                _tensor_str_with_formatter(
                    self[i], indent + 1, summarize, formatter1, formatter2
                )
                for i in range(0, PRINT_OPTS.edgeitems)
            ]
            + ["..."]
            + [
                _tensor_str_with_formatter(
                    self[i], indent + 1, summarize, formatter1, formatter2
                )
                for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))
            ]
        )
    else:
        slices = [
            _tensor_str_with_formatter(
                self[i], indent + 1, summarize, formatter1, formatter2
            )
            for i in range(0, self.size(0))
        ]

    # dim-1 newlines leave blank lines between slices of 3-D+ tensors.
    tensor_str = ("," + "\n" * (dim - 1) + " " * (indent + 1)).join(slices)
    return "[" + tensor_str + "]"
|
300 |
+
|
301 |
+
|
302 |
+
def _tensor_str(self, indent):
    """Build the bracketed data portion of a tensor's repr.

    Chooses formatters (separately for real/imag parts of complex tensors)
    and widens low-precision dtypes so Python-level formatting works.
    """
    if self.numel() == 0:
        return "[]"

    if self.has_names():
        # There are two main codepaths (possibly more) that tensor printing goes through:
        # - tensor data can fit comfortably on screen
        # - tensor data needs to be summarized
        # Some of the codepaths don't fully support named tensors, so we send in
        # an unnamed tensor to the formatting code as a workaround.
        self = self.rename(None)

    summarize = self.numel() > PRINT_OPTS.threshold

    if self._is_zerotensor():
        self = self.clone()

    # handle the negative bit
    if self.is_neg():
        self = self.resolve_neg()

    # Half/bfloat16/float8 values can't be formatted directly; widen to float32.
    if self.dtype in [
        torch.float16,
        torch.bfloat16,
        torch.float8_e5m2,
        torch.float8_e5m2fnuz,
        torch.float8_e4m3fn,
        torch.float8_e4m3fnuz,
    ]:
        self = self.float()

    if self.dtype is torch.complex32:
        self = self.cfloat()

    if self.dtype.is_complex:
        # handle the conjugate bit
        self = self.resolve_conj()
        real_formatter = _Formatter(
            get_summarized_data(self.real) if summarize else self.real
        )
        imag_formatter = _Formatter(
            get_summarized_data(self.imag) if summarize else self.imag
        )
        return _tensor_str_with_formatter(
            self, indent, summarize, real_formatter, imag_formatter
        )
    else:
        formatter = _Formatter(get_summarized_data(self) if summarize else self)
        return _tensor_str_with_formatter(self, indent, summarize, formatter)
|
351 |
+
|
352 |
+
|
353 |
+
def _add_suffixes(tensor_str, suffixes, indent, force_newline):
    """Append ``suffixes`` (e.g. "dtype=...", "device=...") after the tensor
    body, wrapping to a new indented line whenever the current line would
    exceed PRINT_OPTS.linewidth, and close with ")"."""
    pieces = [tensor_str]
    # NOTE(review): this computes len - rfind("\n") + 1, which is two more
    # than the literal last-line length; kept verbatim so wrapping decisions
    # are identical to the original.
    line_len = len(tensor_str) - tensor_str.rfind("\n") + 1
    for suffix in suffixes:
        width = len(suffix)
        needs_wrap = force_newline or line_len + width + 2 > PRINT_OPTS.linewidth
        if needs_wrap:
            pieces.append(",\n" + " " * indent + suffix)
            line_len = indent + width
            force_newline = False
        else:
            pieces.append(", " + suffix)
            line_len = line_len + width + 2
    pieces.append(")")
    return "".join(pieces)
|
367 |
+
|
368 |
+
|
369 |
+
def get_summarized_data(self):
    """Return a smaller tensor holding only the edge elements that will be
    shown when the tensor is summarized with "...", recursing over the
    leading dimension."""
    dim = self.dim()
    if dim == 0:
        return self
    if dim == 1:
        if self.size(0) > 2 * PRINT_OPTS.edgeitems:
            return torch.cat(
                (self[: PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems :])
            )
        else:
            return self
    if not PRINT_OPTS.edgeitems:
        # edgeitems == 0: nothing to show; keep only the rank via empty dims.
        return self.new_empty([0] * self.dim())
    elif self.size(0) > 2 * PRINT_OPTS.edgeitems:
        start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
        end = [self[i] for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))]
        return torch.stack([get_summarized_data(x) for x in (start + end)])
    else:
        return torch.stack([get_summarized_data(x) for x in self])
|
388 |
+
|
389 |
+
|
390 |
+
def _str_intern(inp, *, tensor_contents=None):
    """Core implementation of tensor repr/str.

    Builds "prefix(" + tensor_str + suffixes + ")" where the suffixes carry
    device, dtype, layout, grad_fn, etc. Handles dense, sparse (COO and
    compressed), quantized, nested, functional, meta/fake and functorch
    tensors. ``tensor_contents``, when given, replaces the rendered data.
    """
    if torch._C._functorch.is_functorch_wrapped_tensor(inp):
        return _functorch_wrapper_str_intern(inp, tensor_contents=tensor_contents)
    is_plain_tensor = type(inp) is torch.Tensor or type(inp) is torch.nn.Parameter
    if inp.is_nested:
        prefix = "nested_tensor("
    elif is_plain_tensor:
        prefix = "tensor("
    else:
        prefix = f"{type(inp).__name__}("
    indent = len(prefix)
    suffixes = []
    custom_contents_provided = tensor_contents is not None
    if custom_contents_provided:
        tensor_str = tensor_contents

    # This is used to extract the primal value and thus disable the forward AD
    # within this function.
    # TODO(albanD) This needs to be updated when more than one level is supported
    self, tangent = torch.autograd.forward_ad.unpack_dual(inp)

    # Note [Print tensor device]:
    # A general logic here is we only print device when it doesn't match
    # the device specified in default tensor type.
    # Currently torch.set_default_tensor_type() only supports CPU/CUDA, thus
    # torch._C._get_default_device() only returns either cpu or cuda.
    # In other cases, we don't have a way to set them as default yet,
    # and we should always print out device for them.
    if (
        self.device.type != torch._C._get_default_device()
        or (
            self.device.type == "cuda"
            and torch.cuda.current_device() != self.device.index
        )
        or (self.device.type == "mps")
    ):
        suffixes.append("device='" + str(self.device) + "'")

    # Tensor printing performs tensor operations like slice, indexing, etc to make it in a
    # representable format. These operations on ipu/xla/lazy/mtia tensor results in compilations. Hence,
    # to avoid compilations, copying the tensor to cpu before printing.
    if self.device.type in ["xla", "lazy", "ipu", "mtia"]:
        self = self.to("cpu")

    # TODO: add an API to map real -> complex dtypes
    _default_complex_dtype = (
        torch.cdouble if torch.get_default_dtype() == torch.double else torch.cfloat
    )
    has_default_dtype = self.dtype in (
        torch.get_default_dtype(),
        _default_complex_dtype,
        torch.int64,
        torch.bool,
    )
    if self.is_sparse:
        # Sparse COO: render indices and values tensors separately.
        suffixes.append("size=" + str(tuple(self.shape)))
        from torch._subclasses.fake_tensor import FakeTensor

        if not self.is_meta and not isinstance(self, FakeTensor):
            suffixes.append("nnz=" + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        if not custom_contents_provided:
            indices_prefix = "indices=tensor("
            indices = self._indices().detach()
            indices_str = _tensor_str(indices, indent + len(indices_prefix))
            if indices.numel() == 0:
                indices_str += ", size=" + str(tuple(indices.shape))
            values_prefix = "values=tensor("
            values = self._values().detach()
            values_str = _tensor_str(values, indent + len(values_prefix))
            if values.numel() == 0:
                values_str += ", size=" + str(tuple(values.shape))
            tensor_str = (
                indices_prefix
                + indices_str
                + "),\n"
                + " " * indent
                + values_prefix
                + values_str
                + ")"
            )
    elif self.layout in {
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    }:
        # Compressed sparse layouts: render compressed indices, plain
        # indices, and values; the accessor pair depends on the layout.
        suffixes.append("size=" + str(tuple(self.shape)))
        suffixes.append("nnz=" + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        if not custom_contents_provided:
            compressed_indices_method, plain_indices_method = {
                torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
                torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
                torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
                torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
            }[self.layout]
            if self.layout in {torch.sparse_csr, torch.sparse_bsr}:
                cdimname, pdimname = "row", "column"
            else:
                cdimname, pdimname = "column", "row"
            compressed_indices_prefix = f"c{cdimname[:3]}_indices=tensor("
            compressed_indices = compressed_indices_method(self).detach()
            compressed_indices_str = _tensor_str(
                compressed_indices, indent + len(compressed_indices_prefix)
            )
            if compressed_indices.numel() == 0:
                compressed_indices_str += ", size=" + str(
                    tuple(compressed_indices.shape)
                )
            plain_indices_prefix = f"{pdimname[:3]}_indices=tensor("
            plain_indices = plain_indices_method(self).detach()
            plain_indices_str = _tensor_str(
                plain_indices, indent + len(plain_indices_prefix)
            )
            if plain_indices.numel() == 0:
                plain_indices_str += ", size=" + str(tuple(plain_indices.shape))
            values_prefix = "values=tensor("
            values = self.values().detach()
            values_str = _tensor_str(values, indent + len(values_prefix))
            if values.numel() == 0:
                values_str += ", size=" + str(tuple(values.shape))
            tensor_str = (
                compressed_indices_prefix
                + compressed_indices_str
                + "),\n"
                + " " * indent
                + plain_indices_prefix
                + plain_indices_str
                + "),\n"
                + " " * indent
                + values_prefix
                + values_str
                + ")"
            )
    elif self.is_quantized:
        suffixes.append("size=" + str(tuple(self.shape)))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        suffixes.append("quantization_scheme=" + str(self.qscheme()))
        if (
            self.qscheme() == torch.per_tensor_affine
            or self.qscheme() == torch.per_tensor_symmetric
        ):
            suffixes.append("scale=" + str(self.q_scale()))
            suffixes.append("zero_point=" + str(self.q_zero_point()))
        elif (
            self.qscheme() == torch.per_channel_affine
            or self.qscheme() == torch.per_channel_symmetric
            or self.qscheme() == torch.per_channel_affine_float_qparams
        ):
            suffixes.append("scale=" + str(self.q_per_channel_scales()))
            suffixes.append("zero_point=" + str(self.q_per_channel_zero_points()))
            suffixes.append("axis=" + str(self.q_per_channel_axis()))
        if not custom_contents_provided:
            # Show the dequantized values as the data.
            tensor_str = _tensor_str(self.dequantize(), indent)
    elif self.is_nested:
        if not custom_contents_provided:

            def indented_str(s, indent):
                # NOTE: `indent` is unused; each constituent repr is shifted
                # by a fixed two spaces.
                return "\n".join(f"  {line}" for line in s.split("\n"))

            strs = ",\n".join(
                indented_str(str(t), indent + 1)
                for t in torch.ops.aten.unbind.int(self, 0)
            )
            tensor_str = f"[\n{strs}\n]"
    elif torch._is_functional_tensor(self):
        prefix = "_to_functional_tensor("
        tensor_str = repr(torch._from_functional_tensor(self))
    else:
        # Circular import problem, so we import it here
        from torch._subclasses.fake_tensor import FakeTensor

        if self.is_meta or isinstance(self, FakeTensor):
            suffixes.append("size=" + str(tuple(self.shape)))
            if self.dtype != torch.get_default_dtype():
                suffixes.append("dtype=" + str(self.dtype))
            # TODO: This implies that ellipses is valid syntax for allocating
            # a meta tensor or FakeTensor, which it could be, but it isn't right now
            if not custom_contents_provided:
                tensor_str = "..."
        else:
            if self.numel() == 0 and not self.is_sparse:
                # Explicitly print the shape if it is not (0,), to match NumPy behavior
                if self.dim() != 1:
                    suffixes.append("size=" + str(tuple(self.shape)))

                # In an empty tensor, there are no elements to infer if the dtype
                # should be int64, so it must be shown explicitly.
                if self.dtype != torch.get_default_dtype():
                    suffixes.append("dtype=" + str(self.dtype))
                if not custom_contents_provided:
                    tensor_str = "[]"
            else:
                if not PRINT_OPTS.edgeitems:
                    suffixes.append("size=" + str(tuple(self.shape)))

                if not has_default_dtype:
                    suffixes.append("dtype=" + str(self.dtype))

                if not custom_contents_provided:
                    if self.layout != torch.strided:
                        tensor_str = _tensor_str(self.to_dense(), indent)
                    else:
                        tensor_str = _tensor_str(self, indent)

    if self.layout != torch.strided:
        suffixes.append("layout=" + str(self.layout))

    # Use inp here to get the original grad_fn and not the one generated by the forward grad
    # unpacking.
    grad_fn_name = None
    try:
        grad_fn = inp.grad_fn
    except RuntimeError:
        # Accessing the grad_fn calls rebasing logic which would cause an error
        # if that tensor is a view created in no-grad mode modified in-place in
        # no-grad mode. See: https://github.com/pytorch/pytorch/issues/99968
        grad_fn_name = "Invalid"

    if grad_fn_name is None and grad_fn is not None:
        grad_fn_name = type(grad_fn).__name__
        if grad_fn_name == "CppFunction":
            grad_fn_name = grad_fn.name().rsplit("::", 1)[-1]

    if grad_fn_name is not None:
        suffixes.append(f"grad_fn=<{grad_fn_name}>")
    elif inp.requires_grad:
        suffixes.append("requires_grad=True")

    if self.has_names():
        suffixes.append(f"names={self.names}")

    if tangent is not None:
        suffixes.append(f"tangent={tangent}")

    string_repr = _add_suffixes(
        prefix + tensor_str, suffixes, indent, force_newline=self.is_sparse
    )

    # Check if this instance is flagged as a parameter and change the repr accordingly.
    # Unfortunately, this function has to be aware of this detail.
    # NB: This is currently skipped for plain tensor parameters to maintain BC. In the future,
    # this should be done for those as well to produce a valid repr.
    if isinstance(self, torch.nn.Parameter) and not is_plain_tensor:
        string_repr = f"Parameter({string_repr})"

    return string_repr
|
641 |
+
|
642 |
+
|
643 |
+
def _functorch_wrapper_str_intern(tensor, *, tensor_contents=None):
    """Repr for functorch-wrapped tensors (Batched / GradTracking /
    Functional), showing the wrapper level and the unwrapped value's repr."""
    level = torch._C._functorch.maybe_get_level(tensor)
    assert level != -1

    if torch._C._functorch.is_functionaltensor(tensor):
        # Since we're unwrapping the FunctionalTensorWrapper, we need to make sure
        # that it's up to date first
        torch._sync(tensor)

    value = torch._C._functorch.get_unwrapped(tensor)
    value_repr = repr(value)

    indented_value_repr = textwrap.indent(value_repr, " " * 4)
    if torch._C._functorch.is_batchedtensor(tensor):
        bdim = torch._C._functorch.maybe_get_bdim(tensor)
        assert bdim != -1
        return (
            f"BatchedTensor(lvl={level}, bdim={bdim}, value=\n"
            f"{indented_value_repr}\n"
            f")"
        )
    if torch._C._functorch.is_gradtrackingtensor(tensor):
        return (
            f"GradTrackingTensor(lvl={level}, value=\n" f"{indented_value_repr}\n" f")"
        )
    if torch._C._functorch.is_functionaltensor(tensor):
        return f"FunctionalTensor(lvl={level}, value=\\\n{value_repr})"

    raise ValueError("We don't know how to print this, please file us an issue")
|
672 |
+
|
673 |
+
|
674 |
+
def _str(self, *, tensor_contents=None):
    """Entry point behind Tensor.__repr__/__str__.

    Disables autograd, torch-dispatch modes, and functorch so the
    formatting code operates on plain tensor values.
    """
    with torch.no_grad(), torch.utils._python_dispatch._disable_current_modes():
        # The guard must stay referenced for the duration of the call;
        # dropping it re-enables functorch.
        guard = torch._C._DisableFuncTorch()
        return _str_intern(self, tensor_contents=tensor_contents)
|
env-llmeval/lib/python3.10/site-packages/torch/_torch_docs.py
ADDED
The diff for this file is too large to render.
See raw diff
|
|
env-llmeval/lib/python3.10/site-packages/torch/_utils.py
ADDED
@@ -0,0 +1,918 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import copyreg
|
2 |
+
import functools
|
3 |
+
import sys
|
4 |
+
import traceback
|
5 |
+
import warnings
|
6 |
+
from collections import defaultdict
|
7 |
+
from contextlib import nullcontext
|
8 |
+
from typing import Any, DefaultDict, List, Optional
|
9 |
+
|
10 |
+
import torch
|
11 |
+
|
12 |
+
|
13 |
+
def _type(self, dtype=None, non_blocking=False, **kwargs):
    """Returns the type if `dtype` is not provided, else casts this object to
    the specified type.

    If this is already of the correct type, no copy is performed and the
    original object is returned.

    Args:
        dtype (type or string): The desired type
        non_blocking (bool): If ``True``, and the source is in pinned memory
            and destination is on the GPU or vice versa, the copy is performed
            asynchronously with respect to the host. Otherwise, the argument
            has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument. The ``async`` arg is deprecated.
    """
    non_blocking = _get_async_or_non_blocking("type", non_blocking, kwargs)
    if dtype is None:
        # No target type given: report this object's fully-qualified type name.
        return self.__module__ + "." + self.__class__.__name__

    if isinstance(dtype, str):
        dtype = _import_dotted_name(dtype)
    if dtype == type(self):
        return self
    if self.is_sparse:
        if not dtype.is_sparse:
            raise RuntimeError("Cannot cast sparse tensor to dense tensor")
        # Cast indices and values separately, then rebuild the sparse tensor.
        new_module_name = dtype.__module__.replace(".sparse", "")
        new_values_type_name = new_module_name + "." + dtype.__name__
        new_values = torch.Tensor._values(self).type(new_values_type_name, non_blocking)
        new_indices_type_name = new_module_name + ".LongTensor"
        new_indices = torch.Tensor._indices(self).type(
            new_indices_type_name, non_blocking
        )
        return dtype(new_indices, new_values, self.size())
    if dtype.is_sparse:
        raise RuntimeError("Cannot cast dense tensor to sparse tensor")
    return dtype(self.size()).copy_(self, non_blocking)
|
51 |
+
|
52 |
+
|
53 |
+
def _hpu(self, device=None, non_blocking=False, **kwargs):
    """Returns a copy of this object in HPU memory.

    If this object is already in HPU memory and on the correct device, then
    no copy is performed and the original object is returned.

    Args:
        device (int): The destination HPU id. Defaults to the current device.
        non_blocking (bool): If ``True`` and the source is in pinned memory,
            the copy will be asynchronous with respect to the host. Otherwise,
            the argument has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument.
    """
    non_blocking = _get_async_or_non_blocking("hpu", non_blocking, kwargs)
    hpu = getattr(torch, "hpu", None)
    assert hpu is not None, "HPU device module is not loaded"
    if self.is_hpu:
        if device is None:
            device = hpu.current_device()
        if self.get_device() == device:
            return self
    else:
        if device is None:
            # device -1 presumably selects the current device inside
            # hpu.device() — TODO confirm against the HPU backend.
            device = -1
    with hpu.device(device):
        assert not self.is_sparse, "sparse storage is not supported for HPU tensors"
        untyped_storage = torch.UntypedStorage(self.size(), device=torch.device("hpu"))
        untyped_storage.copy_(self, non_blocking)
        return untyped_storage
|
83 |
+
|
84 |
+
|
85 |
+
def _cuda(self, device=None, non_blocking=False, **kwargs):
    """Returns a copy of this object in CUDA memory.

    If this object is already in CUDA memory and on the correct device, then
    no copy is performed and the original object is returned.

    Args:
        device (int): The destination GPU id. Defaults to the current device.
        non_blocking (bool): If ``True`` and the source is in pinned memory,
            the copy will be asynchronous with respect to the host. Otherwise,
            the argument has no effect.
        **kwargs: For compatibility, may contain the key ``async`` in place of
            the ``non_blocking`` argument.
    """
    non_blocking = _get_async_or_non_blocking("cuda", non_blocking, kwargs)
    if self.is_cuda:
        if device is None:
            device = torch.cuda.current_device()
        if self.get_device() == device:
            return self
    else:
        if device is None:
            # -1 leaves the current CUDA device selected in the context below.
            device = -1
    with torch.cuda.device(device):
        if self.is_sparse:
            # Move indices and values separately and rebuild the sparse tensor.
            new_type = getattr(torch.cuda.sparse, self.__class__.__name__)
            indices = torch.Tensor._indices(self).cuda(device, non_blocking)
            values = torch.Tensor._values(self).cuda(device, non_blocking)
            return new_type(indices, values, self.size())
        else:
            untyped_storage = torch.UntypedStorage(
                self.size(), device=torch.device("cuda")
            )
            untyped_storage.copy_(self, non_blocking)
            return untyped_storage
|
120 |
+
|
121 |
+
|
122 |
+
def _get_async_or_non_blocking(function_name, non_blocking, kwargs):
|
123 |
+
"""Return the non-blocking flag given the function name and kwargs.
|
124 |
+
|
125 |
+
Args:
|
126 |
+
function_name (str): the name of the function being used.
|
127 |
+
non_blocking (bool): the default value.
|
128 |
+
**kwargs (dict): the kwargs passed to the function.
|
129 |
+
"""
|
130 |
+
if not kwargs:
|
131 |
+
return non_blocking
|
132 |
+
if len(kwargs) != 1 or "async" not in kwargs:
|
133 |
+
message = "{}() got an unexpected keyword argument '{}'"
|
134 |
+
argument = list(kwargs.keys()).pop()
|
135 |
+
raise TypeError(message.format(function_name, argument))
|
136 |
+
warnings.warn("'async' is deprecated; use 'non_blocking'")
|
137 |
+
return kwargs["async"]
|
138 |
+
|
139 |
+
|
140 |
+
# Note [Don't serialize hooks]
|
141 |
+
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
142 |
+
# Since time immemorial, we have serialized the backward hooks associated with
|
143 |
+
# variables. This kind of half-worked--Python can pickle global functions
|
144 |
+
# (but not closures!)--but there were problems.
|
145 |
+
#
|
146 |
+
# - It's fragile. If you serialize a backward hook into a saved
|
147 |
+
# model, and then you rename the function associated with the hook,
|
148 |
+
# now your saved model is broken and you can't load it anymore.
|
149 |
+
#
|
150 |
+
# - It's not actually used. The standard recommendation is to
|
151 |
+
# serialize the *state_dict* of a model, not the model itself
|
152 |
+
# (since this is more stable to code changes affecting the model
|
153 |
+
# serialization), and the state dict saves "data" only, thus
|
154 |
+
# stripping the backward hooks. In some cases, hooks are
|
155 |
+
# essential to the well-functioning of a model (e.g., DDP),
|
156 |
+
# but DDP already manages readding the hooks!
|
157 |
+
#
|
158 |
+
# - We didn't serialize them in many cases. Prior to #10220, we
|
159 |
+
# were dropping backward hooks in ForkingPickler. We "fixed" this
|
160 |
+
# to be convenient with other serialization sites, but lack of
|
161 |
+
# serializing backward hooks wasn't actually the root cause of
|
162 |
+
# the bug.
|
163 |
+
#
|
164 |
+
# With these cases in mind, we have decided that a better strategy
|
165 |
+
# is to just NOT serialize hooks at all.
|
166 |
+
#
|
167 |
+
# Since this is a BC-breaking change, we should warn when we previously
|
168 |
+
# serialized a hook, but no longer do so. This will be done by adding a special
|
169 |
+
# sentinel property to hooks will be used to suppress this warning. If a hook
|
170 |
+
# has the property _torch_serialize_ignore, we will not emit a warning if we
|
171 |
+
# attempt to serialize a Tensor with this hook attached to it.
|
172 |
+
#
|
173 |
+
# By the way, when _backward_hooks is skipped, we must give an EMPTY
|
174 |
+
# OrderedDict(), if you pass a None you'll run afoul #12219.
|
175 |
+
|
176 |
+
|
177 |
+
# TODO: Once we decide to break serialization FC, `storage` no longer needs to
|
178 |
+
# be a TypedStorage
|
179 |
+
def _rebuild_tensor(storage, storage_offset, size, stride):
|
180 |
+
# first construct a tensor with the correct dtype/device
|
181 |
+
t = torch.tensor([], dtype=storage.dtype, device=storage._untyped_storage.device)
|
182 |
+
return t.set_(storage._untyped_storage, storage_offset, size, stride)
|
183 |
+
|
184 |
+
|
185 |
+
def get_tensor_metadata(tensor):
    """Return a tensor's serialization metadata.

    Currently this is a dict[str, bool] specifying whether the `conj` or
    `neg` bit is set.
    """
    assert isinstance(tensor, torch.Tensor)
    return torch._C._get_tensor_metadata(tensor)  # type: ignore[attr-defined]
|
191 |
+
|
192 |
+
|
193 |
+
def set_tensor_metadata(tensor, metadata):
    """Apply serialization metadata; see `get_tensor_metadata` above."""
    assert isinstance(metadata, dict)
    assert isinstance(tensor, torch.Tensor)
    torch._C._set_tensor_metadata(tensor, metadata)  # type: ignore[attr-defined]
|
198 |
+
|
199 |
+
|
200 |
+
def _rebuild_tensor_v2(
    storage, storage_offset, size, stride, requires_grad, backward_hooks, metadata=None
):
    """Rebuild a tensor (v2 pickle format): geometry, grad flag, metadata, hooks."""
    result = _rebuild_tensor(storage, storage_offset, size, stride)
    result.requires_grad = requires_grad
    if metadata:
        set_tensor_metadata(result, metadata)

    # NB: assigning backward_hooks exists only for backwards compatibility;
    # the general expectation is that it is an empty OrderedDict.
    # See Note [Don't serialize hooks]
    result._backward_hooks = backward_hooks
    return result
|
213 |
+
|
214 |
+
|
215 |
+
def _rebuild_tensor_v3(
|
216 |
+
storage,
|
217 |
+
storage_offset,
|
218 |
+
size,
|
219 |
+
stride,
|
220 |
+
requires_grad,
|
221 |
+
backward_hooks,
|
222 |
+
dtype,
|
223 |
+
metadata=None,
|
224 |
+
):
|
225 |
+
t = torch.tensor(
|
226 |
+
[],
|
227 |
+
dtype=dtype,
|
228 |
+
device=storage._untyped_storage.device,
|
229 |
+
requires_grad=requires_grad,
|
230 |
+
)
|
231 |
+
t.set_(storage._untyped_storage, storage_offset, size, stride)
|
232 |
+
if metadata:
|
233 |
+
set_tensor_metadata(t, metadata)
|
234 |
+
t._backward_hooks = backward_hooks
|
235 |
+
return t
|
236 |
+
|
237 |
+
|
238 |
+
# Module-level accumulator: sparse tensors rebuilt during unpickling are
# appended here and later validated in bulk (the storages they depend on may
# not be unpickled yet at rebuild time).
_sparse_tensors_to_validate: List["torch.Tensor"] = []
|
239 |
+
|
240 |
+
|
241 |
+
# In _legacy_load() in serialization.py we unpickle storages after the sparse
|
242 |
+
# tensors have been already unpickled. Those storages contain data necessary for
|
243 |
+
# validating sparse tensors: indices and values. That's why sparse tensors are
|
244 |
+
# first unpickled without any validation, and then this function is called just
|
245 |
+
# before _legacy_load() returns, so that all the sparse tensors can be validated
|
246 |
+
# in bulk.
|
247 |
+
#
|
248 |
+
# The same procedure must be followed by _load() in serialization.py because due
|
249 |
+
# to Pickler semantics, we have to use the same (non-validating) function for
|
250 |
+
# unpickling sparse tensors, regardless of the caller.
|
251 |
+
def _validate_loaded_sparse_tensors():
    """Run invariant checks on every sparse tensor collected during load.

    The accumulator list is always cleared, even when validation raises, so a
    failed load does not poison subsequent loads.
    """
    try:
        for tensor in _sparse_tensors_to_validate:
            layout = tensor.layout
            if layout is torch.sparse_coo:
                torch._validate_sparse_coo_tensor_args(
                    tensor._indices(),
                    tensor._values(),
                    tensor.size(),
                    tensor.is_coalesced(),
                )
            elif layout in (
                torch.sparse_csr,
                torch.sparse_csc,
                torch.sparse_bsr,
                torch.sparse_bsc,
            ):
                # TODO: Validation currently involves an expensive traversal
                # on CPU, which may include a device transfer.
                if layout in (torch.sparse_csr, torch.sparse_bsr):
                    compressed, plain = tensor.crow_indices(), tensor.col_indices()
                else:
                    compressed, plain = tensor.ccol_indices(), tensor.row_indices()
                torch._validate_sparse_compressed_tensor_args(
                    compressed, plain, tensor.values(), tensor.size(), layout
                )
            else:
                raise NotImplementedError(
                    f"_validate_loaded_sparse_tensors for layout `{layout}`"
                )

    finally:
        _sparse_tensors_to_validate.clear()
|
286 |
+
|
287 |
+
|
288 |
+
def _rebuild_sparse_tensor(layout, data):
    """
    Rebuilds a sparse tensor from its sparse storage representation.

    Args:
        layout (str): The sparse storage layout of the tensor.
        data (tuple): The tensor's sparse storage representation.
    """
    if layout == torch.sparse_coo:
        # Older checkpoints stored only (indices, values, size).
        if len(data) == 3:
            indices, values, size = data
            is_coalesced = None
        else:
            indices, values, size, is_coalesced = data
        tensor = torch.sparse_coo_tensor(
            indices, values, size, check_invariants=False, is_coalesced=is_coalesced
        )
        # Validation is deferred until all storages are available.
        _sparse_tensors_to_validate.append(tensor)
        return tensor

    if layout in (
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    ):
        compressed_indices, plain_indices, values, size = data
        tensor = torch.sparse_compressed_tensor(
            compressed_indices,
            plain_indices,
            values,
            size,
            layout=layout,
            check_invariants=False,
        )
        _sparse_tensors_to_validate.append(tensor)
        return tensor

    raise NotImplementedError(f"rebuilding sparse tensor for layout {layout}")
|
328 |
+
|
329 |
+
|
330 |
+
def _rebuild_nested_tensor(buffer, sizes, strides, storage_offsets):
    # Reconstruct a nested tensor as a view over the flat serialized buffer,
    # using the per-component sizes/strides/offsets captured at save time.
    return torch._nested_view_from_buffer(buffer, sizes, strides, storage_offsets)
|
332 |
+
|
333 |
+
|
334 |
+
def _rebuild_device_tensor_from_numpy(data, dtype, device, requires_grad):
|
335 |
+
tensor = torch.from_numpy(data).to(dtype=dtype, device=device)
|
336 |
+
tensor.requires_grad = requires_grad
|
337 |
+
return tensor
|
338 |
+
|
339 |
+
|
340 |
+
# Legacy alias: kept only so checkpoints saved by older PyTorch versions
# (which pickled XLA tensors via numpy) can still be loaded. Do not use.
_rebuild_xla_tensor = _rebuild_device_tensor_from_numpy
|
342 |
+
|
343 |
+
|
344 |
+
def _rebuild_meta_tensor_no_storage(dtype, size, stride, requires_grad):
|
345 |
+
return torch.empty_strided(
|
346 |
+
size, stride, dtype=dtype, device="meta", requires_grad=requires_grad
|
347 |
+
)
|
348 |
+
|
349 |
+
|
350 |
+
def _rebuild_wrapper_subclass(
|
351 |
+
cls, dtype, size, stride, storage_offset, layout, device, requires_grad
|
352 |
+
):
|
353 |
+
return torch.Tensor._make_wrapper_subclass( # type: ignore[attr-defined]
|
354 |
+
cls,
|
355 |
+
size,
|
356 |
+
strides=stride,
|
357 |
+
storage_offset=storage_offset,
|
358 |
+
layout=layout,
|
359 |
+
device=device,
|
360 |
+
requires_grad=requires_grad,
|
361 |
+
)
|
362 |
+
|
363 |
+
|
364 |
+
# TODO: Once we decide to break serialization FC, `storage` no longer needs to
|
365 |
+
# be a TypedStorage
|
366 |
+
def _rebuild_qtensor(
    storage,
    storage_offset,
    size,
    stride,
    quantizer_params,
    requires_grad,
    backward_hooks,
):
    """Rebuild a quantized tensor from storage plus quantizer parameters.

    ``quantizer_params`` is a tuple whose first element is the qscheme; the
    remaining elements depend on it (per-tensor: scale, zero_point;
    per-channel: scales, zero_points, axis).

    Raises:
        RuntimeError: for unsupported qschemes.
    """
    qscheme = quantizer_params[0]
    if qscheme == torch.per_tensor_affine:
        _, scale, zero_point = quantizer_params
        tensor = torch._empty_affine_quantized(
            size,
            scale=scale,
            zero_point=zero_point,
            dtype=storage.dtype,
            device=storage.device,
        )
    elif qscheme in (torch.per_channel_affine, torch.per_channel_affine_float_qparams):
        _, scales, zero_points, axis = quantizer_params
        # Handles scales/zero_points given as Python lists by converting them
        # to tensors of the dtype each qscheme expects.
        if type(scales) is list and type(zero_points) is list:
            if qscheme == torch.per_channel_affine:
                scales = torch.tensor(scales, dtype=torch.double, device=storage.device)
                zero_points = torch.tensor(
                    zero_points, dtype=torch.long, device=storage.device
                )
            else:
                scales = torch.tensor(scales, dtype=torch.float, device=storage.device)
                zero_points = torch.tensor(
                    zero_points, dtype=torch.float, device=storage.device
                )
        tensor = torch._empty_per_channel_affine_quantized(
            size,
            scales=scales,
            zero_points=zero_points,
            axis=axis,
            dtype=storage.dtype,
            device=storage.device,
        )
    else:
        raise RuntimeError(f"Can't deserialize quantized tensor with qscheme {qscheme}")
    # Point the freshly allocated quantized tensor at the serialized storage.
    tensor.set_(storage, storage_offset, size, stride)
    tensor.requires_grad = requires_grad
    # NB: This line exists only for backwards compatibility; the
    # general expectation is that backward_hooks is an empty
    # OrderedDict. See Note [Don't serialize hooks]
    tensor._backward_hooks = backward_hooks
    return tensor
|
415 |
+
|
416 |
+
|
417 |
+
def _rebuild_parameter(data, requires_grad, backward_hooks):
|
418 |
+
param = torch.nn.Parameter(data, requires_grad)
|
419 |
+
# NB: This line exists only for backwards compatibility; the
|
420 |
+
# general expectation is that backward_hooks is an empty
|
421 |
+
# OrderedDict. See Note [Don't serialize hooks]
|
422 |
+
param._backward_hooks = backward_hooks
|
423 |
+
|
424 |
+
return param
|
425 |
+
|
426 |
+
|
427 |
+
def _rebuild_parameter_with_state(data, requires_grad, backward_hooks, state):
    """Rebuild an ``nn.Parameter`` subclass, restoring its Python-level state."""
    result = torch.nn.Parameter(data, requires_grad)
    # Kept only for backwards compatibility; see Note [Don't serialize hooks]
    result._backward_hooks = backward_hooks

    # Reapply the __dict__/slots captured at save time, like object.__setstate__.
    return _set_obj_state(result, state)
|
437 |
+
|
438 |
+
|
439 |
+
def _get_obj_state(obj):
|
440 |
+
# Get the state of the python subclass
|
441 |
+
# This loosely mimicks the function on the object class but since Tensor do not inherit
|
442 |
+
# from it, we cannot call that function directly
|
443 |
+
# https://github.com/python/cpython/blob/c83919bd635f4433f1c6ae8504996a9fe3c215e5/Objects/typeobject.c#L4891
|
444 |
+
# Note that starting with Python 3.11, this `__getstate__` is always defined and thus
|
445 |
+
# the else branch will never be taken.
|
446 |
+
getstate_fn = getattr(obj, "__getstate__", None)
|
447 |
+
if getstate_fn:
|
448 |
+
state = getstate_fn()
|
449 |
+
else:
|
450 |
+
slots_to_save = copyreg._slotnames(obj.__class__) # type: ignore[attr-defined]
|
451 |
+
if slots_to_save:
|
452 |
+
state = (
|
453 |
+
obj.__dict__,
|
454 |
+
{
|
455 |
+
name: getattr(obj, name)
|
456 |
+
for name in slots_to_save
|
457 |
+
if hasattr(obj, name)
|
458 |
+
},
|
459 |
+
)
|
460 |
+
else:
|
461 |
+
state = obj.__dict__
|
462 |
+
|
463 |
+
return state
|
464 |
+
|
465 |
+
|
466 |
+
def _set_obj_state(obj, state):
|
467 |
+
if isinstance(state, tuple):
|
468 |
+
if not len(state) == 2:
|
469 |
+
raise RuntimeError(f"Invalid serialized state: {state}")
|
470 |
+
dict_state = state[0]
|
471 |
+
slots_state = state[1]
|
472 |
+
else:
|
473 |
+
dict_state = state
|
474 |
+
slots_state = None
|
475 |
+
|
476 |
+
# Starting with Python 3.11, the __dict__ attribute is lazily created
|
477 |
+
# and is serialized as None when not needed.
|
478 |
+
if dict_state:
|
479 |
+
for k, v in dict_state.items():
|
480 |
+
setattr(obj, k, v)
|
481 |
+
|
482 |
+
if slots_state:
|
483 |
+
for k, v in slots_state.items():
|
484 |
+
setattr(obj, k, v)
|
485 |
+
return obj
|
486 |
+
|
487 |
+
|
488 |
+
def _import_dotted_name(name):
|
489 |
+
components = name.split(".")
|
490 |
+
obj = __import__(components[0])
|
491 |
+
for component in components[1:]:
|
492 |
+
obj = getattr(obj, component)
|
493 |
+
return obj
|
494 |
+
|
495 |
+
|
496 |
+
# Taken from python 3.5 docs
|
497 |
+
def _accumulate(iterable, fn=lambda x, y: x + y):
|
498 |
+
"Return running totals"
|
499 |
+
# _accumulate([1,2,3,4,5]) --> 1 3 6 10 15
|
500 |
+
# _accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120
|
501 |
+
it = iter(iterable)
|
502 |
+
try:
|
503 |
+
total = next(it)
|
504 |
+
except StopIteration:
|
505 |
+
return
|
506 |
+
yield total
|
507 |
+
for element in it:
|
508 |
+
total = fn(total, element)
|
509 |
+
yield total
|
510 |
+
|
511 |
+
|
512 |
+
def _flatten_dense_tensors(tensors):
|
513 |
+
"""Flatten dense tensors into a contiguous 1D buffer. Assume tensors are of
|
514 |
+
same dense type.
|
515 |
+
|
516 |
+
Since inputs are dense, the resulting tensor will be a concatenated 1D
|
517 |
+
buffer. Element-wise operation on this buffer will be equivalent to
|
518 |
+
operating individually.
|
519 |
+
|
520 |
+
Args:
|
521 |
+
tensors (Iterable[Tensor]): dense tensors to flatten.
|
522 |
+
|
523 |
+
Returns:
|
524 |
+
A contiguous 1D buffer containing input tensors.
|
525 |
+
"""
|
526 |
+
return torch._C._nn.flatten_dense_tensors(tensors)
|
527 |
+
|
528 |
+
|
529 |
+
def _flatten_sparse_tensors(tensors):
|
530 |
+
"""Flatten sparse tensors into two contiguous 1D buffers, one of indices and
|
531 |
+
one of values. Assume tensors are of same sparse type.
|
532 |
+
|
533 |
+
Args:
|
534 |
+
tensors (Iterable[Tensor]): sparse tensors to flatten.
|
535 |
+
|
536 |
+
Returns:
|
537 |
+
A tuple of two contiguous 1D buffers, one containing input tensors'
|
538 |
+
indices and the other containing the values.
|
539 |
+
"""
|
540 |
+
flat_indices = torch._C._nn.flatten_dense_tensors(
|
541 |
+
[torch.Tensor._indices(t) for t in tensors]
|
542 |
+
)
|
543 |
+
flat_values = torch._C._nn.flatten_dense_tensors(
|
544 |
+
[torch.Tensor._values(t) for t in tensors]
|
545 |
+
)
|
546 |
+
return flat_indices, flat_values
|
547 |
+
|
548 |
+
|
549 |
+
def _unflatten_dense_tensors(flat, tensors):
|
550 |
+
"""View a flat buffer using the sizes of tensors. Assume that tensors are of
|
551 |
+
same dense type, and that flat is given by _flatten_dense_tensors.
|
552 |
+
|
553 |
+
Args:
|
554 |
+
flat (Tensor): flattened dense tensors to unflatten.
|
555 |
+
tensors (Iterable[Tensor]): dense tensors whose sizes will be used to
|
556 |
+
unflatten flat.
|
557 |
+
|
558 |
+
Returns:
|
559 |
+
Unflattened dense tensors with sizes same as tensors and values from
|
560 |
+
flat.
|
561 |
+
"""
|
562 |
+
return torch._C._nn.unflatten_dense_tensors(flat, tensors)
|
563 |
+
|
564 |
+
|
565 |
+
def _unflatten_sparse_tensors(flat, tensors):
|
566 |
+
"""View flat buffer (containing indices and values) using the sizes of
|
567 |
+
tensors. Assume that tensors are of same sparse type, and that flat is given
|
568 |
+
by _flatten_sparse_tensors.
|
569 |
+
|
570 |
+
Args:
|
571 |
+
flat (tuple(Tensor, Tensor)): flattened indices and values of sparse
|
572 |
+
tensors to unflatten.
|
573 |
+
tensors (Iterable[Tensor]): sparse tensors whose sizes will be used to
|
574 |
+
unflatten flat.
|
575 |
+
|
576 |
+
Returns:
|
577 |
+
Unflattened sparse tensors with sizes same as tensors and values from
|
578 |
+
flat.
|
579 |
+
"""
|
580 |
+
flat_indices, flat_values = flat
|
581 |
+
indices = torch._C._nn.unflatten_dense_tensors(
|
582 |
+
flat_indices, [torch.Tensor._indices(t) for t in tensors]
|
583 |
+
)
|
584 |
+
values = torch._C._nn.unflatten_dense_tensors(
|
585 |
+
flat_values, [torch.Tensor._values(t) for t in tensors]
|
586 |
+
)
|
587 |
+
outputs = []
|
588 |
+
for t, i, v in zip(tensors, indices, values):
|
589 |
+
outputs.append(t.new(i, v, t.size()))
|
590 |
+
return tuple(outputs)
|
591 |
+
|
592 |
+
|
593 |
+
def _reorder_tensors_as(tensors, ordered_tensors):
|
594 |
+
"""Assume that tensors are of same order as ordered_tensors within their
|
595 |
+
types, e.g., from _take_tensors. Reorder them to be of same order as
|
596 |
+
ordered_tensors.
|
597 |
+
|
598 |
+
Args:
|
599 |
+
tensors (Iterable[Tensor]): tensors to be reordered. They should be of
|
600 |
+
the same order as ordered_tensors within their own types.
|
601 |
+
ordered_tensors (Iterable[Tensor]): tensors whose order will be the
|
602 |
+
reference.
|
603 |
+
|
604 |
+
Returns:
|
605 |
+
Ordered tuple of tensors with contents from tensors and order of
|
606 |
+
ordered_tensors.
|
607 |
+
"""
|
608 |
+
type_dict = defaultdict(list)
|
609 |
+
for tensor in tensors:
|
610 |
+
type_dict[tensor.type()].append(tensor)
|
611 |
+
type_dict_ = {t: iter(coll) for t, coll in type_dict.items()}
|
612 |
+
return tuple(next(type_dict_[tensor.type()]) for tensor in ordered_tensors)
|
613 |
+
|
614 |
+
|
615 |
+
def _take_tensors(tensors, size_limit):
|
616 |
+
"""Group tensors into chunks. This generator yields a chunk at each time,
|
617 |
+
each containing tensors of same type up to certain byte limit in total size.
|
618 |
+
|
619 |
+
Args:
|
620 |
+
tensors (Sequence): A sequence of tensors to be separated into chunks.
|
621 |
+
size_limit (int): The limit of each chunk in bytes.
|
622 |
+
|
623 |
+
Yields:
|
624 |
+
Blocks of tensors of same type and within size_limit. The yielded
|
625 |
+
tensors are only ordered as the original sequence within its types.
|
626 |
+
"""
|
627 |
+
buf_dict: DefaultDict[str, List] = defaultdict(lambda: [[], 0])
|
628 |
+
for tensor in tensors:
|
629 |
+
t = tensor.type()
|
630 |
+
if tensor.is_sparse:
|
631 |
+
indices = torch.Tensor._indices(tensor)
|
632 |
+
values = torch.Tensor._values(tensor)
|
633 |
+
size = (
|
634 |
+
indices.numel() * indices.element_size()
|
635 |
+
+ values.numel() * values.element_size()
|
636 |
+
)
|
637 |
+
else:
|
638 |
+
size = tensor.numel() * tensor.element_size()
|
639 |
+
buf_and_size = buf_dict[t]
|
640 |
+
if buf_and_size[1] + size > size_limit and buf_and_size[1] > 0:
|
641 |
+
yield buf_and_size[0]
|
642 |
+
buf_and_size = buf_dict[t] = [[], 0]
|
643 |
+
buf_and_size[0].append(tensor)
|
644 |
+
buf_and_size[1] += size
|
645 |
+
for buf, _ in buf_dict.values():
|
646 |
+
if len(buf) > 0:
|
647 |
+
yield buf
|
648 |
+
|
649 |
+
|
650 |
+
# annotation decorator to get annotations in a way that is compatible
|
651 |
+
# with both Python 2 and 3
|
652 |
+
# annotation decorator to get annotations in a way that is compatible
# with both Python 2 and 3
def annotate(ret, **kwargs):
    def dec(fun):
        annotations = dict(kwargs)
        annotations["return"] = ret
        fun.__annotations__ = annotations
        return fun

    return dec
|
659 |
+
|
660 |
+
|
661 |
+
def render_call(fn, args, kwargs):
|
662 |
+
str_fn = torch.overrides.resolve_name(fn)
|
663 |
+
if str_fn is None:
|
664 |
+
str_fn = str(fn)
|
665 |
+
|
666 |
+
str_args: List[str] = []
|
667 |
+
with torch._tensor_str.printoptions(threshold=0, edgeitems=0):
|
668 |
+
str_args.extend(repr(a) for a in args)
|
669 |
+
str_args.extend(f"{k}={repr(v)}" for k, v in kwargs.items())
|
670 |
+
r = f"{str_fn}({', '.join(str_args)})"
|
671 |
+
return r
|
672 |
+
|
673 |
+
|
674 |
+
# NOTE [ Python Traceback Reference Cycle Problem ]
|
675 |
+
#
|
676 |
+
# When using sys.exc_info(), it is important to **not** store the exc_info[2],
|
677 |
+
# which is the traceback, because otherwise you will run into the traceback
|
678 |
+
# reference cycle problem, i.e., the traceback holding reference to the frame,
|
679 |
+
# and the frame (which holds reference to all the object in its temporary scope)
|
680 |
+
# holding reference the traceback.
|
681 |
+
|
682 |
+
|
683 |
+
class KeyErrorMessage(str):
    r"""str subclass whose repr is the string itself (no quoting)."""

    def __repr__(self):
        return self
|
688 |
+
|
689 |
+
|
690 |
+
class ExceptionWrapper:
    r"""Wraps an exception plus traceback to communicate across threads"""

    def __init__(self, exc_info=None, where="in background"):
        # It is important that we don't store exc_info, see
        # NOTE [ Python Traceback Reference Cycle Problem ]
        if exc_info is None:
            exc_info = sys.exc_info()
        self.exc_type = exc_info[0]
        self.exc_msg = "".join(traceback.format_exception(*exc_info))
        self.where = where

    def reraise(self):
        r"""Reraises the wrapped exception in the current thread"""
        # Prefix the original formatted traceback with where it was caught.
        msg = f"Caught {self.exc_type.__name__} {self.where}.\nOriginal {self.exc_msg}"
        if self.exc_type == KeyError:
            # KeyError repr()s its argument (usually a dict key), which makes
            # the combined message unreadable; this won't change in Python
            # (https://bugs.python.org/issue2651), so we work around it.
            msg = KeyErrorMessage(msg)
        elif getattr(self.exc_type, "message", None):
            # Some exception types take a non-str first argument but expose an
            # explicit `message` keyword instead.
            raise self.exc_type(message=msg)
        try:
            rebuilt = self.exc_type(msg)
        except TypeError:
            # The constructor needs arguments we don't know how to supply;
            # fall back to a plain RuntimeError carrying the message.
            raise RuntimeError(msg) from None
        raise rebuilt
|
723 |
+
|
724 |
+
|
725 |
+
def _get_available_device_type():
|
726 |
+
if torch.cuda.is_available():
|
727 |
+
return "cuda"
|
728 |
+
if hasattr(torch, "xpu") and torch.xpu.is_available(): # type: ignore[attr-defined]
|
729 |
+
return "xpu"
|
730 |
+
custom_backend_name = torch._C._get_privateuse1_backend_name()
|
731 |
+
custom_device_mod = getattr(torch, custom_backend_name, None)
|
732 |
+
if custom_device_mod and custom_device_mod.is_available():
|
733 |
+
return custom_backend_name
|
734 |
+
# add more available device types here
|
735 |
+
return None
|
736 |
+
|
737 |
+
|
738 |
+
def _get_device_attr(get_member):
    """Apply ``get_member`` to the module of the available device type, if any."""
    device_type = _get_available_device_type()
    if device_type is None:
        return None
    lowered = device_type.lower()
    if lowered == "cuda":
        return get_member(torch.cuda)
    if lowered == "xpu":
        return get_member(torch.xpu)  # type: ignore[attr-defined]
    if device_type == torch._C._get_privateuse1_backend_name():
        return get_member(getattr(torch, device_type))
    # add more available device types here
    return None
|
748 |
+
|
749 |
+
|
750 |
+
def _get_current_device_index():
    # current device index
    # Returns None when no accelerator backend is available
    # (see _get_device_attr).
    return _get_device_attr(lambda m: m.current_device())
|
753 |
+
|
754 |
+
|
755 |
+
def _get_all_device_indices():
    # all device index
    # List [0, device_count) for the available device type, or None if none.
    return _get_device_attr(lambda m: list(range(m.device_count())))
|
758 |
+
|
759 |
+
|
760 |
+
def _get_devices_properties(device_ids):
    # all device properties
    # One get_device_properties(...) result per requested device id.
    return [_get_device_attr(lambda m: m.get_device_properties(i)) for i in device_ids]
|
763 |
+
|
764 |
+
|
765 |
+
def get_current_device_index() -> int:
    r"""Checks if there are CUDA devices available and
    returns the device index of the current default CUDA device.
    Returns -1 in case there are no CUDA devices available.
    Arguments: ``None``
    """
    # Kept simple on purpose: per the comments in _get_device_index, this
    # function must remain TorchScript-scriptable.
    return torch.cuda.current_device() if torch.cuda.device_count() > 0 else -1
|
774 |
+
|
775 |
+
|
776 |
+
def _get_device_index(
    device: Any, optional: bool = False, allow_cpu: bool = False
) -> int:
    r"""Gets the device index from :attr:`device`, which can be a torch.device
    object, a Python integer, or ``None``.

    If :attr:`device` is a torch.device object, returns the device index if it
    has index. Note that for a device without a specified index,
    i.e., ``torch.device('xxx')``, this will return the current default
    device of that type if :attr:`optional` is ``True``. If :attr:`allow_cpu` is ``True``,
    CPU devices will be accepted and ``-1`` will be returned in this case.

    If :attr:`device` is a Python integer, it is returned as is.

    If :attr:`device` is ``None``, this will return the current default
    device of the supported runtime platform if :attr:`optional` is ``True``.
    i.e., the current default CUDA device will be returned if CUDA runtime is supported.
    """
    if isinstance(device, str):
        # Normalize string specs like "cuda:1" to a torch.device first.
        device = torch.device(device)
    device_idx: Optional[int] = None
    if isinstance(device, torch.device):
        if not allow_cpu and device.type == "cpu":
            raise ValueError(f"Expected a non cpu device, but got: {device}")
        # CPU maps to -1; other device types may leave this None when no
        # index was specified (e.g. torch.device('cuda')).
        device_idx = -1 if device.type == "cpu" else device.index
    if isinstance(device, int):
        device_idx = device
    if device_idx is None:
        if optional:
            # The eager API _get_current_device_index uses `lambda` functions which are
            # not supported in JIT and hence not scriptable. The JIT equivalent API to get
            # the current device index is `get_current_device_index()` which can
            # be scripted. We use is_scripting to check the mode we are in and call the
            # appropriate API.
            if torch.jit.is_scripting():
                device_idx = get_current_device_index()
            else:
                device_idx = _get_current_device_index()
        else:
            raise ValueError(
                f"Expected a torch.device with a specified index or an integer, but got:{device}"
            )
    return device_idx
|
819 |
+
|
820 |
+
|
821 |
+
def _handle_complex(tensor):
|
822 |
+
"""
|
823 |
+
Returns a real view of a tensor if complex dtype else just the tensor
|
824 |
+
need to check if a UninitializedParameter because otherwise checking is_complex is an error for a LazyModule
|
825 |
+
"""
|
826 |
+
return (
|
827 |
+
torch.view_as_real(tensor)
|
828 |
+
if not isinstance(tensor, torch.nn.UninitializedParameter)
|
829 |
+
and tensor.is_complex()
|
830 |
+
else tensor
|
831 |
+
)
|
832 |
+
|
833 |
+
|
834 |
+
def _element_size(dtype):
|
835 |
+
"""
|
836 |
+
Returns the element size for a dtype, in bytes
|
837 |
+
"""
|
838 |
+
if not isinstance(dtype, torch.dtype):
|
839 |
+
raise RuntimeError(f"expected torch.dtype, but got {type(dtype)}")
|
840 |
+
|
841 |
+
if dtype.is_complex:
|
842 |
+
return torch.finfo(dtype).bits >> 2
|
843 |
+
elif dtype.is_floating_point:
|
844 |
+
return torch.finfo(dtype).bits >> 3
|
845 |
+
elif dtype == torch.bool:
|
846 |
+
# NOTE: torch.bool is not supported in torch.iinfo()
|
847 |
+
return 1
|
848 |
+
else:
|
849 |
+
return torch.iinfo(dtype).bits >> 3
|
850 |
+
|
851 |
+
|
852 |
+
class _ClassPropertyDescriptor:
|
853 |
+
def __init__(self, fget, fset=None):
|
854 |
+
self.fget = fget
|
855 |
+
|
856 |
+
def __get__(self, instance, owner=None):
|
857 |
+
if owner is None:
|
858 |
+
owner = type(instance)
|
859 |
+
return self.fget.__get__(instance, owner)()
|
860 |
+
|
861 |
+
|
862 |
+
def classproperty(func):
    """Decorator turning ``func`` into a read-only class-level property."""
    wrapped = func if isinstance(func, (classmethod, staticmethod)) else classmethod(func)
    return _ClassPropertyDescriptor(wrapped)
|
866 |
+
|
867 |
+
|
868 |
+
# Whether we are compiling with torch.compile or not
|
869 |
+
def is_compiling():
    # Constant False in eager mode; presumably intercepted/replaced by the
    # torch.compile machinery when tracing — TODO confirm, not visible here.
    return False
|
871 |
+
|
872 |
+
|
873 |
+
def _functionalize_sync(t):
    """Synchronize pending functionalization updates on ``t``.

    Lives in Python instead of C++ since conditioning on a certain Python
    subclass is much more of a pain in C++.

    Fix: the original computed a ``ctx`` context manager
    (``maybe_disable_functional_mode``/``nullcontext``) that was never used;
    the dead local and its unused import are removed.
    """
    from torch._subclasses.functional_tensor import FunctionalTensor

    if isinstance(t, FunctionalTensor):
        # If a FunctionalTensorMode is active while syncing, we don't want it to intercept any ops that get called
        # when we sync our inner tensor.
        # Why?
        # (1) If there are input mutations in the graph, then they will be re-applied during
        #     AOTAutograd when we call _sync() from inside of our functionalization kernels.
        # (2) _sync() causes us to regenerate the updated tensor from the updated base,
        #     which dispatches to a bunch of view ops
        # (3) The input to these view ops is our inner FunctionalTensorWrapper
        #     (since the sync was called from C++), not the python FunctionalTensor
        # (4) if a python FunctionalTensorMode is active, it will complain when it intercepts
        #     the view op, since it will see an input that is a C++ FunctionalTensorWrapper
        #     (aka a normal torch.Tensor) instead of a python `FunctionalTensor`.
        maybe_functional_mode = torch._C._unset_dispatch_mode(
            torch._C._TorchDispatchModeKey.FUNCTIONAL
        )
        try:
            torch._functionalize_sync(t.elem)  # type: ignore[attr-defined]
        finally:
            # Restore the mode we popped (if any), even when sync raised.
            if maybe_functional_mode is not None:
                torch._C._set_dispatch_mode(maybe_functional_mode)
    else:
        torch._functionalize_sync(t)  # type: ignore[attr-defined]
|
909 |
+
|
910 |
+
|
911 |
+
@functools.lru_cache(2)
|
912 |
+
def _get_device_module(device_type: str):
|
913 |
+
device_module = getattr(torch, device_type, None)
|
914 |
+
if device_module is None:
|
915 |
+
raise RuntimeError(
|
916 |
+
f"Device '{device_type}' does not have a corresponding module registered as 'torch.{device_type}'."
|
917 |
+
)
|
918 |
+
return device_module
|
env-llmeval/lib/python3.10/site-packages/torch/_vmap_internals.py
ADDED
@@ -0,0 +1,237 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import functools
|
2 |
+
import warnings
|
3 |
+
from typing import Any, Callable, List, Optional, Tuple, Union
|
4 |
+
|
5 |
+
import torch
|
6 |
+
from torch import Tensor
|
7 |
+
from torch.utils._pytree import _broadcast_to_and_flatten, tree_flatten, tree_unflatten
|
8 |
+
|
9 |
+
# in_dims: a single int applied to every input, or a (possibly nested) tuple
# mirroring the input structure; out_dims: an int or tuple of ints.
in_dims_t = Union[int, Tuple]
out_dims_t = Union[int, Tuple[int, ...]]
|
11 |
+
|
12 |
+
|
13 |
+
# Checks that all args-to-be-batched have the same batch dim size
|
14 |
+
def _validate_and_get_batch_size(
|
15 |
+
flat_in_dims: List[Optional[int]], flat_args: List
|
16 |
+
) -> int:
|
17 |
+
batch_sizes = [
|
18 |
+
arg.size(in_dim)
|
19 |
+
for in_dim, arg in zip(flat_in_dims, flat_args)
|
20 |
+
if in_dim is not None
|
21 |
+
]
|
22 |
+
if batch_sizes and any(size != batch_sizes[0] for size in batch_sizes):
|
23 |
+
raise ValueError(
|
24 |
+
f"vmap: Expected all tensors to have the same size in the mapped "
|
25 |
+
f"dimension, got sizes {batch_sizes} for the mapped dimension"
|
26 |
+
)
|
27 |
+
return batch_sizes[0]
|
28 |
+
|
29 |
+
|
30 |
+
def _num_outputs(batched_outputs: Union[Tensor, Tuple[Tensor, ...]]) -> int:
|
31 |
+
if isinstance(batched_outputs, tuple):
|
32 |
+
return len(batched_outputs)
|
33 |
+
return 1
|
34 |
+
|
35 |
+
|
36 |
+
# If value is a tuple, check it has length `num_elements`.
|
37 |
+
# If value is not a tuple, make a tuple with `value` repeated `num_elements` times
|
38 |
+
def _as_tuple(
|
39 |
+
value: Any, num_elements: int, error_message_lambda: Callable[[], str]
|
40 |
+
) -> Tuple:
|
41 |
+
if not isinstance(value, tuple):
|
42 |
+
return (value,) * num_elements
|
43 |
+
if len(value) != num_elements:
|
44 |
+
raise ValueError(error_message_lambda())
|
45 |
+
return value
|
46 |
+
|
47 |
+
|
48 |
+
# Creates BatchedTensors for every Tensor in arg that should be batched.
# Returns the (potentially) batched arguments and the batch_size.
def _create_batched_inputs(
    in_dims: in_dims_t, args: Tuple, vmap_level: int, func: Callable
) -> Tuple[Tuple, int]:
    """Validate ``in_dims`` against ``args`` and wrap the mapped Tensors.

    ``in_dims`` is broadcast over the (pytree-flattened) structure of ``args``;
    every Tensor with a non-None in_dim is wrapped via ``torch._add_batch_dim``
    at ``vmap_level``. ``func`` is only used to build error messages.

    Returns:
        A pair of (args with mapped Tensors replaced by batched Tensors,
        common batch size).

    Raises:
        ValueError: on any structural or type mismatch between ``in_dims``
            and ``args`` (each check below reports a specific message).
    """
    # in_dims must be an int (applied to every arg) or a nested tuple mirroring args.
    if not isinstance(in_dims, int) and not isinstance(in_dims, tuple):
        raise ValueError(
            f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
            f"expected `in_dims` to be int or a (potentially nested) tuple "
            f"matching the structure of inputs, got: {type(in_dims)}."
        )
    if len(args) == 0:
        raise ValueError(
            f"vmap({_get_name(func)})(<inputs>): got no inputs. Maybe you forgot to add "
            f"inputs, or you are trying to vmap over a function with no inputs. "
            f"The latter is unsupported."
        )

    flat_args, args_spec = tree_flatten(args)
    # Broadcast a scalar/partial in_dims spec across the full args structure;
    # None here means the two structures are incompatible.
    flat_in_dims = _broadcast_to_and_flatten(in_dims, args_spec)
    if flat_in_dims is None:
        raise ValueError(
            f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
            f"in_dims is not compatible with the structure of `inputs`. "
            f"in_dims has structure {tree_flatten(in_dims)[1]} but inputs "
            f"has structure {args_spec}."
        )

    # Per-argument validation: each in_dim must be None (not mapped) or an
    # integer dimension that is in range for the corresponding Tensor.
    for arg, in_dim in zip(flat_args, flat_in_dims):
        if not isinstance(in_dim, int) and in_dim is not None:
            raise ValueError(
                f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
                f"Got in_dim={in_dim} for an input but in_dim must be either "
                f"an integer dimension or None."
            )
        if isinstance(in_dim, int) and not isinstance(arg, Tensor):
            raise ValueError(
                f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
                f"Got in_dim={in_dim} for an input but the input is of type "
                f"{type(arg)}. We cannot vmap over non-Tensor arguments, "
                f"please use None as the respective in_dim"
            )
        if in_dim is not None and (in_dim < 0 or in_dim >= arg.dim()):
            # Note: negative dims are rejected rather than normalized.
            raise ValueError(
                f"vmap({_get_name(func)}, in_dims={in_dims}, ...)(<inputs>): "
                f"Got in_dim={in_dim} for some input, but that input is a Tensor "
                f"of dimensionality {arg.dim()} so expected in_dim to satisfy "
                f"0 <= in_dim < {arg.dim()}."
            )

    batch_size = _validate_and_get_batch_size(flat_in_dims, flat_args)
    # See NOTE [Ignored _remove_batch_dim, _add_batch_dim]
    batched_inputs = [
        arg if in_dim is None else torch._add_batch_dim(arg, in_dim, vmap_level)
        for in_dim, arg in zip(flat_in_dims, flat_args)
    ]
    return tree_unflatten(batched_inputs, args_spec), batch_size
|
105 |
+
|
106 |
+
|
107 |
+
# Undos the batching (and any batch dimensions) associated with the `vmap_level`.
def _unwrap_batched(
    batched_outputs: Union[Tensor, Tuple[Tensor, ...]],
    out_dims: out_dims_t,
    vmap_level: int,
    batch_size: int,
    func: Callable,
    allow_none_pass_through: bool = False,
) -> Tuple:
    """Move each output's batch dim (at ``vmap_level``) to its requested ``out_dims`` slot.

    ``out_dims`` may be a single int (applied to every output) or a tuple with
    one entry per output; ``func`` is only used for error messages. When
    ``allow_none_pass_through`` is True, ``None`` outputs are forwarded
    unchanged (used when wrapping autograd.grad, which may return None for
    unused inputs).
    """
    num_outputs = _num_outputs(batched_outputs)
    out_dims_as_tuple = _as_tuple(
        out_dims,
        num_outputs,
        lambda: f"vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must "
        f"have one dim per output (got {num_outputs} outputs) of {_get_name(func)}.",
    )

    # NOTE [Ignored _remove_batch_dim, _add_batch_dim]
    # There is something wrong with our type bindings for functions that begin
    # with '_', see #40397.
    if isinstance(batched_outputs, Tensor):
        # Single-output case: return the unwrapped Tensor itself, not a tuple.
        out_dim = out_dims_as_tuple[0]
        return torch._remove_batch_dim(batched_outputs, vmap_level, batch_size, out_dim)  # type: ignore[return-value]
    if allow_none_pass_through:
        return tuple(
            (
                torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)
                if out is not None
                else None
            )
            for out, out_dim in zip(batched_outputs, out_dims_as_tuple)
        )
    else:
        return tuple(
            torch._remove_batch_dim(out, vmap_level, batch_size, out_dim)
            for out, out_dim in zip(batched_outputs, out_dims_as_tuple)
        )
|
144 |
+
|
145 |
+
|
146 |
+
# Checks that `fn` returned one or more Tensors and nothing else.
|
147 |
+
# NB: A python function that return multiple arguments returns a single tuple,
|
148 |
+
# so we are effectively checking that `outputs` is a single Tensor or a tuple of
|
149 |
+
# Tensors.
|
150 |
+
def _validate_outputs(outputs: Any, func: Callable) -> None:
|
151 |
+
if isinstance(outputs, Tensor):
|
152 |
+
return
|
153 |
+
if not isinstance(outputs, tuple):
|
154 |
+
raise ValueError(
|
155 |
+
f"vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return "
|
156 |
+
f"Tensors, got type {type(outputs)} as the return."
|
157 |
+
)
|
158 |
+
for idx, output in enumerate(outputs):
|
159 |
+
if isinstance(output, Tensor):
|
160 |
+
continue
|
161 |
+
raise ValueError(
|
162 |
+
f"vmap({_get_name(func)}, ...): `{_get_name(func)}` must only return "
|
163 |
+
f"Tensors, got type {type(output)} for return {idx}."
|
164 |
+
)
|
165 |
+
|
166 |
+
|
167 |
+
def _check_out_dims_is_int_or_int_tuple(out_dims: out_dims_t, func: Callable) -> None:
    """Raise ValueError unless ``out_dims`` is an int or a tuple of ints."""
    if isinstance(out_dims, int):
        return
    is_int_tuple = isinstance(out_dims, tuple) and all(
        isinstance(out_dim, int) for out_dim in out_dims
    )
    if not is_int_tuple:
        raise ValueError(
            f"vmap({_get_name(func)}, ..., out_dims={out_dims}): `out_dims` must be "
            f"an int or a tuple of int representing where in the outputs the "
            f"vmapped dimension should appear."
        )
|
178 |
+
|
179 |
+
|
180 |
+
def _get_name(func: Callable):
|
181 |
+
if hasattr(func, "__name__"):
|
182 |
+
return func.__name__
|
183 |
+
|
184 |
+
# Not all callables have __name__, in fact, only static functions/methods do.
|
185 |
+
# A callable created via functools.partial or an nn.Module, to name some
|
186 |
+
# examples, don't have a __name__.
|
187 |
+
return repr(func)
|
188 |
+
|
189 |
+
|
190 |
+
# vmap(func)(inputs) wraps every batched Tensor input in a BatchedTensor, calls
# func on the wrapped inputs, and unwraps the BatchedTensor outputs; operations
# on BatchedTensors carry out the batched computation the user asked for.
def vmap(func: Callable, in_dims: in_dims_t = 0, out_dims: out_dims_t = 0) -> Callable:
    """
    Please use torch.vmap instead of this API.
    """
    # Deprecated entry point: identical to _vmap, plus a pointer to torch.vmap.
    deprecation_notice = "Please use torch.vmap instead of torch._vmap_internals.vmap. "
    warnings.warn(deprecation_notice, stacklevel=2)
    return _vmap(func, in_dims, out_dims)
|
202 |
+
|
203 |
+
|
204 |
+
# A version of vmap but without the initial "experimental prototype" warning
def _vmap(
    func: Callable,
    in_dims: in_dims_t = 0,
    out_dims: out_dims_t = 0,
    allow_none_pass_through: bool = False,
) -> Callable:
    """Return a function that maps ``func`` over the dims given by ``in_dims``.

    The returned wrapper enters a new vmap nesting level, batches its inputs,
    runs ``func`` once on the batched Tensors, and unwraps the outputs to the
    positions given by ``out_dims``.
    """
    # The `allow_none_pass_through` argument is a temporary workaround may be removed.
    # Currently it enables us to wrap the call in `autograd.grad` to the autograd engine,
    # which may return None if any of the inputs are unused. See the issue discussing this:
    # https://github.com/facebookresearch/functorch/issues/159.
    @functools.wraps(func)
    def wrapped(*args):
        _check_out_dims_is_int_or_int_tuple(out_dims, func)
        # Each call gets its own nesting level so nested vmaps batch independently.
        vmap_level = torch._C._vmapmode_increment_nesting()
        try:
            batched_inputs, batch_size = _create_batched_inputs(
                in_dims, args, vmap_level, func
            )
            batched_outputs = func(*batched_inputs)
            if not allow_none_pass_through:
                _validate_outputs(batched_outputs, func)
            return _unwrap_batched(
                batched_outputs,
                out_dims,
                vmap_level,
                batch_size,
                func,
                allow_none_pass_through=allow_none_pass_through,
            )
        finally:
            # Always pop the nesting level, even when func or validation raised.
            torch._C._vmapmode_decrement_nesting()

    return wrapped
|
env-llmeval/lib/python3.10/site-packages/torch/_weights_only_unpickler.py
ADDED
@@ -0,0 +1,304 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Unpickler restricted to loading only state dicts
|
2 |
+
# Restrict constructing types to a list defined in _get_allowed_globals()
|
3 |
+
# Restrict BUILD operation to `Tensor`, `Parameter` and `OrderedDict` types only
|
4 |
+
# Restrict APPEND/APPENDS to `list`
|
5 |
+
# In `GLOBALS` operation do not do class lookup by name, but rather rely on dictionary
|
6 |
+
# defined by `_get_allowed_globals()` method, that contains:
|
7 |
+
# - torch types (Storage, dtypes, Tensor, `torch.Size`),
|
8 |
+
# - `torch._utils._rebuild` functions.
|
9 |
+
# - `torch.nn.Parameter`
|
10 |
+
# - `collections.OrderedDict`
|
11 |
+
|
12 |
+
# Based of https://github.com/python/cpython/blob/main/Lib/pickle.py
|
13 |
+
# Expected to be useful for loading PyTorch model weights
|
14 |
+
# For example:
|
15 |
+
# data = urllib.request.urlopen('https://download.pytorch.org/models/resnet50-0676ba61.pth').read()
|
16 |
+
# buf = io.BytesIO(data)
|
17 |
+
# weights = torch.load(buf, weights_only = True)
|
18 |
+
|
19 |
+
import functools as _functools
|
20 |
+
from collections import OrderedDict
|
21 |
+
from pickle import (
|
22 |
+
APPEND,
|
23 |
+
APPENDS,
|
24 |
+
BINFLOAT,
|
25 |
+
BINGET,
|
26 |
+
BININT,
|
27 |
+
BININT1,
|
28 |
+
BININT2,
|
29 |
+
BINPERSID,
|
30 |
+
BINPUT,
|
31 |
+
BINUNICODE,
|
32 |
+
BUILD,
|
33 |
+
bytes_types,
|
34 |
+
decode_long,
|
35 |
+
EMPTY_DICT,
|
36 |
+
EMPTY_LIST,
|
37 |
+
EMPTY_SET,
|
38 |
+
EMPTY_TUPLE,
|
39 |
+
GLOBAL,
|
40 |
+
LONG1,
|
41 |
+
LONG_BINGET,
|
42 |
+
LONG_BINPUT,
|
43 |
+
MARK,
|
44 |
+
NEWFALSE,
|
45 |
+
NEWOBJ,
|
46 |
+
NEWTRUE,
|
47 |
+
NONE,
|
48 |
+
PROTO,
|
49 |
+
REDUCE,
|
50 |
+
SETITEM,
|
51 |
+
SETITEMS,
|
52 |
+
SHORT_BINSTRING,
|
53 |
+
STOP,
|
54 |
+
TUPLE,
|
55 |
+
TUPLE1,
|
56 |
+
TUPLE2,
|
57 |
+
TUPLE3,
|
58 |
+
UnpicklingError,
|
59 |
+
)
|
60 |
+
from struct import unpack
|
61 |
+
from sys import maxsize
|
62 |
+
from typing import Any, Dict, List
|
63 |
+
|
64 |
+
import torch
|
65 |
+
|
66 |
+
|
67 |
+
# Unpickling machinery
@_functools.lru_cache(maxsize=1)
def _get_allowed_globals():
    """Build (once, cached) the allowlist used by the restricted Unpickler.

    Maps "module.qualname" strings — exactly as they appear in GLOBAL pickle
    opcodes — to the objects the unpickler is willing to resolve. This is a
    security allowlist: anything not listed here is rejected. Keep it minimal.
    """
    rc: Dict[str, Any] = {
        "collections.OrderedDict": OrderedDict,
        "torch.nn.parameter.Parameter": torch.nn.Parameter,
        "torch.serialization._get_layout": torch.serialization._get_layout,
        "torch.Size": torch.Size,
        "torch.Tensor": torch.Tensor,
    }
    # dtype
    # str(t) yields e.g. "torch.float32", matching how dtypes are pickled.
    for t in [
        torch.complex32,
        torch.complex64,
        torch.complex128,
        torch.float8_e5m2,
        torch.float8_e4m3fn,
        torch.float16,
        torch.float32,
        torch.float64,
        torch.int8,
        torch.int16,
        torch.int32,
        torch.int64,
    ]:
        rc[str(t)] = t
    # Tensor classes
    for tt in torch._tensor_classes:
        rc[f"{tt.__module__}.{tt.__name__}"] = tt
    # Storage classes
    for ts in torch._storage_classes:
        if ts not in (torch.storage.TypedStorage, torch.storage.UntypedStorage):
            # Wrap legacy storage types in a dummy class
            rc[f"{ts.__module__}.{ts.__name__}"] = torch.serialization.StorageType(
                ts.__name__
            )
        else:
            rc[f"{ts.__module__}.{ts.__name__}"] = ts
    # Rebuild functions
    # These are the torch._utils helpers that REDUCE is allowed to invoke.
    for f in [
        torch._utils._rebuild_parameter,
        torch._utils._rebuild_tensor,
        torch._utils._rebuild_tensor_v2,
        torch._utils._rebuild_tensor_v3,
        torch._utils._rebuild_sparse_tensor,
        torch._utils._rebuild_meta_tensor_no_storage,
        torch._utils._rebuild_nested_tensor,
    ]:
        rc[f"torch._utils.{f.__name__}"] = f

    # Handles Tensor Subclasses, Tensor's with attributes.
    # NOTE: It calls into above rebuild functions for regular Tensor types.
    rc["torch._tensor._rebuild_from_type_v2"] = torch._tensor._rebuild_from_type_v2
    return rc
|
121 |
+
|
122 |
+
|
123 |
+
class Unpickler:
    """Restricted unpickler for loading PyTorch state dicts.

    Implements a hand-rolled subset of the pickle virtual machine: only the
    opcodes needed for basic containers, tensors, and storages are handled,
    GLOBAL lookups are restricted to :func:`_get_allowed_globals`, and BUILD /
    NEWOBJ / REDUCE are limited to a handful of known-safe types. Any other
    opcode raises ``RuntimeError``.
    """

    def __init__(self, file, *, encoding: str = "bytes"):
        # `encoding` only affects SHORT_BINSTRING payloads ("bytes" keeps raw bytes).
        self.encoding = encoding
        self.readline = file.readline
        self.read = file.read
        # Memo table for BINPUT/BINGET opcodes (pickle's object sharing).
        self.memo: Dict[int, Any] = {}

    def load(self):
        """Read a pickled object representation from the open file.

        Return the reconstituted object hierarchy specified in the file.
        """
        self.metastack = []
        self.stack: List[Any] = []
        self.append = self.stack.append
        read = self.read
        readline = self.readline
        while True:
            key = read(1)
            if not key:
                raise EOFError
            assert isinstance(key, bytes_types)
            # Risky operators
            if key[0] == GLOBAL[0]:
                module = readline()[:-1].decode("utf-8")
                name = readline()[:-1].decode("utf-8")
                full_path = f"{module}.{name}"
                # Never resolve arbitrary globals by name — allowlist only.
                if full_path in _get_allowed_globals():
                    self.append(_get_allowed_globals()[full_path])
                else:
                    raise RuntimeError(f"Unsupported class {full_path}")
            elif key[0] == NEWOBJ[0]:
                args = self.stack.pop()
                cls = self.stack.pop()
                # NEWOBJ may only construct nn.Parameter.
                if cls is not torch.nn.Parameter:
                    raise RuntimeError(f"Trying to instantiate unsupported class {cls}")
                self.append(torch.nn.Parameter(*args))
            elif key[0] == REDUCE[0]:
                args = self.stack.pop()
                func = self.stack[-1]
                # REDUCE may only call a callable from the allowlist.
                if func not in _get_allowed_globals().values():
                    raise RuntimeError(
                        f"Trying to call reduce for unrecognized function {func}"
                    )
                self.stack[-1] = func(*args)
            elif key[0] == BUILD[0]:
                state = self.stack.pop()
                inst = self.stack[-1]
                if type(inst) is torch.Tensor:
                    # Legacy unpickling
                    inst.set_(*state)
                elif type(inst) is torch.nn.Parameter:
                    inst.__setstate__(state)
                elif type(inst) is OrderedDict:
                    inst.__dict__.update(state)
                else:
                    raise RuntimeError(
                        f"Can only build Tensor, parameter or dict objects, but got {type(inst)}"
                    )
            # Stack manipulation
            elif key[0] == APPEND[0]:
                item = self.stack.pop()
                list_obj = self.stack[-1]
                if type(list_obj) is not list:
                    raise RuntimeError(
                        f"Can only append to lists, but got {type(list_obj)}"
                    )
                list_obj.append(item)
            elif key[0] == APPENDS[0]:
                items = self.pop_mark()
                list_obj = self.stack[-1]
                if type(list_obj) is not list:
                    raise RuntimeError(
                        f"Can only extend lists, but got {type(list_obj)}"
                    )
                list_obj.extend(items)
            elif key[0] == SETITEM[0]:
                (v, k) = (self.stack.pop(), self.stack.pop())
                self.stack[-1][k] = v
            elif key[0] == SETITEMS[0]:
                items = self.pop_mark()
                for i in range(0, len(items), 2):
                    self.stack[-1][items[i]] = items[i + 1]
            elif key[0] == MARK[0]:
                self.metastack.append(self.stack)
                self.stack = []
                self.append = self.stack.append
            elif key[0] == TUPLE[0]:
                items = self.pop_mark()
                self.append(tuple(items))
            elif key[0] == TUPLE1[0]:
                self.stack[-1] = (self.stack[-1],)
            elif key[0] == TUPLE2[0]:
                self.stack[-2:] = [(self.stack[-2], self.stack[-1])]
            elif key[0] == TUPLE3[0]:
                self.stack[-3:] = [(self.stack[-3], self.stack[-2], self.stack[-1])]
            # Basic types construction
            elif key[0] == NONE[0]:
                self.append(None)
            elif key[0] == NEWFALSE[0]:
                self.append(False)
            elif key[0] == NEWTRUE[0]:
                self.append(True)
            elif key[0] == EMPTY_TUPLE[0]:
                self.append(())
            elif key[0] == EMPTY_LIST[0]:
                self.append([])
            elif key[0] == EMPTY_DICT[0]:
                self.append({})
            elif key[0] == EMPTY_SET[0]:
                self.append(set())
            elif key[0] == BININT[0]:
                self.append(unpack("<i", read(4))[0])
            elif key[0] == BININT1[0]:
                self.append(self.read(1)[0])
            elif key[0] == BININT2[0]:
                self.append(unpack("<H", read(2))[0])
            elif key[0] == BINFLOAT[0]:
                self.append(unpack(">d", self.read(8))[0])
            elif key[0] == BINUNICODE[0]:
                strlen = unpack("<I", read(4))[0]
                if strlen > maxsize:
                    raise RuntimeError("String is too long")
                strval = str(read(strlen), "utf-8", "surrogatepass")
                self.append(strval)
            elif key[0] == SHORT_BINSTRING[0]:
                strlen = read(1)[0]
                strdata = read(strlen)
                if self.encoding != "bytes":
                    strdata = strdata.decode(self.encoding, "strict")
                self.append(strdata)
            elif key[0] == BINPERSID[0]:
                pid = self.stack.pop()
                # Only allow persistent load of storage
                # BUGFIX: the check used to read
                # ``and not type(pid) is not int`` (a double negation, i.e.
                # "pid IS an int"), which meant non-tuple/non-int pids were
                # never rejected here and fell through to persistent_load.
                if type(pid) is not tuple and type(pid) is not int:
                    raise RuntimeError(
                        f"persistent_load id must be tuple or int, but got {type(pid)}"
                    )
                if (
                    type(pid) is tuple
                    and len(pid) > 0
                    and torch.serialization._maybe_decode_ascii(pid[0]) != "storage"
                ):
                    raise RuntimeError(
                        f"Only persistent_load of storage is allowed, but got {pid[0]}"
                    )
                self.append(self.persistent_load(pid))
            elif key[0] in [BINGET[0], LONG_BINGET[0]]:
                idx = (read(1) if key[0] == BINGET[0] else unpack("<I", read(4)))[0]
                self.append(self.memo[idx])
            elif key[0] in [BINPUT[0], LONG_BINPUT[0]]:
                i = (read(1) if key[0] == BINPUT[0] else unpack("<I", read(4)))[0]
                if i < 0:
                    raise ValueError("negative argument")
                self.memo[i] = self.stack[-1]
            elif key[0] == LONG1[0]:
                n = read(1)[0]
                data = read(n)
                self.append(decode_long(data))
            # First and last deserializer ops
            elif key[0] == PROTO[0]:
                # Read and ignore proto version
                read(1)[0]
            elif key[0] == STOP[0]:
                rc = self.stack.pop()
                return rc
            else:
                raise RuntimeError(f"Unsupported operand {key[0]}")

    # Return a list of items pushed in the stack after last MARK instruction.
    def pop_mark(self):
        items = self.stack
        self.stack = self.metastack.pop()
        self.append = self.stack.append
        return items

    def persistent_load(self, pid):
        # Base class refuses persistent ids; torch.load overrides this.
        raise UnpicklingError("unsupported persistent id encountered")
|
301 |
+
|
302 |
+
|
303 |
+
def load(file, *, encoding: str = "ASCII"):
    """Module-level convenience wrapper: unpickle ``file`` with the restricted Unpickler."""
    unpickler = Unpickler(file, encoding=encoding)
    return unpickler.load()
|
env-llmeval/lib/python3.10/site-packages/torch/backends/__init__.py
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import types
|
2 |
+
from contextlib import contextmanager
|
3 |
+
|
4 |
+
# The idea for this parameter is that we forbid bare assignment
# to torch.backends.<cudnn|mkldnn>.enabled and friends when running our
# test suite, where it's very easy to forget to undo the change
# later.
#
# True means direct mutation is allowed; disable_global_flags() flips it off
# permanently and __allow_nonbracketed_mutation() re-enables it temporarily.
__allow_nonbracketed_mutation_flag = True
|
9 |
+
|
10 |
+
|
11 |
+
def disable_global_flags():
    """Permanently forbid bare assignment to backend flags.

    After this call ``flags_frozen()`` returns True and ``ContextProp.__set__``
    raises on direct assignment; the ``flags()`` context managers (which go
    through ``__allow_nonbracketed_mutation``) keep working.
    """
    global __allow_nonbracketed_mutation_flag
    __allow_nonbracketed_mutation_flag = False
|
14 |
+
|
15 |
+
|
16 |
+
def flags_frozen():
    """Return True once ``disable_global_flags()`` has been called."""
    frozen = not __allow_nonbracketed_mutation_flag
    return frozen
|
18 |
+
|
19 |
+
|
20 |
+
@contextmanager
def __allow_nonbracketed_mutation():
    """Temporarily re-enable direct flag mutation (used by the flags() helpers)."""
    global __allow_nonbracketed_mutation_flag
    old = __allow_nonbracketed_mutation_flag
    __allow_nonbracketed_mutation_flag = True
    try:
        yield
    finally:
        # Restore the previous state even if the body raised.
        __allow_nonbracketed_mutation_flag = old
|
29 |
+
|
30 |
+
|
31 |
+
class ContextProp:
    """Descriptor exposing a backend flag through getter/setter callables.

    Reads always delegate to ``getter``; writes delegate to ``setter`` unless
    the global flags have been frozen, in which case assignment raises.
    """

    def __init__(self, getter, setter):
        self.getter = getter
        self.setter = setter

    def __get__(self, obj, objtype):
        return self.getter()

    def __set__(self, obj, val):
        if flags_frozen():
            raise RuntimeError(
                "not allowed to set %s flags "
                "after disable_global_flags; please use flags() context manager instead"
                % obj.__name__
            )
        self.setter(val)
|
48 |
+
|
49 |
+
|
50 |
+
class PropModule(types.ModuleType):
    """Module wrapper that forwards unknown attribute lookups to a wrapped module.

    Subclasses replace a real module in ``sys.modules`` so that class-level
    ``ContextProp`` descriptors intercept flag access, while every other
    attribute still resolves on the original module object ``m``.
    """

    def __init__(self, m, name):
        super().__init__(name)
        self.m = m

    def __getattr__(self, attr):
        # Only reached when normal lookup on the wrapper itself fails.
        return self.m.__getattribute__(attr)
|
57 |
+
|
58 |
+
|
59 |
+
from torch.backends import (
|
60 |
+
cpu as cpu,
|
61 |
+
cuda as cuda,
|
62 |
+
cudnn as cudnn,
|
63 |
+
mkl as mkl,
|
64 |
+
mkldnn as mkldnn,
|
65 |
+
mps as mps,
|
66 |
+
openmp as openmp,
|
67 |
+
quantized as quantized,
|
68 |
+
)
|
env-llmeval/lib/python3.10/site-packages/torch/backends/mkldnn/__init__.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
|
2 |
+
from contextlib import contextmanager
|
3 |
+
|
4 |
+
import torch
|
5 |
+
from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule
|
6 |
+
|
7 |
+
|
8 |
+
def is_available():
|
9 |
+
r"""Return whether PyTorch is built with MKL-DNN support."""
|
10 |
+
return torch._C._has_mkldnn
|
11 |
+
|
12 |
+
|
13 |
+
VERBOSE_OFF = 0
|
14 |
+
VERBOSE_ON = 1
|
15 |
+
VERBOSE_ON_CREATION = 2
|
16 |
+
|
17 |
+
|
18 |
+
class verbose:
    """
    On-demand oneDNN (former MKL-DNN) verbosing functionality.

    To make it easier to debug performance issues, oneDNN can dump verbose
    messages containing information like kernel size, input data size and
    execution duration while executing the kernel. The verbosing functionality
    can be invoked via an environment variable named `DNNL_VERBOSE`. However,
    this methodology dumps messages in all steps. Those are a large amount of
    verbose messages. Moreover, for investigating the performance issues,
    generally taking verbose messages for one single iteration is enough.
    This on-demand verbosing functionality makes it possible to control scope
    for verbose message dumping. In the following example, verbose messages
    will be dumped out for the second inference only.

    .. highlight:: python
    .. code-block:: python

        import torch
        model(data)
        with torch.backends.mkldnn.verbose(torch.backends.mkldnn.VERBOSE_ON):
            model(data)

    Args:
        level: Verbose level
            - ``VERBOSE_OFF``: Disable verbosing
            - ``VERBOSE_ON``:  Enable verbosing
            - ``VERBOSE_ON_CREATION``: Enable verbosing, including oneDNN kernel creation
    """

    def __init__(self, level):
        # Requested verbosity; applied on __enter__, reset on __exit__.
        self.level = level

    def __enter__(self):
        if self.level == VERBOSE_OFF:
            # Nothing to do; note this early-return yields None as the
            # context value instead of self.
            return
        st = torch._C._verbose.mkldnn_set_verbose(self.level)
        assert (
            st
        ), "Failed to set MKLDNN into verbose mode. Please consider to disable this verbose scope."
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always turn verbosity back off; returning False propagates exceptions.
        torch._C._verbose.mkldnn_set_verbose(VERBOSE_OFF)
        return False
|
63 |
+
|
64 |
+
|
65 |
+
def set_flags(_enabled):
    """Set the global mkldnn-enabled flag; return a 1-tuple holding the previous value."""
    previous = (torch._C._get_mkldnn_enabled(),)
    torch._C._set_mkldnn_enabled(_enabled)
    return previous
|
69 |
+
|
70 |
+
|
71 |
+
@contextmanager
def flags(enabled=False):
    """Context manager: temporarily set the mkldnn-enabled flag, restoring it on exit."""
    with __allow_nonbracketed_mutation():
        orig_flags = set_flags(enabled)
    try:
        yield
    finally:
        # Restore even if the body raised; mutation must again be bracketed.
        with __allow_nonbracketed_mutation():
            set_flags(orig_flags[0])
|
80 |
+
|
81 |
+
|
82 |
+
class MkldnnModule(PropModule):
    """Module stand-in for ``torch.backends.mkldnn`` exposing ``enabled`` as a property."""

    def __init__(self, m, name):
        super().__init__(m, name)

    # Class-level descriptor: reads/writes route through the C++ global flag,
    # and writes are blocked once disable_global_flags() has been called.
    enabled = ContextProp(torch._C._get_mkldnn_enabled, torch._C._set_mkldnn_enabled)
|
87 |
+
|
88 |
+
|
89 |
+
# Cool stuff from torch/backends/cudnn/__init__.py and
# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
# Replace this module object in sys.modules so that attribute access such as
# ``torch.backends.mkldnn.enabled`` goes through MkldnnModule's descriptors.
sys.modules[__name__] = MkldnnModule(sys.modules[__name__], __name__)
|
env-llmeval/lib/python3.10/site-packages/torch/backends/mkldnn/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.61 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/backends/mps/__init__.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from functools import lru_cache as _lru_cache
|
2 |
+
|
3 |
+
import torch
|
4 |
+
from ...library import Library as _Library
|
5 |
+
|
6 |
+
__all__ = ["is_built", "is_available", "is_macos13_or_newer"]
|
7 |
+
|
8 |
+
|
9 |
+
def is_built() -> bool:
|
10 |
+
r"""Return whether PyTorch is built with MPS support.
|
11 |
+
|
12 |
+
Note that this doesn't necessarily mean MPS is available; just that
|
13 |
+
if this PyTorch binary were run a machine with working MPS drivers
|
14 |
+
and devices, we would be able to use it.
|
15 |
+
"""
|
16 |
+
return torch._C._has_mps
|
17 |
+
|
18 |
+
|
19 |
+
@_lru_cache
def is_available() -> bool:
    r"""Return a bool indicating if MPS is currently available."""
    available = torch._C._mps_is_available()
    return available
|
23 |
+
|
24 |
+
|
25 |
+
@_lru_cache
|
26 |
+
def is_macos13_or_newer(minor: int = 0) -> bool:
|
27 |
+
r"""Return a bool indicating whether MPS is running on MacOS 13 or newer."""
|
28 |
+
return torch._C._mps_is_on_macos_13_or_newer(minor)
|
29 |
+
|
30 |
+
|
31 |
+
# Lazily-created aten library handle; set exactly once by _init() below.
_lib = None
|
32 |
+
|
33 |
+
|
34 |
+
def _init():
    r"""Register prims as implementation of var_mean and group_norm."""
    global _lib
    # Idempotent: skip on non-MPS builds or when registration already ran.
    if is_built() is False or _lib is not None:
        return
    # Imports are local to avoid paying for them (and any import cycles) on
    # builds that never call _init.
    from ..._decomp.decompositions import (
        native_group_norm_backward as _native_group_norm_backward,
    )
    from ..._refs import native_group_norm as _native_group_norm, var_mean as _var_mean

    _lib = _Library("aten", "IMPL")
    _lib.impl("var_mean.correction", _var_mean, "MPS")
    _lib.impl("native_group_norm", _native_group_norm, "MPS")
    _lib.impl("native_group_norm_backward", _native_group_norm_backward, "MPS")
|
env-llmeval/lib/python3.10/site-packages/torch/backends/mps/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (1.73 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/backends/openmp/__init__.py
ADDED
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import torch
|
2 |
+
|
3 |
+
|
4 |
+
def is_available():
    r"""Return whether PyTorch is built with OpenMP support."""
    has_openmp = torch._C.has_openmp
    return has_openmp
|
env-llmeval/lib/python3.10/site-packages/torch/backends/openmp/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (385 Bytes). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/backends/opt_einsum/__init__.py
ADDED
@@ -0,0 +1,110 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
|
2 |
+
import warnings
|
3 |
+
from contextlib import contextmanager
|
4 |
+
from functools import lru_cache as _lru_cache
|
5 |
+
from typing import Any
|
6 |
+
|
7 |
+
from torch.backends import __allow_nonbracketed_mutation, ContextProp, PropModule
|
8 |
+
|
9 |
+
try:
|
10 |
+
import opt_einsum as _opt_einsum # type: ignore[import]
|
11 |
+
except ImportError:
|
12 |
+
_opt_einsum = None
|
13 |
+
|
14 |
+
|
15 |
+
@_lru_cache
def is_available() -> bool:
    r"""Return a bool indicating if opt_einsum is currently available."""
    available = _opt_einsum is not None
    return available
|
19 |
+
|
20 |
+
|
21 |
+
def get_opt_einsum() -> Any:
    r"""Return the opt_einsum package if opt_einsum is currently available, else None."""
    # _opt_einsum is None when the optional dependency failed to import.
    return _opt_einsum
|
24 |
+
|
25 |
+
|
26 |
+
def _set_enabled(_enabled: bool) -> None:
    """Module-level setter backing ``torch.backends.opt_einsum.enabled``.

    Refuses to enable the feature when the opt_einsum package is not installed.
    """
    global enabled
    if _enabled and not is_available():
        raise ValueError(
            f"opt_einsum is not available, so setting `enabled` to {_enabled} will not reap "
            "the benefits of calculating an optimal path for einsum. torch.einsum will "
            "fall back to contracting from left to right. To enable this optimal path "
            "calculation, please install opt-einsum."
        )
    enabled = _enabled
|
36 |
+
|
37 |
+
|
38 |
+
def _get_enabled() -> bool:
    """Module-level getter backing ``torch.backends.opt_einsum.enabled``."""
    current = enabled
    return current
|
40 |
+
|
41 |
+
|
42 |
+
def _set_strategy(_strategy: str) -> None:
    """Setter backing the module-level ``strategy``.

    Raises:
        ValueError: if opt_einsum is unavailable, if ``enabled`` is falsy,
            or if the requested strategy is not one of auto/greedy/optimal.
    """
    # A strategy is meaningless without the package installed...
    if not is_available():
        raise ValueError(
            f"opt_einsum is not available, so setting `strategy` to {_strategy} will not be meaningful. "
            "torch.einsum will bypass path calculation and simply contract from left to right. "
            "Please install opt_einsum or unset `strategy`."
        )
    # ...and equally meaningless while the feature is switched off.
    if not enabled:
        raise ValueError(
            f"opt_einsum is not enabled, so setting a `strategy` to {_strategy} will not be meaningful. "
            "torch.einsum will bypass path calculation and simply contract from left to right. "
            "Please set `enabled` to `True` as well or unset `strategy`."
        )
    if _strategy not in ("auto", "greedy", "optimal"):
        raise ValueError(
            f"`strategy` must be one of the following: [auto, greedy, optimal] but is {_strategy}"
        )
    global strategy
    strategy = _strategy
|
61 |
+
|
62 |
+
|
63 |
+
def _get_strategy() -> str:
    """Getter backing the module-level ``strategy``."""
    return strategy
|
65 |
+
|
66 |
+
|
67 |
+
def set_flags(_enabled=None, _strategy=None):
    """Update ``enabled``/``strategy`` (None means leave untouched) and
    return the previous ``(enabled, strategy)`` pair for later restoration."""
    previous = (enabled, strategy if is_available() else None)
    if _enabled is not None:
        _set_enabled(_enabled)
    if _strategy is not None:
        _set_strategy(_strategy)
    return previous
|
74 |
+
|
75 |
+
|
76 |
+
@contextmanager
def flags(enabled=None, strategy=None):
    """Context manager that temporarily overrides the opt_einsum flags,
    restoring the previous values on exit (even on exception)."""
    with __allow_nonbracketed_mutation():
        saved = set_flags(enabled, strategy)
    try:
        yield
    finally:
        # recover the previous values
        with __allow_nonbracketed_mutation():
            set_flags(*saved)
|
86 |
+
|
87 |
+
|
88 |
+
# The magic here is to allow us to intercept code like this:
|
89 |
+
#
|
90 |
+
# torch.backends.opt_einsum.enabled = True
|
91 |
+
|
92 |
+
|
93 |
+
class OptEinsumModule(PropModule):
    """Module stand-in that lets assignments like
    ``torch.backends.opt_einsum.enabled = True`` be intercepted once this
    class replaces the real module in sys.modules."""

    def __init__(self, m, name):
        super().__init__(m, name)

    # NOTE(review): these `global` statements run in the class body, so the
    # assignments below target *module* globals at class-creation time rather
    # than becoming class attributes; the module-level defaults assigned after
    # the sys.modules swap later rebind the same names — confirm this ordering
    # is the intended upstream behavior.
    global enabled
    enabled = ContextProp(_get_enabled, _set_enabled)
    global strategy
    strategy = None
    # strategy only gets a property when opt_einsum is importable.
    if is_available():
        strategy = ContextProp(_get_strategy, _set_strategy)
|
103 |
+
|
104 |
+
|
105 |
+
# Replace this module object with an OptEinsumModule instance so attribute
# writes can be intercepted. This is the sys.modules replacement trick, see
# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
sys.modules[__name__] = OptEinsumModule(sys.modules[__name__], __name__)

# Defaults: use opt_einsum automatically whenever it is installed.
enabled = is_available()
strategy = "auto" if is_available() else None
|
env-llmeval/lib/python3.10/site-packages/torch/backends/opt_einsum/__pycache__/__init__.cpython-310.pyc
ADDED
Binary file (3.46 kB). View file
|
|
env-llmeval/lib/python3.10/site-packages/torch/backends/xnnpack/__init__.py
ADDED
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
|
2 |
+
import types
|
3 |
+
|
4 |
+
import torch
|
5 |
+
|
6 |
+
|
7 |
+
class _XNNPACKEnabled:
|
8 |
+
def __get__(self, obj, objtype):
|
9 |
+
return torch._C._is_xnnpack_enabled()
|
10 |
+
|
11 |
+
def __set__(self, obj, val):
|
12 |
+
raise RuntimeError("Assignment not supported")
|
13 |
+
|
14 |
+
|
15 |
+
class XNNPACKEngine(types.ModuleType):
    """Module stand-in installed into sys.modules so that
    ``torch.backends.xnnpack.enabled`` is computed on access via the
    class-level descriptor below (and assignment to it raises)."""

    def __init__(self, m, name):
        # Keep a handle to the real module so lookups can be delegated to it.
        super().__init__(name)
        self.m = m

    def __getattr__(self, attr):
        # Fall back to the wrapped original module for any name not found
        # on this replacement object.
        return self.m.__getattribute__(attr)

    # Class attribute so the descriptor protocol fires on instance access.
    enabled = _XNNPACKEnabled()
|
24 |
+
|
25 |
+
|
26 |
+
# This is the sys.modules replacement trick, see
# https://stackoverflow.com/questions/2447353/getattr-on-a-module/7668273#7668273
# Swapping in an XNNPACKEngine instance makes the `enabled` descriptor
# intercept attribute access on this module.
sys.modules[__name__] = XNNPACKEngine(sys.modules[__name__], __name__)
|